1 /*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to deal
5 * in the Software without restriction, including without limitation the rights
6 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 * copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 * THE SOFTWARE.
20 *
21 *===-----------------------------------------------------------------------===
22 */
23
24 #ifndef __ARM_NEON_H
25 #define __ARM_NEON_H
26
27 #if !defined(__ARM_NEON)
28 #error "NEON support not enabled"
29 #endif
30
31 #include <stdint.h>
32
33 #pragma clang diagnostic push
34 #pragma clang diagnostic ignored "-Wfloat-equal"
35 #pragma clang diagnostic ignored "-Wvector-conversion"
36
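/*
 * Scalar element types used by the vector typedefs below. float16_t maps to
 * Clang's __fp16 half-precision type; float64_t and the 64/128-bit polynomial
 * types exist only when targeting AArch64. Note that poly8_t/poly16_t are
 * unsigned on AArch64 but signed on 32-bit ARM, as defined below.
 */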
37 typedef float float32_t;
38 typedef __fp16 float16_t;
39 #ifdef __aarch64__
40 typedef double float64_t;
41 #endif
42
43 #ifdef __aarch64__
44 typedef uint8_t poly8_t;
45 typedef uint16_t poly16_t;
46 typedef uint64_t poly64_t;
47 typedef __uint128_t poly128_t;
48 #else
49 typedef int8_t poly8_t;
50 typedef int16_t poly16_t;
51 #endif
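/*
 * NEON vector types. Each typedef names an element type and a lane count; the
 * total width is either 64 bits (one D register, e.g. int8x8_t) or 128 bits
 * (one Q register, e.g. int8x16_t). neon_vector_type/neon_polyvector_type are
 * Clang-specific attributes that make these genuine vector types, so the
 * element-wise operators and __builtin_shufflevector used below apply to them.
 */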
52 typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
53 typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
54 typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
55 typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
56 typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
57 typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
58 typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
59 typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
60 typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
61 typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
62 typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
63 typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
64 typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
65 typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
66 typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
67 typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
68 typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
69 typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
70 typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
71 typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
72 #ifdef __aarch64__
73 typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
74 typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
75 #endif
76 typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
77 typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
78 typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
79 typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
80 #ifdef __aarch64__
81 typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
82 typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
83 #endif
84
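/*
 * Aggregates of 2, 3, or 4 vectors of the same type. These are the argument
 * and return types of the multi-vector load/store and de/interleave intrinsics
 * (vld2/vld3/vld4, vst2/vst3/vst4, vzip, vuzp, vtrn, ...); the individual
 * vectors are accessed through the val[] array.
 */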
85 typedef struct int8x8x2_t {
86 int8x8_t val[2];
87 } int8x8x2_t;
88
89 typedef struct int8x16x2_t {
90 int8x16_t val[2];
91 } int8x16x2_t;
92
93 typedef struct int16x4x2_t {
94 int16x4_t val[2];
95 } int16x4x2_t;
96
97 typedef struct int16x8x2_t {
98 int16x8_t val[2];
99 } int16x8x2_t;
100
101 typedef struct int32x2x2_t {
102 int32x2_t val[2];
103 } int32x2x2_t;
104
105 typedef struct int32x4x2_t {
106 int32x4_t val[2];
107 } int32x4x2_t;
108
109 typedef struct int64x1x2_t {
110 int64x1_t val[2];
111 } int64x1x2_t;
112
113 typedef struct int64x2x2_t {
114 int64x2_t val[2];
115 } int64x2x2_t;
116
117 typedef struct uint8x8x2_t {
118 uint8x8_t val[2];
119 } uint8x8x2_t;
120
121 typedef struct uint8x16x2_t {
122 uint8x16_t val[2];
123 } uint8x16x2_t;
124
125 typedef struct uint16x4x2_t {
126 uint16x4_t val[2];
127 } uint16x4x2_t;
128
129 typedef struct uint16x8x2_t {
130 uint16x8_t val[2];
131 } uint16x8x2_t;
132
133 typedef struct uint32x2x2_t {
134 uint32x2_t val[2];
135 } uint32x2x2_t;
136
137 typedef struct uint32x4x2_t {
138 uint32x4_t val[2];
139 } uint32x4x2_t;
140
141 typedef struct uint64x1x2_t {
142 uint64x1_t val[2];
143 } uint64x1x2_t;
144
145 typedef struct uint64x2x2_t {
146 uint64x2_t val[2];
147 } uint64x2x2_t;
148
149 typedef struct float16x4x2_t {
150 float16x4_t val[2];
151 } float16x4x2_t;
152
153 typedef struct float16x8x2_t {
154 float16x8_t val[2];
155 } float16x8x2_t;
156
157 typedef struct float32x2x2_t {
158 float32x2_t val[2];
159 } float32x2x2_t;
160
161 typedef struct float32x4x2_t {
162 float32x4_t val[2];
163 } float32x4x2_t;
164
165 #ifdef __aarch64__
166 typedef struct float64x1x2_t {
167 float64x1_t val[2];
168 } float64x1x2_t;
169
170 typedef struct float64x2x2_t {
171 float64x2_t val[2];
172 } float64x2x2_t;
173
174 #endif
175 typedef struct poly8x8x2_t {
176 poly8x8_t val[2];
177 } poly8x8x2_t;
178
179 typedef struct poly8x16x2_t {
180 poly8x16_t val[2];
181 } poly8x16x2_t;
182
183 typedef struct poly16x4x2_t {
184 poly16x4_t val[2];
185 } poly16x4x2_t;
186
187 typedef struct poly16x8x2_t {
188 poly16x8_t val[2];
189 } poly16x8x2_t;
190
191 #ifdef __aarch64__
192 typedef struct poly64x1x2_t {
193 poly64x1_t val[2];
194 } poly64x1x2_t;
195
196 typedef struct poly64x2x2_t {
197 poly64x2_t val[2];
198 } poly64x2x2_t;
199
200 #endif
201 typedef struct int8x8x3_t {
202 int8x8_t val[3];
203 } int8x8x3_t;
204
205 typedef struct int8x16x3_t {
206 int8x16_t val[3];
207 } int8x16x3_t;
208
209 typedef struct int16x4x3_t {
210 int16x4_t val[3];
211 } int16x4x3_t;
212
213 typedef struct int16x8x3_t {
214 int16x8_t val[3];
215 } int16x8x3_t;
216
217 typedef struct int32x2x3_t {
218 int32x2_t val[3];
219 } int32x2x3_t;
220
221 typedef struct int32x4x3_t {
222 int32x4_t val[3];
223 } int32x4x3_t;
224
225 typedef struct int64x1x3_t {
226 int64x1_t val[3];
227 } int64x1x3_t;
228
229 typedef struct int64x2x3_t {
230 int64x2_t val[3];
231 } int64x2x3_t;
232
233 typedef struct uint8x8x3_t {
234 uint8x8_t val[3];
235 } uint8x8x3_t;
236
237 typedef struct uint8x16x3_t {
238 uint8x16_t val[3];
239 } uint8x16x3_t;
240
241 typedef struct uint16x4x3_t {
242 uint16x4_t val[3];
243 } uint16x4x3_t;
244
245 typedef struct uint16x8x3_t {
246 uint16x8_t val[3];
247 } uint16x8x3_t;
248
249 typedef struct uint32x2x3_t {
250 uint32x2_t val[3];
251 } uint32x2x3_t;
252
253 typedef struct uint32x4x3_t {
254 uint32x4_t val[3];
255 } uint32x4x3_t;
256
257 typedef struct uint64x1x3_t {
258 uint64x1_t val[3];
259 } uint64x1x3_t;
260
261 typedef struct uint64x2x3_t {
262 uint64x2_t val[3];
263 } uint64x2x3_t;
264
265 typedef struct float16x4x3_t {
266 float16x4_t val[3];
267 } float16x4x3_t;
268
269 typedef struct float16x8x3_t {
270 float16x8_t val[3];
271 } float16x8x3_t;
272
273 typedef struct float32x2x3_t {
274 float32x2_t val[3];
275 } float32x2x3_t;
276
277 typedef struct float32x4x3_t {
278 float32x4_t val[3];
279 } float32x4x3_t;
280
281 #ifdef __aarch64__
282 typedef struct float64x1x3_t {
283 float64x1_t val[3];
284 } float64x1x3_t;
285
286 typedef struct float64x2x3_t {
287 float64x2_t val[3];
288 } float64x2x3_t;
289
290 #endif
291 typedef struct poly8x8x3_t {
292 poly8x8_t val[3];
293 } poly8x8x3_t;
294
295 typedef struct poly8x16x3_t {
296 poly8x16_t val[3];
297 } poly8x16x3_t;
298
299 typedef struct poly16x4x3_t {
300 poly16x4_t val[3];
301 } poly16x4x3_t;
302
303 typedef struct poly16x8x3_t {
304 poly16x8_t val[3];
305 } poly16x8x3_t;
306
307 #ifdef __aarch64__
308 typedef struct poly64x1x3_t {
309 poly64x1_t val[3];
310 } poly64x1x3_t;
311
312 typedef struct poly64x2x3_t {
313 poly64x2_t val[3];
314 } poly64x2x3_t;
315
316 #endif
317 typedef struct int8x8x4_t {
318 int8x8_t val[4];
319 } int8x8x4_t;
320
321 typedef struct int8x16x4_t {
322 int8x16_t val[4];
323 } int8x16x4_t;
324
325 typedef struct int16x4x4_t {
326 int16x4_t val[4];
327 } int16x4x4_t;
328
329 typedef struct int16x8x4_t {
330 int16x8_t val[4];
331 } int16x8x4_t;
332
333 typedef struct int32x2x4_t {
334 int32x2_t val[4];
335 } int32x2x4_t;
336
337 typedef struct int32x4x4_t {
338 int32x4_t val[4];
339 } int32x4x4_t;
340
341 typedef struct int64x1x4_t {
342 int64x1_t val[4];
343 } int64x1x4_t;
344
345 typedef struct int64x2x4_t {
346 int64x2_t val[4];
347 } int64x2x4_t;
348
349 typedef struct uint8x8x4_t {
350 uint8x8_t val[4];
351 } uint8x8x4_t;
352
353 typedef struct uint8x16x4_t {
354 uint8x16_t val[4];
355 } uint8x16x4_t;
356
357 typedef struct uint16x4x4_t {
358 uint16x4_t val[4];
359 } uint16x4x4_t;
360
361 typedef struct uint16x8x4_t {
362 uint16x8_t val[4];
363 } uint16x8x4_t;
364
365 typedef struct uint32x2x4_t {
366 uint32x2_t val[4];
367 } uint32x2x4_t;
368
369 typedef struct uint32x4x4_t {
370 uint32x4_t val[4];
371 } uint32x4x4_t;
372
373 typedef struct uint64x1x4_t {
374 uint64x1_t val[4];
375 } uint64x1x4_t;
376
377 typedef struct uint64x2x4_t {
378 uint64x2_t val[4];
379 } uint64x2x4_t;
380
381 typedef struct float16x4x4_t {
382 float16x4_t val[4];
383 } float16x4x4_t;
384
385 typedef struct float16x8x4_t {
386 float16x8_t val[4];
387 } float16x8x4_t;
388
389 typedef struct float32x2x4_t {
390 float32x2_t val[4];
391 } float32x2x4_t;
392
393 typedef struct float32x4x4_t {
394 float32x4_t val[4];
395 } float32x4x4_t;
396
397 #ifdef __aarch64__
398 typedef struct float64x1x4_t {
399 float64x1_t val[4];
400 } float64x1x4_t;
401
402 typedef struct float64x2x4_t {
403 float64x2_t val[4];
404 } float64x2x4_t;
405
406 #endif
407 typedef struct poly8x8x4_t {
408 poly8x8_t val[4];
409 } poly8x8x4_t;
410
411 typedef struct poly8x16x4_t {
412 poly8x16_t val[4];
413 } poly8x16x4_t;
414
415 typedef struct poly16x4x4_t {
416 poly16x4_t val[4];
417 } poly16x4x4_t;
418
419 typedef struct poly16x8x4_t {
420 poly16x8_t val[4];
421 } poly16x8x4_t;
422
423 #ifdef __aarch64__
424 typedef struct poly64x1x4_t {
425 poly64x1_t val[4];
426 } poly64x1x4_t;
427
428 typedef struct poly64x2x4_t {
429 poly64x2_t val[4];
430 } poly64x2x4_t;
431
432 #endif
433
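/*
 * Every intrinsic below is declared with __ai: a static, always-inline,
 * no-debug-info function, so each call is expanded in place and lowers to the
 * underlying NEON instruction(s) without call overhead or an out-of-line
 * symbol.
 */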
434 #define __ai static inline __attribute__((__always_inline__, __nodebug__))
435
436 #ifdef __LITTLE_ENDIAN__
437 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
438 uint8x16_t __ret;
439 __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
440 return __ret;
441 }
442 #else
443 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
444 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
445 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
446 uint8x16_t __ret;
447 __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
448 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
449 return __ret;
450 }
451 __ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
452 uint8x16_t __ret;
453 __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
454 return __ret;
455 }
456 #endif
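/*
 * Pattern used throughout this header: on little-endian targets an intrinsic
 * calls the generic __builtin_neon_* builtin directly. On big-endian targets
 * the operands are first lane-reversed with __builtin_shufflevector, the
 * builtin (which expects little-endian lane numbering) is called, and the
 * result is reversed back. The __noswap_* helpers skip that reversal and are
 * used internally by other intrinsics whose operands are already in builtin
 * lane order. The trailing integer constant (48, 50, ... above) selects the
 * operand type for the overloaded builtin.
 */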
457
458 #ifdef __LITTLE_ENDIAN__
459 __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
460 uint32x4_t __ret;
461 __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
462 return __ret;
463 }
464 #else
465 __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
466 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
467 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
468 uint32x4_t __ret;
469 __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
470 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
471 return __ret;
472 }
473 __ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
474 uint32x4_t __ret;
475 __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
476 return __ret;
477 }
478 #endif
479
480 #ifdef __LITTLE_ENDIAN__
481 __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
482 uint16x8_t __ret;
483 __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
484 return __ret;
485 }
486 #else
487 __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
488 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
489 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
490 uint16x8_t __ret;
491 __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
492 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
493 return __ret;
494 }
495 __ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
496 uint16x8_t __ret;
497 __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
498 return __ret;
499 }
500 #endif
501
502 #ifdef __LITTLE_ENDIAN__
503 __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
504 int8x16_t __ret;
505 __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
506 return __ret;
507 }
508 #else
509 __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
510 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
511 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
512 int8x16_t __ret;
513 __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
514 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
515 return __ret;
516 }
517 __ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
518 int8x16_t __ret;
519 __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
520 return __ret;
521 }
522 #endif
523
524 #ifdef __LITTLE_ENDIAN__
525 __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
526 float32x4_t __ret;
527 __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
528 return __ret;
529 }
530 #else
531 __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
532 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
533 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
534 float32x4_t __ret;
535 __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
536 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
537 return __ret;
538 }
539 #endif
540
541 #ifdef __LITTLE_ENDIAN__
542 __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
543 int32x4_t __ret;
544 __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
545 return __ret;
546 }
547 #else
548 __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
549 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
550 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
551 int32x4_t __ret;
552 __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
553 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
554 return __ret;
555 }
556 __ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
557 int32x4_t __ret;
558 __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
559 return __ret;
560 }
561 #endif
562
563 #ifdef __LITTLE_ENDIAN__
564 __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
565 int16x8_t __ret;
566 __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
567 return __ret;
568 }
569 #else
570 __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
571 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
572 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
573 int16x8_t __ret;
574 __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
575 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
576 return __ret;
577 }
578 __ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
579 int16x8_t __ret;
580 __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
581 return __ret;
582 }
583 #endif
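/*
 * Illustrative use of the vabdq_* intrinsics above (not part of the original
 * header): vabdq_u8 computes the lane-wise absolute difference |a - b|.
 * vdupq_n_u8, defined elsewhere in arm_neon.h, broadcasts a scalar into every
 * lane.
 *
 *   uint8x16_t a = vdupq_n_u8(10);
 *   uint8x16_t b = vdupq_n_u8(3);
 *   uint8x16_t d = vabdq_u8(a, b);   // every lane now holds |10 - 3| = 7
 */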
584
585 #ifdef __LITTLE_ENDIAN__
586 __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
587 uint8x8_t __ret;
588 __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
589 return __ret;
590 }
591 #else
592 __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
593 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
594 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
595 uint8x8_t __ret;
596 __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
597 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
598 return __ret;
599 }
600 __ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
601 uint8x8_t __ret;
602 __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
603 return __ret;
604 }
605 #endif
606
607 #ifdef __LITTLE_ENDIAN__
608 __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
609 uint32x2_t __ret;
610 __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
611 return __ret;
612 }
613 #else
614 __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
615 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
616 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
617 uint32x2_t __ret;
618 __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
619 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
620 return __ret;
621 }
622 __ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
623 uint32x2_t __ret;
624 __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
625 return __ret;
626 }
627 #endif
628
629 #ifdef __LITTLE_ENDIAN__
630 __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
631 uint16x4_t __ret;
632 __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
633 return __ret;
634 }
635 #else
636 __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
637 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
638 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
639 uint16x4_t __ret;
640 __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
641 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
642 return __ret;
643 }
644 __ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
645 uint16x4_t __ret;
646 __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
647 return __ret;
648 }
649 #endif
650
651 #ifdef __LITTLE_ENDIAN__
652 __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
653 int8x8_t __ret;
654 __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
655 return __ret;
656 }
657 #else
658 __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
659 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
660 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
661 int8x8_t __ret;
662 __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
663 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
664 return __ret;
665 }
666 __ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
667 int8x8_t __ret;
668 __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
669 return __ret;
670 }
671 #endif
672
673 #ifdef __LITTLE_ENDIAN__
674 __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
675 float32x2_t __ret;
676 __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
677 return __ret;
678 }
679 #else
680 __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
681 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
682 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
683 float32x2_t __ret;
684 __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
685 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
686 return __ret;
687 }
688 #endif
689
690 #ifdef __LITTLE_ENDIAN__
691 __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
692 int32x2_t __ret;
693 __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
694 return __ret;
695 }
696 #else
697 __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
698 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
699 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
700 int32x2_t __ret;
701 __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
702 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
703 return __ret;
704 }
705 __ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
706 int32x2_t __ret;
707 __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
708 return __ret;
709 }
710 #endif
711
712 #ifdef __LITTLE_ENDIAN__
713 __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
714 int16x4_t __ret;
715 __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
716 return __ret;
717 }
718 #else
719 __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
720 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
721 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
722 int16x4_t __ret;
723 __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
724 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
725 return __ret;
726 }
727 __ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
728 int16x4_t __ret;
729 __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
730 return __ret;
731 }
732 #endif
733
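/*
 * vabs/vabsq: element-wise absolute value. Provided only for signed integer
 * and floating-point element types, since unsigned inputs have no meaningful
 * absolute value.
 */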
734 #ifdef __LITTLE_ENDIAN__
735 __ai int8x16_t vabsq_s8(int8x16_t __p0) {
736 int8x16_t __ret;
737 __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
738 return __ret;
739 }
740 #else
741 __ai int8x16_t vabsq_s8(int8x16_t __p0) {
742 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
743 int8x16_t __ret;
744 __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
745 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
746 return __ret;
747 }
748 #endif
749
750 #ifdef __LITTLE_ENDIAN__
751 __ai float32x4_t vabsq_f32(float32x4_t __p0) {
752 float32x4_t __ret;
753 __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
754 return __ret;
755 }
756 #else
757 __ai float32x4_t vabsq_f32(float32x4_t __p0) {
758 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
759 float32x4_t __ret;
760 __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
761 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
762 return __ret;
763 }
764 #endif
765
766 #ifdef __LITTLE_ENDIAN__
767 __ai int32x4_t vabsq_s32(int32x4_t __p0) {
768 int32x4_t __ret;
769 __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
770 return __ret;
771 }
772 #else
773 __ai int32x4_t vabsq_s32(int32x4_t __p0) {
774 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
775 int32x4_t __ret;
776 __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
777 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
778 return __ret;
779 }
780 #endif
781
782 #ifdef __LITTLE_ENDIAN__
783 __ai int16x8_t vabsq_s16(int16x8_t __p0) {
784 int16x8_t __ret;
785 __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
786 return __ret;
787 }
788 #else
789 __ai int16x8_t vabsq_s16(int16x8_t __p0) {
790 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
791 int16x8_t __ret;
792 __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
793 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
794 return __ret;
795 }
796 #endif
797
798 #ifdef __LITTLE_ENDIAN__
799 __ai int8x8_t vabs_s8(int8x8_t __p0) {
800 int8x8_t __ret;
801 __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
802 return __ret;
803 }
804 #else
805 __ai int8x8_t vabs_s8(int8x8_t __p0) {
806 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
807 int8x8_t __ret;
808 __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
809 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
810 return __ret;
811 }
812 #endif
813
814 #ifdef __LITTLE_ENDIAN__
815 __ai float32x2_t vabs_f32(float32x2_t __p0) {
816 float32x2_t __ret;
817 __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
818 return __ret;
819 }
820 #else
821 __ai float32x2_t vabs_f32(float32x2_t __p0) {
822 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
823 float32x2_t __ret;
824 __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
825 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
826 return __ret;
827 }
828 #endif
829
830 #ifdef __LITTLE_ENDIAN__
831 __ai int32x2_t vabs_s32(int32x2_t __p0) {
832 int32x2_t __ret;
833 __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
834 return __ret;
835 }
836 #else
837 __ai int32x2_t vabs_s32(int32x2_t __p0) {
838 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
839 int32x2_t __ret;
840 __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
841 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
842 return __ret;
843 }
844 #endif
845
846 #ifdef __LITTLE_ENDIAN__
847 __ai int16x4_t vabs_s16(int16x4_t __p0) {
848 int16x4_t __ret;
849 __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
850 return __ret;
851 }
852 #else
853 __ai int16x4_t vabs_s16(int16x4_t __p0) {
854 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
855 int16x4_t __ret;
856 __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
857 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
858 return __ret;
859 }
860 #endif
861
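/*
 * vadd/vaddq: element-wise addition. Because the NEON typedefs are real Clang
 * vector types, the plain `__p0 + __p1` expression already performs a
 * lane-wise add, so no builtin call is needed here.
 */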
862 #ifdef __LITTLE_ENDIAN__
863 __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
864 uint8x16_t __ret;
865 __ret = __p0 + __p1;
866 return __ret;
867 }
868 #else
869 __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
870 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
871 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
872 uint8x16_t __ret;
873 __ret = __rev0 + __rev1;
874 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
875 return __ret;
876 }
877 #endif
878
879 #ifdef __LITTLE_ENDIAN__
880 __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
881 uint32x4_t __ret;
882 __ret = __p0 + __p1;
883 return __ret;
884 }
885 #else
886 __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
887 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
888 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
889 uint32x4_t __ret;
890 __ret = __rev0 + __rev1;
891 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
892 return __ret;
893 }
894 #endif
895
896 #ifdef __LITTLE_ENDIAN__
897 __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
898 uint64x2_t __ret;
899 __ret = __p0 + __p1;
900 return __ret;
901 }
902 #else
903 __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
904 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
905 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
906 uint64x2_t __ret;
907 __ret = __rev0 + __rev1;
908 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
909 return __ret;
910 }
911 #endif
912
913 #ifdef __LITTLE_ENDIAN__
914 __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
915 uint16x8_t __ret;
916 __ret = __p0 + __p1;
917 return __ret;
918 }
919 #else
920 __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
921 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
922 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
923 uint16x8_t __ret;
924 __ret = __rev0 + __rev1;
925 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
926 return __ret;
927 }
928 #endif
929
930 #ifdef __LITTLE_ENDIAN__
931 __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
932 int8x16_t __ret;
933 __ret = __p0 + __p1;
934 return __ret;
935 }
936 #else
937 __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
938 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
939 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
940 int8x16_t __ret;
941 __ret = __rev0 + __rev1;
942 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
943 return __ret;
944 }
945 #endif
946
947 #ifdef __LITTLE_ENDIAN__
948 __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
949 float32x4_t __ret;
950 __ret = __p0 + __p1;
951 return __ret;
952 }
953 #else
954 __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
955 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
956 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
957 float32x4_t __ret;
958 __ret = __rev0 + __rev1;
959 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
960 return __ret;
961 }
962 #endif
963
964 #ifdef __LITTLE_ENDIAN__
965 __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
966 int32x4_t __ret;
967 __ret = __p0 + __p1;
968 return __ret;
969 }
970 #else
971 __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
972 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
973 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
974 int32x4_t __ret;
975 __ret = __rev0 + __rev1;
976 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
977 return __ret;
978 }
979 #endif
980
981 #ifdef __LITTLE_ENDIAN__
982 __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
983 int64x2_t __ret;
984 __ret = __p0 + __p1;
985 return __ret;
986 }
987 #else
988 __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
989 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
990 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
991 int64x2_t __ret;
992 __ret = __rev0 + __rev1;
993 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
994 return __ret;
995 }
996 #endif
997
998 #ifdef __LITTLE_ENDIAN__
999 __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
1000 int16x8_t __ret;
1001 __ret = __p0 + __p1;
1002 return __ret;
1003 }
1004 #else
1005 __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
1006 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1007 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1008 int16x8_t __ret;
1009 __ret = __rev0 + __rev1;
1010 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1011 return __ret;
1012 }
1013 #endif
1014
1015 #ifdef __LITTLE_ENDIAN__
1016 __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
1017 uint8x8_t __ret;
1018 __ret = __p0 + __p1;
1019 return __ret;
1020 }
1021 #else
1022 __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
1023 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1024 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1025 uint8x8_t __ret;
1026 __ret = __rev0 + __rev1;
1027 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1028 return __ret;
1029 }
1030 #endif
1031
1032 #ifdef __LITTLE_ENDIAN__
1033 __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
1034 uint32x2_t __ret;
1035 __ret = __p0 + __p1;
1036 return __ret;
1037 }
1038 #else
1039 __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
1040 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1041 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1042 uint32x2_t __ret;
1043 __ret = __rev0 + __rev1;
1044 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1045 return __ret;
1046 }
1047 #endif
1048
1049 #ifdef __LITTLE_ENDIAN__
1050 __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
1051 uint64x1_t __ret;
1052 __ret = __p0 + __p1;
1053 return __ret;
1054 }
1055 #else
1056 __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
1057 uint64x1_t __ret;
1058 __ret = __p0 + __p1;
1059 return __ret;
1060 }
1061 #endif
1062
1063 #ifdef __LITTLE_ENDIAN__
1064 __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
1065 uint16x4_t __ret;
1066 __ret = __p0 + __p1;
1067 return __ret;
1068 }
1069 #else
1070 __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
1071 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1072 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1073 uint16x4_t __ret;
1074 __ret = __rev0 + __rev1;
1075 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1076 return __ret;
1077 }
1078 #endif
1079
1080 #ifdef __LITTLE_ENDIAN__
1081 __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
1082 int8x8_t __ret;
1083 __ret = __p0 + __p1;
1084 return __ret;
1085 }
1086 #else
1087 __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
1088 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1089 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1090 int8x8_t __ret;
1091 __ret = __rev0 + __rev1;
1092 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1093 return __ret;
1094 }
1095 #endif
1096
1097 #ifdef __LITTLE_ENDIAN__
1098 __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
1099 float32x2_t __ret;
1100 __ret = __p0 + __p1;
1101 return __ret;
1102 }
1103 #else
1104 __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
1105 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1106 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1107 float32x2_t __ret;
1108 __ret = __rev0 + __rev1;
1109 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1110 return __ret;
1111 }
1112 #endif
1113
1114 #ifdef __LITTLE_ENDIAN__
1115 __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
1116 int32x2_t __ret;
1117 __ret = __p0 + __p1;
1118 return __ret;
1119 }
1120 #else
1121 __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
1122 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1123 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1124 int32x2_t __ret;
1125 __ret = __rev0 + __rev1;
1126 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1127 return __ret;
1128 }
1129 #endif
1130
1131 #ifdef __LITTLE_ENDIAN__
1132 __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
1133 int64x1_t __ret;
1134 __ret = __p0 + __p1;
1135 return __ret;
1136 }
1137 #else
1138 __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
1139 int64x1_t __ret;
1140 __ret = __p0 + __p1;
1141 return __ret;
1142 }
1143 #endif
1144
1145 #ifdef __LITTLE_ENDIAN__
1146 __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
1147 int16x4_t __ret;
1148 __ret = __p0 + __p1;
1149 return __ret;
1150 }
1151 #else
1152 __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
1153 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1154 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1155 int16x4_t __ret;
1156 __ret = __rev0 + __rev1;
1157 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1158 return __ret;
1159 }
1160 #endif
1161
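/*
 * vaddhn: add and return the high, narrowing half. Each pair of wide lanes is
 * added and only the most significant half of every sum is kept, so e.g.
 * vaddhn_u32 takes two uint32x4_t operands and yields a uint16x4_t.
 */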
1162 #ifdef __LITTLE_ENDIAN__
1163 __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
1164 uint16x4_t __ret;
1165 __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
1166 return __ret;
1167 }
1168 #else
1169 __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
1170 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1171 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1172 uint16x4_t __ret;
1173 __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
1174 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1175 return __ret;
1176 }
1177 __ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
1178 uint16x4_t __ret;
1179 __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
1180 return __ret;
1181 }
1182 #endif
1183
1184 #ifdef __LITTLE_ENDIAN__
1185 __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
1186 uint32x2_t __ret;
1187 __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
1188 return __ret;
1189 }
1190 #else
1191 __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
1192 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1193 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1194 uint32x2_t __ret;
1195 __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
1196 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1197 return __ret;
1198 }
1199 __ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
1200 uint32x2_t __ret;
1201 __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
1202 return __ret;
1203 }
1204 #endif
1205
1206 #ifdef __LITTLE_ENDIAN__
1207 __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
1208 uint8x8_t __ret;
1209 __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
1210 return __ret;
1211 }
1212 #else
1213 __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
1214 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1215 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1216 uint8x8_t __ret;
1217 __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
1218 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1219 return __ret;
1220 }
1221 __ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
1222 uint8x8_t __ret;
1223 __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
1224 return __ret;
1225 }
1226 #endif
1227
1228 #ifdef __LITTLE_ENDIAN__
1229 __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
1230 int16x4_t __ret;
1231 __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
1232 return __ret;
1233 }
1234 #else
1235 __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
1236 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1237 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1238 int16x4_t __ret;
1239 __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
1240 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1241 return __ret;
1242 }
1243 __ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
1244 int16x4_t __ret;
1245 __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
1246 return __ret;
1247 }
1248 #endif
1249
1250 #ifdef __LITTLE_ENDIAN__
1251 __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
1252 int32x2_t __ret;
1253 __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
1254 return __ret;
1255 }
1256 #else
1257 __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
1258 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1259 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1260 int32x2_t __ret;
1261 __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
1262 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1263 return __ret;
1264 }
1265 __ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
1266 int32x2_t __ret;
1267 __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
1268 return __ret;
1269 }
1270 #endif
1271
1272 #ifdef __LITTLE_ENDIAN__
1273 __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
1274 int8x8_t __ret;
1275 __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
1276 return __ret;
1277 }
1278 #else
1279 __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
1280 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1281 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1282 int8x8_t __ret;
1283 __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
1284 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1285 return __ret;
1286 }
1287 __ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
1288 int8x8_t __ret;
1289 __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
1290 return __ret;
1291 }
1292 #endif
1293
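/*
 * vand/vandq: element-wise bitwise AND, again expressed directly with the
 * native `&` operator on the vector types.
 */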
1294 #ifdef __LITTLE_ENDIAN__
1295 __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1296 uint8x16_t __ret;
1297 __ret = __p0 & __p1;
1298 return __ret;
1299 }
1300 #else
1301 __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1302 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1303 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1304 uint8x16_t __ret;
1305 __ret = __rev0 & __rev1;
1306 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1307 return __ret;
1308 }
1309 #endif
1310
1311 #ifdef __LITTLE_ENDIAN__
1312 __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1313 uint32x4_t __ret;
1314 __ret = __p0 & __p1;
1315 return __ret;
1316 }
1317 #else
1318 __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1319 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1320 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1321 uint32x4_t __ret;
1322 __ret = __rev0 & __rev1;
1323 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1324 return __ret;
1325 }
1326 #endif
1327
1328 #ifdef __LITTLE_ENDIAN__
1329 __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1330 uint64x2_t __ret;
1331 __ret = __p0 & __p1;
1332 return __ret;
1333 }
1334 #else
1335 __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1336 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1337 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1338 uint64x2_t __ret;
1339 __ret = __rev0 & __rev1;
1340 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1341 return __ret;
1342 }
1343 #endif
1344
1345 #ifdef __LITTLE_ENDIAN__
1346 __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1347 uint16x8_t __ret;
1348 __ret = __p0 & __p1;
1349 return __ret;
1350 }
1351 #else
1352 __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1353 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1354 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1355 uint16x8_t __ret;
1356 __ret = __rev0 & __rev1;
1357 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1358 return __ret;
1359 }
1360 #endif
1361
1362 #ifdef __LITTLE_ENDIAN__
1363 __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
1364 int8x16_t __ret;
1365 __ret = __p0 & __p1;
1366 return __ret;
1367 }
1368 #else
1369 __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
1370 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1371 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1372 int8x16_t __ret;
1373 __ret = __rev0 & __rev1;
1374 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1375 return __ret;
1376 }
1377 #endif
1378
1379 #ifdef __LITTLE_ENDIAN__
1380 __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
1381 int32x4_t __ret;
1382 __ret = __p0 & __p1;
1383 return __ret;
1384 }
1385 #else
1386 __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
1387 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1388 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1389 int32x4_t __ret;
1390 __ret = __rev0 & __rev1;
1391 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1392 return __ret;
1393 }
1394 #endif
1395
1396 #ifdef __LITTLE_ENDIAN__
1397 __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
1398 int64x2_t __ret;
1399 __ret = __p0 & __p1;
1400 return __ret;
1401 }
1402 #else
1403 __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
1404 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1405 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1406 int64x2_t __ret;
1407 __ret = __rev0 & __rev1;
1408 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1409 return __ret;
1410 }
1411 #endif
1412
1413 #ifdef __LITTLE_ENDIAN__
1414 __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
1415 int16x8_t __ret;
1416 __ret = __p0 & __p1;
1417 return __ret;
1418 }
1419 #else
1420 __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
1421 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1422 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1423 int16x8_t __ret;
1424 __ret = __rev0 & __rev1;
1425 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1426 return __ret;
1427 }
1428 #endif
1429
1430 #ifdef __LITTLE_ENDIAN__
1431 __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
1432 uint8x8_t __ret;
1433 __ret = __p0 & __p1;
1434 return __ret;
1435 }
1436 #else
1437 __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
1438 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1439 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1440 uint8x8_t __ret;
1441 __ret = __rev0 & __rev1;
1442 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1443 return __ret;
1444 }
1445 #endif
1446
1447 #ifdef __LITTLE_ENDIAN__
1448 __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
1449 uint32x2_t __ret;
1450 __ret = __p0 & __p1;
1451 return __ret;
1452 }
1453 #else
1454 __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
1455 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1456 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1457 uint32x2_t __ret;
1458 __ret = __rev0 & __rev1;
1459 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1460 return __ret;
1461 }
1462 #endif
1463
1464 #ifdef __LITTLE_ENDIAN__
1465 __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
1466 uint64x1_t __ret;
1467 __ret = __p0 & __p1;
1468 return __ret;
1469 }
1470 #else
1471 __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
1472 uint64x1_t __ret;
1473 __ret = __p0 & __p1;
1474 return __ret;
1475 }
1476 #endif
1477
1478 #ifdef __LITTLE_ENDIAN__
1479 __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
1480 uint16x4_t __ret;
1481 __ret = __p0 & __p1;
1482 return __ret;
1483 }
1484 #else
1485 __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
1486 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1487 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1488 uint16x4_t __ret;
1489 __ret = __rev0 & __rev1;
1490 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1491 return __ret;
1492 }
1493 #endif
1494
1495 #ifdef __LITTLE_ENDIAN__
1496 __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
1497 int8x8_t __ret;
1498 __ret = __p0 & __p1;
1499 return __ret;
1500 }
1501 #else
1502 __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
1503 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1504 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1505 int8x8_t __ret;
1506 __ret = __rev0 & __rev1;
1507 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1508 return __ret;
1509 }
1510 #endif
1511
1512 #ifdef __LITTLE_ENDIAN__
1513 __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
1514 int32x2_t __ret;
1515 __ret = __p0 & __p1;
1516 return __ret;
1517 }
1518 #else
1519 __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
1520 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1521 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1522 int32x2_t __ret;
1523 __ret = __rev0 & __rev1;
1524 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1525 return __ret;
1526 }
1527 #endif
1528
1529 #ifdef __LITTLE_ENDIAN__
1530 __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
1531 int64x1_t __ret;
1532 __ret = __p0 & __p1;
1533 return __ret;
1534 }
1535 #else
1536 __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
1537 int64x1_t __ret;
1538 __ret = __p0 & __p1;
1539 return __ret;
1540 }
1541 #endif
1542
1543 #ifdef __LITTLE_ENDIAN__
1544 __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
1545 int16x4_t __ret;
1546 __ret = __p0 & __p1;
1547 return __ret;
1548 }
1549 #else
1550 __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
1551 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1552 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1553 int16x4_t __ret;
1554 __ret = __rev0 & __rev1;
1555 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1556 return __ret;
1557 }
1558 #endif
1559
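/*
 * vbic/vbicq: bit clear, i.e. `__p0 & ~__p1` (clear in the first operand the
 * bits that are set in the second).
 */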
1560 #ifdef __LITTLE_ENDIAN__
1561 __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1562 uint8x16_t __ret;
1563 __ret = __p0 & ~__p1;
1564 return __ret;
1565 }
1566 #else
1567 __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
1568 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1569 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1570 uint8x16_t __ret;
1571 __ret = __rev0 & ~__rev1;
1572 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1573 return __ret;
1574 }
1575 #endif
1576
1577 #ifdef __LITTLE_ENDIAN__
1578 __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1579 uint32x4_t __ret;
1580 __ret = __p0 & ~__p1;
1581 return __ret;
1582 }
1583 #else
1584 __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
1585 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1586 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1587 uint32x4_t __ret;
1588 __ret = __rev0 & ~__rev1;
1589 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1590 return __ret;
1591 }
1592 #endif
1593
1594 #ifdef __LITTLE_ENDIAN__
1595 __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1596 uint64x2_t __ret;
1597 __ret = __p0 & ~__p1;
1598 return __ret;
1599 }
1600 #else
1601 __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
1602 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1603 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1604 uint64x2_t __ret;
1605 __ret = __rev0 & ~__rev1;
1606 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1607 return __ret;
1608 }
1609 #endif
1610
1611 #ifdef __LITTLE_ENDIAN__
1612 __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1613 uint16x8_t __ret;
1614 __ret = __p0 & ~__p1;
1615 return __ret;
1616 }
1617 #else
1618 __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
1619 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1620 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1621 uint16x8_t __ret;
1622 __ret = __rev0 & ~__rev1;
1623 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1624 return __ret;
1625 }
1626 #endif
1627
1628 #ifdef __LITTLE_ENDIAN__
1629 __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
1630 int8x16_t __ret;
1631 __ret = __p0 & ~__p1;
1632 return __ret;
1633 }
1634 #else
1635 __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
1636 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1637 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1638 int8x16_t __ret;
1639 __ret = __rev0 & ~__rev1;
1640 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1641 return __ret;
1642 }
1643 #endif
1644
1645 #ifdef __LITTLE_ENDIAN__
1646 __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
1647 int32x4_t __ret;
1648 __ret = __p0 & ~__p1;
1649 return __ret;
1650 }
1651 #else
1652 __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
1653 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1654 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1655 int32x4_t __ret;
1656 __ret = __rev0 & ~__rev1;
1657 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1658 return __ret;
1659 }
1660 #endif
1661
1662 #ifdef __LITTLE_ENDIAN__
1663 __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
1664 int64x2_t __ret;
1665 __ret = __p0 & ~__p1;
1666 return __ret;
1667 }
1668 #else
1669 __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
1670 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1671 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1672 int64x2_t __ret;
1673 __ret = __rev0 & ~__rev1;
1674 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1675 return __ret;
1676 }
1677 #endif
1678
1679 #ifdef __LITTLE_ENDIAN__
1680 __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
1681 int16x8_t __ret;
1682 __ret = __p0 & ~__p1;
1683 return __ret;
1684 }
1685 #else
1686 __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
1687 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1688 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1689 int16x8_t __ret;
1690 __ret = __rev0 & ~__rev1;
1691 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1692 return __ret;
1693 }
1694 #endif
1695
1696 #ifdef __LITTLE_ENDIAN__
1697 __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
1698 uint8x8_t __ret;
1699 __ret = __p0 & ~__p1;
1700 return __ret;
1701 }
1702 #else
1703 __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
1704 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1705 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1706 uint8x8_t __ret;
1707 __ret = __rev0 & ~__rev1;
1708 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1709 return __ret;
1710 }
1711 #endif
1712
1713 #ifdef __LITTLE_ENDIAN__
1714 __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
1715 uint32x2_t __ret;
1716 __ret = __p0 & ~__p1;
1717 return __ret;
1718 }
1719 #else
1720 __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
1721 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1722 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1723 uint32x2_t __ret;
1724 __ret = __rev0 & ~__rev1;
1725 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1726 return __ret;
1727 }
1728 #endif
1729
1730 #ifdef __LITTLE_ENDIAN__
1731 __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
1732 uint64x1_t __ret;
1733 __ret = __p0 & ~__p1;
1734 return __ret;
1735 }
1736 #else
1737 __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
1738 uint64x1_t __ret;
1739 __ret = __p0 & ~__p1;
1740 return __ret;
1741 }
1742 #endif
1743
1744 #ifdef __LITTLE_ENDIAN__
1745 __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
1746 uint16x4_t __ret;
1747 __ret = __p0 & ~__p1;
1748 return __ret;
1749 }
1750 #else
1751 __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
1752 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1753 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1754 uint16x4_t __ret;
1755 __ret = __rev0 & ~__rev1;
1756 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1757 return __ret;
1758 }
1759 #endif
1760
1761 #ifdef __LITTLE_ENDIAN__
1762 __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
1763 int8x8_t __ret;
1764 __ret = __p0 & ~__p1;
1765 return __ret;
1766 }
1767 #else
1768 __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
1769 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1770 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1771 int8x8_t __ret;
1772 __ret = __rev0 & ~__rev1;
1773 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1774 return __ret;
1775 }
1776 #endif
1777
1778 #ifdef __LITTLE_ENDIAN__
1779 __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
1780 int32x2_t __ret;
1781 __ret = __p0 & ~__p1;
1782 return __ret;
1783 }
1784 #else
1785 __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
1786 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1787 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1788 int32x2_t __ret;
1789 __ret = __rev0 & ~__rev1;
1790 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1791 return __ret;
1792 }
1793 #endif
1794
1795 #ifdef __LITTLE_ENDIAN__
1796 __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
1797 int64x1_t __ret;
1798 __ret = __p0 & ~__p1;
1799 return __ret;
1800 }
1801 #else
1802 __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
1803 int64x1_t __ret;
1804 __ret = __p0 & ~__p1;
1805 return __ret;
1806 }
1807 #endif
1808
1809 #ifdef __LITTLE_ENDIAN__
1810 __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
1811 int16x4_t __ret;
1812 __ret = __p0 & ~__p1;
1813 return __ret;
1814 }
1815 #else
1816 __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
1817 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1818 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1819 int16x4_t __ret;
1820 __ret = __rev0 & ~__rev1;
1821 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1822 return __ret;
1823 }
1824 #endif
1825
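/* vbsl ("bitwise select"): each result bit is taken from __p1 where the
 * corresponding mask bit in __p0 is 1, and from __p2 where it is 0,
 * i.e. (__p0 & __p1) | (~__p0 & __p2). */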
1826 #ifdef __LITTLE_ENDIAN__
1827 __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
1828 poly8x8_t __ret;
1829 __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
1830 return __ret;
1831 }
1832 #else
1833 __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
1834 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1835 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1836 poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
1837 poly8x8_t __ret;
1838 __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
1839 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1840 return __ret;
1841 }
1842 #endif
1843
1844 #ifdef __LITTLE_ENDIAN__
1845 __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
1846 poly16x4_t __ret;
1847 __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
1848 return __ret;
1849 }
1850 #else
1851 __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
1852 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1853 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1854 poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
1855 poly16x4_t __ret;
1856 __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
1857 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1858 return __ret;
1859 }
1860 #endif
1861
1862 #ifdef __LITTLE_ENDIAN__
1863 __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
1864 poly8x16_t __ret;
1865 __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
1866 return __ret;
1867 }
1868 #else
1869 __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
1870 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1871 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1872 poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1873 poly8x16_t __ret;
1874 __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
1875 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1876 return __ret;
1877 }
1878 #endif
1879
1880 #ifdef __LITTLE_ENDIAN__
1881 __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
1882 poly16x8_t __ret;
1883 __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
1884 return __ret;
1885 }
1886 #else
1887 __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
1888 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1889 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1890 poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
1891 poly16x8_t __ret;
1892 __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
1893 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1894 return __ret;
1895 }
1896 #endif
1897
1898 #ifdef __LITTLE_ENDIAN__
1899 __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
1900 uint8x16_t __ret;
1901 __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
1902 return __ret;
1903 }
1904 #else
1905 __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
1906 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1907 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1908 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1909 uint8x16_t __ret;
1910 __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
1911 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1912 return __ret;
1913 }
1914 #endif
1915
1916 #ifdef __LITTLE_ENDIAN__
1917 __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
1918 uint32x4_t __ret;
1919 __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
1920 return __ret;
1921 }
1922 #else
1923 __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
1924 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1925 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1926 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
1927 uint32x4_t __ret;
1928 __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
1929 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
1930 return __ret;
1931 }
1932 #endif
1933
1934 #ifdef __LITTLE_ENDIAN__
1935 __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
1936 uint64x2_t __ret;
1937 __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
1938 return __ret;
1939 }
1940 #else
1941 __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
1942 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
1943 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
1944 uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
1945 uint64x2_t __ret;
1946 __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
1947 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
1948 return __ret;
1949 }
1950 #endif
1951
1952 #ifdef __LITTLE_ENDIAN__
1953 __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
1954 uint16x8_t __ret;
1955 __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
1956 return __ret;
1957 }
1958 #else
1959 __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
1960 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
1961 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
1962 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
1963 uint16x8_t __ret;
1964 __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
1965 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
1966 return __ret;
1967 }
1968 #endif
1969
1970 #ifdef __LITTLE_ENDIAN__
1971 __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
1972 int8x16_t __ret;
1973 __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
1974 return __ret;
1975 }
1976 #else
1977 __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
1978 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1979 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1980 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1981 int8x16_t __ret;
1982 __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
1983 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
1984 return __ret;
1985 }
1986 #endif
1987
1988 #ifdef __LITTLE_ENDIAN__
1989 __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
1990 float32x4_t __ret;
1991 __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
1992 return __ret;
1993 }
1994 #else
1995 __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
1996 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
1997 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
1998 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
1999 float32x4_t __ret;
2000 __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
2001 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2002 return __ret;
2003 }
2004 #endif
2005
2006 #ifdef __LITTLE_ENDIAN__
2007 __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
2008 int32x4_t __ret;
2009 __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
2010 return __ret;
2011 }
2012 #else
2013 __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
2014 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2015 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2016 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
2017 int32x4_t __ret;
2018 __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
2019 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2020 return __ret;
2021 }
2022 #endif
2023
2024 #ifdef __LITTLE_ENDIAN__
2025 __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
2026 int64x2_t __ret;
2027 __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
2028 return __ret;
2029 }
2030 #else
2031 __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
2032 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2033 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2034 int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2035 int64x2_t __ret;
2036 __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
2037 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2038 return __ret;
2039 }
2040 #endif
2041
2042 #ifdef __LITTLE_ENDIAN__
2043 __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
2044 int16x8_t __ret;
2045 __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
2046 return __ret;
2047 }
2048 #else
2049 __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
2050 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2051 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2052 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
2053 int16x8_t __ret;
2054 __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
2055 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2056 return __ret;
2057 }
2058 #endif
2059
2060 #ifdef __LITTLE_ENDIAN__
2061 __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
2062 uint8x8_t __ret;
2063 __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
2064 return __ret;
2065 }
2066 #else
2067 __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
2068 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2069 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2070 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
2071 uint8x8_t __ret;
2072 __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
2073 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2074 return __ret;
2075 }
2076 #endif
2077
2078 #ifdef __LITTLE_ENDIAN__
2079 __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
2080 uint32x2_t __ret;
2081 __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
2082 return __ret;
2083 }
2084 #else
2085 __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
2086 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2087 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2088 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2089 uint32x2_t __ret;
2090 __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
2091 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2092 return __ret;
2093 }
2094 #endif
2095
2096 #ifdef __LITTLE_ENDIAN__
2097 __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
2098 uint64x1_t __ret;
2099 __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
2100 return __ret;
2101 }
2102 #else
2103 __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
2104 uint64x1_t __ret;
2105 __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
2106 return __ret;
2107 }
2108 #endif
2109
2110 #ifdef __LITTLE_ENDIAN__
2111 __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
2112 uint16x4_t __ret;
2113 __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
2114 return __ret;
2115 }
2116 #else
2117 __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
2118 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2119 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2120 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
2121 uint16x4_t __ret;
2122 __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
2123 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2124 return __ret;
2125 }
2126 #endif
2127
2128 #ifdef __LITTLE_ENDIAN__
2129 __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
2130 int8x8_t __ret;
2131 __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
2132 return __ret;
2133 }
2134 #else
2135 __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
2136 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2137 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2138 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
2139 int8x8_t __ret;
2140 __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
2141 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2142 return __ret;
2143 }
2144 #endif
2145
2146 #ifdef __LITTLE_ENDIAN__
2147 __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
2148 float32x2_t __ret;
2149 __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
2150 return __ret;
2151 }
2152 #else
2153 __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
2154 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2155 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2156 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2157 float32x2_t __ret;
2158 __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
2159 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2160 return __ret;
2161 }
2162 #endif
2163
2164 #ifdef __LITTLE_ENDIAN__
2165 __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
2166 int32x2_t __ret;
2167 __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
2168 return __ret;
2169 }
2170 #else
2171 __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
2172 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2173 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2174 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
2175 int32x2_t __ret;
2176 __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
2177 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2178 return __ret;
2179 }
2180 #endif
2181
2182 #ifdef __LITTLE_ENDIAN__
2183 __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
2184 int64x1_t __ret;
2185 __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
2186 return __ret;
2187 }
2188 #else
2189 __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
2190 int64x1_t __ret;
2191 __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
2192 return __ret;
2193 }
2194 #endif
2195
2196 #ifdef __LITTLE_ENDIAN__
2197 __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
2198 int16x4_t __ret;
2199 __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
2200 return __ret;
2201 }
2202 #else
2203 __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
2204 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2205 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2206 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
2207 int16x4_t __ret;
2208 __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
2209 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2210 return __ret;
2211 }
2212 #endif
2213
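/* vcage: floating-point absolute compare greater-than-or-equal; each lane of
 * the result is all ones where |__p0| >= |__p1| and zero otherwise. */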
2214 #ifdef __LITTLE_ENDIAN__
2215 __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
2216 uint32x4_t __ret;
2217 __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2218 return __ret;
2219 }
2220 #else
2221 __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
2222 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2223 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2224 uint32x4_t __ret;
2225 __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2226 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2227 return __ret;
2228 }
2229 #endif
2230
2231 #ifdef __LITTLE_ENDIAN__
2232 __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
2233 uint32x2_t __ret;
2234 __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2235 return __ret;
2236 }
2237 #else
2238 __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
2239 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2240 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2241 uint32x2_t __ret;
2242 __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2243 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2244 return __ret;
2245 }
2246 #endif
2247
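/* vcagt: floating-point absolute compare greater-than; lanes where
 * |__p0| > |__p1| become all ones, others zero. */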
2248 #ifdef __LITTLE_ENDIAN__
2249 __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
2250 uint32x4_t __ret;
2251 __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2252 return __ret;
2253 }
2254 #else
2255 __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
2256 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2257 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2258 uint32x4_t __ret;
2259 __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2260 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2261 return __ret;
2262 }
2263 #endif
2264
2265 #ifdef __LITTLE_ENDIAN__
2266 __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
2267 uint32x2_t __ret;
2268 __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2269 return __ret;
2270 }
2271 #else
2272 __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
2273 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2274 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2275 uint32x2_t __ret;
2276 __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2277 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2278 return __ret;
2279 }
2280 #endif
2281
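/* vcale: floating-point absolute compare less-than-or-equal; lanes where
 * |__p0| <= |__p1| become all ones, others zero. */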
2282 #ifdef __LITTLE_ENDIAN__
2283 __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
2284 uint32x4_t __ret;
2285 __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2286 return __ret;
2287 }
2288 #else
2289 __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
2290 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2291 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2292 uint32x4_t __ret;
2293 __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2294 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2295 return __ret;
2296 }
2297 #endif
2298
2299 #ifdef __LITTLE_ENDIAN__
2300 __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
2301 uint32x2_t __ret;
2302 __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2303 return __ret;
2304 }
2305 #else
2306 __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
2307 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2308 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2309 uint32x2_t __ret;
2310 __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2311 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2312 return __ret;
2313 }
2314 #endif
2315
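/* vcalt: floating-point absolute compare less-than; lanes where
 * |__p0| < |__p1| become all ones, others zero. */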
2316 #ifdef __LITTLE_ENDIAN__
2317 __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
2318 uint32x4_t __ret;
2319 __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
2320 return __ret;
2321 }
2322 #else
2323 __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
2324 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2325 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2326 uint32x4_t __ret;
2327 __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
2328 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2329 return __ret;
2330 }
2331 #endif
2332
2333 #ifdef __LITTLE_ENDIAN__
2334 __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
2335 uint32x2_t __ret;
2336 __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
2337 return __ret;
2338 }
2339 #else
2340 __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
2341 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2342 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2343 uint32x2_t __ret;
2344 __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
2345 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2346 return __ret;
2347 }
2348 #endif
2349
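/* vceq: lane-wise compare for equality; equal lanes yield all ones,
 * unequal lanes yield zero. */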
2350 #ifdef __LITTLE_ENDIAN__
2351 __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
2352 uint8x8_t __ret;
2353 __ret = (uint8x8_t)(__p0 == __p1);
2354 return __ret;
2355 }
2356 #else
2357 __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
2358 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2359 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2360 uint8x8_t __ret;
2361 __ret = (uint8x8_t)(__rev0 == __rev1);
2362 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2363 return __ret;
2364 }
2365 #endif
2366
2367 #ifdef __LITTLE_ENDIAN__
2368 __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
2369 uint8x16_t __ret;
2370 __ret = (uint8x16_t)(__p0 == __p1);
2371 return __ret;
2372 }
2373 #else
2374 __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
2375 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2376 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2377 uint8x16_t __ret;
2378 __ret = (uint8x16_t)(__rev0 == __rev1);
2379 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2380 return __ret;
2381 }
2382 #endif
2383
2384 #ifdef __LITTLE_ENDIAN__
2385 __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2386 uint8x16_t __ret;
2387 __ret = (uint8x16_t)(__p0 == __p1);
2388 return __ret;
2389 }
2390 #else
2391 __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2392 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2393 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2394 uint8x16_t __ret;
2395 __ret = (uint8x16_t)(__rev0 == __rev1);
2396 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2397 return __ret;
2398 }
2399 #endif
2400
2401 #ifdef __LITTLE_ENDIAN__
2402 __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2403 uint32x4_t __ret;
2404 __ret = (uint32x4_t)(__p0 == __p1);
2405 return __ret;
2406 }
2407 #else
2408 __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2409 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2410 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2411 uint32x4_t __ret;
2412 __ret = (uint32x4_t)(__rev0 == __rev1);
2413 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2414 return __ret;
2415 }
2416 #endif
2417
2418 #ifdef __LITTLE_ENDIAN__
2419 __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2420 uint16x8_t __ret;
2421 __ret = (uint16x8_t)(__p0 == __p1);
2422 return __ret;
2423 }
2424 #else
2425 __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2426 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2427 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2428 uint16x8_t __ret;
2429 __ret = (uint16x8_t)(__rev0 == __rev1);
2430 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2431 return __ret;
2432 }
2433 #endif
2434
2435 #ifdef __LITTLE_ENDIAN__
2436 __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
2437 uint8x16_t __ret;
2438 __ret = (uint8x16_t)(__p0 == __p1);
2439 return __ret;
2440 }
2441 #else
2442 __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
2443 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2444 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2445 uint8x16_t __ret;
2446 __ret = (uint8x16_t)(__rev0 == __rev1);
2447 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2448 return __ret;
2449 }
2450 #endif
2451
2452 #ifdef __LITTLE_ENDIAN__
2453 __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
2454 uint32x4_t __ret;
2455 __ret = (uint32x4_t)(__p0 == __p1);
2456 return __ret;
2457 }
2458 #else
2459 __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
2460 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2461 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2462 uint32x4_t __ret;
2463 __ret = (uint32x4_t)(__rev0 == __rev1);
2464 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2465 return __ret;
2466 }
2467 #endif
2468
2469 #ifdef __LITTLE_ENDIAN__
2470 __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
2471 uint32x4_t __ret;
2472 __ret = (uint32x4_t)(__p0 == __p1);
2473 return __ret;
2474 }
2475 #else
2476 __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
2477 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2478 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2479 uint32x4_t __ret;
2480 __ret = (uint32x4_t)(__rev0 == __rev1);
2481 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2482 return __ret;
2483 }
2484 #endif
2485
2486 #ifdef __LITTLE_ENDIAN__
2487 __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
2488 uint16x8_t __ret;
2489 __ret = (uint16x8_t)(__p0 == __p1);
2490 return __ret;
2491 }
2492 #else
2493 __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
2494 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2495 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2496 uint16x8_t __ret;
2497 __ret = (uint16x8_t)(__rev0 == __rev1);
2498 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2499 return __ret;
2500 }
2501 #endif
2502
2503 #ifdef __LITTLE_ENDIAN__
2504 __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
2505 uint8x8_t __ret;
2506 __ret = (uint8x8_t)(__p0 == __p1);
2507 return __ret;
2508 }
2509 #else
2510 __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
2511 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2512 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2513 uint8x8_t __ret;
2514 __ret = (uint8x8_t)(__rev0 == __rev1);
2515 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2516 return __ret;
2517 }
2518 #endif
2519
2520 #ifdef __LITTLE_ENDIAN__
2521 __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
2522 uint32x2_t __ret;
2523 __ret = (uint32x2_t)(__p0 == __p1);
2524 return __ret;
2525 }
2526 #else
2527 __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
2528 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2529 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2530 uint32x2_t __ret;
2531 __ret = (uint32x2_t)(__rev0 == __rev1);
2532 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2533 return __ret;
2534 }
2535 #endif
2536
2537 #ifdef __LITTLE_ENDIAN__
2538 __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
2539 uint16x4_t __ret;
2540 __ret = (uint16x4_t)(__p0 == __p1);
2541 return __ret;
2542 }
2543 #else
2544 __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
2545 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2546 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2547 uint16x4_t __ret;
2548 __ret = (uint16x4_t)(__rev0 == __rev1);
2549 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2550 return __ret;
2551 }
2552 #endif
2553
2554 #ifdef __LITTLE_ENDIAN__
2555 __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
2556 uint8x8_t __ret;
2557 __ret = (uint8x8_t)(__p0 == __p1);
2558 return __ret;
2559 }
2560 #else
2561 __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
2562 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2563 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2564 uint8x8_t __ret;
2565 __ret = (uint8x8_t)(__rev0 == __rev1);
2566 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2567 return __ret;
2568 }
2569 #endif
2570
2571 #ifdef __LITTLE_ENDIAN__
2572 __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
2573 uint32x2_t __ret;
2574 __ret = (uint32x2_t)(__p0 == __p1);
2575 return __ret;
2576 }
2577 #else
2578 __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
2579 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2580 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2581 uint32x2_t __ret;
2582 __ret = (uint32x2_t)(__rev0 == __rev1);
2583 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2584 return __ret;
2585 }
2586 #endif
2587
2588 #ifdef __LITTLE_ENDIAN__
2589 __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
2590 uint32x2_t __ret;
2591 __ret = (uint32x2_t)(__p0 == __p1);
2592 return __ret;
2593 }
2594 #else
2595 __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
2596 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2597 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2598 uint32x2_t __ret;
2599 __ret = (uint32x2_t)(__rev0 == __rev1);
2600 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2601 return __ret;
2602 }
2603 #endif
2604
2605 #ifdef __LITTLE_ENDIAN__
2606 __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
2607 uint16x4_t __ret;
2608 __ret = (uint16x4_t)(__p0 == __p1);
2609 return __ret;
2610 }
2611 #else
2612 __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
2613 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2614 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2615 uint16x4_t __ret;
2616 __ret = (uint16x4_t)(__rev0 == __rev1);
2617 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2618 return __ret;
2619 }
2620 #endif
2621
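/* vcge: lane-wise compare greater-than-or-equal, producing an all-ones or
 * all-zeros mask per lane. */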
2622 #ifdef __LITTLE_ENDIAN__
2623 __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2624 uint8x16_t __ret;
2625 __ret = (uint8x16_t)(__p0 >= __p1);
2626 return __ret;
2627 }
2628 #else
2629 __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2630 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2631 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2632 uint8x16_t __ret;
2633 __ret = (uint8x16_t)(__rev0 >= __rev1);
2634 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2635 return __ret;
2636 }
2637 #endif
2638
2639 #ifdef __LITTLE_ENDIAN__
2640 __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2641 uint32x4_t __ret;
2642 __ret = (uint32x4_t)(__p0 >= __p1);
2643 return __ret;
2644 }
2645 #else
2646 __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2647 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2648 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2649 uint32x4_t __ret;
2650 __ret = (uint32x4_t)(__rev0 >= __rev1);
2651 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2652 return __ret;
2653 }
2654 #endif
2655
2656 #ifdef __LITTLE_ENDIAN__
2657 __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2658 uint16x8_t __ret;
2659 __ret = (uint16x8_t)(__p0 >= __p1);
2660 return __ret;
2661 }
2662 #else
2663 __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2664 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2665 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2666 uint16x8_t __ret;
2667 __ret = (uint16x8_t)(__rev0 >= __rev1);
2668 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2669 return __ret;
2670 }
2671 #endif
2672
2673 #ifdef __LITTLE_ENDIAN__
2674 __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
2675 uint8x16_t __ret;
2676 __ret = (uint8x16_t)(__p0 >= __p1);
2677 return __ret;
2678 }
2679 #else
2680 __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
2681 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2682 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2683 uint8x16_t __ret;
2684 __ret = (uint8x16_t)(__rev0 >= __rev1);
2685 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2686 return __ret;
2687 }
2688 #endif
2689
2690 #ifdef __LITTLE_ENDIAN__
2691 __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
2692 uint32x4_t __ret;
2693 __ret = (uint32x4_t)(__p0 >= __p1);
2694 return __ret;
2695 }
2696 #else
2697 __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
2698 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2699 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2700 uint32x4_t __ret;
2701 __ret = (uint32x4_t)(__rev0 >= __rev1);
2702 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2703 return __ret;
2704 }
2705 #endif
2706
2707 #ifdef __LITTLE_ENDIAN__
2708 __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
2709 uint32x4_t __ret;
2710 __ret = (uint32x4_t)(__p0 >= __p1);
2711 return __ret;
2712 }
2713 #else
2714 __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
2715 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2716 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2717 uint32x4_t __ret;
2718 __ret = (uint32x4_t)(__rev0 >= __rev1);
2719 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2720 return __ret;
2721 }
2722 #endif
2723
2724 #ifdef __LITTLE_ENDIAN__
2725 __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
2726 uint16x8_t __ret;
2727 __ret = (uint16x8_t)(__p0 >= __p1);
2728 return __ret;
2729 }
2730 #else
2731 __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
2732 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2733 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2734 uint16x8_t __ret;
2735 __ret = (uint16x8_t)(__rev0 >= __rev1);
2736 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2737 return __ret;
2738 }
2739 #endif
2740
2741 #ifdef __LITTLE_ENDIAN__
2742 __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
2743 uint8x8_t __ret;
2744 __ret = (uint8x8_t)(__p0 >= __p1);
2745 return __ret;
2746 }
2747 #else
2748 __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
2749 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2750 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2751 uint8x8_t __ret;
2752 __ret = (uint8x8_t)(__rev0 >= __rev1);
2753 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2754 return __ret;
2755 }
2756 #endif
2757
2758 #ifdef __LITTLE_ENDIAN__
2759 __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
2760 uint32x2_t __ret;
2761 __ret = (uint32x2_t)(__p0 >= __p1);
2762 return __ret;
2763 }
2764 #else
2765 __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
2766 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2767 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2768 uint32x2_t __ret;
2769 __ret = (uint32x2_t)(__rev0 >= __rev1);
2770 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2771 return __ret;
2772 }
2773 #endif
2774
2775 #ifdef __LITTLE_ENDIAN__
2776 __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
2777 uint16x4_t __ret;
2778 __ret = (uint16x4_t)(__p0 >= __p1);
2779 return __ret;
2780 }
2781 #else
2782 __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
2783 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2784 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2785 uint16x4_t __ret;
2786 __ret = (uint16x4_t)(__rev0 >= __rev1);
2787 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2788 return __ret;
2789 }
2790 #endif
2791
2792 #ifdef __LITTLE_ENDIAN__
2793 __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
2794 uint8x8_t __ret;
2795 __ret = (uint8x8_t)(__p0 >= __p1);
2796 return __ret;
2797 }
2798 #else
2799 __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
2800 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2801 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2802 uint8x8_t __ret;
2803 __ret = (uint8x8_t)(__rev0 >= __rev1);
2804 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2805 return __ret;
2806 }
2807 #endif
2808
2809 #ifdef __LITTLE_ENDIAN__
2810 __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
2811 uint32x2_t __ret;
2812 __ret = (uint32x2_t)(__p0 >= __p1);
2813 return __ret;
2814 }
2815 #else
2816 __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
2817 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2818 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2819 uint32x2_t __ret;
2820 __ret = (uint32x2_t)(__rev0 >= __rev1);
2821 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2822 return __ret;
2823 }
2824 #endif
2825
2826 #ifdef __LITTLE_ENDIAN__
2827 __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
2828 uint32x2_t __ret;
2829 __ret = (uint32x2_t)(__p0 >= __p1);
2830 return __ret;
2831 }
2832 #else
2833 __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
2834 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
2835 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
2836 uint32x2_t __ret;
2837 __ret = (uint32x2_t)(__rev0 >= __rev1);
2838 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
2839 return __ret;
2840 }
2841 #endif
2842
2843 #ifdef __LITTLE_ENDIAN__
2844 __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
2845 uint16x4_t __ret;
2846 __ret = (uint16x4_t)(__p0 >= __p1);
2847 return __ret;
2848 }
2849 #else
2850 __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
2851 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2852 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2853 uint16x4_t __ret;
2854 __ret = (uint16x4_t)(__rev0 >= __rev1);
2855 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2856 return __ret;
2857 }
2858 #endif
2859
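/* vcgt: lane-wise compare greater-than, producing an all-ones or
 * all-zeros mask per lane. */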
2860 #ifdef __LITTLE_ENDIAN__
2861 __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2862 uint8x16_t __ret;
2863 __ret = (uint8x16_t)(__p0 > __p1);
2864 return __ret;
2865 }
2866 #else
2867 __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
2868 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2869 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2870 uint8x16_t __ret;
2871 __ret = (uint8x16_t)(__rev0 > __rev1);
2872 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2873 return __ret;
2874 }
2875 #endif
2876
2877 #ifdef __LITTLE_ENDIAN__
2878 __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2879 uint32x4_t __ret;
2880 __ret = (uint32x4_t)(__p0 > __p1);
2881 return __ret;
2882 }
2883 #else
2884 __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
2885 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2886 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2887 uint32x4_t __ret;
2888 __ret = (uint32x4_t)(__rev0 > __rev1);
2889 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2890 return __ret;
2891 }
2892 #endif
2893
2894 #ifdef __LITTLE_ENDIAN__
2895 __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2896 uint16x8_t __ret;
2897 __ret = (uint16x8_t)(__p0 > __p1);
2898 return __ret;
2899 }
2900 #else
2901 __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
2902 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2903 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2904 uint16x8_t __ret;
2905 __ret = (uint16x8_t)(__rev0 > __rev1);
2906 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2907 return __ret;
2908 }
2909 #endif
2910
2911 #ifdef __LITTLE_ENDIAN__
2912 __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
2913 uint8x16_t __ret;
2914 __ret = (uint8x16_t)(__p0 > __p1);
2915 return __ret;
2916 }
2917 #else
2918 __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
2919 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2920 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2921 uint8x16_t __ret;
2922 __ret = (uint8x16_t)(__rev0 > __rev1);
2923 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
2924 return __ret;
2925 }
2926 #endif
2927
2928 #ifdef __LITTLE_ENDIAN__
2929 __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
2930 uint32x4_t __ret;
2931 __ret = (uint32x4_t)(__p0 > __p1);
2932 return __ret;
2933 }
2934 #else
2935 __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
2936 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2937 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2938 uint32x4_t __ret;
2939 __ret = (uint32x4_t)(__rev0 > __rev1);
2940 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2941 return __ret;
2942 }
2943 #endif
2944
2945 #ifdef __LITTLE_ENDIAN__
2946 __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
2947 uint32x4_t __ret;
2948 __ret = (uint32x4_t)(__p0 > __p1);
2949 return __ret;
2950 }
2951 #else
2952 __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
2953 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
2954 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
2955 uint32x4_t __ret;
2956 __ret = (uint32x4_t)(__rev0 > __rev1);
2957 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
2958 return __ret;
2959 }
2960 #endif
2961
2962 #ifdef __LITTLE_ENDIAN__
2963 __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
2964 uint16x8_t __ret;
2965 __ret = (uint16x8_t)(__p0 > __p1);
2966 return __ret;
2967 }
2968 #else
2969 __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
2970 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2971 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2972 uint16x8_t __ret;
2973 __ret = (uint16x8_t)(__rev0 > __rev1);
2974 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2975 return __ret;
2976 }
2977 #endif
2978
2979 #ifdef __LITTLE_ENDIAN__
2980 __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
2981 uint8x8_t __ret;
2982 __ret = (uint8x8_t)(__p0 > __p1);
2983 return __ret;
2984 }
2985 #else
2986 __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
2987 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
2988 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
2989 uint8x8_t __ret;
2990 __ret = (uint8x8_t)(__rev0 > __rev1);
2991 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
2992 return __ret;
2993 }
2994 #endif
2995
2996 #ifdef __LITTLE_ENDIAN__
2997 __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
2998 uint32x2_t __ret;
2999 __ret = (uint32x2_t)(__p0 > __p1);
3000 return __ret;
3001 }
3002 #else
3003 __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
3004 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3005 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3006 uint32x2_t __ret;
3007 __ret = (uint32x2_t)(__rev0 > __rev1);
3008 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3009 return __ret;
3010 }
3011 #endif
3012
3013 #ifdef __LITTLE_ENDIAN__
3014 __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3015 uint16x4_t __ret;
3016 __ret = (uint16x4_t)(__p0 > __p1);
3017 return __ret;
3018 }
3019 #else
3020 __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3021 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3022 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3023 uint16x4_t __ret;
3024 __ret = (uint16x4_t)(__rev0 > __rev1);
3025 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3026 return __ret;
3027 }
3028 #endif
3029
3030 #ifdef __LITTLE_ENDIAN__
3031 __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
3032 uint8x8_t __ret;
3033 __ret = (uint8x8_t)(__p0 > __p1);
3034 return __ret;
3035 }
3036 #else
3037 __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
3038 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3039 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3040 uint8x8_t __ret;
3041 __ret = (uint8x8_t)(__rev0 > __rev1);
3042 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3043 return __ret;
3044 }
3045 #endif
3046
3047 #ifdef __LITTLE_ENDIAN__
3048 __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
3049 uint32x2_t __ret;
3050 __ret = (uint32x2_t)(__p0 > __p1);
3051 return __ret;
3052 }
3053 #else
3054 __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
3055 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3056 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3057 uint32x2_t __ret;
3058 __ret = (uint32x2_t)(__rev0 > __rev1);
3059 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3060 return __ret;
3061 }
3062 #endif
3063
3064 #ifdef __LITTLE_ENDIAN__
3065 __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
3066 uint32x2_t __ret;
3067 __ret = (uint32x2_t)(__p0 > __p1);
3068 return __ret;
3069 }
3070 #else
3071 __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
3072 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3073 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3074 uint32x2_t __ret;
3075 __ret = (uint32x2_t)(__rev0 > __rev1);
3076 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3077 return __ret;
3078 }
3079 #endif
3080
3081 #ifdef __LITTLE_ENDIAN__
3082 __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
3083 uint16x4_t __ret;
3084 __ret = (uint16x4_t)(__p0 > __p1);
3085 return __ret;
3086 }
3087 #else
3088 __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
3089 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3090 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3091 uint16x4_t __ret;
3092 __ret = (uint16x4_t)(__rev0 > __rev1);
3093 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3094 return __ret;
3095 }
3096 #endif
3097
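/* vcle: lane-wise compare less-than-or-equal, producing an all-ones or
 * all-zeros mask per lane. */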
3098 #ifdef __LITTLE_ENDIAN__
3099 __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3100 uint8x16_t __ret;
3101 __ret = (uint8x16_t)(__p0 <= __p1);
3102 return __ret;
3103 }
3104 #else
3105 __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3106 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3107 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3108 uint8x16_t __ret;
3109 __ret = (uint8x16_t)(__rev0 <= __rev1);
3110 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3111 return __ret;
3112 }
3113 #endif
3114
3115 #ifdef __LITTLE_ENDIAN__
3116 __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3117 uint32x4_t __ret;
3118 __ret = (uint32x4_t)(__p0 <= __p1);
3119 return __ret;
3120 }
3121 #else
3122 __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3123 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3124 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3125 uint32x4_t __ret;
3126 __ret = (uint32x4_t)(__rev0 <= __rev1);
3127 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3128 return __ret;
3129 }
3130 #endif
3131
3132 #ifdef __LITTLE_ENDIAN__
3133 __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3134 uint16x8_t __ret;
3135 __ret = (uint16x8_t)(__p0 <= __p1);
3136 return __ret;
3137 }
3138 #else
3139 __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3140 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3141 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3142 uint16x8_t __ret;
3143 __ret = (uint16x8_t)(__rev0 <= __rev1);
3144 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3145 return __ret;
3146 }
3147 #endif
3148
3149 #ifdef __LITTLE_ENDIAN__
3150 __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
3151 uint8x16_t __ret;
3152 __ret = (uint8x16_t)(__p0 <= __p1);
3153 return __ret;
3154 }
3155 #else
3156 __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
3157 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3158 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3159 uint8x16_t __ret;
3160 __ret = (uint8x16_t)(__rev0 <= __rev1);
3161 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3162 return __ret;
3163 }
3164 #endif
3165
3166 #ifdef __LITTLE_ENDIAN__
3167 __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
3168 uint32x4_t __ret;
3169 __ret = (uint32x4_t)(__p0 <= __p1);
3170 return __ret;
3171 }
3172 #else
3173 __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
3174 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3175 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3176 uint32x4_t __ret;
3177 __ret = (uint32x4_t)(__rev0 <= __rev1);
3178 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3179 return __ret;
3180 }
3181 #endif
3182
3183 #ifdef __LITTLE_ENDIAN__
3184 __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
3185 uint32x4_t __ret;
3186 __ret = (uint32x4_t)(__p0 <= __p1);
3187 return __ret;
3188 }
3189 #else
3190 __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
3191 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3192 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3193 uint32x4_t __ret;
3194 __ret = (uint32x4_t)(__rev0 <= __rev1);
3195 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3196 return __ret;
3197 }
3198 #endif
3199
3200 #ifdef __LITTLE_ENDIAN__
3201 __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
3202 uint16x8_t __ret;
3203 __ret = (uint16x8_t)(__p0 <= __p1);
3204 return __ret;
3205 }
3206 #else
3207 __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
3208 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3209 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3210 uint16x8_t __ret;
3211 __ret = (uint16x8_t)(__rev0 <= __rev1);
3212 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3213 return __ret;
3214 }
3215 #endif
3216
3217 #ifdef __LITTLE_ENDIAN__
3218 __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
3219 uint8x8_t __ret;
3220 __ret = (uint8x8_t)(__p0 <= __p1);
3221 return __ret;
3222 }
3223 #else
3224 __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
3225 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3226 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3227 uint8x8_t __ret;
3228 __ret = (uint8x8_t)(__rev0 <= __rev1);
3229 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3230 return __ret;
3231 }
3232 #endif
3233
3234 #ifdef __LITTLE_ENDIAN__
3235 __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
3236 uint32x2_t __ret;
3237 __ret = (uint32x2_t)(__p0 <= __p1);
3238 return __ret;
3239 }
3240 #else
3241 __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
3242 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3243 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3244 uint32x2_t __ret;
3245 __ret = (uint32x2_t)(__rev0 <= __rev1);
3246 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3247 return __ret;
3248 }
3249 #endif
3250
3251 #ifdef __LITTLE_ENDIAN__
3252 __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
3253 uint16x4_t __ret;
3254 __ret = (uint16x4_t)(__p0 <= __p1);
3255 return __ret;
3256 }
3257 #else
3258 __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
3259 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3260 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3261 uint16x4_t __ret;
3262 __ret = (uint16x4_t)(__rev0 <= __rev1);
3263 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3264 return __ret;
3265 }
3266 #endif
3267
3268 #ifdef __LITTLE_ENDIAN__
3269 __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
3270 uint8x8_t __ret;
3271 __ret = (uint8x8_t)(__p0 <= __p1);
3272 return __ret;
3273 }
3274 #else
3275 __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
3276 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3277 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3278 uint8x8_t __ret;
3279 __ret = (uint8x8_t)(__rev0 <= __rev1);
3280 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3281 return __ret;
3282 }
3283 #endif
3284
3285 #ifdef __LITTLE_ENDIAN__
3286 __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
3287 uint32x2_t __ret;
3288 __ret = (uint32x2_t)(__p0 <= __p1);
3289 return __ret;
3290 }
3291 #else
3292 __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
3293 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3294 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3295 uint32x2_t __ret;
3296 __ret = (uint32x2_t)(__rev0 <= __rev1);
3297 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3298 return __ret;
3299 }
3300 #endif
3301
3302 #ifdef __LITTLE_ENDIAN__
3303 __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
3304 uint32x2_t __ret;
3305 __ret = (uint32x2_t)(__p0 <= __p1);
3306 return __ret;
3307 }
3308 #else
3309 __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
3310 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3311 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3312 uint32x2_t __ret;
3313 __ret = (uint32x2_t)(__rev0 <= __rev1);
3314 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3315 return __ret;
3316 }
3317 #endif
3318
3319 #ifdef __LITTLE_ENDIAN__
3320 __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
3321 uint16x4_t __ret;
3322 __ret = (uint16x4_t)(__p0 <= __p1);
3323 return __ret;
3324 }
3325 #else
3326 __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
3327 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3328 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3329 uint16x4_t __ret;
3330 __ret = (uint16x4_t)(__rev0 <= __rev1);
3331 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3332 return __ret;
3333 }
3334 #endif
3335
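/*
 * vcls / vclsq: count leading sign bits. Each result lane holds the number of
 * consecutive bits, immediately below the sign bit, that match the sign bit.
 * The trailing integer constant passed to __builtin_neon_vclsq_v is an
 * internal element-type code consumed by the builtin, not a user-visible
 * parameter.
 */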
3336 #ifdef __LITTLE_ENDIAN__
3337 __ai int8x16_t vclsq_s8(int8x16_t __p0) {
3338 int8x16_t __ret;
3339 __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
3340 return __ret;
3341 }
3342 #else
3343 __ai int8x16_t vclsq_s8(int8x16_t __p0) {
3344 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3345 int8x16_t __ret;
3346 __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
3347 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3348 return __ret;
3349 }
3350 #endif
3351
3352 #ifdef __LITTLE_ENDIAN__
3353 __ai int32x4_t vclsq_s32(int32x4_t __p0) {
3354 int32x4_t __ret;
3355 __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
3356 return __ret;
3357 }
3358 #else
3359 __ai int32x4_t vclsq_s32(int32x4_t __p0) {
3360 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3361 int32x4_t __ret;
3362 __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
3363 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3364 return __ret;
3365 }
3366 #endif
3367
3368 #ifdef __LITTLE_ENDIAN__
3369 __ai int16x8_t vclsq_s16(int16x8_t __p0) {
3370 int16x8_t __ret;
3371 __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
3372 return __ret;
3373 }
3374 #else
3375 __ai int16x8_t vclsq_s16(int16x8_t __p0) {
3376 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3377 int16x8_t __ret;
3378 __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
3379 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3380 return __ret;
3381 }
3382 #endif
3383
3384 #ifdef __LITTLE_ENDIAN__
3385 __ai int8x8_t vcls_s8(int8x8_t __p0) {
3386 int8x8_t __ret;
3387 __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
3388 return __ret;
3389 }
3390 #else
3391 __ai int8x8_t vcls_s8(int8x8_t __p0) {
3392 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3393 int8x8_t __ret;
3394 __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
3395 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3396 return __ret;
3397 }
3398 #endif
3399
3400 #ifdef __LITTLE_ENDIAN__
3401 __ai int32x2_t vcls_s32(int32x2_t __p0) {
3402 int32x2_t __ret;
3403 __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
3404 return __ret;
3405 }
3406 #else
3407 __ai int32x2_t vcls_s32(int32x2_t __p0) {
3408 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3409 int32x2_t __ret;
3410 __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
3411 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3412 return __ret;
3413 }
3414 #endif
3415
3416 #ifdef __LITTLE_ENDIAN__
3417 __ai int16x4_t vcls_s16(int16x4_t __p0) {
3418 int16x4_t __ret;
3419 __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
3420 return __ret;
3421 }
3422 #else
3423 __ai int16x4_t vcls_s16(int16x4_t __p0) {
3424 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3425 int16x4_t __ret;
3426 __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
3427 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3428 return __ret;
3429 }
3430 #endif
3431
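/* vclt / vcltq: lane-wise "less than" comparison, producing the same
 * all-ones/all-zeros mask per lane as the vcgt and vcle families above. */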
3432 #ifdef __LITTLE_ENDIAN__
3433 __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3434 uint8x16_t __ret;
3435 __ret = (uint8x16_t)(__p0 < __p1);
3436 return __ret;
3437 }
3438 #else
3439 __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
3440 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3441 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3442 uint8x16_t __ret;
3443 __ret = (uint8x16_t)(__rev0 < __rev1);
3444 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3445 return __ret;
3446 }
3447 #endif
3448
3449 #ifdef __LITTLE_ENDIAN__
3450 __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3451 uint32x4_t __ret;
3452 __ret = (uint32x4_t)(__p0 < __p1);
3453 return __ret;
3454 }
3455 #else
3456 __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
3457 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3458 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3459 uint32x4_t __ret;
3460 __ret = (uint32x4_t)(__rev0 < __rev1);
3461 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3462 return __ret;
3463 }
3464 #endif
3465
3466 #ifdef __LITTLE_ENDIAN__
3467 __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3468 uint16x8_t __ret;
3469 __ret = (uint16x8_t)(__p0 < __p1);
3470 return __ret;
3471 }
3472 #else
3473 __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
3474 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3475 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3476 uint16x8_t __ret;
3477 __ret = (uint16x8_t)(__rev0 < __rev1);
3478 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3479 return __ret;
3480 }
3481 #endif
3482
3483 #ifdef __LITTLE_ENDIAN__
3484 __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
3485 uint8x16_t __ret;
3486 __ret = (uint8x16_t)(__p0 < __p1);
3487 return __ret;
3488 }
3489 #else
3490 __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
3491 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3492 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3493 uint8x16_t __ret;
3494 __ret = (uint8x16_t)(__rev0 < __rev1);
3495 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3496 return __ret;
3497 }
3498 #endif
3499
3500 #ifdef __LITTLE_ENDIAN__
3501 __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
3502 uint32x4_t __ret;
3503 __ret = (uint32x4_t)(__p0 < __p1);
3504 return __ret;
3505 }
3506 #else
3507 __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
3508 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3509 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3510 uint32x4_t __ret;
3511 __ret = (uint32x4_t)(__rev0 < __rev1);
3512 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3513 return __ret;
3514 }
3515 #endif
3516
3517 #ifdef __LITTLE_ENDIAN__
3518 __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
3519 uint32x4_t __ret;
3520 __ret = (uint32x4_t)(__p0 < __p1);
3521 return __ret;
3522 }
3523 #else
3524 __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
3525 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3526 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3527 uint32x4_t __ret;
3528 __ret = (uint32x4_t)(__rev0 < __rev1);
3529 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3530 return __ret;
3531 }
3532 #endif
3533
3534 #ifdef __LITTLE_ENDIAN__
3535 __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
3536 uint16x8_t __ret;
3537 __ret = (uint16x8_t)(__p0 < __p1);
3538 return __ret;
3539 }
3540 #else
3541 __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
3542 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3543 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3544 uint16x8_t __ret;
3545 __ret = (uint16x8_t)(__rev0 < __rev1);
3546 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3547 return __ret;
3548 }
3549 #endif
3550
3551 #ifdef __LITTLE_ENDIAN__
3552 __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
3553 uint8x8_t __ret;
3554 __ret = (uint8x8_t)(__p0 < __p1);
3555 return __ret;
3556 }
3557 #else
3558 __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
3559 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3560 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3561 uint8x8_t __ret;
3562 __ret = (uint8x8_t)(__rev0 < __rev1);
3563 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3564 return __ret;
3565 }
3566 #endif
3567
3568 #ifdef __LITTLE_ENDIAN__
3569 __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
3570 uint32x2_t __ret;
3571 __ret = (uint32x2_t)(__p0 < __p1);
3572 return __ret;
3573 }
3574 #else
3575 __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
3576 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3577 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3578 uint32x2_t __ret;
3579 __ret = (uint32x2_t)(__rev0 < __rev1);
3580 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3581 return __ret;
3582 }
3583 #endif
3584
3585 #ifdef __LITTLE_ENDIAN__
3586 __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3587 uint16x4_t __ret;
3588 __ret = (uint16x4_t)(__p0 < __p1);
3589 return __ret;
3590 }
3591 #else
3592 __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
3593 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3594 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3595 uint16x4_t __ret;
3596 __ret = (uint16x4_t)(__rev0 < __rev1);
3597 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3598 return __ret;
3599 }
3600 #endif
3601
3602 #ifdef __LITTLE_ENDIAN__
3603 __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
3604 uint8x8_t __ret;
3605 __ret = (uint8x8_t)(__p0 < __p1);
3606 return __ret;
3607 }
3608 #else
3609 __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
3610 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3611 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3612 uint8x8_t __ret;
3613 __ret = (uint8x8_t)(__rev0 < __rev1);
3614 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3615 return __ret;
3616 }
3617 #endif
3618
3619 #ifdef __LITTLE_ENDIAN__
3620 __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
3621 uint32x2_t __ret;
3622 __ret = (uint32x2_t)(__p0 < __p1);
3623 return __ret;
3624 }
3625 #else
3626 __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
3627 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3628 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3629 uint32x2_t __ret;
3630 __ret = (uint32x2_t)(__rev0 < __rev1);
3631 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3632 return __ret;
3633 }
3634 #endif
3635
3636 #ifdef __LITTLE_ENDIAN__
3637 __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
3638 uint32x2_t __ret;
3639 __ret = (uint32x2_t)(__p0 < __p1);
3640 return __ret;
3641 }
3642 #else
3643 __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
3644 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3645 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
3646 uint32x2_t __ret;
3647 __ret = (uint32x2_t)(__rev0 < __rev1);
3648 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3649 return __ret;
3650 }
3651 #endif
3652
3653 #ifdef __LITTLE_ENDIAN__
3654 __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
3655 uint16x4_t __ret;
3656 __ret = (uint16x4_t)(__p0 < __p1);
3657 return __ret;
3658 }
3659 #else
3660 __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
3661 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3662 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3663 uint16x4_t __ret;
3664 __ret = (uint16x4_t)(__rev0 < __rev1);
3665 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3666 return __ret;
3667 }
3668 #endif
3669
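/* vclz / vclzq: count the number of leading zero bits in each lane. */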
3670 #ifdef __LITTLE_ENDIAN__
3671 __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
3672 uint8x16_t __ret;
3673 __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
3674 return __ret;
3675 }
3676 #else
3677 __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
3678 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3679 uint8x16_t __ret;
3680 __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
3681 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3682 return __ret;
3683 }
3684 #endif
3685
3686 #ifdef __LITTLE_ENDIAN__
3687 __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
3688 uint32x4_t __ret;
3689 __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
3690 return __ret;
3691 }
3692 #else
3693 __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
3694 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3695 uint32x4_t __ret;
3696 __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
3697 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3698 return __ret;
3699 }
3700 #endif
3701
3702 #ifdef __LITTLE_ENDIAN__
3703 __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
3704 uint16x8_t __ret;
3705 __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
3706 return __ret;
3707 }
3708 #else
3709 __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
3710 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3711 uint16x8_t __ret;
3712 __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
3713 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3714 return __ret;
3715 }
3716 #endif
3717
3718 #ifdef __LITTLE_ENDIAN__
3719 __ai int8x16_t vclzq_s8(int8x16_t __p0) {
3720 int8x16_t __ret;
3721 __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
3722 return __ret;
3723 }
3724 #else
3725 __ai int8x16_t vclzq_s8(int8x16_t __p0) {
3726 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3727 int8x16_t __ret;
3728 __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
3729 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3730 return __ret;
3731 }
3732 #endif
3733
3734 #ifdef __LITTLE_ENDIAN__
3735 __ai int32x4_t vclzq_s32(int32x4_t __p0) {
3736 int32x4_t __ret;
3737 __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
3738 return __ret;
3739 }
3740 #else
3741 __ai int32x4_t vclzq_s32(int32x4_t __p0) {
3742 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3743 int32x4_t __ret;
3744 __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
3745 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3746 return __ret;
3747 }
3748 #endif
3749
3750 #ifdef __LITTLE_ENDIAN__
3751 __ai int16x8_t vclzq_s16(int16x8_t __p0) {
3752 int16x8_t __ret;
3753 __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
3754 return __ret;
3755 }
3756 #else
3757 __ai int16x8_t vclzq_s16(int16x8_t __p0) {
3758 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3759 int16x8_t __ret;
3760 __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
3761 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3762 return __ret;
3763 }
3764 #endif
3765
3766 #ifdef __LITTLE_ENDIAN__
3767 __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
3768 uint8x8_t __ret;
3769 __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
3770 return __ret;
3771 }
3772 #else
3773 __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
3774 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3775 uint8x8_t __ret;
3776 __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
3777 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3778 return __ret;
3779 }
3780 #endif
3781
3782 #ifdef __LITTLE_ENDIAN__
3783 __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
3784 uint32x2_t __ret;
3785 __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
3786 return __ret;
3787 }
3788 #else
3789 __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
3790 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3791 uint32x2_t __ret;
3792 __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
3793 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3794 return __ret;
3795 }
3796 #endif
3797
3798 #ifdef __LITTLE_ENDIAN__
3799 __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
3800 uint16x4_t __ret;
3801 __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
3802 return __ret;
3803 }
3804 #else
3805 __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
3806 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3807 uint16x4_t __ret;
3808 __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
3809 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3810 return __ret;
3811 }
3812 #endif
3813
3814 #ifdef __LITTLE_ENDIAN__
3815 __ai int8x8_t vclz_s8(int8x8_t __p0) {
3816 int8x8_t __ret;
3817 __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
3818 return __ret;
3819 }
3820 #else
3821 __ai int8x8_t vclz_s8(int8x8_t __p0) {
3822 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3823 int8x8_t __ret;
3824 __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
3825 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3826 return __ret;
3827 }
3828 #endif
3829
3830 #ifdef __LITTLE_ENDIAN__
3831 __ai int32x2_t vclz_s32(int32x2_t __p0) {
3832 int32x2_t __ret;
3833 __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
3834 return __ret;
3835 }
3836 #else
3837 __ai int32x2_t vclz_s32(int32x2_t __p0) {
3838 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
3839 int32x2_t __ret;
3840 __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
3841 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
3842 return __ret;
3843 }
3844 #endif
3845
3846 #ifdef __LITTLE_ENDIAN__
3847 __ai int16x4_t vclz_s16(int16x4_t __p0) {
3848 int16x4_t __ret;
3849 __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
3850 return __ret;
3851 }
3852 #else
3853 __ai int16x4_t vclz_s16(int16x4_t __p0) {
3854 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3855 int16x4_t __ret;
3856 __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
3857 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
3858 return __ret;
3859 }
3860 #endif
3861
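/*
 * vcnt / vcntq: population count. Each byte lane of the result holds the
 * number of set bits in the corresponding byte of the input; only 8-bit
 * element types (poly8, uint8, int8) are provided.
 */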
3862 #ifdef __LITTLE_ENDIAN__
3863 __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
3864 poly8x8_t __ret;
3865 __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
3866 return __ret;
3867 }
3868 #else
3869 __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
3870 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3871 poly8x8_t __ret;
3872 __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
3873 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3874 return __ret;
3875 }
3876 #endif
3877
3878 #ifdef __LITTLE_ENDIAN__
3879 __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
3880 poly8x16_t __ret;
3881 __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
3882 return __ret;
3883 }
3884 #else
3885 __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
3886 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3887 poly8x16_t __ret;
3888 __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
3889 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3890 return __ret;
3891 }
3892 #endif
3893
3894 #ifdef __LITTLE_ENDIAN__
3895 __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
3896 uint8x16_t __ret;
3897 __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
3898 return __ret;
3899 }
3900 #else
3901 __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
3902 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3903 uint8x16_t __ret;
3904 __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
3905 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3906 return __ret;
3907 }
3908 #endif
3909
3910 #ifdef __LITTLE_ENDIAN__
3911 __ai int8x16_t vcntq_s8(int8x16_t __p0) {
3912 int8x16_t __ret;
3913 __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
3914 return __ret;
3915 }
3916 #else
3917 __ai int8x16_t vcntq_s8(int8x16_t __p0) {
3918 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3919 int8x16_t __ret;
3920 __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
3921 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3922 return __ret;
3923 }
3924 #endif
3925
3926 #ifdef __LITTLE_ENDIAN__
3927 __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
3928 uint8x8_t __ret;
3929 __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
3930 return __ret;
3931 }
3932 #else
3933 __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
3934 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3935 uint8x8_t __ret;
3936 __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
3937 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3938 return __ret;
3939 }
3940 #endif
3941
3942 #ifdef __LITTLE_ENDIAN__
3943 __ai int8x8_t vcnt_s8(int8x8_t __p0) {
3944 int8x8_t __ret;
3945 __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
3946 return __ret;
3947 }
3948 #else
3949 __ai int8x8_t vcnt_s8(int8x8_t __p0) {
3950 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3951 int8x8_t __ret;
3952 __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
3953 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3954 return __ret;
3955 }
3956 #endif
3957
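/*
 * vcombine: concatenate two 64-bit vectors into one 128-bit vector, with the
 * first operand in the low half and the second in the high half. The
 * __noswap_* variants defined for big-endian builds take operands whose lanes
 * are already in builtin order, so they skip the usual reversal.
 */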
3958 #ifdef __LITTLE_ENDIAN__
3959 __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
3960 poly8x16_t __ret;
3961 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
3962 return __ret;
3963 }
3964 #else
3965 __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
3966 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
3967 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
3968 poly8x16_t __ret;
3969 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
3970 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
3971 return __ret;
3972 }
3973 #endif
3974
3975 #ifdef __LITTLE_ENDIAN__
3976 __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
3977 poly16x8_t __ret;
3978 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
3979 return __ret;
3980 }
3981 #else
3982 __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
3983 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
3984 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
3985 poly16x8_t __ret;
3986 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
3987 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
3988 return __ret;
3989 }
3990 #endif
3991
3992 #ifdef __LITTLE_ENDIAN__
3993 __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
3994 uint8x16_t __ret;
3995 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
3996 return __ret;
3997 }
3998 #else
3999 __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
4000 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
4001 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
4002 uint8x16_t __ret;
4003 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4004 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
4005 return __ret;
4006 }
4007 __ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
4008 uint8x16_t __ret;
4009 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4010 return __ret;
4011 }
4012 #endif
4013
4014 #ifdef __LITTLE_ENDIAN__
4015 __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
4016 uint32x4_t __ret;
4017 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4018 return __ret;
4019 }
4020 #else
4021 __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
4022 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4023 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
4024 uint32x4_t __ret;
4025 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
4026 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4027 return __ret;
4028 }
4029 __ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
4030 uint32x4_t __ret;
4031 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4032 return __ret;
4033 }
4034 #endif
4035
4036 #ifdef __LITTLE_ENDIAN__
4037 __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
4038 uint64x2_t __ret;
4039 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4040 return __ret;
4041 }
4042 #else
4043 __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
4044 uint64x2_t __ret;
4045 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4046 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4047 return __ret;
4048 }
4049 #endif
4050
4051 #ifdef __LITTLE_ENDIAN__
4052 __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
4053 uint16x8_t __ret;
4054 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4055 return __ret;
4056 }
4057 #else
4058 __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
4059 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4060 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
4061 uint16x8_t __ret;
4062 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
4063 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
4064 return __ret;
4065 }
4066 __ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
4067 uint16x8_t __ret;
4068 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4069 return __ret;
4070 }
4071 #endif
4072
4073 #ifdef __LITTLE_ENDIAN__
4074 __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
4075 int8x16_t __ret;
4076 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4077 return __ret;
4078 }
4079 #else
4080 __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
4081 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
4082 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
4083 int8x16_t __ret;
4084 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4085 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
4086 return __ret;
4087 }
4088 __ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
4089 int8x16_t __ret;
4090 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4091 return __ret;
4092 }
4093 #endif
4094
4095 #ifdef __LITTLE_ENDIAN__
4096 __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
4097 float32x4_t __ret;
4098 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4099 return __ret;
4100 }
4101 #else
4102 __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
4103 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4104 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
4105 float32x4_t __ret;
4106 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
4107 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4108 return __ret;
4109 }
4110 __ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
4111 float32x4_t __ret;
4112 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4113 return __ret;
4114 }
4115 #endif
4116
4117 #ifdef __LITTLE_ENDIAN__
4118 __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
4119 float16x8_t __ret;
4120 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4121 return __ret;
4122 }
4123 #else
4124 __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
4125 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4126 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
4127 float16x8_t __ret;
4128 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
4129 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
4130 return __ret;
4131 }
4132 __ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
4133 float16x8_t __ret;
4134 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4135 return __ret;
4136 }
4137 #endif
4138
4139 #ifdef __LITTLE_ENDIAN__
4140 __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
4141 int32x4_t __ret;
4142 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4143 return __ret;
4144 }
4145 #else
4146 __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
4147 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4148 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
4149 int32x4_t __ret;
4150 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
4151 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4152 return __ret;
4153 }
4154 __ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
4155 int32x4_t __ret;
4156 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
4157 return __ret;
4158 }
4159 #endif
4160
4161 #ifdef __LITTLE_ENDIAN__
4162 __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
4163 int64x2_t __ret;
4164 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4165 return __ret;
4166 }
4167 #else
4168 __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
4169 int64x2_t __ret;
4170 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
4171 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4172 return __ret;
4173 }
4174 #endif
4175
4176 #ifdef __LITTLE_ENDIAN__
4177 __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
4178 int16x8_t __ret;
4179 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4180 return __ret;
4181 }
4182 #else
4183 __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
4184 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4185 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
4186 int16x8_t __ret;
4187 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
4188 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
4189 return __ret;
4190 }
4191 __ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
4192 int16x8_t __ret;
4193 __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
4194 return __ret;
4195 }
4196 #endif
4197
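/*
 * vcreate: reinterpret the 64-bit integer __p0 as a 64-bit vector of the
 * requested element type. No lanes are moved, so the little- and big-endian
 * definitions are identical.
 */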
4198 #ifdef __LITTLE_ENDIAN__
4199 __ai poly8x8_t vcreate_p8(uint64_t __p0) {
4200 poly8x8_t __ret;
4201 __ret = (poly8x8_t)(__p0);
4202 return __ret;
4203 }
4204 #else
4205 __ai poly8x8_t vcreate_p8(uint64_t __p0) {
4206 poly8x8_t __ret;
4207 __ret = (poly8x8_t)(__p0);
4208 return __ret;
4209 }
4210 #endif
4211
4212 #ifdef __LITTLE_ENDIAN__
4213 __ai poly16x4_t vcreate_p16(uint64_t __p0) {
4214 poly16x4_t __ret;
4215 __ret = (poly16x4_t)(__p0);
4216 return __ret;
4217 }
4218 #else
4219 __ai poly16x4_t vcreate_p16(uint64_t __p0) {
4220 poly16x4_t __ret;
4221 __ret = (poly16x4_t)(__p0);
4222 return __ret;
4223 }
4224 #endif
4225
4226 #ifdef __LITTLE_ENDIAN__
4227 __ai uint8x8_t vcreate_u8(uint64_t __p0) {
4228 uint8x8_t __ret;
4229 __ret = (uint8x8_t)(__p0);
4230 return __ret;
4231 }
4232 #else
4233 __ai uint8x8_t vcreate_u8(uint64_t __p0) {
4234 uint8x8_t __ret;
4235 __ret = (uint8x8_t)(__p0);
4236 return __ret;
4237 }
4238 #endif
4239
4240 #ifdef __LITTLE_ENDIAN__
4241 __ai uint32x2_t vcreate_u32(uint64_t __p0) {
4242 uint32x2_t __ret;
4243 __ret = (uint32x2_t)(__p0);
4244 return __ret;
4245 }
4246 #else
4247 __ai uint32x2_t vcreate_u32(uint64_t __p0) {
4248 uint32x2_t __ret;
4249 __ret = (uint32x2_t)(__p0);
4250 return __ret;
4251 }
4252 #endif
4253
4254 #ifdef __LITTLE_ENDIAN__
4255 __ai uint64x1_t vcreate_u64(uint64_t __p0) {
4256 uint64x1_t __ret;
4257 __ret = (uint64x1_t)(__p0);
4258 return __ret;
4259 }
4260 #else
4261 __ai uint64x1_t vcreate_u64(uint64_t __p0) {
4262 uint64x1_t __ret;
4263 __ret = (uint64x1_t)(__p0);
4264 return __ret;
4265 }
4266 #endif
4267
4268 #ifdef __LITTLE_ENDIAN__
4269 __ai uint16x4_t vcreate_u16(uint64_t __p0) {
4270 uint16x4_t __ret;
4271 __ret = (uint16x4_t)(__p0);
4272 return __ret;
4273 }
4274 #else
4275 __ai uint16x4_t vcreate_u16(uint64_t __p0) {
4276 uint16x4_t __ret;
4277 __ret = (uint16x4_t)(__p0);
4278 return __ret;
4279 }
4280 #endif
4281
4282 #ifdef __LITTLE_ENDIAN__
4283 __ai int8x8_t vcreate_s8(uint64_t __p0) {
4284 int8x8_t __ret;
4285 __ret = (int8x8_t)(__p0);
4286 return __ret;
4287 }
4288 #else
4289 __ai int8x8_t vcreate_s8(uint64_t __p0) {
4290 int8x8_t __ret;
4291 __ret = (int8x8_t)(__p0);
4292 return __ret;
4293 }
4294 #endif
4295
4296 #ifdef __LITTLE_ENDIAN__
4297 __ai float32x2_t vcreate_f32(uint64_t __p0) {
4298 float32x2_t __ret;
4299 __ret = (float32x2_t)(__p0);
4300 return __ret;
4301 }
4302 #else
4303 __ai float32x2_t vcreate_f32(uint64_t __p0) {
4304 float32x2_t __ret;
4305 __ret = (float32x2_t)(__p0);
4306 return __ret;
4307 }
4308 #endif
4309
4310 #ifdef __LITTLE_ENDIAN__
4311 __ai float16x4_t vcreate_f16(uint64_t __p0) {
4312 float16x4_t __ret;
4313 __ret = (float16x4_t)(__p0);
4314 return __ret;
4315 }
4316 #else
4317 __ai float16x4_t vcreate_f16(uint64_t __p0) {
4318 float16x4_t __ret;
4319 __ret = (float16x4_t)(__p0);
4320 return __ret;
4321 }
4322 #endif
4323
4324 #ifdef __LITTLE_ENDIAN__
4325 __ai int32x2_t vcreate_s32(uint64_t __p0) {
4326 int32x2_t __ret;
4327 __ret = (int32x2_t)(__p0);
4328 return __ret;
4329 }
4330 #else
4331 __ai int32x2_t vcreate_s32(uint64_t __p0) {
4332 int32x2_t __ret;
4333 __ret = (int32x2_t)(__p0);
4334 return __ret;
4335 }
4336 #endif
4337
4338 #ifdef __LITTLE_ENDIAN__
4339 __ai int64x1_t vcreate_s64(uint64_t __p0) {
4340 int64x1_t __ret;
4341 __ret = (int64x1_t)(__p0);
4342 return __ret;
4343 }
4344 #else
4345 __ai int64x1_t vcreate_s64(uint64_t __p0) {
4346 int64x1_t __ret;
4347 __ret = (int64x1_t)(__p0);
4348 return __ret;
4349 }
4350 #endif
4351
4352 #ifdef __LITTLE_ENDIAN__
4353 __ai int16x4_t vcreate_s16(uint64_t __p0) {
4354 int16x4_t __ret;
4355 __ret = (int16x4_t)(__p0);
4356 return __ret;
4357 }
4358 #else
4359 __ai int16x4_t vcreate_s16(uint64_t __p0) {
4360 int16x4_t __ret;
4361 __ret = (int16x4_t)(__p0);
4362 return __ret;
4363 }
4364 #endif
4365
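/* vcvt_f32 / vcvtq_f32: convert each unsigned or signed 32-bit integer lane
 * to single-precision floating point. */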
4366 #ifdef __LITTLE_ENDIAN__
4367 __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
4368 float32x4_t __ret;
4369 __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
4370 return __ret;
4371 }
4372 #else
4373 __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
4374 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4375 float32x4_t __ret;
4376 __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
4377 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4378 return __ret;
4379 }
4380 #endif
4381
4382 #ifdef __LITTLE_ENDIAN__
4383 __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
4384 float32x4_t __ret;
4385 __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
4386 return __ret;
4387 }
4388 #else
4389 __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
4390 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4391 float32x4_t __ret;
4392 __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
4393 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4394 return __ret;
4395 }
4396 #endif
4397
4398 #ifdef __LITTLE_ENDIAN__
4399 __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
4400 float32x2_t __ret;
4401 __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
4402 return __ret;
4403 }
4404 #else
4405 __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
4406 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4407 float32x2_t __ret;
4408 __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
4409 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4410 return __ret;
4411 }
4412 #endif
4413
4414 #ifdef __LITTLE_ENDIAN__
4415 __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
4416 float32x2_t __ret;
4417 __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
4418 return __ret;
4419 }
4420 #else
4421 __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
4422 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4423 float32x2_t __ret;
4424 __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
4425 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4426 return __ret;
4427 }
4428 #endif
4429
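/*
 * vcvt_n_* / vcvtq_n_*: fixed-point conversions. The second argument is the
 * number of fractional bits and must be a compile-time constant (1..32 for
 * 32-bit lanes), which is why these are macros rather than inline functions.
 * Converting to float scales by 1/2^n; converting from float scales by 2^n.
 * Illustrative use (names and values are hypothetical, not part of this header):
 *
 *   int32x2_t q;                              // two Q16.16 fixed-point values
 *   float32x2_t f = vcvt_n_f32_s32(q, 16);    // f[i] == (float)q[i] / 65536.0f
 */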
4430 #ifdef __LITTLE_ENDIAN__
4431 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
4432 uint32x4_t __s0 = __p0; \
4433 float32x4_t __ret; \
4434 __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
4435 __ret; \
4436 })
4437 #else
4438 #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
4439 uint32x4_t __s0 = __p0; \
4440 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4441 float32x4_t __ret; \
4442 __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
4443 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4444 __ret; \
4445 })
4446 #endif
4447
4448 #ifdef __LITTLE_ENDIAN__
4449 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
4450 int32x4_t __s0 = __p0; \
4451 float32x4_t __ret; \
4452 __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
4453 __ret; \
4454 })
4455 #else
4456 #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
4457 int32x4_t __s0 = __p0; \
4458 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4459 float32x4_t __ret; \
4460 __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
4461 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4462 __ret; \
4463 })
4464 #endif
4465
4466 #ifdef __LITTLE_ENDIAN__
4467 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
4468 uint32x2_t __s0 = __p0; \
4469 float32x2_t __ret; \
4470 __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
4471 __ret; \
4472 })
4473 #else
4474 #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
4475 uint32x2_t __s0 = __p0; \
4476 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4477 float32x2_t __ret; \
4478 __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
4479 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4480 __ret; \
4481 })
4482 #endif
4483
4484 #ifdef __LITTLE_ENDIAN__
4485 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
4486 int32x2_t __s0 = __p0; \
4487 float32x2_t __ret; \
4488 __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
4489 __ret; \
4490 })
4491 #else
4492 #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
4493 int32x2_t __s0 = __p0; \
4494 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4495 float32x2_t __ret; \
4496 __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
4497 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4498 __ret; \
4499 })
4500 #endif
4501
4502 #ifdef __LITTLE_ENDIAN__
4503 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
4504 float32x4_t __s0 = __p0; \
4505 int32x4_t __ret; \
4506 __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
4507 __ret; \
4508 })
4509 #else
4510 #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
4511 float32x4_t __s0 = __p0; \
4512 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4513 int32x4_t __ret; \
4514 __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
4515 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4516 __ret; \
4517 })
4518 #endif
4519
4520 #ifdef __LITTLE_ENDIAN__
4521 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
4522 float32x2_t __s0 = __p0; \
4523 int32x2_t __ret; \
4524 __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
4525 __ret; \
4526 })
4527 #else
4528 #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
4529 float32x2_t __s0 = __p0; \
4530 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4531 int32x2_t __ret; \
4532 __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
4533 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4534 __ret; \
4535 })
4536 #endif
4537
4538 #ifdef __LITTLE_ENDIAN__
4539 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
4540 float32x4_t __s0 = __p0; \
4541 uint32x4_t __ret; \
4542 __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
4543 __ret; \
4544 })
4545 #else
4546 #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
4547 float32x4_t __s0 = __p0; \
4548 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4549 uint32x4_t __ret; \
4550 __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
4551 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4552 __ret; \
4553 })
4554 #endif
4555
4556 #ifdef __LITTLE_ENDIAN__
4557 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
4558 float32x2_t __s0 = __p0; \
4559 uint32x2_t __ret; \
4560 __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
4561 __ret; \
4562 })
4563 #else
4564 #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
4565 float32x2_t __s0 = __p0; \
4566 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4567 uint32x2_t __ret; \
4568 __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
4569 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4570 __ret; \
4571 })
4572 #endif
4573
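/* vcvt_s32 / vcvt_u32 (and the q forms): convert each float lane to a 32-bit
 * integer, rounding toward zero. */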
4574 #ifdef __LITTLE_ENDIAN__
4575 __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
4576 int32x4_t __ret;
4577 __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
4578 return __ret;
4579 }
4580 #else
4581 __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
4582 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4583 int32x4_t __ret;
4584 __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
4585 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4586 return __ret;
4587 }
4588 #endif
4589
4590 #ifdef __LITTLE_ENDIAN__
4591 __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
4592 int32x2_t __ret;
4593 __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
4594 return __ret;
4595 }
4596 #else
4597 __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
4598 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4599 int32x2_t __ret;
4600 __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
4601 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4602 return __ret;
4603 }
4604 #endif
4605
4606 #ifdef __LITTLE_ENDIAN__
4607 __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
4608 uint32x4_t __ret;
4609 __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
4610 return __ret;
4611 }
4612 #else
4613 __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
4614 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
4615 uint32x4_t __ret;
4616 __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
4617 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
4618 return __ret;
4619 }
4620 #endif
4621
4622 #ifdef __LITTLE_ENDIAN__
4623 __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
4624 uint32x2_t __ret;
4625 __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
4626 return __ret;
4627 }
4628 #else
4629 __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
4630 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
4631 uint32x2_t __ret;
4632 __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
4633 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
4634 return __ret;
4635 }
4636 #endif
4637
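/*
 * vdup_lane / vdupq_lane: broadcast the lane selected by the constant index
 * __p1 across every lane of the result. The index must be a compile-time
 * constant, so these are implemented as macros.
 */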
4638 #ifdef __LITTLE_ENDIAN__
4639 #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
4640 poly8x8_t __s0 = __p0; \
4641 poly8x8_t __ret; \
4642 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4643 __ret; \
4644 })
4645 #else
4646 #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
4647 poly8x8_t __s0 = __p0; \
4648 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4649 poly8x8_t __ret; \
4650 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4651 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4652 __ret; \
4653 })
4654 #endif
4655
4656 #ifdef __LITTLE_ENDIAN__
4657 #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
4658 poly16x4_t __s0 = __p0; \
4659 poly16x4_t __ret; \
4660 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4661 __ret; \
4662 })
4663 #else
4664 #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
4665 poly16x4_t __s0 = __p0; \
4666 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4667 poly16x4_t __ret; \
4668 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4669 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4670 __ret; \
4671 })
4672 #endif
4673
4674 #ifdef __LITTLE_ENDIAN__
4675 #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
4676 poly8x8_t __s0 = __p0; \
4677 poly8x16_t __ret; \
4678 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4679 __ret; \
4680 })
4681 #else
4682 #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
4683 poly8x8_t __s0 = __p0; \
4684 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4685 poly8x16_t __ret; \
4686 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4687 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
4688 __ret; \
4689 })
4690 #endif
4691
4692 #ifdef __LITTLE_ENDIAN__
4693 #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
4694 poly16x4_t __s0 = __p0; \
4695 poly16x8_t __ret; \
4696 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4697 __ret; \
4698 })
4699 #else
4700 #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
4701 poly16x4_t __s0 = __p0; \
4702 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4703 poly16x8_t __ret; \
4704 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4705 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4706 __ret; \
4707 })
4708 #endif
4709
4710 #ifdef __LITTLE_ENDIAN__
4711 #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
4712 uint8x8_t __s0 = __p0; \
4713 uint8x16_t __ret; \
4714 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4715 __ret; \
4716 })
4717 #else
4718 #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
4719 uint8x8_t __s0 = __p0; \
4720 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4721 uint8x16_t __ret; \
4722 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4723 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
4724 __ret; \
4725 })
4726 #endif
4727
4728 #ifdef __LITTLE_ENDIAN__
4729 #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
4730 uint32x2_t __s0 = __p0; \
4731 uint32x4_t __ret; \
4732 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4733 __ret; \
4734 })
4735 #else
4736 #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
4737 uint32x2_t __s0 = __p0; \
4738 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4739 uint32x4_t __ret; \
4740 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4741 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4742 __ret; \
4743 })
4744 #endif
4745
4746 #ifdef __LITTLE_ENDIAN__
4747 #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
4748 uint64x1_t __s0 = __p0; \
4749 uint64x2_t __ret; \
4750 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4751 __ret; \
4752 })
4753 #else
4754 #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
4755 uint64x1_t __s0 = __p0; \
4756 uint64x2_t __ret; \
4757 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4758 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4759 __ret; \
4760 })
4761 #endif
4762
4763 #ifdef __LITTLE_ENDIAN__
4764 #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
4765 uint16x4_t __s0 = __p0; \
4766 uint16x8_t __ret; \
4767 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4768 __ret; \
4769 })
4770 #else
4771 #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
4772 uint16x4_t __s0 = __p0; \
4773 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4774 uint16x8_t __ret; \
4775 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4776 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4777 __ret; \
4778 })
4779 #endif
4780
4781 #ifdef __LITTLE_ENDIAN__
4782 #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
4783 int8x8_t __s0 = __p0; \
4784 int8x16_t __ret; \
4785 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4786 __ret; \
4787 })
4788 #else
4789 #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
4790 int8x8_t __s0 = __p0; \
4791 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4792 int8x16_t __ret; \
4793 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4794 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
4795 __ret; \
4796 })
4797 #endif
4798
4799 #ifdef __LITTLE_ENDIAN__
4800 #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
4801 float32x2_t __s0 = __p0; \
4802 float32x4_t __ret; \
4803 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4804 __ret; \
4805 })
4806 #else
4807 #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
4808 float32x2_t __s0 = __p0; \
4809 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4810 float32x4_t __ret; \
4811 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4812 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4813 __ret; \
4814 })
4815 #endif
4816
4817 #ifdef __LITTLE_ENDIAN__
4818 #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
4819 int32x2_t __s0 = __p0; \
4820 int32x4_t __ret; \
4821 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4822 __ret; \
4823 })
4824 #else
4825 #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
4826 int32x2_t __s0 = __p0; \
4827 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4828 int32x4_t __ret; \
4829 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4830 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4831 __ret; \
4832 })
4833 #endif
4834
4835 #ifdef __LITTLE_ENDIAN__
4836 #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
4837 int64x1_t __s0 = __p0; \
4838 int64x2_t __ret; \
4839 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4840 __ret; \
4841 })
4842 #else
4843 #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
4844 int64x1_t __s0 = __p0; \
4845 int64x2_t __ret; \
4846 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4847 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4848 __ret; \
4849 })
4850 #endif
4851
4852 #ifdef __LITTLE_ENDIAN__
4853 #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
4854 int16x4_t __s0 = __p0; \
4855 int16x8_t __ret; \
4856 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4857 __ret; \
4858 })
4859 #else
4860 #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
4861 int16x4_t __s0 = __p0; \
4862 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4863 int16x8_t __ret; \
4864 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4865 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4866 __ret; \
4867 })
4868 #endif
4869
4870 #ifdef __LITTLE_ENDIAN__
4871 #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
4872 uint8x8_t __s0 = __p0; \
4873 uint8x8_t __ret; \
4874 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4875 __ret; \
4876 })
4877 #else
4878 #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
4879 uint8x8_t __s0 = __p0; \
4880 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4881 uint8x8_t __ret; \
4882 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4883 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4884 __ret; \
4885 })
4886 #endif
4887
4888 #ifdef __LITTLE_ENDIAN__
4889 #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
4890 uint32x2_t __s0 = __p0; \
4891 uint32x2_t __ret; \
4892 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4893 __ret; \
4894 })
4895 #else
4896 #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
4897 uint32x2_t __s0 = __p0; \
4898 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4899 uint32x2_t __ret; \
4900 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
4901 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4902 __ret; \
4903 })
4904 #endif
4905
4906 #ifdef __LITTLE_ENDIAN__
4907 #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
4908 uint64x1_t __s0 = __p0; \
4909 uint64x1_t __ret; \
4910 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
4911 __ret; \
4912 })
4913 #else
4914 #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
4915 uint64x1_t __s0 = __p0; \
4916 uint64x1_t __ret; \
4917 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
4918 __ret; \
4919 })
4920 #endif
4921
4922 #ifdef __LITTLE_ENDIAN__
4923 #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
4924 uint16x4_t __s0 = __p0; \
4925 uint16x4_t __ret; \
4926 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
4927 __ret; \
4928 })
4929 #else
4930 #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
4931 uint16x4_t __s0 = __p0; \
4932 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
4933 uint16x4_t __ret; \
4934 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
4935 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
4936 __ret; \
4937 })
4938 #endif
4939
4940 #ifdef __LITTLE_ENDIAN__
4941 #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
4942 int8x8_t __s0 = __p0; \
4943 int8x8_t __ret; \
4944 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4945 __ret; \
4946 })
4947 #else
4948 #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
4949 int8x8_t __s0 = __p0; \
4950 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
4951 int8x8_t __ret; \
4952 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
4953 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
4954 __ret; \
4955 })
4956 #endif
4957
4958 #ifdef __LITTLE_ENDIAN__
4959 #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
4960 float32x2_t __s0 = __p0; \
4961 float32x2_t __ret; \
4962 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4963 __ret; \
4964 })
4965 #else
4966 #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
4967 float32x2_t __s0 = __p0; \
4968 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4969 float32x2_t __ret; \
4970 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
4971 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4972 __ret; \
4973 })
4974 #endif
4975
4976 #ifdef __LITTLE_ENDIAN__
4977 #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
4978 int32x2_t __s0 = __p0; \
4979 int32x2_t __ret; \
4980 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
4981 __ret; \
4982 })
4983 #else
4984 #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
4985 int32x2_t __s0 = __p0; \
4986 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
4987 int32x2_t __ret; \
4988 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
4989 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
4990 __ret; \
4991 })
4992 #endif
4993
4994 #ifdef __LITTLE_ENDIAN__
4995 #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
4996 int64x1_t __s0 = __p0; \
4997 int64x1_t __ret; \
4998 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
4999 __ret; \
5000 })
5001 #else
5002 #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
5003 int64x1_t __s0 = __p0; \
5004 int64x1_t __ret; \
5005 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
5006 __ret; \
5007 })
5008 #endif
5009
5010 #ifdef __LITTLE_ENDIAN__
5011 #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
5012 int16x4_t __s0 = __p0; \
5013 int16x4_t __ret; \
5014 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
5015 __ret; \
5016 })
5017 #else
5018 #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
5019 int16x4_t __s0 = __p0; \
5020 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5021 int16x4_t __ret; \
5022 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
5023 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5024 __ret; \
5025 })
5026 #endif
5027
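/*
 * Illustrative sketch (hypothetical helper, not defined by this header):
 * vdup_lane_* broadcasts one lane of a 64-bit vector across a 64-bit
 * result, and vdupq_lane_* broadcasts it across a 128-bit result.
 *
 *   uint32x4_t splat_lane1(uint32x2_t v) {
 *       // result = { v[1], v[1], v[1], v[1] }
 *       return vdupq_lane_u32(v, 1);
 *   }
 */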
5028 #ifdef __LITTLE_ENDIAN__
5029 __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
5030 poly8x8_t __ret;
5031 __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5032 return __ret;
5033 }
5034 #else
5035 __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
5036 poly8x8_t __ret;
5037 __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5038 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5039 return __ret;
5040 }
5041 #endif
5042
5043 #ifdef __LITTLE_ENDIAN__
5044 __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
5045 poly16x4_t __ret;
5046 __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
5047 return __ret;
5048 }
5049 #else
5050 __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
5051 poly16x4_t __ret;
5052 __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
5053 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5054 return __ret;
5055 }
5056 #endif
5057
5058 #ifdef __LITTLE_ENDIAN__
5059 __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
5060 poly8x16_t __ret;
5061 __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5062 return __ret;
5063 }
5064 #else
5065 __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
5066 poly8x16_t __ret;
5067 __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5068 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5069 return __ret;
5070 }
5071 #endif
5072
5073 #ifdef __LITTLE_ENDIAN__
5074 __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
5075 poly16x8_t __ret;
5076 __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5077 return __ret;
5078 }
5079 #else
5080 __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
5081 poly16x8_t __ret;
5082 __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5083 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5084 return __ret;
5085 }
5086 #endif
5087
5088 #ifdef __LITTLE_ENDIAN__
5089 __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
5090 uint8x16_t __ret;
5091 __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5092 return __ret;
5093 }
5094 #else
5095 __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
5096 uint8x16_t __ret;
5097 __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5098 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5099 return __ret;
5100 }
5101 #endif
5102
5103 #ifdef __LITTLE_ENDIAN__
5104 __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
5105 uint32x4_t __ret;
5106 __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
5107 return __ret;
5108 }
5109 #else
5110 __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
5111 uint32x4_t __ret;
5112 __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
5113 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5114 return __ret;
5115 }
5116 #endif
5117
5118 #ifdef __LITTLE_ENDIAN__
5119 __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
5120 uint64x2_t __ret;
5121 __ret = (uint64x2_t) {__p0, __p0};
5122 return __ret;
5123 }
5124 #else
5125 __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
5126 uint64x2_t __ret;
5127 __ret = (uint64x2_t) {__p0, __p0};
5128 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5129 return __ret;
5130 }
5131 #endif
5132
5133 #ifdef __LITTLE_ENDIAN__
5134 __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
5135 uint16x8_t __ret;
5136 __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5137 return __ret;
5138 }
5139 #else
5140 __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
5141 uint16x8_t __ret;
5142 __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5143 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5144 return __ret;
5145 }
5146 #endif
5147
5148 #ifdef __LITTLE_ENDIAN__
5149 __ai int8x16_t vdupq_n_s8(int8_t __p0) {
5150 int8x16_t __ret;
5151 __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5152 return __ret;
5153 }
5154 #else
5155 __ai int8x16_t vdupq_n_s8(int8_t __p0) {
5156 int8x16_t __ret;
5157 __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5158 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5159 return __ret;
5160 }
5161 #endif
5162
5163 #ifdef __LITTLE_ENDIAN__
5164 __ai float32x4_t vdupq_n_f32(float32_t __p0) {
5165 float32x4_t __ret;
5166 __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
5167 return __ret;
5168 }
5169 #else
5170 __ai float32x4_t vdupq_n_f32(float32_t __p0) {
5171 float32x4_t __ret;
5172 __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
5173 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5174 return __ret;
5175 }
5176 #endif
5177
5178 #ifdef __LITTLE_ENDIAN__
5179 #define vdupq_n_f16(__p0) __extension__ ({ \
5180 float16_t __s0 = __p0; \
5181 float16x8_t __ret; \
5182 __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
5183 __ret; \
5184 })
5185 #else
5186 #define vdupq_n_f16(__p0) __extension__ ({ \
5187 float16_t __s0 = __p0; \
5188 float16x8_t __ret; \
5189 __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
5190 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5191 __ret; \
5192 })
5193 #endif
5194
5195 #ifdef __LITTLE_ENDIAN__
5196 __ai int32x4_t vdupq_n_s32(int32_t __p0) {
5197 int32x4_t __ret;
5198 __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
5199 return __ret;
5200 }
5201 #else
5202 __ai int32x4_t vdupq_n_s32(int32_t __p0) {
5203 int32x4_t __ret;
5204 __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
5205 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5206 return __ret;
5207 }
5208 #endif
5209
5210 #ifdef __LITTLE_ENDIAN__
5211 __ai int64x2_t vdupq_n_s64(int64_t __p0) {
5212 int64x2_t __ret;
5213 __ret = (int64x2_t) {__p0, __p0};
5214 return __ret;
5215 }
5216 #else
5217 __ai int64x2_t vdupq_n_s64(int64_t __p0) {
5218 int64x2_t __ret;
5219 __ret = (int64x2_t) {__p0, __p0};
5220 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5221 return __ret;
5222 }
5223 #endif
5224
5225 #ifdef __LITTLE_ENDIAN__
5226 __ai int16x8_t vdupq_n_s16(int16_t __p0) {
5227 int16x8_t __ret;
5228 __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5229 return __ret;
5230 }
5231 #else
5232 __ai int16x8_t vdupq_n_s16(int16_t __p0) {
5233 int16x8_t __ret;
5234 __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5235 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5236 return __ret;
5237 }
5238 #endif
5239
5240 #ifdef __LITTLE_ENDIAN__
5241 __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
5242 uint8x8_t __ret;
5243 __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5244 return __ret;
5245 }
5246 #else
5247 __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
5248 uint8x8_t __ret;
5249 __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5250 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5251 return __ret;
5252 }
5253 #endif
5254
5255 #ifdef __LITTLE_ENDIAN__
5256 __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
5257 uint32x2_t __ret;
5258 __ret = (uint32x2_t) {__p0, __p0};
5259 return __ret;
5260 }
5261 #else
5262 __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
5263 uint32x2_t __ret;
5264 __ret = (uint32x2_t) {__p0, __p0};
5265 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5266 return __ret;
5267 }
5268 #endif
5269
5270 #ifdef __LITTLE_ENDIAN__
5271 __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
5272 uint64x1_t __ret;
5273 __ret = (uint64x1_t) {__p0};
5274 return __ret;
5275 }
5276 #else
5277 __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
5278 uint64x1_t __ret;
5279 __ret = (uint64x1_t) {__p0};
5280 return __ret;
5281 }
5282 #endif
5283
5284 #ifdef __LITTLE_ENDIAN__
5285 __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
5286 uint16x4_t __ret;
5287 __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
5288 return __ret;
5289 }
5290 #else
5291 __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
5292 uint16x4_t __ret;
5293 __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
5294 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5295 return __ret;
5296 }
5297 #endif
5298
5299 #ifdef __LITTLE_ENDIAN__
5300 __ai int8x8_t vdup_n_s8(int8_t __p0) {
5301 int8x8_t __ret;
5302 __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5303 return __ret;
5304 }
5305 #else
5306 __ai int8x8_t vdup_n_s8(int8_t __p0) {
5307 int8x8_t __ret;
5308 __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
5309 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5310 return __ret;
5311 }
5312 #endif
5313
5314 #ifdef __LITTLE_ENDIAN__
5315 __ai float32x2_t vdup_n_f32(float32_t __p0) {
5316 float32x2_t __ret;
5317 __ret = (float32x2_t) {__p0, __p0};
5318 return __ret;
5319 }
5320 #else
5321 __ai float32x2_t vdup_n_f32(float32_t __p0) {
5322 float32x2_t __ret;
5323 __ret = (float32x2_t) {__p0, __p0};
5324 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5325 return __ret;
5326 }
5327 #endif
5328
5329 #ifdef __LITTLE_ENDIAN__
5330 #define vdup_n_f16(__p0) __extension__ ({ \
5331 float16_t __s0 = __p0; \
5332 float16x4_t __ret; \
5333 __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
5334 __ret; \
5335 })
5336 #else
5337 #define vdup_n_f16(__p0) __extension__ ({ \
5338 float16_t __s0 = __p0; \
5339 float16x4_t __ret; \
5340 __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
5341 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5342 __ret; \
5343 })
5344 #endif
5345
5346 #ifdef __LITTLE_ENDIAN__
5347 __ai int32x2_t vdup_n_s32(int32_t __p0) {
5348 int32x2_t __ret;
5349 __ret = (int32x2_t) {__p0, __p0};
5350 return __ret;
5351 }
5352 #else
5353 __ai int32x2_t vdup_n_s32(int32_t __p0) {
5354 int32x2_t __ret;
5355 __ret = (int32x2_t) {__p0, __p0};
5356 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5357 return __ret;
5358 }
5359 #endif
5360
5361 #ifdef __LITTLE_ENDIAN__
5362 __ai int64x1_t vdup_n_s64(int64_t __p0) {
5363 int64x1_t __ret;
5364 __ret = (int64x1_t) {__p0};
5365 return __ret;
5366 }
5367 #else
5368 __ai int64x1_t vdup_n_s64(int64_t __p0) {
5369 int64x1_t __ret;
5370 __ret = (int64x1_t) {__p0};
5371 return __ret;
5372 }
5373 #endif
5374
5375 #ifdef __LITTLE_ENDIAN__
5376 __ai int16x4_t vdup_n_s16(int16_t __p0) {
5377 int16x4_t __ret;
5378 __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
5379 return __ret;
5380 }
5381 #else
5382 __ai int16x4_t vdup_n_s16(int16_t __p0) {
5383 int16x4_t __ret;
5384 __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
5385 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5386 return __ret;
5387 }
5388 #endif
5389
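/*
 * Illustrative sketch (hypothetical helper, not defined by this header):
 * vdup_n_* and vdupq_n_* build a vector with every lane set to the same
 * scalar value.
 *
 *   float32x4_t all_halves(void) {
 *       // { 0.5f, 0.5f, 0.5f, 0.5f }
 *       return vdupq_n_f32(0.5f);
 *   }
 */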
5390 #ifdef __LITTLE_ENDIAN__
5391 __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
5392 uint8x16_t __ret;
5393 __ret = __p0 ^ __p1;
5394 return __ret;
5395 }
5396 #else
5397 __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
5398 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5399 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5400 uint8x16_t __ret;
5401 __ret = __rev0 ^ __rev1;
5402 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5403 return __ret;
5404 }
5405 #endif
5406
5407 #ifdef __LITTLE_ENDIAN__
5408 __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
5409 uint32x4_t __ret;
5410 __ret = __p0 ^ __p1;
5411 return __ret;
5412 }
5413 #else
5414 __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
5415 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5416 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5417 uint32x4_t __ret;
5418 __ret = __rev0 ^ __rev1;
5419 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5420 return __ret;
5421 }
5422 #endif
5423
5424 #ifdef __LITTLE_ENDIAN__
5425 __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
5426 uint64x2_t __ret;
5427 __ret = __p0 ^ __p1;
5428 return __ret;
5429 }
5430 #else
5431 __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
5432 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5433 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5434 uint64x2_t __ret;
5435 __ret = __rev0 ^ __rev1;
5436 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5437 return __ret;
5438 }
5439 #endif
5440
5441 #ifdef __LITTLE_ENDIAN__
5442 __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
5443 uint16x8_t __ret;
5444 __ret = __p0 ^ __p1;
5445 return __ret;
5446 }
5447 #else
5448 __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
5449 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5450 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5451 uint16x8_t __ret;
5452 __ret = __rev0 ^ __rev1;
5453 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5454 return __ret;
5455 }
5456 #endif
5457
5458 #ifdef __LITTLE_ENDIAN__
5459 __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
5460 int8x16_t __ret;
5461 __ret = __p0 ^ __p1;
5462 return __ret;
5463 }
5464 #else
5465 __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
5466 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5467 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5468 int8x16_t __ret;
5469 __ret = __rev0 ^ __rev1;
5470 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
5471 return __ret;
5472 }
5473 #endif
5474
5475 #ifdef __LITTLE_ENDIAN__
5476 __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
5477 int32x4_t __ret;
5478 __ret = __p0 ^ __p1;
5479 return __ret;
5480 }
5481 #else
5482 __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
5483 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5484 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5485 int32x4_t __ret;
5486 __ret = __rev0 ^ __rev1;
5487 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5488 return __ret;
5489 }
5490 #endif
5491
5492 #ifdef __LITTLE_ENDIAN__
5493 __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
5494 int64x2_t __ret;
5495 __ret = __p0 ^ __p1;
5496 return __ret;
5497 }
5498 #else
5499 __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
5500 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5501 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5502 int64x2_t __ret;
5503 __ret = __rev0 ^ __rev1;
5504 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5505 return __ret;
5506 }
5507 #endif
5508
5509 #ifdef __LITTLE_ENDIAN__
5510 __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
5511 int16x8_t __ret;
5512 __ret = __p0 ^ __p1;
5513 return __ret;
5514 }
5515 #else
5516 __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
5517 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5518 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5519 int16x8_t __ret;
5520 __ret = __rev0 ^ __rev1;
5521 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5522 return __ret;
5523 }
5524 #endif
5525
5526 #ifdef __LITTLE_ENDIAN__
5527 __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
5528 uint8x8_t __ret;
5529 __ret = __p0 ^ __p1;
5530 return __ret;
5531 }
5532 #else
5533 __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
5534 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5535 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5536 uint8x8_t __ret;
5537 __ret = __rev0 ^ __rev1;
5538 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5539 return __ret;
5540 }
5541 #endif
5542
5543 #ifdef __LITTLE_ENDIAN__
5544 __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
5545 uint32x2_t __ret;
5546 __ret = __p0 ^ __p1;
5547 return __ret;
5548 }
5549 #else
5550 __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
5551 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5552 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5553 uint32x2_t __ret;
5554 __ret = __rev0 ^ __rev1;
5555 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5556 return __ret;
5557 }
5558 #endif
5559
5560 #ifdef __LITTLE_ENDIAN__
5561 __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
5562 uint64x1_t __ret;
5563 __ret = __p0 ^ __p1;
5564 return __ret;
5565 }
5566 #else
5567 __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
5568 uint64x1_t __ret;
5569 __ret = __p0 ^ __p1;
5570 return __ret;
5571 }
5572 #endif
5573
5574 #ifdef __LITTLE_ENDIAN__
5575 __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
5576 uint16x4_t __ret;
5577 __ret = __p0 ^ __p1;
5578 return __ret;
5579 }
5580 #else
5581 __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
5582 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5583 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5584 uint16x4_t __ret;
5585 __ret = __rev0 ^ __rev1;
5586 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5587 return __ret;
5588 }
5589 #endif
5590
5591 #ifdef __LITTLE_ENDIAN__
5592 __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
5593 int8x8_t __ret;
5594 __ret = __p0 ^ __p1;
5595 return __ret;
5596 }
5597 #else
5598 __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
5599 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
5600 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
5601 int8x8_t __ret;
5602 __ret = __rev0 ^ __rev1;
5603 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
5604 return __ret;
5605 }
5606 #endif
5607
5608 #ifdef __LITTLE_ENDIAN__
5609 __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
5610 int32x2_t __ret;
5611 __ret = __p0 ^ __p1;
5612 return __ret;
5613 }
5614 #else
5615 __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
5616 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
5617 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
5618 int32x2_t __ret;
5619 __ret = __rev0 ^ __rev1;
5620 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
5621 return __ret;
5622 }
5623 #endif
5624
5625 #ifdef __LITTLE_ENDIAN__
5626 __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
5627 int64x1_t __ret;
5628 __ret = __p0 ^ __p1;
5629 return __ret;
5630 }
5631 #else
5632 __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
5633 int64x1_t __ret;
5634 __ret = __p0 ^ __p1;
5635 return __ret;
5636 }
5637 #endif
5638
5639 #ifdef __LITTLE_ENDIAN__
5640 __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
5641 int16x4_t __ret;
5642 __ret = __p0 ^ __p1;
5643 return __ret;
5644 }
5645 #else
5646 __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
5647 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
5648 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
5649 int16x4_t __ret;
5650 __ret = __rev0 ^ __rev1;
5651 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
5652 return __ret;
5653 }
5654 #endif
5655
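/*
 * Illustrative sketch (hypothetical helper, not defined by this header):
 * veor_* and veorq_* are lane-wise bitwise XOR, expanded above directly
 * to the vector ^ operator.
 *
 *   uint8x16_t mask_bytes(uint8x16_t data, uint8x16_t key) {
 *       // Each result byte is data[i] ^ key[i].
 *       return veorq_u8(data, key);
 *   }
 */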
5656 #ifdef __LITTLE_ENDIAN__
5657 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
5658 poly8x8_t __s0 = __p0; \
5659 poly8x8_t __s1 = __p1; \
5660 poly8x8_t __ret; \
5661 __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
5662 __ret; \
5663 })
5664 #else
5665 #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
5666 poly8x8_t __s0 = __p0; \
5667 poly8x8_t __s1 = __p1; \
5668 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5669 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5670 poly8x8_t __ret; \
5671 __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
5672 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5673 __ret; \
5674 })
5675 #endif
5676
5677 #ifdef __LITTLE_ENDIAN__
5678 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
5679 poly16x4_t __s0 = __p0; \
5680 poly16x4_t __s1 = __p1; \
5681 poly16x4_t __ret; \
5682 __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
5683 __ret; \
5684 })
5685 #else
5686 #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
5687 poly16x4_t __s0 = __p0; \
5688 poly16x4_t __s1 = __p1; \
5689 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5690 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5691 poly16x4_t __ret; \
5692 __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
5693 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5694 __ret; \
5695 })
5696 #endif
5697
5698 #ifdef __LITTLE_ENDIAN__
5699 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
5700 poly8x16_t __s0 = __p0; \
5701 poly8x16_t __s1 = __p1; \
5702 poly8x16_t __ret; \
5703 __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
5704 __ret; \
5705 })
5706 #else
5707 #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
5708 poly8x16_t __s0 = __p0; \
5709 poly8x16_t __s1 = __p1; \
5710 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5711 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5712 poly8x16_t __ret; \
5713 __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
5714 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5715 __ret; \
5716 })
5717 #endif
5718
5719 #ifdef __LITTLE_ENDIAN__
5720 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
5721 poly16x8_t __s0 = __p0; \
5722 poly16x8_t __s1 = __p1; \
5723 poly16x8_t __ret; \
5724 __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
5725 __ret; \
5726 })
5727 #else
5728 #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
5729 poly16x8_t __s0 = __p0; \
5730 poly16x8_t __s1 = __p1; \
5731 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5732 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5733 poly16x8_t __ret; \
5734 __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
5735 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5736 __ret; \
5737 })
5738 #endif
5739
5740 #ifdef __LITTLE_ENDIAN__
5741 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
5742 uint8x16_t __s0 = __p0; \
5743 uint8x16_t __s1 = __p1; \
5744 uint8x16_t __ret; \
5745 __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
5746 __ret; \
5747 })
5748 #else
5749 #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
5750 uint8x16_t __s0 = __p0; \
5751 uint8x16_t __s1 = __p1; \
5752 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5753 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5754 uint8x16_t __ret; \
5755 __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
5756 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5757 __ret; \
5758 })
5759 #endif
5760
5761 #ifdef __LITTLE_ENDIAN__
5762 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
5763 uint32x4_t __s0 = __p0; \
5764 uint32x4_t __s1 = __p1; \
5765 uint32x4_t __ret; \
5766 __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
5767 __ret; \
5768 })
5769 #else
5770 #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
5771 uint32x4_t __s0 = __p0; \
5772 uint32x4_t __s1 = __p1; \
5773 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5774 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5775 uint32x4_t __ret; \
5776 __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
5777 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5778 __ret; \
5779 })
5780 #endif
5781
5782 #ifdef __LITTLE_ENDIAN__
5783 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
5784 uint64x2_t __s0 = __p0; \
5785 uint64x2_t __s1 = __p1; \
5786 uint64x2_t __ret; \
5787 __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
5788 __ret; \
5789 })
5790 #else
5791 #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
5792 uint64x2_t __s0 = __p0; \
5793 uint64x2_t __s1 = __p1; \
5794 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
5795 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
5796 uint64x2_t __ret; \
5797 __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
5798 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
5799 __ret; \
5800 })
5801 #endif
5802
5803 #ifdef __LITTLE_ENDIAN__
5804 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
5805 uint16x8_t __s0 = __p0; \
5806 uint16x8_t __s1 = __p1; \
5807 uint16x8_t __ret; \
5808 __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
5809 __ret; \
5810 })
5811 #else
5812 #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
5813 uint16x8_t __s0 = __p0; \
5814 uint16x8_t __s1 = __p1; \
5815 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5816 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5817 uint16x8_t __ret; \
5818 __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
5819 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5820 __ret; \
5821 })
5822 #endif
5823
5824 #ifdef __LITTLE_ENDIAN__
5825 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
5826 int8x16_t __s0 = __p0; \
5827 int8x16_t __s1 = __p1; \
5828 int8x16_t __ret; \
5829 __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
5830 __ret; \
5831 })
5832 #else
5833 #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
5834 int8x16_t __s0 = __p0; \
5835 int8x16_t __s1 = __p1; \
5836 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5837 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5838 int8x16_t __ret; \
5839 __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
5840 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
5841 __ret; \
5842 })
5843 #endif
5844
5845 #ifdef __LITTLE_ENDIAN__
5846 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
5847 float32x4_t __s0 = __p0; \
5848 float32x4_t __s1 = __p1; \
5849 float32x4_t __ret; \
5850 __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
5851 __ret; \
5852 })
5853 #else
5854 #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
5855 float32x4_t __s0 = __p0; \
5856 float32x4_t __s1 = __p1; \
5857 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5858 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5859 float32x4_t __ret; \
5860 __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
5861 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5862 __ret; \
5863 })
5864 #endif
5865
5866 #ifdef __LITTLE_ENDIAN__
5867 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
5868 int32x4_t __s0 = __p0; \
5869 int32x4_t __s1 = __p1; \
5870 int32x4_t __ret; \
5871 __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
5872 __ret; \
5873 })
5874 #else
5875 #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
5876 int32x4_t __s0 = __p0; \
5877 int32x4_t __s1 = __p1; \
5878 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
5879 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
5880 int32x4_t __ret; \
5881 __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
5882 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
5883 __ret; \
5884 })
5885 #endif
5886
5887 #ifdef __LITTLE_ENDIAN__
5888 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
5889 int64x2_t __s0 = __p0; \
5890 int64x2_t __s1 = __p1; \
5891 int64x2_t __ret; \
5892 __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
5893 __ret; \
5894 })
5895 #else
5896 #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
5897 int64x2_t __s0 = __p0; \
5898 int64x2_t __s1 = __p1; \
5899 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
5900 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
5901 int64x2_t __ret; \
5902 __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
5903 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
5904 __ret; \
5905 })
5906 #endif
5907
5908 #ifdef __LITTLE_ENDIAN__
5909 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
5910 int16x8_t __s0 = __p0; \
5911 int16x8_t __s1 = __p1; \
5912 int16x8_t __ret; \
5913 __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
5914 __ret; \
5915 })
5916 #else
5917 #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
5918 int16x8_t __s0 = __p0; \
5919 int16x8_t __s1 = __p1; \
5920 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5921 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5922 int16x8_t __ret; \
5923 __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
5924 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5925 __ret; \
5926 })
5927 #endif
5928
5929 #ifdef __LITTLE_ENDIAN__
5930 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
5931 uint8x8_t __s0 = __p0; \
5932 uint8x8_t __s1 = __p1; \
5933 uint8x8_t __ret; \
5934 __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
5935 __ret; \
5936 })
5937 #else
5938 #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
5939 uint8x8_t __s0 = __p0; \
5940 uint8x8_t __s1 = __p1; \
5941 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
5942 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
5943 uint8x8_t __ret; \
5944 __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
5945 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
5946 __ret; \
5947 })
5948 #endif
5949
5950 #ifdef __LITTLE_ENDIAN__
5951 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
5952 uint32x2_t __s0 = __p0; \
5953 uint32x2_t __s1 = __p1; \
5954 uint32x2_t __ret; \
5955 __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
5956 __ret; \
5957 })
5958 #else
5959 #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
5960 uint32x2_t __s0 = __p0; \
5961 uint32x2_t __s1 = __p1; \
5962 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
5963 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
5964 uint32x2_t __ret; \
5965 __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
5966 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
5967 __ret; \
5968 })
5969 #endif
5970
5971 #ifdef __LITTLE_ENDIAN__
5972 #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
5973 uint64x1_t __s0 = __p0; \
5974 uint64x1_t __s1 = __p1; \
5975 uint64x1_t __ret; \
5976 __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
5977 __ret; \
5978 })
5979 #else
5980 #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
5981 uint64x1_t __s0 = __p0; \
5982 uint64x1_t __s1 = __p1; \
5983 uint64x1_t __ret; \
5984 __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
5985 __ret; \
5986 })
5987 #endif
5988
5989 #ifdef __LITTLE_ENDIAN__
5990 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
5991 uint16x4_t __s0 = __p0; \
5992 uint16x4_t __s1 = __p1; \
5993 uint16x4_t __ret; \
5994 __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
5995 __ret; \
5996 })
5997 #else
5998 #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
5999 uint16x4_t __s0 = __p0; \
6000 uint16x4_t __s1 = __p1; \
6001 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6002 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
6003 uint16x4_t __ret; \
6004 __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
6005 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
6006 __ret; \
6007 })
6008 #endif
6009
6010 #ifdef __LITTLE_ENDIAN__
6011 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
6012 int8x8_t __s0 = __p0; \
6013 int8x8_t __s1 = __p1; \
6014 int8x8_t __ret; \
6015 __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
6016 __ret; \
6017 })
6018 #else
6019 #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
6020 int8x8_t __s0 = __p0; \
6021 int8x8_t __s1 = __p1; \
6022 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6023 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
6024 int8x8_t __ret; \
6025 __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
6026 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
6027 __ret; \
6028 })
6029 #endif
6030
6031 #ifdef __LITTLE_ENDIAN__
6032 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
6033 float32x2_t __s0 = __p0; \
6034 float32x2_t __s1 = __p1; \
6035 float32x2_t __ret; \
6036 __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
6037 __ret; \
6038 })
6039 #else
6040 #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
6041 float32x2_t __s0 = __p0; \
6042 float32x2_t __s1 = __p1; \
6043 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6044 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
6045 float32x2_t __ret; \
6046 __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
6047 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
6048 __ret; \
6049 })
6050 #endif
6051
6052 #ifdef __LITTLE_ENDIAN__
6053 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
6054 int32x2_t __s0 = __p0; \
6055 int32x2_t __s1 = __p1; \
6056 int32x2_t __ret; \
6057 __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
6058 __ret; \
6059 })
6060 #else
6061 #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
6062 int32x2_t __s0 = __p0; \
6063 int32x2_t __s1 = __p1; \
6064 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6065 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
6066 int32x2_t __ret; \
6067 __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
6068 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
6069 __ret; \
6070 })
6071 #endif
6072
6073 #ifdef __LITTLE_ENDIAN__
6074 #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
6075 int64x1_t __s0 = __p0; \
6076 int64x1_t __s1 = __p1; \
6077 int64x1_t __ret; \
6078 __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
6079 __ret; \
6080 })
6081 #else
6082 #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
6083 int64x1_t __s0 = __p0; \
6084 int64x1_t __s1 = __p1; \
6085 int64x1_t __ret; \
6086 __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
6087 __ret; \
6088 })
6089 #endif
6090
6091 #ifdef __LITTLE_ENDIAN__
6092 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
6093 int16x4_t __s0 = __p0; \
6094 int16x4_t __s1 = __p1; \
6095 int16x4_t __ret; \
6096 __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
6097 __ret; \
6098 })
6099 #else
6100 #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
6101 int16x4_t __s0 = __p0; \
6102 int16x4_t __s1 = __p1; \
6103 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6104 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
6105 int16x4_t __ret; \
6106 __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
6107 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
6108 __ret; \
6109 })
6110 #endif
6111
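/*
 * Illustrative sketch (hypothetical helper, not defined by this header):
 * vext_* and vextq_* concatenate two vectors and extract a window that
 * starts __p2 lanes into the first operand, e.g. vextq_u8(a, b, 3)
 * yields { a[3], ..., a[15], b[0], b[1], b[2] }.
 *
 *   uint8x16_t shift_in_three(uint8x16_t a, uint8x16_t b) {
 *       return vextq_u8(a, b, 3);
 *   }
 */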
6112 #ifdef __LITTLE_ENDIAN__
6113 __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
6114 poly8x8_t __ret;
6115 __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6116 return __ret;
6117 }
6118 #else
6119 __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
6120 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6121 poly8x8_t __ret;
6122 __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
6123 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6124 return __ret;
6125 }
6126 __ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
6127 poly8x8_t __ret;
6128 __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6129 return __ret;
6130 }
6131 #endif
6132
6133 #ifdef __LITTLE_ENDIAN__
6134 __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
6135 poly16x4_t __ret;
6136 __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6137 return __ret;
6138 }
6139 #else
6140 __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
6141 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6142 poly16x4_t __ret;
6143 __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6144 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6145 return __ret;
6146 }
6147 #endif
6148
6149 #ifdef __LITTLE_ENDIAN__
6150 __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
6151 uint8x8_t __ret;
6152 __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6153 return __ret;
6154 }
6155 #else
6156 __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
6157 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6158 uint8x8_t __ret;
6159 __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
6160 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6161 return __ret;
6162 }
6163 __ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
6164 uint8x8_t __ret;
6165 __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6166 return __ret;
6167 }
6168 #endif
6169
6170 #ifdef __LITTLE_ENDIAN__
6171 __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
6172 uint32x2_t __ret;
6173 __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6174 return __ret;
6175 }
6176 #else
6177 __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
6178 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6179 uint32x2_t __ret;
6180 __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
6181 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6182 return __ret;
6183 }
6184 __ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
6185 uint32x2_t __ret;
6186 __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6187 return __ret;
6188 }
6189 #endif
6190
6191 #ifdef __LITTLE_ENDIAN__
6192 __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
6193 uint64x1_t __ret;
6194 __ret = __builtin_shufflevector(__p0, __p0, 1);
6195 return __ret;
6196 }
6197 #else
6198 __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
6199 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
6200 uint64x1_t __ret;
6201 __ret = __builtin_shufflevector(__rev0, __rev0, 1);
6202 return __ret;
6203 }
6204 #endif
6205
6206 #ifdef __LITTLE_ENDIAN__
6207 __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
6208 uint16x4_t __ret;
6209 __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6210 return __ret;
6211 }
6212 #else
6213 __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
6214 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6215 uint16x4_t __ret;
6216 __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6217 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6218 return __ret;
6219 }
6220 __ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
6221 uint16x4_t __ret;
6222 __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6223 return __ret;
6224 }
6225 #endif
6226
6227 #ifdef __LITTLE_ENDIAN__
6228 __ai int8x8_t vget_high_s8(int8x16_t __p0) {
6229 int8x8_t __ret;
6230 __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6231 return __ret;
6232 }
6233 #else
6234 __ai int8x8_t vget_high_s8(int8x16_t __p0) {
6235 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6236 int8x8_t __ret;
6237 __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
6238 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6239 return __ret;
6240 }
6241 __ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
6242 int8x8_t __ret;
6243 __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
6244 return __ret;
6245 }
6246 #endif
6247
6248 #ifdef __LITTLE_ENDIAN__
6249 __ai float32x2_t vget_high_f32(float32x4_t __p0) {
6250 float32x2_t __ret;
6251 __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6252 return __ret;
6253 }
6254 #else
6255 __ai float32x2_t vget_high_f32(float32x4_t __p0) {
6256 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6257 float32x2_t __ret;
6258 __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
6259 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6260 return __ret;
6261 }
6262 __ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
6263 float32x2_t __ret;
6264 __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6265 return __ret;
6266 }
6267 #endif
6268
6269 #ifdef __LITTLE_ENDIAN__
6270 __ai float16x4_t vget_high_f16(float16x8_t __p0) {
6271 float16x4_t __ret;
6272 __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6273 return __ret;
6274 }
6275 #else
6276 __ai float16x4_t vget_high_f16(float16x8_t __p0) {
6277 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6278 float16x4_t __ret;
6279 __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6280 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6281 return __ret;
6282 }
6283 __ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
6284 float16x4_t __ret;
6285 __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6286 return __ret;
6287 }
6288 #endif
6289
6290 #ifdef __LITTLE_ENDIAN__
6291 __ai int32x2_t vget_high_s32(int32x4_t __p0) {
6292 int32x2_t __ret;
6293 __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6294 return __ret;
6295 }
6296 #else
6297 __ai int32x2_t vget_high_s32(int32x4_t __p0) {
6298 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6299 int32x2_t __ret;
6300 __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
6301 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6302 return __ret;
6303 }
6304 __ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
6305 int32x2_t __ret;
6306 __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
6307 return __ret;
6308 }
6309 #endif
6310
6311 #ifdef __LITTLE_ENDIAN__
6312 __ai int64x1_t vget_high_s64(int64x2_t __p0) {
6313 int64x1_t __ret;
6314 __ret = __builtin_shufflevector(__p0, __p0, 1);
6315 return __ret;
6316 }
6317 #else
6318 __ai int64x1_t vget_high_s64(int64x2_t __p0) {
6319 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
6320 int64x1_t __ret;
6321 __ret = __builtin_shufflevector(__rev0, __rev0, 1);
6322 return __ret;
6323 }
6324 #endif
6325
6326 #ifdef __LITTLE_ENDIAN__
6327 __ai int16x4_t vget_high_s16(int16x8_t __p0) {
6328 int16x4_t __ret;
6329 __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6330 return __ret;
6331 }
6332 #else
6333 __ai int16x4_t vget_high_s16(int16x8_t __p0) {
6334 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6335 int16x4_t __ret;
6336 __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
6337 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6338 return __ret;
6339 }
6340 __ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
6341 int16x4_t __ret;
6342 __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
6343 return __ret;
6344 }
6345 #endif
6346
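/*
 * Illustrative sketch (hypothetical helper, not defined by this header):
 * vget_high_* returns the upper half of a 128-bit vector as a 64-bit
 * vector; the __noswap_* variants defined alongside the big-endian
 * bodies perform the same extraction without any lane reversal.
 *
 *   int16x4_t upper_half(int16x8_t v) {
 *       // { v[4], v[5], v[6], v[7] }
 *       return vget_high_s16(v);
 *   }
 */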
6347 #ifdef __LITTLE_ENDIAN__
6348 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
6349 poly8x8_t __s0 = __p0; \
6350 poly8_t __ret; \
6351 __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6352 __ret; \
6353 })
6354 #else
6355 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
6356 poly8x8_t __s0 = __p0; \
6357 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6358 poly8_t __ret; \
6359 __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
6360 __ret; \
6361 })
6362 #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
6363 poly8x8_t __s0 = __p0; \
6364 poly8_t __ret; \
6365 __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6366 __ret; \
6367 })
6368 #endif
6369
6370 #ifdef __LITTLE_ENDIAN__
6371 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
6372 poly16x4_t __s0 = __p0; \
6373 poly16_t __ret; \
6374 __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6375 __ret; \
6376 })
6377 #else
6378 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
6379 poly16x4_t __s0 = __p0; \
6380 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6381 poly16_t __ret; \
6382 __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
6383 __ret; \
6384 })
6385 #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
6386 poly16x4_t __s0 = __p0; \
6387 poly16_t __ret; \
6388 __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6389 __ret; \
6390 })
6391 #endif
6392
6393 #ifdef __LITTLE_ENDIAN__
6394 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
6395 poly8x16_t __s0 = __p0; \
6396 poly8_t __ret; \
6397 __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6398 __ret; \
6399 })
6400 #else
6401 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
6402 poly8x16_t __s0 = __p0; \
6403 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
6404 poly8_t __ret; \
6405 __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
6406 __ret; \
6407 })
6408 #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
6409 poly8x16_t __s0 = __p0; \
6410 poly8_t __ret; \
6411 __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6412 __ret; \
6413 })
6414 #endif
6415
6416 #ifdef __LITTLE_ENDIAN__
6417 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
6418 poly16x8_t __s0 = __p0; \
6419 poly16_t __ret; \
6420 __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6421 __ret; \
6422 })
6423 #else
6424 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
6425 poly16x8_t __s0 = __p0; \
6426 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6427 poly16_t __ret; \
6428 __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
6429 __ret; \
6430 })
6431 #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
6432 poly16x8_t __s0 = __p0; \
6433 poly16_t __ret; \
6434 __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6435 __ret; \
6436 })
6437 #endif
6438
6439 #ifdef __LITTLE_ENDIAN__
6440 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
6441 uint8x16_t __s0 = __p0; \
6442 uint8_t __ret; \
6443 __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6444 __ret; \
6445 })
6446 #else
6447 #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
6448 uint8x16_t __s0 = __p0; \
6449 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
6450 uint8_t __ret; \
6451 __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
6452 __ret; \
6453 })
6454 #define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
6455 uint8x16_t __s0 = __p0; \
6456 uint8_t __ret; \
6457 __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6458 __ret; \
6459 })
6460 #endif
6461
6462 #ifdef __LITTLE_ENDIAN__
6463 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
6464 uint32x4_t __s0 = __p0; \
6465 uint32_t __ret; \
6466 __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6467 __ret; \
6468 })
6469 #else
6470 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
6471 uint32x4_t __s0 = __p0; \
6472 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6473 uint32_t __ret; \
6474 __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
6475 __ret; \
6476 })
6477 #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
6478 uint32x4_t __s0 = __p0; \
6479 uint32_t __ret; \
6480 __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6481 __ret; \
6482 })
6483 #endif
6484
6485 #ifdef __LITTLE_ENDIAN__
6486 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
6487 uint64x2_t __s0 = __p0; \
6488 uint64_t __ret; \
6489 __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6490 __ret; \
6491 })
6492 #else
6493 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
6494 uint64x2_t __s0 = __p0; \
6495 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6496 uint64_t __ret; \
6497 __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
6498 __ret; \
6499 })
6500 #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
6501 uint64x2_t __s0 = __p0; \
6502 uint64_t __ret; \
6503 __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6504 __ret; \
6505 })
6506 #endif
6507
6508 #ifdef __LITTLE_ENDIAN__
6509 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
6510 uint16x8_t __s0 = __p0; \
6511 uint16_t __ret; \
6512 __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6513 __ret; \
6514 })
6515 #else
6516 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
6517 uint16x8_t __s0 = __p0; \
6518 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6519 uint16_t __ret; \
6520 __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
6521 __ret; \
6522 })
6523 #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
6524 uint16x8_t __s0 = __p0; \
6525 uint16_t __ret; \
6526 __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6527 __ret; \
6528 })
6529 #endif
6530
6531 #ifdef __LITTLE_ENDIAN__
6532 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
6533 int8x16_t __s0 = __p0; \
6534 int8_t __ret; \
6535 __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6536 __ret; \
6537 })
6538 #else
6539 #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
6540 int8x16_t __s0 = __p0; \
6541 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
6542 int8_t __ret; \
6543 __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
6544 __ret; \
6545 })
6546 #define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
6547 int8x16_t __s0 = __p0; \
6548 int8_t __ret; \
6549 __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
6550 __ret; \
6551 })
6552 #endif
6553
6554 #ifdef __LITTLE_ENDIAN__
6555 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
6556 float32x4_t __s0 = __p0; \
6557 float32_t __ret; \
6558 __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
6559 __ret; \
6560 })
6561 #else
6562 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
6563 float32x4_t __s0 = __p0; \
6564 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6565 float32_t __ret; \
6566 __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
6567 __ret; \
6568 })
6569 #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
6570 float32x4_t __s0 = __p0; \
6571 float32_t __ret; \
6572 __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
6573 __ret; \
6574 })
6575 #endif
6576
6577 #ifdef __LITTLE_ENDIAN__
6578 #define vgetq_lane_f16(__p0, __p1) __extension__ ({ \
6579 float16x8_t __s0 = __p0; \
6580 float16_t __ret; \
6581 __ret = (float16_t) __builtin_neon_vgetq_lane_f16((int8x16_t)__s0, __p1); \
6582 __ret; \
6583 })
6584 #else
6585 #define vgetq_lane_f16(__p0, __p1) __extension__ ({ \
6586 float16x8_t __s0 = __p0; \
6587 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6588 float16_t __ret; \
6589 __ret = (float16_t) __builtin_neon_vgetq_lane_f16((int8x16_t)__rev0, __p1); \
6590 __ret; \
6591 })
6592 #define __noswap_vgetq_lane_f16(__p0, __p1) __extension__ ({ \
6593 float16x8_t __s0 = __p0; \
6594 float16_t __ret; \
6595 __ret = (float16_t) __builtin_neon_vgetq_lane_f16((int8x16_t)__s0, __p1); \
6596 __ret; \
6597 })
6598 #endif
6599
6600 #ifdef __LITTLE_ENDIAN__
6601 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
6602 int32x4_t __s0 = __p0; \
6603 int32_t __ret; \
6604 __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6605 __ret; \
6606 })
6607 #else
6608 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
6609 int32x4_t __s0 = __p0; \
6610 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6611 int32_t __ret; \
6612 __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
6613 __ret; \
6614 })
6615 #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
6616 int32x4_t __s0 = __p0; \
6617 int32_t __ret; \
6618 __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
6619 __ret; \
6620 })
6621 #endif
6622
6623 #ifdef __LITTLE_ENDIAN__
6624 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
6625 int64x2_t __s0 = __p0; \
6626 int64_t __ret; \
6627 __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6628 __ret; \
6629 })
6630 #else
6631 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
6632 int64x2_t __s0 = __p0; \
6633 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6634 int64_t __ret; \
6635 __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
6636 __ret; \
6637 })
6638 #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
6639 int64x2_t __s0 = __p0; \
6640 int64_t __ret; \
6641 __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
6642 __ret; \
6643 })
6644 #endif
6645
6646 #ifdef __LITTLE_ENDIAN__
6647 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
6648 int16x8_t __s0 = __p0; \
6649 int16_t __ret; \
6650 __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6651 __ret; \
6652 })
6653 #else
6654 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
6655 int16x8_t __s0 = __p0; \
6656 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6657 int16_t __ret; \
6658 __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
6659 __ret; \
6660 })
6661 #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
6662 int16x8_t __s0 = __p0; \
6663 int16_t __ret; \
6664 __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
6665 __ret; \
6666 })
6667 #endif
6668
6669 #ifdef __LITTLE_ENDIAN__
6670 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
6671 uint8x8_t __s0 = __p0; \
6672 uint8_t __ret; \
6673 __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6674 __ret; \
6675 })
6676 #else
6677 #define vget_lane_u8(__p0, __p1) __extension__ ({ \
6678 uint8x8_t __s0 = __p0; \
6679 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6680 uint8_t __ret; \
6681 __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
6682 __ret; \
6683 })
6684 #define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
6685 uint8x8_t __s0 = __p0; \
6686 uint8_t __ret; \
6687 __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6688 __ret; \
6689 })
6690 #endif
6691
6692 #ifdef __LITTLE_ENDIAN__
6693 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
6694 uint32x2_t __s0 = __p0; \
6695 uint32_t __ret; \
6696 __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6697 __ret; \
6698 })
6699 #else
6700 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
6701 uint32x2_t __s0 = __p0; \
6702 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6703 uint32_t __ret; \
6704 __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
6705 __ret; \
6706 })
6707 #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
6708 uint32x2_t __s0 = __p0; \
6709 uint32_t __ret; \
6710 __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6711 __ret; \
6712 })
6713 #endif
6714
6715 #ifdef __LITTLE_ENDIAN__
6716 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
6717 uint64x1_t __s0 = __p0; \
6718 uint64_t __ret; \
6719 __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6720 __ret; \
6721 })
6722 #else
6723 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
6724 uint64x1_t __s0 = __p0; \
6725 uint64_t __ret; \
6726 __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6727 __ret; \
6728 })
6729 #define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \
6730 uint64x1_t __s0 = __p0; \
6731 uint64_t __ret; \
6732 __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6733 __ret; \
6734 })
6735 #endif
6736
6737 #ifdef __LITTLE_ENDIAN__
6738 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
6739 uint16x4_t __s0 = __p0; \
6740 uint16_t __ret; \
6741 __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6742 __ret; \
6743 })
6744 #else
6745 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
6746 uint16x4_t __s0 = __p0; \
6747 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6748 uint16_t __ret; \
6749 __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
6750 __ret; \
6751 })
6752 #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
6753 uint16x4_t __s0 = __p0; \
6754 uint16_t __ret; \
6755 __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6756 __ret; \
6757 })
6758 #endif
6759
6760 #ifdef __LITTLE_ENDIAN__
6761 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
6762 int8x8_t __s0 = __p0; \
6763 int8_t __ret; \
6764 __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6765 __ret; \
6766 })
6767 #else
6768 #define vget_lane_s8(__p0, __p1) __extension__ ({ \
6769 int8x8_t __s0 = __p0; \
6770 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
6771 int8_t __ret; \
6772 __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
6773 __ret; \
6774 })
6775 #define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
6776 int8x8_t __s0 = __p0; \
6777 int8_t __ret; \
6778 __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
6779 __ret; \
6780 })
6781 #endif
6782
6783 #ifdef __LITTLE_ENDIAN__
6784 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
6785 float32x2_t __s0 = __p0; \
6786 float32_t __ret; \
6787 __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
6788 __ret; \
6789 })
6790 #else
6791 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
6792 float32x2_t __s0 = __p0; \
6793 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6794 float32_t __ret; \
6795 __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
6796 __ret; \
6797 })
6798 #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
6799 float32x2_t __s0 = __p0; \
6800 float32_t __ret; \
6801 __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
6802 __ret; \
6803 })
6804 #endif
6805
6806 #ifdef __LITTLE_ENDIAN__
6807 #define vget_lane_f16(__p0, __p1) __extension__ ({ \
6808 float16x4_t __s0 = __p0; \
6809 float16_t __ret; \
6810 __ret = (float16_t) __builtin_neon_vget_lane_f16((int8x8_t)__s0, __p1); \
6811 __ret; \
6812 })
6813 #else
6814 #define vget_lane_f16(__p0, __p1) __extension__ ({ \
6815 float16x4_t __s0 = __p0; \
6816 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6817 float16_t __ret; \
6818 __ret = (float16_t) __builtin_neon_vget_lane_f16((int8x8_t)__rev0, __p1); \
6819 __ret; \
6820 })
6821 #define __noswap_vget_lane_f16(__p0, __p1) __extension__ ({ \
6822 float16x4_t __s0 = __p0; \
6823 float16_t __ret; \
6824 __ret = (float16_t) __builtin_neon_vget_lane_f16((int8x8_t)__s0, __p1); \
6825 __ret; \
6826 })
6827 #endif
6828
6829 #ifdef __LITTLE_ENDIAN__
6830 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
6831 int32x2_t __s0 = __p0; \
6832 int32_t __ret; \
6833 __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6834 __ret; \
6835 })
6836 #else
6837 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
6838 int32x2_t __s0 = __p0; \
6839 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
6840 int32_t __ret; \
6841 __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
6842 __ret; \
6843 })
6844 #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
6845 int32x2_t __s0 = __p0; \
6846 int32_t __ret; \
6847 __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
6848 __ret; \
6849 })
6850 #endif
6851
6852 #ifdef __LITTLE_ENDIAN__
6853 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
6854 int64x1_t __s0 = __p0; \
6855 int64_t __ret; \
6856 __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6857 __ret; \
6858 })
6859 #else
6860 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
6861 int64x1_t __s0 = __p0; \
6862 int64_t __ret; \
6863 __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6864 __ret; \
6865 })
6866 #define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \
6867 int64x1_t __s0 = __p0; \
6868 int64_t __ret; \
6869 __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
6870 __ret; \
6871 })
6872 #endif
6873
6874 #ifdef __LITTLE_ENDIAN__
6875 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
6876 int16x4_t __s0 = __p0; \
6877 int16_t __ret; \
6878 __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6879 __ret; \
6880 })
6881 #else
6882 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
6883 int16x4_t __s0 = __p0; \
6884 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
6885 int16_t __ret; \
6886 __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
6887 __ret; \
6888 })
6889 #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
6890 int16x4_t __s0 = __p0; \
6891 int16_t __ret; \
6892 __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
6893 __ret; \
6894 })
6895 #endif
6896
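/*
 * vget_low: companion to vget_high above; returns the low half (lanes
 * 0..n/2-1) of a 128-bit vector as a 64-bit vector. For example, applied to
 * an int16x8_t holding {0,1,2,3,4,5,6,7}, vget_low_s16 yields {0,1,2,3}
 * while vget_high_s16 yields {4,5,6,7}.
 */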
6897 #ifdef __LITTLE_ENDIAN__
6898 __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
6899 poly8x8_t __ret;
6900 __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
6901 return __ret;
6902 }
6903 #else
6904 __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
6905 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6906 poly8x8_t __ret;
6907 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
6908 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6909 return __ret;
6910 }
6911 #endif
6912
6913 #ifdef __LITTLE_ENDIAN__
6914 __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
6915 poly16x4_t __ret;
6916 __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
6917 return __ret;
6918 }
6919 #else
6920 __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
6921 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6922 poly16x4_t __ret;
6923 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
6924 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6925 return __ret;
6926 }
6927 #endif
6928
6929 #ifdef __LITTLE_ENDIAN__
6930 __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
6931 uint8x8_t __ret;
6932 __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
6933 return __ret;
6934 }
6935 #else
6936 __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
6937 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
6938 uint8x8_t __ret;
6939 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
6940 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
6941 return __ret;
6942 }
6943 #endif
6944
6945 #ifdef __LITTLE_ENDIAN__
6946 __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
6947 uint32x2_t __ret;
6948 __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
6949 return __ret;
6950 }
6951 #else
6952 __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
6953 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
6954 uint32x2_t __ret;
6955 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
6956 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
6957 return __ret;
6958 }
6959 #endif
6960
6961 #ifdef __LITTLE_ENDIAN__
6962 __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
6963 uint64x1_t __ret;
6964 __ret = __builtin_shufflevector(__p0, __p0, 0);
6965 return __ret;
6966 }
6967 #else
6968 __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
6969 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
6970 uint64x1_t __ret;
6971 __ret = __builtin_shufflevector(__rev0, __rev0, 0);
6972 return __ret;
6973 }
6974 #endif
6975
6976 #ifdef __LITTLE_ENDIAN__
6977 __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
6978 uint16x4_t __ret;
6979 __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
6980 return __ret;
6981 }
6982 #else
6983 __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
6984 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
6985 uint16x4_t __ret;
6986 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
6987 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
6988 return __ret;
6989 }
6990 #endif
6991
6992 #ifdef __LITTLE_ENDIAN__
6993 __ai int8x8_t vget_low_s8(int8x16_t __p0) {
6994 int8x8_t __ret;
6995 __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
6996 return __ret;
6997 }
6998 #else
6999 __ai int8x8_t vget_low_s8(int8x16_t __p0) {
7000 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7001 int8x8_t __ret;
7002 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
7003 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7004 return __ret;
7005 }
7006 #endif
7007
7008 #ifdef __LITTLE_ENDIAN__
7009 __ai float32x2_t vget_low_f32(float32x4_t __p0) {
7010 float32x2_t __ret;
7011 __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
7012 return __ret;
7013 }
7014 #else
7015 __ai float32x2_t vget_low_f32(float32x4_t __p0) {
7016 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7017 float32x2_t __ret;
7018 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
7019 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7020 return __ret;
7021 }
7022 #endif
7023
7024 #ifdef __LITTLE_ENDIAN__
7025 __ai float16x4_t vget_low_f16(float16x8_t __p0) {
7026 float16x4_t __ret;
7027 __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
7028 return __ret;
7029 }
7030 #else
7031 __ai float16x4_t vget_low_f16(float16x8_t __p0) {
7032 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7033 float16x4_t __ret;
7034 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
7035 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7036 return __ret;
7037 }
7038 #endif
7039
7040 #ifdef __LITTLE_ENDIAN__
7041 __ai int32x2_t vget_low_s32(int32x4_t __p0) {
7042 int32x2_t __ret;
7043 __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
7044 return __ret;
7045 }
7046 #else
7047 __ai int32x2_t vget_low_s32(int32x4_t __p0) {
7048 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7049 int32x2_t __ret;
7050 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
7051 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7052 return __ret;
7053 }
7054 #endif
7055
7056 #ifdef __LITTLE_ENDIAN__
7057 __ai int64x1_t vget_low_s64(int64x2_t __p0) {
7058 int64x1_t __ret;
7059 __ret = __builtin_shufflevector(__p0, __p0, 0);
7060 return __ret;
7061 }
7062 #else
7063 __ai int64x1_t vget_low_s64(int64x2_t __p0) {
7064 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7065 int64x1_t __ret;
7066 __ret = __builtin_shufflevector(__rev0, __rev0, 0);
7067 return __ret;
7068 }
7069 #endif
7070
7071 #ifdef __LITTLE_ENDIAN__
7072 __ai int16x4_t vget_low_s16(int16x8_t __p0) {
7073 int16x4_t __ret;
7074 __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
7075 return __ret;
7076 }
7077 #else
7078 __ai int16x4_t vget_low_s16(int16x8_t __p0) {
7079 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7080 int16x4_t __ret;
7081 __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
7082 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7083 return __ret;
7084 }
7085 #endif
7086
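/*
 * vhadd / vhaddq: halving add. Each result lane is (a + b) >> 1, with the sum
 * formed in a wider intermediate so it cannot overflow; the signed forms use
 * an arithmetic shift. For example, vhadd_u8 on lanes 200 and 57 gives
 * (200 + 57) >> 1 = 128.
 */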
7087 #ifdef __LITTLE_ENDIAN__
7088 __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7089 uint8x16_t __ret;
7090 __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
7091 return __ret;
7092 }
7093 #else
7094 __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7095 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7096 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7097 uint8x16_t __ret;
7098 __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
7099 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7100 return __ret;
7101 }
7102 #endif
7103
7104 #ifdef __LITTLE_ENDIAN__
7105 __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7106 uint32x4_t __ret;
7107 __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
7108 return __ret;
7109 }
7110 #else
7111 __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7112 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7113 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7114 uint32x4_t __ret;
7115 __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
7116 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7117 return __ret;
7118 }
7119 #endif
7120
7121 #ifdef __LITTLE_ENDIAN__
7122 __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7123 uint16x8_t __ret;
7124 __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
7125 return __ret;
7126 }
7127 #else
7128 __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7129 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7130 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7131 uint16x8_t __ret;
7132 __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
7133 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7134 return __ret;
7135 }
7136 #endif
7137
7138 #ifdef __LITTLE_ENDIAN__
7139 __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
7140 int8x16_t __ret;
7141 __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
7142 return __ret;
7143 }
7144 #else
7145 __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
7146 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7147 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7148 int8x16_t __ret;
7149 __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
7150 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7151 return __ret;
7152 }
7153 #endif
7154
7155 #ifdef __LITTLE_ENDIAN__
7156 __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
7157 int32x4_t __ret;
7158 __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
7159 return __ret;
7160 }
7161 #else
7162 __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
7163 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7164 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7165 int32x4_t __ret;
7166 __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
7167 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7168 return __ret;
7169 }
7170 #endif
7171
7172 #ifdef __LITTLE_ENDIAN__
7173 __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
7174 int16x8_t __ret;
7175 __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
7176 return __ret;
7177 }
7178 #else
7179 __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
7180 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7181 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7182 int16x8_t __ret;
7183 __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
7184 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7185 return __ret;
7186 }
7187 #endif
7188
7189 #ifdef __LITTLE_ENDIAN__
7190 __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
7191 uint8x8_t __ret;
7192 __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
7193 return __ret;
7194 }
7195 #else
7196 __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
7197 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7198 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7199 uint8x8_t __ret;
7200 __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
7201 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7202 return __ret;
7203 }
7204 #endif
7205
7206 #ifdef __LITTLE_ENDIAN__
7207 __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
7208 uint32x2_t __ret;
7209 __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
7210 return __ret;
7211 }
7212 #else
7213 __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
7214 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7215 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7216 uint32x2_t __ret;
7217 __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
7218 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7219 return __ret;
7220 }
7221 #endif
7222
7223 #ifdef __LITTLE_ENDIAN__
7224 __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
7225 uint16x4_t __ret;
7226 __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
7227 return __ret;
7228 }
7229 #else
7230 __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
7231 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7232 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7233 uint16x4_t __ret;
7234 __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
7235 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7236 return __ret;
7237 }
7238 #endif
7239
7240 #ifdef __LITTLE_ENDIAN__
7241 __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
7242 int8x8_t __ret;
7243 __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
7244 return __ret;
7245 }
7246 #else
7247 __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
7248 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7249 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7250 int8x8_t __ret;
7251 __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
7252 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7253 return __ret;
7254 }
7255 #endif
7256
7257 #ifdef __LITTLE_ENDIAN__
7258 __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
7259 int32x2_t __ret;
7260 __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
7261 return __ret;
7262 }
7263 #else
7264 __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
7265 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7266 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7267 int32x2_t __ret;
7268 __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
7269 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7270 return __ret;
7271 }
7272 #endif
7273
7274 #ifdef __LITTLE_ENDIAN__
7275 __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
7276 int16x4_t __ret;
7277 __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
7278 return __ret;
7279 }
7280 #else
7281 __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
7282 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7283 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7284 int16x4_t __ret;
7285 __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
7286 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7287 return __ret;
7288 }
7289 #endif
7290
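/*
 * vhsub / vhsubq: halving subtract, the same pattern as vhadd above but with
 * (a - b) >> 1 on a widened difference. For example, vhsub_s16 on lanes 100
 * and 37 gives (100 - 37) >> 1 = 31.
 */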
7291 #ifdef __LITTLE_ENDIAN__
7292 __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7293 uint8x16_t __ret;
7294 __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
7295 return __ret;
7296 }
7297 #else
7298 __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
7299 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7300 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7301 uint8x16_t __ret;
7302 __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
7303 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7304 return __ret;
7305 }
7306 #endif
7307
7308 #ifdef __LITTLE_ENDIAN__
7309 __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7310 uint32x4_t __ret;
7311 __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
7312 return __ret;
7313 }
7314 #else
7315 __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
7316 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7317 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7318 uint32x4_t __ret;
7319 __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
7320 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7321 return __ret;
7322 }
7323 #endif
7324
7325 #ifdef __LITTLE_ENDIAN__
7326 __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7327 uint16x8_t __ret;
7328 __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
7329 return __ret;
7330 }
7331 #else
7332 __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
7333 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7334 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7335 uint16x8_t __ret;
7336 __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
7337 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7338 return __ret;
7339 }
7340 #endif
7341
7342 #ifdef __LITTLE_ENDIAN__
7343 __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
7344 int8x16_t __ret;
7345 __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
7346 return __ret;
7347 }
7348 #else
7349 __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
7350 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7351 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7352 int8x16_t __ret;
7353 __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
7354 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
7355 return __ret;
7356 }
7357 #endif
7358
7359 #ifdef __LITTLE_ENDIAN__
7360 __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
7361 int32x4_t __ret;
7362 __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
7363 return __ret;
7364 }
7365 #else
7366 __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
7367 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7368 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7369 int32x4_t __ret;
7370 __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
7371 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7372 return __ret;
7373 }
7374 #endif
7375
7376 #ifdef __LITTLE_ENDIAN__
7377 __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
7378 int16x8_t __ret;
7379 __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
7380 return __ret;
7381 }
7382 #else
7383 __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
7384 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7385 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7386 int16x8_t __ret;
7387 __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
7388 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7389 return __ret;
7390 }
7391 #endif
7392
7393 #ifdef __LITTLE_ENDIAN__
7394 __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
7395 uint8x8_t __ret;
7396 __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
7397 return __ret;
7398 }
7399 #else
7400 __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
7401 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7402 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7403 uint8x8_t __ret;
7404 __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
7405 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7406 return __ret;
7407 }
7408 #endif
7409
7410 #ifdef __LITTLE_ENDIAN__
7411 __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
7412 uint32x2_t __ret;
7413 __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
7414 return __ret;
7415 }
7416 #else
7417 __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
7418 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7419 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7420 uint32x2_t __ret;
7421 __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
7422 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7423 return __ret;
7424 }
7425 #endif
7426
7427 #ifdef __LITTLE_ENDIAN__
7428 __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
7429 uint16x4_t __ret;
7430 __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
7431 return __ret;
7432 }
7433 #else
7434 __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
7435 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7436 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7437 uint16x4_t __ret;
7438 __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
7439 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7440 return __ret;
7441 }
7442 #endif
7443
7444 #ifdef __LITTLE_ENDIAN__
7445 __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
7446 int8x8_t __ret;
7447 __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
7448 return __ret;
7449 }
7450 #else
7451 __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
7452 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
7453 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
7454 int8x8_t __ret;
7455 __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
7456 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
7457 return __ret;
7458 }
7459 #endif
7460
7461 #ifdef __LITTLE_ENDIAN__
7462 __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
7463 int32x2_t __ret;
7464 __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
7465 return __ret;
7466 }
7467 #else
7468 __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
7469 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
7470 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
7471 int32x2_t __ret;
7472 __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
7473 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
7474 return __ret;
7475 }
7476 #endif
7477
7478 #ifdef __LITTLE_ENDIAN__
7479 __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
7480 int16x4_t __ret;
7481 __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
7482 return __ret;
7483 }
7484 #else
7485 __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
7486 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
7487 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
7488 int16x4_t __ret;
7489 __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
7490 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
7491 return __ret;
7492 }
7493 #endif
7494
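/*
 * vld1 / vld1q: load one 64-bit or 128-bit vector of consecutive elements
 * from memory; the integer passed to the builtin is an internal type code,
 * not an element count. Illustrative sketch only (buf is a hypothetical
 * array, not part of this header):
 *
 *   float buf[4] = {1.0f, 2.0f, 3.0f, 4.0f};
 *   float32x4_t v = vld1q_f32(buf);    // v = {1, 2, 3, 4}
 */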
7495 #ifdef __LITTLE_ENDIAN__
7496 #define vld1_p8(__p0) __extension__ ({ \
7497 poly8x8_t __ret; \
7498 __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
7499 __ret; \
7500 })
7501 #else
7502 #define vld1_p8(__p0) __extension__ ({ \
7503 poly8x8_t __ret; \
7504 __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
7505 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7506 __ret; \
7507 })
7508 #endif
7509
7510 #ifdef __LITTLE_ENDIAN__
7511 #define vld1_p16(__p0) __extension__ ({ \
7512 poly16x4_t __ret; \
7513 __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
7514 __ret; \
7515 })
7516 #else
7517 #define vld1_p16(__p0) __extension__ ({ \
7518 poly16x4_t __ret; \
7519 __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
7520 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7521 __ret; \
7522 })
7523 #endif
7524
7525 #ifdef __LITTLE_ENDIAN__
7526 #define vld1q_p8(__p0) __extension__ ({ \
7527 poly8x16_t __ret; \
7528 __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
7529 __ret; \
7530 })
7531 #else
7532 #define vld1q_p8(__p0) __extension__ ({ \
7533 poly8x16_t __ret; \
7534 __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
7535 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7536 __ret; \
7537 })
7538 #endif
7539
7540 #ifdef __LITTLE_ENDIAN__
7541 #define vld1q_p16(__p0) __extension__ ({ \
7542 poly16x8_t __ret; \
7543 __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
7544 __ret; \
7545 })
7546 #else
7547 #define vld1q_p16(__p0) __extension__ ({ \
7548 poly16x8_t __ret; \
7549 __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
7550 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7551 __ret; \
7552 })
7553 #endif
7554
7555 #ifdef __LITTLE_ENDIAN__
7556 #define vld1q_u8(__p0) __extension__ ({ \
7557 uint8x16_t __ret; \
7558 __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
7559 __ret; \
7560 })
7561 #else
7562 #define vld1q_u8(__p0) __extension__ ({ \
7563 uint8x16_t __ret; \
7564 __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
7565 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7566 __ret; \
7567 })
7568 #endif
7569
7570 #ifdef __LITTLE_ENDIAN__
7571 #define vld1q_u32(__p0) __extension__ ({ \
7572 uint32x4_t __ret; \
7573 __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
7574 __ret; \
7575 })
7576 #else
7577 #define vld1q_u32(__p0) __extension__ ({ \
7578 uint32x4_t __ret; \
7579 __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
7580 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7581 __ret; \
7582 })
7583 #endif
7584
7585 #ifdef __LITTLE_ENDIAN__
7586 #define vld1q_u64(__p0) __extension__ ({ \
7587 uint64x2_t __ret; \
7588 __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
7589 __ret; \
7590 })
7591 #else
7592 #define vld1q_u64(__p0) __extension__ ({ \
7593 uint64x2_t __ret; \
7594 __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
7595 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7596 __ret; \
7597 })
7598 #endif
7599
7600 #ifdef __LITTLE_ENDIAN__
7601 #define vld1q_u16(__p0) __extension__ ({ \
7602 uint16x8_t __ret; \
7603 __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
7604 __ret; \
7605 })
7606 #else
7607 #define vld1q_u16(__p0) __extension__ ({ \
7608 uint16x8_t __ret; \
7609 __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
7610 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7611 __ret; \
7612 })
7613 #endif
7614
7615 #ifdef __LITTLE_ENDIAN__
7616 #define vld1q_s8(__p0) __extension__ ({ \
7617 int8x16_t __ret; \
7618 __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
7619 __ret; \
7620 })
7621 #else
7622 #define vld1q_s8(__p0) __extension__ ({ \
7623 int8x16_t __ret; \
7624 __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
7625 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7626 __ret; \
7627 })
7628 #endif
7629
7630 #ifdef __LITTLE_ENDIAN__
7631 #define vld1q_f32(__p0) __extension__ ({ \
7632 float32x4_t __ret; \
7633 __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
7634 __ret; \
7635 })
7636 #else
7637 #define vld1q_f32(__p0) __extension__ ({ \
7638 float32x4_t __ret; \
7639 __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
7640 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7641 __ret; \
7642 })
7643 #endif
7644
7645 #ifdef __LITTLE_ENDIAN__
7646 #define vld1q_f16(__p0) __extension__ ({ \
7647 float16x8_t __ret; \
7648 __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
7649 __ret; \
7650 })
7651 #else
7652 #define vld1q_f16(__p0) __extension__ ({ \
7653 float16x8_t __ret; \
7654 __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
7655 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7656 __ret; \
7657 })
7658 #endif
7659
7660 #ifdef __LITTLE_ENDIAN__
7661 #define vld1q_s32(__p0) __extension__ ({ \
7662 int32x4_t __ret; \
7663 __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
7664 __ret; \
7665 })
7666 #else
7667 #define vld1q_s32(__p0) __extension__ ({ \
7668 int32x4_t __ret; \
7669 __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
7670 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7671 __ret; \
7672 })
7673 #endif
7674
7675 #ifdef __LITTLE_ENDIAN__
7676 #define vld1q_s64(__p0) __extension__ ({ \
7677 int64x2_t __ret; \
7678 __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
7679 __ret; \
7680 })
7681 #else
7682 #define vld1q_s64(__p0) __extension__ ({ \
7683 int64x2_t __ret; \
7684 __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
7685 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7686 __ret; \
7687 })
7688 #endif
7689
7690 #ifdef __LITTLE_ENDIAN__
7691 #define vld1q_s16(__p0) __extension__ ({ \
7692 int16x8_t __ret; \
7693 __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
7694 __ret; \
7695 })
7696 #else
7697 #define vld1q_s16(__p0) __extension__ ({ \
7698 int16x8_t __ret; \
7699 __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
7700 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7701 __ret; \
7702 })
7703 #endif
7704
7705 #ifdef __LITTLE_ENDIAN__
7706 #define vld1_u8(__p0) __extension__ ({ \
7707 uint8x8_t __ret; \
7708 __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
7709 __ret; \
7710 })
7711 #else
7712 #define vld1_u8(__p0) __extension__ ({ \
7713 uint8x8_t __ret; \
7714 __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
7715 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7716 __ret; \
7717 })
7718 #endif
7719
7720 #ifdef __LITTLE_ENDIAN__
7721 #define vld1_u32(__p0) __extension__ ({ \
7722 uint32x2_t __ret; \
7723 __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
7724 __ret; \
7725 })
7726 #else
7727 #define vld1_u32(__p0) __extension__ ({ \
7728 uint32x2_t __ret; \
7729 __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
7730 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7731 __ret; \
7732 })
7733 #endif
7734
7735 #ifdef __LITTLE_ENDIAN__
7736 #define vld1_u64(__p0) __extension__ ({ \
7737 uint64x1_t __ret; \
7738 __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
7739 __ret; \
7740 })
7741 #else
7742 #define vld1_u64(__p0) __extension__ ({ \
7743 uint64x1_t __ret; \
7744 __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
7745 __ret; \
7746 })
7747 #endif
7748
7749 #ifdef __LITTLE_ENDIAN__
7750 #define vld1_u16(__p0) __extension__ ({ \
7751 uint16x4_t __ret; \
7752 __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
7753 __ret; \
7754 })
7755 #else
7756 #define vld1_u16(__p0) __extension__ ({ \
7757 uint16x4_t __ret; \
7758 __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
7759 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7760 __ret; \
7761 })
7762 #endif
7763
7764 #ifdef __LITTLE_ENDIAN__
7765 #define vld1_s8(__p0) __extension__ ({ \
7766 int8x8_t __ret; \
7767 __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
7768 __ret; \
7769 })
7770 #else
7771 #define vld1_s8(__p0) __extension__ ({ \
7772 int8x8_t __ret; \
7773 __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
7774 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7775 __ret; \
7776 })
7777 #endif
7778
7779 #ifdef __LITTLE_ENDIAN__
7780 #define vld1_f32(__p0) __extension__ ({ \
7781 float32x2_t __ret; \
7782 __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
7783 __ret; \
7784 })
7785 #else
7786 #define vld1_f32(__p0) __extension__ ({ \
7787 float32x2_t __ret; \
7788 __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
7789 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7790 __ret; \
7791 })
7792 #endif
7793
7794 #ifdef __LITTLE_ENDIAN__
7795 #define vld1_f16(__p0) __extension__ ({ \
7796 float16x4_t __ret; \
7797 __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
7798 __ret; \
7799 })
7800 #else
7801 #define vld1_f16(__p0) __extension__ ({ \
7802 float16x4_t __ret; \
7803 __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
7804 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7805 __ret; \
7806 })
7807 #endif
7808
7809 #ifdef __LITTLE_ENDIAN__
7810 #define vld1_s32(__p0) __extension__ ({ \
7811 int32x2_t __ret; \
7812 __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
7813 __ret; \
7814 })
7815 #else
7816 #define vld1_s32(__p0) __extension__ ({ \
7817 int32x2_t __ret; \
7818 __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
7819 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7820 __ret; \
7821 })
7822 #endif
7823
7824 #ifdef __LITTLE_ENDIAN__
7825 #define vld1_s64(__p0) __extension__ ({ \
7826 int64x1_t __ret; \
7827 __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
7828 __ret; \
7829 })
7830 #else
7831 #define vld1_s64(__p0) __extension__ ({ \
7832 int64x1_t __ret; \
7833 __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
7834 __ret; \
7835 })
7836 #endif
7837
7838 #ifdef __LITTLE_ENDIAN__
7839 #define vld1_s16(__p0) __extension__ ({ \
7840 int16x4_t __ret; \
7841 __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
7842 __ret; \
7843 })
7844 #else
7845 #define vld1_s16(__p0) __extension__ ({ \
7846 int16x4_t __ret; \
7847 __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
7848 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7849 __ret; \
7850 })
7851 #endif
7852
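/*
 * vld1_dup / vld1q_dup: load a single element from memory and replicate it
 * into every lane of the result, e.g. vld1q_dup_s32(p) with *p == 7 yields
 * {7, 7, 7, 7}.
 */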
7853 #ifdef __LITTLE_ENDIAN__
7854 #define vld1_dup_p8(__p0) __extension__ ({ \
7855 poly8x8_t __ret; \
7856 __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
7857 __ret; \
7858 })
7859 #else
7860 #define vld1_dup_p8(__p0) __extension__ ({ \
7861 poly8x8_t __ret; \
7862 __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
7863 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7864 __ret; \
7865 })
7866 #endif
7867
7868 #ifdef __LITTLE_ENDIAN__
7869 #define vld1_dup_p16(__p0) __extension__ ({ \
7870 poly16x4_t __ret; \
7871 __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
7872 __ret; \
7873 })
7874 #else
7875 #define vld1_dup_p16(__p0) __extension__ ({ \
7876 poly16x4_t __ret; \
7877 __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
7878 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7879 __ret; \
7880 })
7881 #endif
7882
7883 #ifdef __LITTLE_ENDIAN__
7884 #define vld1q_dup_p8(__p0) __extension__ ({ \
7885 poly8x16_t __ret; \
7886 __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
7887 __ret; \
7888 })
7889 #else
7890 #define vld1q_dup_p8(__p0) __extension__ ({ \
7891 poly8x16_t __ret; \
7892 __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
7893 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7894 __ret; \
7895 })
7896 #endif
7897
7898 #ifdef __LITTLE_ENDIAN__
7899 #define vld1q_dup_p16(__p0) __extension__ ({ \
7900 poly16x8_t __ret; \
7901 __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
7902 __ret; \
7903 })
7904 #else
7905 #define vld1q_dup_p16(__p0) __extension__ ({ \
7906 poly16x8_t __ret; \
7907 __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
7908 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7909 __ret; \
7910 })
7911 #endif
7912
7913 #ifdef __LITTLE_ENDIAN__
7914 #define vld1q_dup_u8(__p0) __extension__ ({ \
7915 uint8x16_t __ret; \
7916 __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
7917 __ret; \
7918 })
7919 #else
7920 #define vld1q_dup_u8(__p0) __extension__ ({ \
7921 uint8x16_t __ret; \
7922 __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
7923 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7924 __ret; \
7925 })
7926 #endif
7927
7928 #ifdef __LITTLE_ENDIAN__
7929 #define vld1q_dup_u32(__p0) __extension__ ({ \
7930 uint32x4_t __ret; \
7931 __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
7932 __ret; \
7933 })
7934 #else
7935 #define vld1q_dup_u32(__p0) __extension__ ({ \
7936 uint32x4_t __ret; \
7937 __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
7938 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7939 __ret; \
7940 })
7941 #endif
7942
7943 #ifdef __LITTLE_ENDIAN__
7944 #define vld1q_dup_u64(__p0) __extension__ ({ \
7945 uint64x2_t __ret; \
7946 __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
7947 __ret; \
7948 })
7949 #else
7950 #define vld1q_dup_u64(__p0) __extension__ ({ \
7951 uint64x2_t __ret; \
7952 __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
7953 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
7954 __ret; \
7955 })
7956 #endif
7957
7958 #ifdef __LITTLE_ENDIAN__
7959 #define vld1q_dup_u16(__p0) __extension__ ({ \
7960 uint16x8_t __ret; \
7961 __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
7962 __ret; \
7963 })
7964 #else
7965 #define vld1q_dup_u16(__p0) __extension__ ({ \
7966 uint16x8_t __ret; \
7967 __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
7968 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
7969 __ret; \
7970 })
7971 #endif
7972
7973 #ifdef __LITTLE_ENDIAN__
7974 #define vld1q_dup_s8(__p0) __extension__ ({ \
7975 int8x16_t __ret; \
7976 __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
7977 __ret; \
7978 })
7979 #else
7980 #define vld1q_dup_s8(__p0) __extension__ ({ \
7981 int8x16_t __ret; \
7982 __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
7983 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
7984 __ret; \
7985 })
7986 #endif
7987
7988 #ifdef __LITTLE_ENDIAN__
7989 #define vld1q_dup_f32(__p0) __extension__ ({ \
7990 float32x4_t __ret; \
7991 __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
7992 __ret; \
7993 })
7994 #else
7995 #define vld1q_dup_f32(__p0) __extension__ ({ \
7996 float32x4_t __ret; \
7997 __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
7998 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
7999 __ret; \
8000 })
8001 #endif
8002
8003 #ifdef __LITTLE_ENDIAN__
8004 #define vld1q_dup_f16(__p0) __extension__ ({ \
8005 float16x8_t __ret; \
8006 __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
8007 __ret; \
8008 })
8009 #else
8010 #define vld1q_dup_f16(__p0) __extension__ ({ \
8011 float16x8_t __ret; \
8012 __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
8013 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8014 __ret; \
8015 })
8016 #endif
8017
8018 #ifdef __LITTLE_ENDIAN__
8019 #define vld1q_dup_s32(__p0) __extension__ ({ \
8020 int32x4_t __ret; \
8021 __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
8022 __ret; \
8023 })
8024 #else
8025 #define vld1q_dup_s32(__p0) __extension__ ({ \
8026 int32x4_t __ret; \
8027 __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
8028 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8029 __ret; \
8030 })
8031 #endif
8032
8033 #ifdef __LITTLE_ENDIAN__
8034 #define vld1q_dup_s64(__p0) __extension__ ({ \
8035 int64x2_t __ret; \
8036 __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
8037 __ret; \
8038 })
8039 #else
8040 #define vld1q_dup_s64(__p0) __extension__ ({ \
8041 int64x2_t __ret; \
8042 __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
8043 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8044 __ret; \
8045 })
8046 #endif
8047
8048 #ifdef __LITTLE_ENDIAN__
8049 #define vld1q_dup_s16(__p0) __extension__ ({ \
8050 int16x8_t __ret; \
8051 __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
8052 __ret; \
8053 })
8054 #else
8055 #define vld1q_dup_s16(__p0) __extension__ ({ \
8056 int16x8_t __ret; \
8057 __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
8058 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8059 __ret; \
8060 })
8061 #endif
8062
8063 #ifdef __LITTLE_ENDIAN__
8064 #define vld1_dup_u8(__p0) __extension__ ({ \
8065 uint8x8_t __ret; \
8066 __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
8067 __ret; \
8068 })
8069 #else
8070 #define vld1_dup_u8(__p0) __extension__ ({ \
8071 uint8x8_t __ret; \
8072 __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
8073 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8074 __ret; \
8075 })
8076 #endif
8077
8078 #ifdef __LITTLE_ENDIAN__
8079 #define vld1_dup_u32(__p0) __extension__ ({ \
8080 uint32x2_t __ret; \
8081 __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
8082 __ret; \
8083 })
8084 #else
8085 #define vld1_dup_u32(__p0) __extension__ ({ \
8086 uint32x2_t __ret; \
8087 __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
8088 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8089 __ret; \
8090 })
8091 #endif
8092
8093 #ifdef __LITTLE_ENDIAN__
8094 #define vld1_dup_u64(__p0) __extension__ ({ \
8095 uint64x1_t __ret; \
8096 __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
8097 __ret; \
8098 })
8099 #else
8100 #define vld1_dup_u64(__p0) __extension__ ({ \
8101 uint64x1_t __ret; \
8102 __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
8103 __ret; \
8104 })
8105 #endif
8106
8107 #ifdef __LITTLE_ENDIAN__
8108 #define vld1_dup_u16(__p0) __extension__ ({ \
8109 uint16x4_t __ret; \
8110 __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
8111 __ret; \
8112 })
8113 #else
8114 #define vld1_dup_u16(__p0) __extension__ ({ \
8115 uint16x4_t __ret; \
8116 __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
8117 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8118 __ret; \
8119 })
8120 #endif
8121
8122 #ifdef __LITTLE_ENDIAN__
8123 #define vld1_dup_s8(__p0) __extension__ ({ \
8124 int8x8_t __ret; \
8125 __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
8126 __ret; \
8127 })
8128 #else
8129 #define vld1_dup_s8(__p0) __extension__ ({ \
8130 int8x8_t __ret; \
8131 __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
8132 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8133 __ret; \
8134 })
8135 #endif
8136
8137 #ifdef __LITTLE_ENDIAN__
8138 #define vld1_dup_f32(__p0) __extension__ ({ \
8139 float32x2_t __ret; \
8140 __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
8141 __ret; \
8142 })
8143 #else
8144 #define vld1_dup_f32(__p0) __extension__ ({ \
8145 float32x2_t __ret; \
8146 __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
8147 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8148 __ret; \
8149 })
8150 #endif
8151
8152 #ifdef __LITTLE_ENDIAN__
8153 #define vld1_dup_f16(__p0) __extension__ ({ \
8154 float16x4_t __ret; \
8155 __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
8156 __ret; \
8157 })
8158 #else
8159 #define vld1_dup_f16(__p0) __extension__ ({ \
8160 float16x4_t __ret; \
8161 __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
8162 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8163 __ret; \
8164 })
8165 #endif
8166
8167 #ifdef __LITTLE_ENDIAN__
8168 #define vld1_dup_s32(__p0) __extension__ ({ \
8169 int32x2_t __ret; \
8170 __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
8171 __ret; \
8172 })
8173 #else
8174 #define vld1_dup_s32(__p0) __extension__ ({ \
8175 int32x2_t __ret; \
8176 __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
8177 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8178 __ret; \
8179 })
8180 #endif
8181
8182 #ifdef __LITTLE_ENDIAN__
8183 #define vld1_dup_s64(__p0) __extension__ ({ \
8184 int64x1_t __ret; \
8185 __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
8186 __ret; \
8187 })
8188 #else
8189 #define vld1_dup_s64(__p0) __extension__ ({ \
8190 int64x1_t __ret; \
8191 __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
8192 __ret; \
8193 })
8194 #endif
8195
8196 #ifdef __LITTLE_ENDIAN__
8197 #define vld1_dup_s16(__p0) __extension__ ({ \
8198 int16x4_t __ret; \
8199 __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
8200 __ret; \
8201 })
8202 #else
8203 #define vld1_dup_s16(__p0) __extension__ ({ \
8204 int16x4_t __ret; \
8205 __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
8206 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8207 __ret; \
8208 })
8209 #endif
8210
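/*
 * Illustrative usage (not part of the generated header): the vld1[q]_dup_*
 * macros above load a single element from memory and replicate it across
 * every lane of the result; on big-endian targets the extra
 * __builtin_shufflevector restores the architectural lane order. A minimal
 * sketch, assuming the caller includes <arm_neon.h>:
 *
 *   const uint8_t k = 0x2a;
 *   uint8x16_t vk = vld1q_dup_u8(&k);    // all 16 lanes hold 0x2a
 *
 *   const float32_t g = 9.81f;
 *   float32x2_t vg = vld1_dup_f32(&g);   // both lanes hold 9.81f
 */
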
8211 #ifdef __LITTLE_ENDIAN__
8212 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8213 poly8x8_t __s1 = __p1; \
8214 poly8x8_t __ret; \
8215 __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
8216 __ret; \
8217 })
8218 #else
8219 #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8220 poly8x8_t __s1 = __p1; \
8221 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8222 poly8x8_t __ret; \
8223 __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
8224 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8225 __ret; \
8226 })
8227 #endif
8228
8229 #ifdef __LITTLE_ENDIAN__
8230 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8231 poly16x4_t __s1 = __p1; \
8232 poly16x4_t __ret; \
8233 __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
8234 __ret; \
8235 })
8236 #else
8237 #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8238 poly16x4_t __s1 = __p1; \
8239 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8240 poly16x4_t __ret; \
8241 __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
8242 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8243 __ret; \
8244 })
8245 #endif
8246
8247 #ifdef __LITTLE_ENDIAN__
8248 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8249 poly8x16_t __s1 = __p1; \
8250 poly8x16_t __ret; \
8251 __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
8252 __ret; \
8253 })
8254 #else
8255 #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
8256 poly8x16_t __s1 = __p1; \
8257 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8258 poly8x16_t __ret; \
8259 __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
8260 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8261 __ret; \
8262 })
8263 #endif
8264
8265 #ifdef __LITTLE_ENDIAN__
8266 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8267 poly16x8_t __s1 = __p1; \
8268 poly16x8_t __ret; \
8269 __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
8270 __ret; \
8271 })
8272 #else
8273 #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
8274 poly16x8_t __s1 = __p1; \
8275 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8276 poly16x8_t __ret; \
8277 __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
8278 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8279 __ret; \
8280 })
8281 #endif
8282
8283 #ifdef __LITTLE_ENDIAN__
8284 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8285 uint8x16_t __s1 = __p1; \
8286 uint8x16_t __ret; \
8287 __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
8288 __ret; \
8289 })
8290 #else
8291 #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8292 uint8x16_t __s1 = __p1; \
8293 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8294 uint8x16_t __ret; \
8295 __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
8296 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8297 __ret; \
8298 })
8299 #endif
8300
8301 #ifdef __LITTLE_ENDIAN__
8302 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8303 uint32x4_t __s1 = __p1; \
8304 uint32x4_t __ret; \
8305 __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
8306 __ret; \
8307 })
8308 #else
8309 #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8310 uint32x4_t __s1 = __p1; \
8311 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8312 uint32x4_t __ret; \
8313 __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
8314 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8315 __ret; \
8316 })
8317 #endif
8318
8319 #ifdef __LITTLE_ENDIAN__
8320 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8321 uint64x2_t __s1 = __p1; \
8322 uint64x2_t __ret; \
8323 __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
8324 __ret; \
8325 })
8326 #else
8327 #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8328 uint64x2_t __s1 = __p1; \
8329 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8330 uint64x2_t __ret; \
8331 __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
8332 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8333 __ret; \
8334 })
8335 #endif
8336
8337 #ifdef __LITTLE_ENDIAN__
8338 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8339 uint16x8_t __s1 = __p1; \
8340 uint16x8_t __ret; \
8341 __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
8342 __ret; \
8343 })
8344 #else
8345 #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8346 uint16x8_t __s1 = __p1; \
8347 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8348 uint16x8_t __ret; \
8349 __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
8350 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8351 __ret; \
8352 })
8353 #endif
8354
8355 #ifdef __LITTLE_ENDIAN__
8356 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8357 int8x16_t __s1 = __p1; \
8358 int8x16_t __ret; \
8359 __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
8360 __ret; \
8361 })
8362 #else
8363 #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8364 int8x16_t __s1 = __p1; \
8365 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8366 int8x16_t __ret; \
8367 __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
8368 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8369 __ret; \
8370 })
8371 #endif
8372
8373 #ifdef __LITTLE_ENDIAN__
8374 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8375 float32x4_t __s1 = __p1; \
8376 float32x4_t __ret; \
8377 __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
8378 __ret; \
8379 })
8380 #else
8381 #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8382 float32x4_t __s1 = __p1; \
8383 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8384 float32x4_t __ret; \
8385 __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
8386 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8387 __ret; \
8388 })
8389 #endif
8390
8391 #ifdef __LITTLE_ENDIAN__
8392 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8393 float16x8_t __s1 = __p1; \
8394 float16x8_t __ret; \
8395 __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
8396 __ret; \
8397 })
8398 #else
8399 #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8400 float16x8_t __s1 = __p1; \
8401 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8402 float16x8_t __ret; \
8403 __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
8404 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8405 __ret; \
8406 })
8407 #endif
8408
8409 #ifdef __LITTLE_ENDIAN__
8410 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8411 int32x4_t __s1 = __p1; \
8412 int32x4_t __ret; \
8413 __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
8414 __ret; \
8415 })
8416 #else
8417 #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8418 int32x4_t __s1 = __p1; \
8419 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8420 int32x4_t __ret; \
8421 __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
8422 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8423 __ret; \
8424 })
8425 #endif
8426
8427 #ifdef __LITTLE_ENDIAN__
8428 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8429 int64x2_t __s1 = __p1; \
8430 int64x2_t __ret; \
8431 __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
8432 __ret; \
8433 })
8434 #else
8435 #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8436 int64x2_t __s1 = __p1; \
8437 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8438 int64x2_t __ret; \
8439 __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
8440 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8441 __ret; \
8442 })
8443 #endif
8444
8445 #ifdef __LITTLE_ENDIAN__
8446 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8447 int16x8_t __s1 = __p1; \
8448 int16x8_t __ret; \
8449 __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
8450 __ret; \
8451 })
8452 #else
8453 #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8454 int16x8_t __s1 = __p1; \
8455 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8456 int16x8_t __ret; \
8457 __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
8458 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8459 __ret; \
8460 })
8461 #endif
8462
8463 #ifdef __LITTLE_ENDIAN__
8464 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8465 uint8x8_t __s1 = __p1; \
8466 uint8x8_t __ret; \
8467 __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
8468 __ret; \
8469 })
8470 #else
8471 #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
8472 uint8x8_t __s1 = __p1; \
8473 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8474 uint8x8_t __ret; \
8475 __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
8476 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8477 __ret; \
8478 })
8479 #endif
8480
8481 #ifdef __LITTLE_ENDIAN__
8482 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8483 uint32x2_t __s1 = __p1; \
8484 uint32x2_t __ret; \
8485 __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
8486 __ret; \
8487 })
8488 #else
8489 #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
8490 uint32x2_t __s1 = __p1; \
8491 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8492 uint32x2_t __ret; \
8493 __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
8494 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8495 __ret; \
8496 })
8497 #endif
8498
8499 #ifdef __LITTLE_ENDIAN__
8500 #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8501 uint64x1_t __s1 = __p1; \
8502 uint64x1_t __ret; \
8503 __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
8504 __ret; \
8505 })
8506 #else
8507 #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
8508 uint64x1_t __s1 = __p1; \
8509 uint64x1_t __ret; \
8510 __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
8511 __ret; \
8512 })
8513 #endif
8514
8515 #ifdef __LITTLE_ENDIAN__
8516 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8517 uint16x4_t __s1 = __p1; \
8518 uint16x4_t __ret; \
8519 __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
8520 __ret; \
8521 })
8522 #else
8523 #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
8524 uint16x4_t __s1 = __p1; \
8525 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8526 uint16x4_t __ret; \
8527 __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
8528 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8529 __ret; \
8530 })
8531 #endif
8532
8533 #ifdef __LITTLE_ENDIAN__
8534 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8535 int8x8_t __s1 = __p1; \
8536 int8x8_t __ret; \
8537 __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
8538 __ret; \
8539 })
8540 #else
8541 #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
8542 int8x8_t __s1 = __p1; \
8543 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
8544 int8x8_t __ret; \
8545 __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
8546 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
8547 __ret; \
8548 })
8549 #endif
8550
8551 #ifdef __LITTLE_ENDIAN__
8552 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8553 float32x2_t __s1 = __p1; \
8554 float32x2_t __ret; \
8555 __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
8556 __ret; \
8557 })
8558 #else
8559 #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
8560 float32x2_t __s1 = __p1; \
8561 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8562 float32x2_t __ret; \
8563 __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
8564 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8565 __ret; \
8566 })
8567 #endif
8568
8569 #ifdef __LITTLE_ENDIAN__
8570 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8571 float16x4_t __s1 = __p1; \
8572 float16x4_t __ret; \
8573 __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
8574 __ret; \
8575 })
8576 #else
8577 #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
8578 float16x4_t __s1 = __p1; \
8579 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8580 float16x4_t __ret; \
8581 __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
8582 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8583 __ret; \
8584 })
8585 #endif
8586
8587 #ifdef __LITTLE_ENDIAN__
8588 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8589 int32x2_t __s1 = __p1; \
8590 int32x2_t __ret; \
8591 __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
8592 __ret; \
8593 })
8594 #else
8595 #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
8596 int32x2_t __s1 = __p1; \
8597 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
8598 int32x2_t __ret; \
8599 __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
8600 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
8601 __ret; \
8602 })
8603 #endif
8604
8605 #ifdef __LITTLE_ENDIAN__
8606 #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8607 int64x1_t __s1 = __p1; \
8608 int64x1_t __ret; \
8609 __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
8610 __ret; \
8611 })
8612 #else
8613 #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
8614 int64x1_t __s1 = __p1; \
8615 int64x1_t __ret; \
8616 __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
8617 __ret; \
8618 })
8619 #endif
8620
8621 #ifdef __LITTLE_ENDIAN__
8622 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8623 int16x4_t __s1 = __p1; \
8624 int16x4_t __ret; \
8625 __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
8626 __ret; \
8627 })
8628 #else
8629 #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
8630 int16x4_t __s1 = __p1; \
8631 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
8632 int16x4_t __ret; \
8633 __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
8634 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
8635 __ret; \
8636 })
8637 #endif
8638
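/*
 * Illustrative usage (not part of the generated header): the vld1[q]_lane_*
 * macros load one element from memory into the lane selected by the last
 * argument (which must be a compile-time constant), leaving the other lanes
 * of the source vector untouched. A minimal sketch, assuming the caller
 * includes <arm_neon.h> (vdupq_n_s16 is defined elsewhere in this header):
 *
 *   int16_t buf[1] = { 7 };
 *   int16x8_t v = vdupq_n_s16(0);
 *   v = vld1q_lane_s16(buf, v, 3);   // lane 3 becomes 7, all other lanes stay 0
 */
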
8639 #ifdef __LITTLE_ENDIAN__
8640 #define vld2_p8(__p0) __extension__ ({ \
8641 poly8x8x2_t __ret; \
8642 __builtin_neon_vld2_v(&__ret, __p0, 4); \
8643 __ret; \
8644 })
8645 #else
8646 #define vld2_p8(__p0) __extension__ ({ \
8647 poly8x8x2_t __ret; \
8648 __builtin_neon_vld2_v(&__ret, __p0, 4); \
8649 \
8650 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8651 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8652 __ret; \
8653 })
8654 #endif
8655
8656 #ifdef __LITTLE_ENDIAN__
8657 #define vld2_p16(__p0) __extension__ ({ \
8658 poly16x4x2_t __ret; \
8659 __builtin_neon_vld2_v(&__ret, __p0, 5); \
8660 __ret; \
8661 })
8662 #else
8663 #define vld2_p16(__p0) __extension__ ({ \
8664 poly16x4x2_t __ret; \
8665 __builtin_neon_vld2_v(&__ret, __p0, 5); \
8666 \
8667 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8668 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8669 __ret; \
8670 })
8671 #endif
8672
8673 #ifdef __LITTLE_ENDIAN__
8674 #define vld2q_p8(__p0) __extension__ ({ \
8675 poly8x16x2_t __ret; \
8676 __builtin_neon_vld2q_v(&__ret, __p0, 36); \
8677 __ret; \
8678 })
8679 #else
8680 #define vld2q_p8(__p0) __extension__ ({ \
8681 poly8x16x2_t __ret; \
8682 __builtin_neon_vld2q_v(&__ret, __p0, 36); \
8683 \
8684 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8685 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8686 __ret; \
8687 })
8688 #endif
8689
8690 #ifdef __LITTLE_ENDIAN__
8691 #define vld2q_p16(__p0) __extension__ ({ \
8692 poly16x8x2_t __ret; \
8693 __builtin_neon_vld2q_v(&__ret, __p0, 37); \
8694 __ret; \
8695 })
8696 #else
8697 #define vld2q_p16(__p0) __extension__ ({ \
8698 poly16x8x2_t __ret; \
8699 __builtin_neon_vld2q_v(&__ret, __p0, 37); \
8700 \
8701 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8702 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8703 __ret; \
8704 })
8705 #endif
8706
8707 #ifdef __LITTLE_ENDIAN__
8708 #define vld2q_u8(__p0) __extension__ ({ \
8709 uint8x16x2_t __ret; \
8710 __builtin_neon_vld2q_v(&__ret, __p0, 48); \
8711 __ret; \
8712 })
8713 #else
8714 #define vld2q_u8(__p0) __extension__ ({ \
8715 uint8x16x2_t __ret; \
8716 __builtin_neon_vld2q_v(&__ret, __p0, 48); \
8717 \
8718 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8719 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8720 __ret; \
8721 })
8722 #endif
8723
8724 #ifdef __LITTLE_ENDIAN__
8725 #define vld2q_u32(__p0) __extension__ ({ \
8726 uint32x4x2_t __ret; \
8727 __builtin_neon_vld2q_v(&__ret, __p0, 50); \
8728 __ret; \
8729 })
8730 #else
8731 #define vld2q_u32(__p0) __extension__ ({ \
8732 uint32x4x2_t __ret; \
8733 __builtin_neon_vld2q_v(&__ret, __p0, 50); \
8734 \
8735 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8736 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8737 __ret; \
8738 })
8739 #endif
8740
8741 #ifdef __LITTLE_ENDIAN__
8742 #define vld2q_u16(__p0) __extension__ ({ \
8743 uint16x8x2_t __ret; \
8744 __builtin_neon_vld2q_v(&__ret, __p0, 49); \
8745 __ret; \
8746 })
8747 #else
8748 #define vld2q_u16(__p0) __extension__ ({ \
8749 uint16x8x2_t __ret; \
8750 __builtin_neon_vld2q_v(&__ret, __p0, 49); \
8751 \
8752 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8753 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8754 __ret; \
8755 })
8756 #endif
8757
8758 #ifdef __LITTLE_ENDIAN__
8759 #define vld2q_s8(__p0) __extension__ ({ \
8760 int8x16x2_t __ret; \
8761 __builtin_neon_vld2q_v(&__ret, __p0, 32); \
8762 __ret; \
8763 })
8764 #else
8765 #define vld2q_s8(__p0) __extension__ ({ \
8766 int8x16x2_t __ret; \
8767 __builtin_neon_vld2q_v(&__ret, __p0, 32); \
8768 \
8769 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8770 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
8771 __ret; \
8772 })
8773 #endif
8774
8775 #ifdef __LITTLE_ENDIAN__
8776 #define vld2q_f32(__p0) __extension__ ({ \
8777 float32x4x2_t __ret; \
8778 __builtin_neon_vld2q_v(&__ret, __p0, 41); \
8779 __ret; \
8780 })
8781 #else
8782 #define vld2q_f32(__p0) __extension__ ({ \
8783 float32x4x2_t __ret; \
8784 __builtin_neon_vld2q_v(&__ret, __p0, 41); \
8785 \
8786 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8787 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8788 __ret; \
8789 })
8790 #endif
8791
8792 #ifdef __LITTLE_ENDIAN__
8793 #define vld2q_f16(__p0) __extension__ ({ \
8794 float16x8x2_t __ret; \
8795 __builtin_neon_vld2q_v(&__ret, __p0, 40); \
8796 __ret; \
8797 })
8798 #else
8799 #define vld2q_f16(__p0) __extension__ ({ \
8800 float16x8x2_t __ret; \
8801 __builtin_neon_vld2q_v(&__ret, __p0, 40); \
8802 \
8803 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8804 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8805 __ret; \
8806 })
8807 #endif
8808
8809 #ifdef __LITTLE_ENDIAN__
8810 #define vld2q_s32(__p0) __extension__ ({ \
8811 int32x4x2_t __ret; \
8812 __builtin_neon_vld2q_v(&__ret, __p0, 34); \
8813 __ret; \
8814 })
8815 #else
8816 #define vld2q_s32(__p0) __extension__ ({ \
8817 int32x4x2_t __ret; \
8818 __builtin_neon_vld2q_v(&__ret, __p0, 34); \
8819 \
8820 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8821 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8822 __ret; \
8823 })
8824 #endif
8825
8826 #ifdef __LITTLE_ENDIAN__
8827 #define vld2q_s16(__p0) __extension__ ({ \
8828 int16x8x2_t __ret; \
8829 __builtin_neon_vld2q_v(&__ret, __p0, 33); \
8830 __ret; \
8831 })
8832 #else
8833 #define vld2q_s16(__p0) __extension__ ({ \
8834 int16x8x2_t __ret; \
8835 __builtin_neon_vld2q_v(&__ret, __p0, 33); \
8836 \
8837 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8838 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8839 __ret; \
8840 })
8841 #endif
8842
8843 #ifdef __LITTLE_ENDIAN__
8844 #define vld2_u8(__p0) __extension__ ({ \
8845 uint8x8x2_t __ret; \
8846 __builtin_neon_vld2_v(&__ret, __p0, 16); \
8847 __ret; \
8848 })
8849 #else
8850 #define vld2_u8(__p0) __extension__ ({ \
8851 uint8x8x2_t __ret; \
8852 __builtin_neon_vld2_v(&__ret, __p0, 16); \
8853 \
8854 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8855 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8856 __ret; \
8857 })
8858 #endif
8859
8860 #ifdef __LITTLE_ENDIAN__
8861 #define vld2_u32(__p0) __extension__ ({ \
8862 uint32x2x2_t __ret; \
8863 __builtin_neon_vld2_v(&__ret, __p0, 18); \
8864 __ret; \
8865 })
8866 #else
8867 #define vld2_u32(__p0) __extension__ ({ \
8868 uint32x2x2_t __ret; \
8869 __builtin_neon_vld2_v(&__ret, __p0, 18); \
8870 \
8871 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
8872 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
8873 __ret; \
8874 })
8875 #endif
8876
8877 #ifdef __LITTLE_ENDIAN__
8878 #define vld2_u64(__p0) __extension__ ({ \
8879 uint64x1x2_t __ret; \
8880 __builtin_neon_vld2_v(&__ret, __p0, 19); \
8881 __ret; \
8882 })
8883 #else
8884 #define vld2_u64(__p0) __extension__ ({ \
8885 uint64x1x2_t __ret; \
8886 __builtin_neon_vld2_v(&__ret, __p0, 19); \
8887 __ret; \
8888 })
8889 #endif
8890
8891 #ifdef __LITTLE_ENDIAN__
8892 #define vld2_u16(__p0) __extension__ ({ \
8893 uint16x4x2_t __ret; \
8894 __builtin_neon_vld2_v(&__ret, __p0, 17); \
8895 __ret; \
8896 })
8897 #else
8898 #define vld2_u16(__p0) __extension__ ({ \
8899 uint16x4x2_t __ret; \
8900 __builtin_neon_vld2_v(&__ret, __p0, 17); \
8901 \
8902 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8903 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8904 __ret; \
8905 })
8906 #endif
8907
8908 #ifdef __LITTLE_ENDIAN__
8909 #define vld2_s8(__p0) __extension__ ({ \
8910 int8x8x2_t __ret; \
8911 __builtin_neon_vld2_v(&__ret, __p0, 0); \
8912 __ret; \
8913 })
8914 #else
8915 #define vld2_s8(__p0) __extension__ ({ \
8916 int8x8x2_t __ret; \
8917 __builtin_neon_vld2_v(&__ret, __p0, 0); \
8918 \
8919 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
8920 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
8921 __ret; \
8922 })
8923 #endif
8924
8925 #ifdef __LITTLE_ENDIAN__
8926 #define vld2_f32(__p0) __extension__ ({ \
8927 float32x2x2_t __ret; \
8928 __builtin_neon_vld2_v(&__ret, __p0, 9); \
8929 __ret; \
8930 })
8931 #else
8932 #define vld2_f32(__p0) __extension__ ({ \
8933 float32x2x2_t __ret; \
8934 __builtin_neon_vld2_v(&__ret, __p0, 9); \
8935 \
8936 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
8937 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
8938 __ret; \
8939 })
8940 #endif
8941
8942 #ifdef __LITTLE_ENDIAN__
8943 #define vld2_f16(__p0) __extension__ ({ \
8944 float16x4x2_t __ret; \
8945 __builtin_neon_vld2_v(&__ret, __p0, 8); \
8946 __ret; \
8947 })
8948 #else
8949 #define vld2_f16(__p0) __extension__ ({ \
8950 float16x4x2_t __ret; \
8951 __builtin_neon_vld2_v(&__ret, __p0, 8); \
8952 \
8953 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
8954 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
8955 __ret; \
8956 })
8957 #endif
8958
8959 #ifdef __LITTLE_ENDIAN__
8960 #define vld2_s32(__p0) __extension__ ({ \
8961 int32x2x2_t __ret; \
8962 __builtin_neon_vld2_v(&__ret, __p0, 2); \
8963 __ret; \
8964 })
8965 #else
8966 #define vld2_s32(__p0) __extension__ ({ \
8967 int32x2x2_t __ret; \
8968 __builtin_neon_vld2_v(&__ret, __p0, 2); \
8969 \
8970 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
8971 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
8972 __ret; \
8973 })
8974 #endif
8975
8976 #ifdef __LITTLE_ENDIAN__
8977 #define vld2_s64(__p0) __extension__ ({ \
8978 int64x1x2_t __ret; \
8979 __builtin_neon_vld2_v(&__ret, __p0, 3); \
8980 __ret; \
8981 })
8982 #else
8983 #define vld2_s64(__p0) __extension__ ({ \
8984 int64x1x2_t __ret; \
8985 __builtin_neon_vld2_v(&__ret, __p0, 3); \
8986 __ret; \
8987 })
8988 #endif
8989
8990 #ifdef __LITTLE_ENDIAN__
8991 #define vld2_s16(__p0) __extension__ ({ \
8992 int16x4x2_t __ret; \
8993 __builtin_neon_vld2_v(&__ret, __p0, 1); \
8994 __ret; \
8995 })
8996 #else
8997 #define vld2_s16(__p0) __extension__ ({ \
8998 int16x4x2_t __ret; \
8999 __builtin_neon_vld2_v(&__ret, __p0, 1); \
9000 \
9001 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9002 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9003 __ret; \
9004 })
9005 #endif
9006
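/*
 * Illustrative usage (not part of the generated header): vld2[q]_* performs a
 * de-interleaving load, splitting an interleaved element stream (for example
 * a two-channel sample buffer) into two separate vectors. A minimal sketch,
 * assuming the caller includes <arm_neon.h>:
 *
 *   uint8_t interleaved[32];                 // L0 R0 L1 R1 ... L15 R15
 *   uint8x16x2_t ch = vld2q_u8(interleaved);
 *   uint8x16_t left  = ch.val[0];            // L0 .. L15
 *   uint8x16_t right = ch.val[1];            // R0 .. R15
 */
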
9007 #ifdef __LITTLE_ENDIAN__
9008 #define vld2_dup_p8(__p0) __extension__ ({ \
9009 poly8x8x2_t __ret; \
9010 __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
9011 __ret; \
9012 })
9013 #else
9014 #define vld2_dup_p8(__p0) __extension__ ({ \
9015 poly8x8x2_t __ret; \
9016 __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
9017 \
9018 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9019 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9020 __ret; \
9021 })
9022 #endif
9023
9024 #ifdef __LITTLE_ENDIAN__
9025 #define vld2_dup_p16(__p0) __extension__ ({ \
9026 poly16x4x2_t __ret; \
9027 __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
9028 __ret; \
9029 })
9030 #else
9031 #define vld2_dup_p16(__p0) __extension__ ({ \
9032 poly16x4x2_t __ret; \
9033 __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
9034 \
9035 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9036 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9037 __ret; \
9038 })
9039 #endif
9040
9041 #ifdef __LITTLE_ENDIAN__
9042 #define vld2_dup_u8(__p0) __extension__ ({ \
9043 uint8x8x2_t __ret; \
9044 __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
9045 __ret; \
9046 })
9047 #else
9048 #define vld2_dup_u8(__p0) __extension__ ({ \
9049 uint8x8x2_t __ret; \
9050 __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
9051 \
9052 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9053 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9054 __ret; \
9055 })
9056 #endif
9057
9058 #ifdef __LITTLE_ENDIAN__
9059 #define vld2_dup_u32(__p0) __extension__ ({ \
9060 uint32x2x2_t __ret; \
9061 __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
9062 __ret; \
9063 })
9064 #else
9065 #define vld2_dup_u32(__p0) __extension__ ({ \
9066 uint32x2x2_t __ret; \
9067 __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
9068 \
9069 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9070 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9071 __ret; \
9072 })
9073 #endif
9074
9075 #ifdef __LITTLE_ENDIAN__
9076 #define vld2_dup_u64(__p0) __extension__ ({ \
9077 uint64x1x2_t __ret; \
9078 __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
9079 __ret; \
9080 })
9081 #else
9082 #define vld2_dup_u64(__p0) __extension__ ({ \
9083 uint64x1x2_t __ret; \
9084 __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
9085 __ret; \
9086 })
9087 #endif
9088
9089 #ifdef __LITTLE_ENDIAN__
9090 #define vld2_dup_u16(__p0) __extension__ ({ \
9091 uint16x4x2_t __ret; \
9092 __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
9093 __ret; \
9094 })
9095 #else
9096 #define vld2_dup_u16(__p0) __extension__ ({ \
9097 uint16x4x2_t __ret; \
9098 __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
9099 \
9100 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9101 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9102 __ret; \
9103 })
9104 #endif
9105
9106 #ifdef __LITTLE_ENDIAN__
9107 #define vld2_dup_s8(__p0) __extension__ ({ \
9108 int8x8x2_t __ret; \
9109 __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
9110 __ret; \
9111 })
9112 #else
9113 #define vld2_dup_s8(__p0) __extension__ ({ \
9114 int8x8x2_t __ret; \
9115 __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
9116 \
9117 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9118 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9119 __ret; \
9120 })
9121 #endif
9122
9123 #ifdef __LITTLE_ENDIAN__
9124 #define vld2_dup_f32(__p0) __extension__ ({ \
9125 float32x2x2_t __ret; \
9126 __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
9127 __ret; \
9128 })
9129 #else
9130 #define vld2_dup_f32(__p0) __extension__ ({ \
9131 float32x2x2_t __ret; \
9132 __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
9133 \
9134 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9135 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9136 __ret; \
9137 })
9138 #endif
9139
9140 #ifdef __LITTLE_ENDIAN__
9141 #define vld2_dup_f16(__p0) __extension__ ({ \
9142 float16x4x2_t __ret; \
9143 __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
9144 __ret; \
9145 })
9146 #else
9147 #define vld2_dup_f16(__p0) __extension__ ({ \
9148 float16x4x2_t __ret; \
9149 __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
9150 \
9151 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9152 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9153 __ret; \
9154 })
9155 #endif
9156
9157 #ifdef __LITTLE_ENDIAN__
9158 #define vld2_dup_s32(__p0) __extension__ ({ \
9159 int32x2x2_t __ret; \
9160 __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
9161 __ret; \
9162 })
9163 #else
9164 #define vld2_dup_s32(__p0) __extension__ ({ \
9165 int32x2x2_t __ret; \
9166 __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
9167 \
9168 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9169 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9170 __ret; \
9171 })
9172 #endif
9173
9174 #ifdef __LITTLE_ENDIAN__
9175 #define vld2_dup_s64(__p0) __extension__ ({ \
9176 int64x1x2_t __ret; \
9177 __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
9178 __ret; \
9179 })
9180 #else
9181 #define vld2_dup_s64(__p0) __extension__ ({ \
9182 int64x1x2_t __ret; \
9183 __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
9184 __ret; \
9185 })
9186 #endif
9187
9188 #ifdef __LITTLE_ENDIAN__
9189 #define vld2_dup_s16(__p0) __extension__ ({ \
9190 int16x4x2_t __ret; \
9191 __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
9192 __ret; \
9193 })
9194 #else
9195 #define vld2_dup_s16(__p0) __extension__ ({ \
9196 int16x4x2_t __ret; \
9197 __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
9198 \
9199 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9200 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9201 __ret; \
9202 })
9203 #endif
9204
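/*
 * Illustrative usage (not part of the generated header): vld2_dup_* loads one
 * {element0, element1} pair from consecutive memory and broadcasts each
 * element across all lanes of its corresponding result vector. A minimal
 * sketch, assuming the caller includes <arm_neon.h>:
 *
 *   uint32_t pair[2] = { 1u, 2u };
 *   uint32x2x2_t d = vld2_dup_u32(pair);
 *   // d.val[0] = { 1, 1 },  d.val[1] = { 2, 2 }
 */
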
9205 #ifdef __LITTLE_ENDIAN__
9206 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
9207 poly8x8x2_t __s1 = __p1; \
9208 poly8x8x2_t __ret; \
9209 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
9210 __ret; \
9211 })
9212 #else
9213 #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
9214 poly8x8x2_t __s1 = __p1; \
9215 poly8x8x2_t __rev1; \
9216 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9217 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9218 poly8x8x2_t __ret; \
9219 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
9220 \
9221 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9222 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9223 __ret; \
9224 })
9225 #endif
9226
9227 #ifdef __LITTLE_ENDIAN__
9228 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9229 poly16x4x2_t __s1 = __p1; \
9230 poly16x4x2_t __ret; \
9231 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
9232 __ret; \
9233 })
9234 #else
9235 #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9236 poly16x4x2_t __s1 = __p1; \
9237 poly16x4x2_t __rev1; \
9238 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9239 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9240 poly16x4x2_t __ret; \
9241 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
9242 \
9243 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9244 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9245 __ret; \
9246 })
9247 #endif
9248
9249 #ifdef __LITTLE_ENDIAN__
9250 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9251 poly16x8x2_t __s1 = __p1; \
9252 poly16x8x2_t __ret; \
9253 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
9254 __ret; \
9255 })
9256 #else
9257 #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
9258 poly16x8x2_t __s1 = __p1; \
9259 poly16x8x2_t __rev1; \
9260 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9261 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9262 poly16x8x2_t __ret; \
9263 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
9264 \
9265 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9266 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9267 __ret; \
9268 })
9269 #endif
9270
9271 #ifdef __LITTLE_ENDIAN__
9272 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9273 uint32x4x2_t __s1 = __p1; \
9274 uint32x4x2_t __ret; \
9275 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
9276 __ret; \
9277 })
9278 #else
9279 #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9280 uint32x4x2_t __s1 = __p1; \
9281 uint32x4x2_t __rev1; \
9282 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9283 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9284 uint32x4x2_t __ret; \
9285 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
9286 \
9287 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9288 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9289 __ret; \
9290 })
9291 #endif
9292
9293 #ifdef __LITTLE_ENDIAN__
9294 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9295 uint16x8x2_t __s1 = __p1; \
9296 uint16x8x2_t __ret; \
9297 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
9298 __ret; \
9299 })
9300 #else
9301 #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9302 uint16x8x2_t __s1 = __p1; \
9303 uint16x8x2_t __rev1; \
9304 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9305 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9306 uint16x8x2_t __ret; \
9307 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
9308 \
9309 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9310 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9311 __ret; \
9312 })
9313 #endif
9314
9315 #ifdef __LITTLE_ENDIAN__
9316 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9317 float32x4x2_t __s1 = __p1; \
9318 float32x4x2_t __ret; \
9319 __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
9320 __ret; \
9321 })
9322 #else
9323 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9324 float32x4x2_t __s1 = __p1; \
9325 float32x4x2_t __rev1; \
9326 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9327 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9328 float32x4x2_t __ret; \
9329 __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
9330 \
9331 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9332 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9333 __ret; \
9334 })
9335 #endif
9336
9337 #ifdef __LITTLE_ENDIAN__
9338 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9339 float16x8x2_t __s1 = __p1; \
9340 float16x8x2_t __ret; \
9341 __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
9342 __ret; \
9343 })
9344 #else
9345 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9346 float16x8x2_t __s1 = __p1; \
9347 float16x8x2_t __rev1; \
9348 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9349 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9350 float16x8x2_t __ret; \
9351 __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
9352 \
9353 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9354 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9355 __ret; \
9356 })
9357 #endif
9358
9359 #ifdef __LITTLE_ENDIAN__
9360 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9361 int32x4x2_t __s1 = __p1; \
9362 int32x4x2_t __ret; \
9363 __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
9364 __ret; \
9365 })
9366 #else
9367 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9368 int32x4x2_t __s1 = __p1; \
9369 int32x4x2_t __rev1; \
9370 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9371 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9372 int32x4x2_t __ret; \
9373 __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
9374 \
9375 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9376 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9377 __ret; \
9378 })
9379 #endif
9380
9381 #ifdef __LITTLE_ENDIAN__
9382 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9383 int16x8x2_t __s1 = __p1; \
9384 int16x8x2_t __ret; \
9385 __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
9386 __ret; \
9387 })
9388 #else
9389 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9390 int16x8x2_t __s1 = __p1; \
9391 int16x8x2_t __rev1; \
9392 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9393 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9394 int16x8x2_t __ret; \
9395 __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
9396 \
9397 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9398 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9399 __ret; \
9400 })
9401 #endif
9402
9403 #ifdef __LITTLE_ENDIAN__
9404 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
9405 uint8x8x2_t __s1 = __p1; \
9406 uint8x8x2_t __ret; \
9407 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
9408 __ret; \
9409 })
9410 #else
9411 #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
9412 uint8x8x2_t __s1 = __p1; \
9413 uint8x8x2_t __rev1; \
9414 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9415 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9416 uint8x8x2_t __ret; \
9417 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
9418 \
9419 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9420 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9421 __ret; \
9422 })
9423 #endif
9424
9425 #ifdef __LITTLE_ENDIAN__
9426 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9427 uint32x2x2_t __s1 = __p1; \
9428 uint32x2x2_t __ret; \
9429 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
9430 __ret; \
9431 })
9432 #else
9433 #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
9434 uint32x2x2_t __s1 = __p1; \
9435 uint32x2x2_t __rev1; \
9436 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
9437 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
9438 uint32x2x2_t __ret; \
9439 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
9440 \
9441 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9442 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9443 __ret; \
9444 })
9445 #endif
9446
9447 #ifdef __LITTLE_ENDIAN__
9448 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9449 uint16x4x2_t __s1 = __p1; \
9450 uint16x4x2_t __ret; \
9451 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
9452 __ret; \
9453 })
9454 #else
9455 #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
9456 uint16x4x2_t __s1 = __p1; \
9457 uint16x4x2_t __rev1; \
9458 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9459 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9460 uint16x4x2_t __ret; \
9461 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
9462 \
9463 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9464 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9465 __ret; \
9466 })
9467 #endif
9468
9469 #ifdef __LITTLE_ENDIAN__
9470 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
9471 int8x8x2_t __s1 = __p1; \
9472 int8x8x2_t __ret; \
9473 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
9474 __ret; \
9475 })
9476 #else
9477 #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
9478 int8x8x2_t __s1 = __p1; \
9479 int8x8x2_t __rev1; \
9480 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9481 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9482 int8x8x2_t __ret; \
9483 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
9484 \
9485 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9486 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9487 __ret; \
9488 })
9489 #endif
9490
9491 #ifdef __LITTLE_ENDIAN__
9492 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9493 float32x2x2_t __s1 = __p1; \
9494 float32x2x2_t __ret; \
9495 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
9496 __ret; \
9497 })
9498 #else
9499 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
9500 float32x2x2_t __s1 = __p1; \
9501 float32x2x2_t __rev1; \
9502 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
9503 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
9504 float32x2x2_t __ret; \
9505 __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
9506 \
9507 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9508 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9509 __ret; \
9510 })
9511 #endif
9512
9513 #ifdef __LITTLE_ENDIAN__
9514 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9515 float16x4x2_t __s1 = __p1; \
9516 float16x4x2_t __ret; \
9517 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
9518 __ret; \
9519 })
9520 #else
9521 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
9522 float16x4x2_t __s1 = __p1; \
9523 float16x4x2_t __rev1; \
9524 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9525 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9526 float16x4x2_t __ret; \
9527 __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
9528 \
9529 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9530 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9531 __ret; \
9532 })
9533 #endif
9534
9535 #ifdef __LITTLE_ENDIAN__
9536 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9537 int32x2x2_t __s1 = __p1; \
9538 int32x2x2_t __ret; \
9539 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
9540 __ret; \
9541 })
9542 #else
9543 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
9544 int32x2x2_t __s1 = __p1; \
9545 int32x2x2_t __rev1; \
9546 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
9547 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
9548 int32x2x2_t __ret; \
9549 __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
9550 \
9551 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9552 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9553 __ret; \
9554 })
9555 #endif
9556
9557 #ifdef __LITTLE_ENDIAN__
9558 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9559 int16x4x2_t __s1 = __p1; \
9560 int16x4x2_t __ret; \
9561 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
9562 __ret; \
9563 })
9564 #else
9565 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
9566 int16x4x2_t __s1 = __p1; \
9567 int16x4x2_t __rev1; \
9568 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
9569 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
9570 int16x4x2_t __ret; \
9571 __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
9572 \
9573 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9574 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9575 __ret; \
9576 })
9577 #endif
9578
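/*
 * Illustrative usage (not part of the generated header): vld2[q]_lane_* loads
 * one interleaved element pair from memory into the selected lane of each of
 * the two source vectors, leaving every other lane unchanged. A minimal
 * sketch, assuming the caller includes <arm_neon.h> (vdup_n_f32 is defined
 * elsewhere in this header):
 *
 *   float32_t pair[2] = { 1.0f, 2.0f };
 *   float32x2x2_t acc;
 *   acc.val[0] = vdup_n_f32(0.0f);
 *   acc.val[1] = vdup_n_f32(0.0f);
 *   acc = vld2_lane_f32(pair, acc, 1);
 *   // acc.val[0] = { 0, 1.0f },  acc.val[1] = { 0, 2.0f }
 */
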
9579 #ifdef __LITTLE_ENDIAN__
9580 #define vld3_p8(__p0) __extension__ ({ \
9581 poly8x8x3_t __ret; \
9582 __builtin_neon_vld3_v(&__ret, __p0, 4); \
9583 __ret; \
9584 })
9585 #else
9586 #define vld3_p8(__p0) __extension__ ({ \
9587 poly8x8x3_t __ret; \
9588 __builtin_neon_vld3_v(&__ret, __p0, 4); \
9589 \
9590 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9591 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9592 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9593 __ret; \
9594 })
9595 #endif
9596
9597 #ifdef __LITTLE_ENDIAN__
9598 #define vld3_p16(__p0) __extension__ ({ \
9599 poly16x4x3_t __ret; \
9600 __builtin_neon_vld3_v(&__ret, __p0, 5); \
9601 __ret; \
9602 })
9603 #else
9604 #define vld3_p16(__p0) __extension__ ({ \
9605 poly16x4x3_t __ret; \
9606 __builtin_neon_vld3_v(&__ret, __p0, 5); \
9607 \
9608 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9609 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9610 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9611 __ret; \
9612 })
9613 #endif
9614
9615 #ifdef __LITTLE_ENDIAN__
9616 #define vld3q_p8(__p0) __extension__ ({ \
9617 poly8x16x3_t __ret; \
9618 __builtin_neon_vld3q_v(&__ret, __p0, 36); \
9619 __ret; \
9620 })
9621 #else
9622 #define vld3q_p8(__p0) __extension__ ({ \
9623 poly8x16x3_t __ret; \
9624 __builtin_neon_vld3q_v(&__ret, __p0, 36); \
9625 \
9626 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9627 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9628 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9629 __ret; \
9630 })
9631 #endif
9632
9633 #ifdef __LITTLE_ENDIAN__
9634 #define vld3q_p16(__p0) __extension__ ({ \
9635 poly16x8x3_t __ret; \
9636 __builtin_neon_vld3q_v(&__ret, __p0, 37); \
9637 __ret; \
9638 })
9639 #else
9640 #define vld3q_p16(__p0) __extension__ ({ \
9641 poly16x8x3_t __ret; \
9642 __builtin_neon_vld3q_v(&__ret, __p0, 37); \
9643 \
9644 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9645 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9646 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9647 __ret; \
9648 })
9649 #endif
9650
9651 #ifdef __LITTLE_ENDIAN__
9652 #define vld3q_u8(__p0) __extension__ ({ \
9653 uint8x16x3_t __ret; \
9654 __builtin_neon_vld3q_v(&__ret, __p0, 48); \
9655 __ret; \
9656 })
9657 #else
9658 #define vld3q_u8(__p0) __extension__ ({ \
9659 uint8x16x3_t __ret; \
9660 __builtin_neon_vld3q_v(&__ret, __p0, 48); \
9661 \
9662 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9663 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9664 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9665 __ret; \
9666 })
9667 #endif
9668
9669 #ifdef __LITTLE_ENDIAN__
9670 #define vld3q_u32(__p0) __extension__ ({ \
9671 uint32x4x3_t __ret; \
9672 __builtin_neon_vld3q_v(&__ret, __p0, 50); \
9673 __ret; \
9674 })
9675 #else
9676 #define vld3q_u32(__p0) __extension__ ({ \
9677 uint32x4x3_t __ret; \
9678 __builtin_neon_vld3q_v(&__ret, __p0, 50); \
9679 \
9680 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9681 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9682 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9683 __ret; \
9684 })
9685 #endif
9686
9687 #ifdef __LITTLE_ENDIAN__
9688 #define vld3q_u16(__p0) __extension__ ({ \
9689 uint16x8x3_t __ret; \
9690 __builtin_neon_vld3q_v(&__ret, __p0, 49); \
9691 __ret; \
9692 })
9693 #else
9694 #define vld3q_u16(__p0) __extension__ ({ \
9695 uint16x8x3_t __ret; \
9696 __builtin_neon_vld3q_v(&__ret, __p0, 49); \
9697 \
9698 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9699 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9700 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9701 __ret; \
9702 })
9703 #endif
9704
9705 #ifdef __LITTLE_ENDIAN__
9706 #define vld3q_s8(__p0) __extension__ ({ \
9707 int8x16x3_t __ret; \
9708 __builtin_neon_vld3q_v(&__ret, __p0, 32); \
9709 __ret; \
9710 })
9711 #else
9712 #define vld3q_s8(__p0) __extension__ ({ \
9713 int8x16x3_t __ret; \
9714 __builtin_neon_vld3q_v(&__ret, __p0, 32); \
9715 \
9716 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9717 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9718 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
9719 __ret; \
9720 })
9721 #endif
9722
9723 #ifdef __LITTLE_ENDIAN__
9724 #define vld3q_f32(__p0) __extension__ ({ \
9725 float32x4x3_t __ret; \
9726 __builtin_neon_vld3q_v(&__ret, __p0, 41); \
9727 __ret; \
9728 })
9729 #else
9730 #define vld3q_f32(__p0) __extension__ ({ \
9731 float32x4x3_t __ret; \
9732 __builtin_neon_vld3q_v(&__ret, __p0, 41); \
9733 \
9734 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9735 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9736 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9737 __ret; \
9738 })
9739 #endif
9740
9741 #ifdef __LITTLE_ENDIAN__
9742 #define vld3q_f16(__p0) __extension__ ({ \
9743 float16x8x3_t __ret; \
9744 __builtin_neon_vld3q_v(&__ret, __p0, 40); \
9745 __ret; \
9746 })
9747 #else
9748 #define vld3q_f16(__p0) __extension__ ({ \
9749 float16x8x3_t __ret; \
9750 __builtin_neon_vld3q_v(&__ret, __p0, 40); \
9751 \
9752 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9753 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9754 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9755 __ret; \
9756 })
9757 #endif
9758
9759 #ifdef __LITTLE_ENDIAN__
9760 #define vld3q_s32(__p0) __extension__ ({ \
9761 int32x4x3_t __ret; \
9762 __builtin_neon_vld3q_v(&__ret, __p0, 34); \
9763 __ret; \
9764 })
9765 #else
9766 #define vld3q_s32(__p0) __extension__ ({ \
9767 int32x4x3_t __ret; \
9768 __builtin_neon_vld3q_v(&__ret, __p0, 34); \
9769 \
9770 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9771 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9772 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9773 __ret; \
9774 })
9775 #endif
9776
9777 #ifdef __LITTLE_ENDIAN__
9778 #define vld3q_s16(__p0) __extension__ ({ \
9779 int16x8x3_t __ret; \
9780 __builtin_neon_vld3q_v(&__ret, __p0, 33); \
9781 __ret; \
9782 })
9783 #else
9784 #define vld3q_s16(__p0) __extension__ ({ \
9785 int16x8x3_t __ret; \
9786 __builtin_neon_vld3q_v(&__ret, __p0, 33); \
9787 \
9788 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9789 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9790 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9791 __ret; \
9792 })
9793 #endif
9794
9795 #ifdef __LITTLE_ENDIAN__
9796 #define vld3_u8(__p0) __extension__ ({ \
9797 uint8x8x3_t __ret; \
9798 __builtin_neon_vld3_v(&__ret, __p0, 16); \
9799 __ret; \
9800 })
9801 #else
9802 #define vld3_u8(__p0) __extension__ ({ \
9803 uint8x8x3_t __ret; \
9804 __builtin_neon_vld3_v(&__ret, __p0, 16); \
9805 \
9806 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9807 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9808 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9809 __ret; \
9810 })
9811 #endif
9812
9813 #ifdef __LITTLE_ENDIAN__
9814 #define vld3_u32(__p0) __extension__ ({ \
9815 uint32x2x3_t __ret; \
9816 __builtin_neon_vld3_v(&__ret, __p0, 18); \
9817 __ret; \
9818 })
9819 #else
9820 #define vld3_u32(__p0) __extension__ ({ \
9821 uint32x2x3_t __ret; \
9822 __builtin_neon_vld3_v(&__ret, __p0, 18); \
9823 \
9824 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9825 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9826 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
9827 __ret; \
9828 })
9829 #endif
9830
9831 #ifdef __LITTLE_ENDIAN__
9832 #define vld3_u64(__p0) __extension__ ({ \
9833 uint64x1x3_t __ret; \
9834 __builtin_neon_vld3_v(&__ret, __p0, 19); \
9835 __ret; \
9836 })
9837 #else
9838 #define vld3_u64(__p0) __extension__ ({ \
9839 uint64x1x3_t __ret; \
9840 __builtin_neon_vld3_v(&__ret, __p0, 19); \
9841 __ret; \
9842 })
9843 #endif
9844
9845 #ifdef __LITTLE_ENDIAN__
9846 #define vld3_u16(__p0) __extension__ ({ \
9847 uint16x4x3_t __ret; \
9848 __builtin_neon_vld3_v(&__ret, __p0, 17); \
9849 __ret; \
9850 })
9851 #else
9852 #define vld3_u16(__p0) __extension__ ({ \
9853 uint16x4x3_t __ret; \
9854 __builtin_neon_vld3_v(&__ret, __p0, 17); \
9855 \
9856 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9857 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9858 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9859 __ret; \
9860 })
9861 #endif
9862
9863 #ifdef __LITTLE_ENDIAN__
9864 #define vld3_s8(__p0) __extension__ ({ \
9865 int8x8x3_t __ret; \
9866 __builtin_neon_vld3_v(&__ret, __p0, 0); \
9867 __ret; \
9868 })
9869 #else
9870 #define vld3_s8(__p0) __extension__ ({ \
9871 int8x8x3_t __ret; \
9872 __builtin_neon_vld3_v(&__ret, __p0, 0); \
9873 \
9874 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9875 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9876 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9877 __ret; \
9878 })
9879 #endif
9880
9881 #ifdef __LITTLE_ENDIAN__
9882 #define vld3_f32(__p0) __extension__ ({ \
9883 float32x2x3_t __ret; \
9884 __builtin_neon_vld3_v(&__ret, __p0, 9); \
9885 __ret; \
9886 })
9887 #else
9888 #define vld3_f32(__p0) __extension__ ({ \
9889 float32x2x3_t __ret; \
9890 __builtin_neon_vld3_v(&__ret, __p0, 9); \
9891 \
9892 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9893 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9894 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
9895 __ret; \
9896 })
9897 #endif
9898
9899 #ifdef __LITTLE_ENDIAN__
9900 #define vld3_f16(__p0) __extension__ ({ \
9901 float16x4x3_t __ret; \
9902 __builtin_neon_vld3_v(&__ret, __p0, 8); \
9903 __ret; \
9904 })
9905 #else
9906 #define vld3_f16(__p0) __extension__ ({ \
9907 float16x4x3_t __ret; \
9908 __builtin_neon_vld3_v(&__ret, __p0, 8); \
9909 \
9910 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9911 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9912 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9913 __ret; \
9914 })
9915 #endif
9916
9917 #ifdef __LITTLE_ENDIAN__
9918 #define vld3_s32(__p0) __extension__ ({ \
9919 int32x2x3_t __ret; \
9920 __builtin_neon_vld3_v(&__ret, __p0, 2); \
9921 __ret; \
9922 })
9923 #else
9924 #define vld3_s32(__p0) __extension__ ({ \
9925 int32x2x3_t __ret; \
9926 __builtin_neon_vld3_v(&__ret, __p0, 2); \
9927 \
9928 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
9929 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
9930 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
9931 __ret; \
9932 })
9933 #endif
9934
9935 #ifdef __LITTLE_ENDIAN__
9936 #define vld3_s64(__p0) __extension__ ({ \
9937 int64x1x3_t __ret; \
9938 __builtin_neon_vld3_v(&__ret, __p0, 3); \
9939 __ret; \
9940 })
9941 #else
9942 #define vld3_s64(__p0) __extension__ ({ \
9943 int64x1x3_t __ret; \
9944 __builtin_neon_vld3_v(&__ret, __p0, 3); \
9945 __ret; \
9946 })
9947 #endif
9948
9949 #ifdef __LITTLE_ENDIAN__
9950 #define vld3_s16(__p0) __extension__ ({ \
9951 int16x4x3_t __ret; \
9952 __builtin_neon_vld3_v(&__ret, __p0, 1); \
9953 __ret; \
9954 })
9955 #else
9956 #define vld3_s16(__p0) __extension__ ({ \
9957 int16x4x3_t __ret; \
9958 __builtin_neon_vld3_v(&__ret, __p0, 1); \
9959 \
9960 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9961 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9962 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9963 __ret; \
9964 })
9965 #endif
9966
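/* vld3_dup loads a single 3-element structure and replicates each element
 * across every lane of the corresponding result vector.  As with the plain
 * vld3 forms, the big-endian variants lane-reverse the results after the
 * builtin call. */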
9967 #ifdef __LITTLE_ENDIAN__
9968 #define vld3_dup_p8(__p0) __extension__ ({ \
9969 poly8x8x3_t __ret; \
9970 __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
9971 __ret; \
9972 })
9973 #else
9974 #define vld3_dup_p8(__p0) __extension__ ({ \
9975 poly8x8x3_t __ret; \
9976 __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
9977 \
9978 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
9979 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
9980 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
9981 __ret; \
9982 })
9983 #endif
9984
9985 #ifdef __LITTLE_ENDIAN__
9986 #define vld3_dup_p16(__p0) __extension__ ({ \
9987 poly16x4x3_t __ret; \
9988 __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
9989 __ret; \
9990 })
9991 #else
9992 #define vld3_dup_p16(__p0) __extension__ ({ \
9993 poly16x4x3_t __ret; \
9994 __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
9995 \
9996 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
9997 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
9998 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
9999 __ret; \
10000 })
10001 #endif
10002
10003 #ifdef __LITTLE_ENDIAN__
10004 #define vld3_dup_u8(__p0) __extension__ ({ \
10005 uint8x8x3_t __ret; \
10006 __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
10007 __ret; \
10008 })
10009 #else
10010 #define vld3_dup_u8(__p0) __extension__ ({ \
10011 uint8x8x3_t __ret; \
10012 __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
10013 \
10014 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10015 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10016 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10017 __ret; \
10018 })
10019 #endif
10020
10021 #ifdef __LITTLE_ENDIAN__
10022 #define vld3_dup_u32(__p0) __extension__ ({ \
10023 uint32x2x3_t __ret; \
10024 __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
10025 __ret; \
10026 })
10027 #else
10028 #define vld3_dup_u32(__p0) __extension__ ({ \
10029 uint32x2x3_t __ret; \
10030 __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
10031 \
10032 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10033 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10034 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10035 __ret; \
10036 })
10037 #endif
10038
10039 #ifdef __LITTLE_ENDIAN__
10040 #define vld3_dup_u64(__p0) __extension__ ({ \
10041 uint64x1x3_t __ret; \
10042 __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
10043 __ret; \
10044 })
10045 #else
10046 #define vld3_dup_u64(__p0) __extension__ ({ \
10047 uint64x1x3_t __ret; \
10048 __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
10049 __ret; \
10050 })
10051 #endif
10052
10053 #ifdef __LITTLE_ENDIAN__
10054 #define vld3_dup_u16(__p0) __extension__ ({ \
10055 uint16x4x3_t __ret; \
10056 __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
10057 __ret; \
10058 })
10059 #else
10060 #define vld3_dup_u16(__p0) __extension__ ({ \
10061 uint16x4x3_t __ret; \
10062 __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
10063 \
10064 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10065 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10066 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10067 __ret; \
10068 })
10069 #endif
10070
10071 #ifdef __LITTLE_ENDIAN__
10072 #define vld3_dup_s8(__p0) __extension__ ({ \
10073 int8x8x3_t __ret; \
10074 __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
10075 __ret; \
10076 })
10077 #else
10078 #define vld3_dup_s8(__p0) __extension__ ({ \
10079 int8x8x3_t __ret; \
10080 __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
10081 \
10082 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10083 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10084 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10085 __ret; \
10086 })
10087 #endif
10088
10089 #ifdef __LITTLE_ENDIAN__
10090 #define vld3_dup_f32(__p0) __extension__ ({ \
10091 float32x2x3_t __ret; \
10092 __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
10093 __ret; \
10094 })
10095 #else
10096 #define vld3_dup_f32(__p0) __extension__ ({ \
10097 float32x2x3_t __ret; \
10098 __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
10099 \
10100 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10101 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10102 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10103 __ret; \
10104 })
10105 #endif
10106
10107 #ifdef __LITTLE_ENDIAN__
10108 #define vld3_dup_f16(__p0) __extension__ ({ \
10109 float16x4x3_t __ret; \
10110 __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
10111 __ret; \
10112 })
10113 #else
10114 #define vld3_dup_f16(__p0) __extension__ ({ \
10115 float16x4x3_t __ret; \
10116 __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
10117 \
10118 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10119 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10120 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10121 __ret; \
10122 })
10123 #endif
10124
10125 #ifdef __LITTLE_ENDIAN__
10126 #define vld3_dup_s32(__p0) __extension__ ({ \
10127 int32x2x3_t __ret; \
10128 __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
10129 __ret; \
10130 })
10131 #else
10132 #define vld3_dup_s32(__p0) __extension__ ({ \
10133 int32x2x3_t __ret; \
10134 __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
10135 \
10136 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10137 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10138 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10139 __ret; \
10140 })
10141 #endif
10142
10143 #ifdef __LITTLE_ENDIAN__
10144 #define vld3_dup_s64(__p0) __extension__ ({ \
10145 int64x1x3_t __ret; \
10146 __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
10147 __ret; \
10148 })
10149 #else
10150 #define vld3_dup_s64(__p0) __extension__ ({ \
10151 int64x1x3_t __ret; \
10152 __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
10153 __ret; \
10154 })
10155 #endif
10156
10157 #ifdef __LITTLE_ENDIAN__
10158 #define vld3_dup_s16(__p0) __extension__ ({ \
10159 int16x4x3_t __ret; \
10160 __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
10161 __ret; \
10162 })
10163 #else
10164 #define vld3_dup_s16(__p0) __extension__ ({ \
10165 int16x4x3_t __ret; \
10166 __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
10167 \
10168 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10169 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10170 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10171 __ret; \
10172 })
10173 #endif
10174
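/* vld3_lane and vld3q_lane load one 3-element structure into lane __p2 of the
 * three vectors passed in __p1, leaving the remaining lanes unchanged.  On
 * big-endian targets both the incoming vectors and the results are
 * lane-reversed around the builtin call. */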
10175 #ifdef __LITTLE_ENDIAN__
10176 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
10177 poly8x8x3_t __s1 = __p1; \
10178 poly8x8x3_t __ret; \
10179 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
10180 __ret; \
10181 })
10182 #else
10183 #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
10184 poly8x8x3_t __s1 = __p1; \
10185 poly8x8x3_t __rev1; \
10186 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10187 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10188 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10189 poly8x8x3_t __ret; \
10190 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
10191 \
10192 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10193 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10194 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10195 __ret; \
10196 })
10197 #endif
10198
10199 #ifdef __LITTLE_ENDIAN__
10200 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10201 poly16x4x3_t __s1 = __p1; \
10202 poly16x4x3_t __ret; \
10203 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
10204 __ret; \
10205 })
10206 #else
10207 #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10208 poly16x4x3_t __s1 = __p1; \
10209 poly16x4x3_t __rev1; \
10210 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10211 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10212 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10213 poly16x4x3_t __ret; \
10214 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
10215 \
10216 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10217 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10218 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10219 __ret; \
10220 })
10221 #endif
10222
10223 #ifdef __LITTLE_ENDIAN__
10224 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10225 poly16x8x3_t __s1 = __p1; \
10226 poly16x8x3_t __ret; \
10227 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
10228 __ret; \
10229 })
10230 #else
10231 #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
10232 poly16x8x3_t __s1 = __p1; \
10233 poly16x8x3_t __rev1; \
10234 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10235 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10236 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10237 poly16x8x3_t __ret; \
10238 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
10239 \
10240 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10241 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10242 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10243 __ret; \
10244 })
10245 #endif
10246
10247 #ifdef __LITTLE_ENDIAN__
10248 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10249 uint32x4x3_t __s1 = __p1; \
10250 uint32x4x3_t __ret; \
10251 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
10252 __ret; \
10253 })
10254 #else
10255 #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10256 uint32x4x3_t __s1 = __p1; \
10257 uint32x4x3_t __rev1; \
10258 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10259 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10260 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10261 uint32x4x3_t __ret; \
10262 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
10263 \
10264 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10265 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10266 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10267 __ret; \
10268 })
10269 #endif
10270
10271 #ifdef __LITTLE_ENDIAN__
10272 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10273 uint16x8x3_t __s1 = __p1; \
10274 uint16x8x3_t __ret; \
10275 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
10276 __ret; \
10277 })
10278 #else
10279 #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10280 uint16x8x3_t __s1 = __p1; \
10281 uint16x8x3_t __rev1; \
10282 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10283 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10284 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10285 uint16x8x3_t __ret; \
10286 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
10287 \
10288 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10289 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10290 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10291 __ret; \
10292 })
10293 #endif
10294
10295 #ifdef __LITTLE_ENDIAN__
10296 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10297 float32x4x3_t __s1 = __p1; \
10298 float32x4x3_t __ret; \
10299 __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
10300 __ret; \
10301 })
10302 #else
10303 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10304 float32x4x3_t __s1 = __p1; \
10305 float32x4x3_t __rev1; \
10306 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10307 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10308 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10309 float32x4x3_t __ret; \
10310 __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
10311 \
10312 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10313 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10314 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10315 __ret; \
10316 })
10317 #endif
10318
10319 #ifdef __LITTLE_ENDIAN__
10320 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10321 float16x8x3_t __s1 = __p1; \
10322 float16x8x3_t __ret; \
10323 __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
10324 __ret; \
10325 })
10326 #else
10327 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10328 float16x8x3_t __s1 = __p1; \
10329 float16x8x3_t __rev1; \
10330 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10331 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10332 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10333 float16x8x3_t __ret; \
10334 __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
10335 \
10336 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10337 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10338 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10339 __ret; \
10340 })
10341 #endif
10342
10343 #ifdef __LITTLE_ENDIAN__
10344 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10345 int32x4x3_t __s1 = __p1; \
10346 int32x4x3_t __ret; \
10347 __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
10348 __ret; \
10349 })
10350 #else
10351 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10352 int32x4x3_t __s1 = __p1; \
10353 int32x4x3_t __rev1; \
10354 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10355 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10356 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10357 int32x4x3_t __ret; \
10358 __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
10359 \
10360 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10361 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10362 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10363 __ret; \
10364 })
10365 #endif
10366
10367 #ifdef __LITTLE_ENDIAN__
10368 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10369 int16x8x3_t __s1 = __p1; \
10370 int16x8x3_t __ret; \
10371 __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
10372 __ret; \
10373 })
10374 #else
10375 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10376 int16x8x3_t __s1 = __p1; \
10377 int16x8x3_t __rev1; \
10378 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10379 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10380 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10381 int16x8x3_t __ret; \
10382 __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
10383 \
10384 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10385 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10386 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10387 __ret; \
10388 })
10389 #endif
10390
10391 #ifdef __LITTLE_ENDIAN__
10392 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
10393 uint8x8x3_t __s1 = __p1; \
10394 uint8x8x3_t __ret; \
10395 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
10396 __ret; \
10397 })
10398 #else
10399 #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
10400 uint8x8x3_t __s1 = __p1; \
10401 uint8x8x3_t __rev1; \
10402 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10403 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10404 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10405 uint8x8x3_t __ret; \
10406 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
10407 \
10408 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10409 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10410 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10411 __ret; \
10412 })
10413 #endif
10414
10415 #ifdef __LITTLE_ENDIAN__
10416 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10417 uint32x2x3_t __s1 = __p1; \
10418 uint32x2x3_t __ret; \
10419 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
10420 __ret; \
10421 })
10422 #else
10423 #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
10424 uint32x2x3_t __s1 = __p1; \
10425 uint32x2x3_t __rev1; \
10426 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
10427 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
10428 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
10429 uint32x2x3_t __ret; \
10430 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
10431 \
10432 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10433 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10434 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10435 __ret; \
10436 })
10437 #endif
10438
10439 #ifdef __LITTLE_ENDIAN__
10440 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10441 uint16x4x3_t __s1 = __p1; \
10442 uint16x4x3_t __ret; \
10443 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
10444 __ret; \
10445 })
10446 #else
10447 #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
10448 uint16x4x3_t __s1 = __p1; \
10449 uint16x4x3_t __rev1; \
10450 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10451 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10452 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10453 uint16x4x3_t __ret; \
10454 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
10455 \
10456 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10457 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10458 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10459 __ret; \
10460 })
10461 #endif
10462
10463 #ifdef __LITTLE_ENDIAN__
10464 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
10465 int8x8x3_t __s1 = __p1; \
10466 int8x8x3_t __ret; \
10467 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
10468 __ret; \
10469 })
10470 #else
10471 #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
10472 int8x8x3_t __s1 = __p1; \
10473 int8x8x3_t __rev1; \
10474 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10475 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10476 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10477 int8x8x3_t __ret; \
10478 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
10479 \
10480 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10481 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10482 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10483 __ret; \
10484 })
10485 #endif
10486
10487 #ifdef __LITTLE_ENDIAN__
10488 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10489 float32x2x3_t __s1 = __p1; \
10490 float32x2x3_t __ret; \
10491 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
10492 __ret; \
10493 })
10494 #else
10495 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
10496 float32x2x3_t __s1 = __p1; \
10497 float32x2x3_t __rev1; \
10498 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
10499 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
10500 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
10501 float32x2x3_t __ret; \
10502 __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
10503 \
10504 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10505 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10506 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10507 __ret; \
10508 })
10509 #endif
10510
10511 #ifdef __LITTLE_ENDIAN__
10512 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10513 float16x4x3_t __s1 = __p1; \
10514 float16x4x3_t __ret; \
10515 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
10516 __ret; \
10517 })
10518 #else
10519 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
10520 float16x4x3_t __s1 = __p1; \
10521 float16x4x3_t __rev1; \
10522 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10523 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10524 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10525 float16x4x3_t __ret; \
10526 __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
10527 \
10528 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10529 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10530 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10531 __ret; \
10532 })
10533 #endif
10534
10535 #ifdef __LITTLE_ENDIAN__
10536 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10537 int32x2x3_t __s1 = __p1; \
10538 int32x2x3_t __ret; \
10539 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
10540 __ret; \
10541 })
10542 #else
10543 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
10544 int32x2x3_t __s1 = __p1; \
10545 int32x2x3_t __rev1; \
10546 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
10547 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
10548 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
10549 int32x2x3_t __ret; \
10550 __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
10551 \
10552 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10553 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10554 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10555 __ret; \
10556 })
10557 #endif
10558
10559 #ifdef __LITTLE_ENDIAN__
10560 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10561 int16x4x3_t __s1 = __p1; \
10562 int16x4x3_t __ret; \
10563 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
10564 __ret; \
10565 })
10566 #else
10567 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
10568 int16x4x3_t __s1 = __p1; \
10569 int16x4x3_t __rev1; \
10570 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
10571 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
10572 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
10573 int16x4x3_t __ret; \
10574 __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
10575 \
10576 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10577 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10578 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10579 __ret; \
10580 })
10581 #endif
10582
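/* vld4 de-interleaves 4-element structures into four D (vld4_<type>) or four
 * Q (vld4q_<type>) registers.
 *
 * Illustrative sketch only (the buffer name "rgba" is hypothetical):
 *
 *   uint8_t rgba[64];                  // R0 G0 B0 A0 R1 G1 B1 A1 ...
 *   uint8x16x4_t px = vld4q_u8(rgba);  // px.val[3] holds the alpha bytes
 */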
10583 #ifdef __LITTLE_ENDIAN__
10584 #define vld4_p8(__p0) __extension__ ({ \
10585 poly8x8x4_t __ret; \
10586 __builtin_neon_vld4_v(&__ret, __p0, 4); \
10587 __ret; \
10588 })
10589 #else
10590 #define vld4_p8(__p0) __extension__ ({ \
10591 poly8x8x4_t __ret; \
10592 __builtin_neon_vld4_v(&__ret, __p0, 4); \
10593 \
10594 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10595 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10596 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10597 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10598 __ret; \
10599 })
10600 #endif
10601
10602 #ifdef __LITTLE_ENDIAN__
10603 #define vld4_p16(__p0) __extension__ ({ \
10604 poly16x4x4_t __ret; \
10605 __builtin_neon_vld4_v(&__ret, __p0, 5); \
10606 __ret; \
10607 })
10608 #else
10609 #define vld4_p16(__p0) __extension__ ({ \
10610 poly16x4x4_t __ret; \
10611 __builtin_neon_vld4_v(&__ret, __p0, 5); \
10612 \
10613 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10614 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10615 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10616 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10617 __ret; \
10618 })
10619 #endif
10620
10621 #ifdef __LITTLE_ENDIAN__
10622 #define vld4q_p8(__p0) __extension__ ({ \
10623 poly8x16x4_t __ret; \
10624 __builtin_neon_vld4q_v(&__ret, __p0, 36); \
10625 __ret; \
10626 })
10627 #else
10628 #define vld4q_p8(__p0) __extension__ ({ \
10629 poly8x16x4_t __ret; \
10630 __builtin_neon_vld4q_v(&__ret, __p0, 36); \
10631 \
10632 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10633 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10634 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10635 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10636 __ret; \
10637 })
10638 #endif
10639
10640 #ifdef __LITTLE_ENDIAN__
10641 #define vld4q_p16(__p0) __extension__ ({ \
10642 poly16x8x4_t __ret; \
10643 __builtin_neon_vld4q_v(&__ret, __p0, 37); \
10644 __ret; \
10645 })
10646 #else
10647 #define vld4q_p16(__p0) __extension__ ({ \
10648 poly16x8x4_t __ret; \
10649 __builtin_neon_vld4q_v(&__ret, __p0, 37); \
10650 \
10651 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10652 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10653 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10654 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10655 __ret; \
10656 })
10657 #endif
10658
10659 #ifdef __LITTLE_ENDIAN__
10660 #define vld4q_u8(__p0) __extension__ ({ \
10661 uint8x16x4_t __ret; \
10662 __builtin_neon_vld4q_v(&__ret, __p0, 48); \
10663 __ret; \
10664 })
10665 #else
10666 #define vld4q_u8(__p0) __extension__ ({ \
10667 uint8x16x4_t __ret; \
10668 __builtin_neon_vld4q_v(&__ret, __p0, 48); \
10669 \
10670 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10671 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10672 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10673 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10674 __ret; \
10675 })
10676 #endif
10677
10678 #ifdef __LITTLE_ENDIAN__
10679 #define vld4q_u32(__p0) __extension__ ({ \
10680 uint32x4x4_t __ret; \
10681 __builtin_neon_vld4q_v(&__ret, __p0, 50); \
10682 __ret; \
10683 })
10684 #else
10685 #define vld4q_u32(__p0) __extension__ ({ \
10686 uint32x4x4_t __ret; \
10687 __builtin_neon_vld4q_v(&__ret, __p0, 50); \
10688 \
10689 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10690 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10691 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10692 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10693 __ret; \
10694 })
10695 #endif
10696
10697 #ifdef __LITTLE_ENDIAN__
10698 #define vld4q_u16(__p0) __extension__ ({ \
10699 uint16x8x4_t __ret; \
10700 __builtin_neon_vld4q_v(&__ret, __p0, 49); \
10701 __ret; \
10702 })
10703 #else
10704 #define vld4q_u16(__p0) __extension__ ({ \
10705 uint16x8x4_t __ret; \
10706 __builtin_neon_vld4q_v(&__ret, __p0, 49); \
10707 \
10708 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10709 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10710 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10711 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10712 __ret; \
10713 })
10714 #endif
10715
10716 #ifdef __LITTLE_ENDIAN__
10717 #define vld4q_s8(__p0) __extension__ ({ \
10718 int8x16x4_t __ret; \
10719 __builtin_neon_vld4q_v(&__ret, __p0, 32); \
10720 __ret; \
10721 })
10722 #else
10723 #define vld4q_s8(__p0) __extension__ ({ \
10724 int8x16x4_t __ret; \
10725 __builtin_neon_vld4q_v(&__ret, __p0, 32); \
10726 \
10727 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10728 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10729 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10730 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
10731 __ret; \
10732 })
10733 #endif
10734
10735 #ifdef __LITTLE_ENDIAN__
10736 #define vld4q_f32(__p0) __extension__ ({ \
10737 float32x4x4_t __ret; \
10738 __builtin_neon_vld4q_v(&__ret, __p0, 41); \
10739 __ret; \
10740 })
10741 #else
10742 #define vld4q_f32(__p0) __extension__ ({ \
10743 float32x4x4_t __ret; \
10744 __builtin_neon_vld4q_v(&__ret, __p0, 41); \
10745 \
10746 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10747 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10748 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10749 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10750 __ret; \
10751 })
10752 #endif
10753
10754 #ifdef __LITTLE_ENDIAN__
10755 #define vld4q_f16(__p0) __extension__ ({ \
10756 float16x8x4_t __ret; \
10757 __builtin_neon_vld4q_v(&__ret, __p0, 40); \
10758 __ret; \
10759 })
10760 #else
10761 #define vld4q_f16(__p0) __extension__ ({ \
10762 float16x8x4_t __ret; \
10763 __builtin_neon_vld4q_v(&__ret, __p0, 40); \
10764 \
10765 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10766 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10767 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10768 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10769 __ret; \
10770 })
10771 #endif
10772
10773 #ifdef __LITTLE_ENDIAN__
10774 #define vld4q_s32(__p0) __extension__ ({ \
10775 int32x4x4_t __ret; \
10776 __builtin_neon_vld4q_v(&__ret, __p0, 34); \
10777 __ret; \
10778 })
10779 #else
10780 #define vld4q_s32(__p0) __extension__ ({ \
10781 int32x4x4_t __ret; \
10782 __builtin_neon_vld4q_v(&__ret, __p0, 34); \
10783 \
10784 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10785 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10786 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10787 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10788 __ret; \
10789 })
10790 #endif
10791
10792 #ifdef __LITTLE_ENDIAN__
10793 #define vld4q_s16(__p0) __extension__ ({ \
10794 int16x8x4_t __ret; \
10795 __builtin_neon_vld4q_v(&__ret, __p0, 33); \
10796 __ret; \
10797 })
10798 #else
10799 #define vld4q_s16(__p0) __extension__ ({ \
10800 int16x8x4_t __ret; \
10801 __builtin_neon_vld4q_v(&__ret, __p0, 33); \
10802 \
10803 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10804 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10805 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10806 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10807 __ret; \
10808 })
10809 #endif
10810
10811 #ifdef __LITTLE_ENDIAN__
10812 #define vld4_u8(__p0) __extension__ ({ \
10813 uint8x8x4_t __ret; \
10814 __builtin_neon_vld4_v(&__ret, __p0, 16); \
10815 __ret; \
10816 })
10817 #else
10818 #define vld4_u8(__p0) __extension__ ({ \
10819 uint8x8x4_t __ret; \
10820 __builtin_neon_vld4_v(&__ret, __p0, 16); \
10821 \
10822 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10823 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10824 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10825 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10826 __ret; \
10827 })
10828 #endif
10829
10830 #ifdef __LITTLE_ENDIAN__
10831 #define vld4_u32(__p0) __extension__ ({ \
10832 uint32x2x4_t __ret; \
10833 __builtin_neon_vld4_v(&__ret, __p0, 18); \
10834 __ret; \
10835 })
10836 #else
10837 #define vld4_u32(__p0) __extension__ ({ \
10838 uint32x2x4_t __ret; \
10839 __builtin_neon_vld4_v(&__ret, __p0, 18); \
10840 \
10841 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10842 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10843 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10844 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
10845 __ret; \
10846 })
10847 #endif
10848
10849 #ifdef __LITTLE_ENDIAN__
10850 #define vld4_u64(__p0) __extension__ ({ \
10851 uint64x1x4_t __ret; \
10852 __builtin_neon_vld4_v(&__ret, __p0, 19); \
10853 __ret; \
10854 })
10855 #else
10856 #define vld4_u64(__p0) __extension__ ({ \
10857 uint64x1x4_t __ret; \
10858 __builtin_neon_vld4_v(&__ret, __p0, 19); \
10859 __ret; \
10860 })
10861 #endif
10862
10863 #ifdef __LITTLE_ENDIAN__
10864 #define vld4_u16(__p0) __extension__ ({ \
10865 uint16x4x4_t __ret; \
10866 __builtin_neon_vld4_v(&__ret, __p0, 17); \
10867 __ret; \
10868 })
10869 #else
10870 #define vld4_u16(__p0) __extension__ ({ \
10871 uint16x4x4_t __ret; \
10872 __builtin_neon_vld4_v(&__ret, __p0, 17); \
10873 \
10874 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10875 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10876 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10877 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10878 __ret; \
10879 })
10880 #endif
10881
10882 #ifdef __LITTLE_ENDIAN__
10883 #define vld4_s8(__p0) __extension__ ({ \
10884 int8x8x4_t __ret; \
10885 __builtin_neon_vld4_v(&__ret, __p0, 0); \
10886 __ret; \
10887 })
10888 #else
10889 #define vld4_s8(__p0) __extension__ ({ \
10890 int8x8x4_t __ret; \
10891 __builtin_neon_vld4_v(&__ret, __p0, 0); \
10892 \
10893 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
10894 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
10895 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
10896 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
10897 __ret; \
10898 })
10899 #endif
10900
10901 #ifdef __LITTLE_ENDIAN__
10902 #define vld4_f32(__p0) __extension__ ({ \
10903 float32x2x4_t __ret; \
10904 __builtin_neon_vld4_v(&__ret, __p0, 9); \
10905 __ret; \
10906 })
10907 #else
10908 #define vld4_f32(__p0) __extension__ ({ \
10909 float32x2x4_t __ret; \
10910 __builtin_neon_vld4_v(&__ret, __p0, 9); \
10911 \
10912 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10913 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10914 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10915 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
10916 __ret; \
10917 })
10918 #endif
10919
10920 #ifdef __LITTLE_ENDIAN__
10921 #define vld4_f16(__p0) __extension__ ({ \
10922 float16x4x4_t __ret; \
10923 __builtin_neon_vld4_v(&__ret, __p0, 8); \
10924 __ret; \
10925 })
10926 #else
10927 #define vld4_f16(__p0) __extension__ ({ \
10928 float16x4x4_t __ret; \
10929 __builtin_neon_vld4_v(&__ret, __p0, 8); \
10930 \
10931 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10932 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10933 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10934 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10935 __ret; \
10936 })
10937 #endif
10938
10939 #ifdef __LITTLE_ENDIAN__
10940 #define vld4_s32(__p0) __extension__ ({ \
10941 int32x2x4_t __ret; \
10942 __builtin_neon_vld4_v(&__ret, __p0, 2); \
10943 __ret; \
10944 })
10945 #else
10946 #define vld4_s32(__p0) __extension__ ({ \
10947 int32x2x4_t __ret; \
10948 __builtin_neon_vld4_v(&__ret, __p0, 2); \
10949 \
10950 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
10951 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
10952 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
10953 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
10954 __ret; \
10955 })
10956 #endif
10957
10958 #ifdef __LITTLE_ENDIAN__
10959 #define vld4_s64(__p0) __extension__ ({ \
10960 int64x1x4_t __ret; \
10961 __builtin_neon_vld4_v(&__ret, __p0, 3); \
10962 __ret; \
10963 })
10964 #else
10965 #define vld4_s64(__p0) __extension__ ({ \
10966 int64x1x4_t __ret; \
10967 __builtin_neon_vld4_v(&__ret, __p0, 3); \
10968 __ret; \
10969 })
10970 #endif
10971
10972 #ifdef __LITTLE_ENDIAN__
10973 #define vld4_s16(__p0) __extension__ ({ \
10974 int16x4x4_t __ret; \
10975 __builtin_neon_vld4_v(&__ret, __p0, 1); \
10976 __ret; \
10977 })
10978 #else
10979 #define vld4_s16(__p0) __extension__ ({ \
10980 int16x4x4_t __ret; \
10981 __builtin_neon_vld4_v(&__ret, __p0, 1); \
10982 \
10983 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
10984 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
10985 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
10986 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
10987 __ret; \
10988 })
10989 #endif
10990
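/* vld4_dup loads a single 4-element structure and broadcasts each element to
 * all lanes of the corresponding result vector.  The one-lane 64-bit cases
 * (e.g. vld4_dup_u64) need no lane reversal, so their little- and big-endian
 * bodies are identical. */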
10991 #ifdef __LITTLE_ENDIAN__
10992 #define vld4_dup_p8(__p0) __extension__ ({ \
10993 poly8x8x4_t __ret; \
10994 __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
10995 __ret; \
10996 })
10997 #else
10998 #define vld4_dup_p8(__p0) __extension__ ({ \
10999 poly8x8x4_t __ret; \
11000 __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
11001 \
11002 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11003 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11004 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11005 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11006 __ret; \
11007 })
11008 #endif
11009
11010 #ifdef __LITTLE_ENDIAN__
11011 #define vld4_dup_p16(__p0) __extension__ ({ \
11012 poly16x4x4_t __ret; \
11013 __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
11014 __ret; \
11015 })
11016 #else
11017 #define vld4_dup_p16(__p0) __extension__ ({ \
11018 poly16x4x4_t __ret; \
11019 __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
11020 \
11021 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11022 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11023 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11024 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11025 __ret; \
11026 })
11027 #endif
11028
11029 #ifdef __LITTLE_ENDIAN__
11030 #define vld4_dup_u8(__p0) __extension__ ({ \
11031 uint8x8x4_t __ret; \
11032 __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
11033 __ret; \
11034 })
11035 #else
11036 #define vld4_dup_u8(__p0) __extension__ ({ \
11037 uint8x8x4_t __ret; \
11038 __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
11039 \
11040 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11041 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11042 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11043 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11044 __ret; \
11045 })
11046 #endif
11047
11048 #ifdef __LITTLE_ENDIAN__
11049 #define vld4_dup_u32(__p0) __extension__ ({ \
11050 uint32x2x4_t __ret; \
11051 __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
11052 __ret; \
11053 })
11054 #else
11055 #define vld4_dup_u32(__p0) __extension__ ({ \
11056 uint32x2x4_t __ret; \
11057 __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
11058 \
11059 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11060 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11061 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11062 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11063 __ret; \
11064 })
11065 #endif
11066
11067 #ifdef __LITTLE_ENDIAN__
11068 #define vld4_dup_u64(__p0) __extension__ ({ \
11069 uint64x1x4_t __ret; \
11070 __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
11071 __ret; \
11072 })
11073 #else
11074 #define vld4_dup_u64(__p0) __extension__ ({ \
11075 uint64x1x4_t __ret; \
11076 __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
11077 __ret; \
11078 })
11079 #endif
11080
11081 #ifdef __LITTLE_ENDIAN__
11082 #define vld4_dup_u16(__p0) __extension__ ({ \
11083 uint16x4x4_t __ret; \
11084 __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
11085 __ret; \
11086 })
11087 #else
11088 #define vld4_dup_u16(__p0) __extension__ ({ \
11089 uint16x4x4_t __ret; \
11090 __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
11091 \
11092 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11093 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11094 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11095 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11096 __ret; \
11097 })
11098 #endif
11099
11100 #ifdef __LITTLE_ENDIAN__
11101 #define vld4_dup_s8(__p0) __extension__ ({ \
11102 int8x8x4_t __ret; \
11103 __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
11104 __ret; \
11105 })
11106 #else
11107 #define vld4_dup_s8(__p0) __extension__ ({ \
11108 int8x8x4_t __ret; \
11109 __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
11110 \
11111 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11112 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11113 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11114 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11115 __ret; \
11116 })
11117 #endif
11118
11119 #ifdef __LITTLE_ENDIAN__
11120 #define vld4_dup_f32(__p0) __extension__ ({ \
11121 float32x2x4_t __ret; \
11122 __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
11123 __ret; \
11124 })
11125 #else
11126 #define vld4_dup_f32(__p0) __extension__ ({ \
11127 float32x2x4_t __ret; \
11128 __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
11129 \
11130 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11131 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11132 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11133 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11134 __ret; \
11135 })
11136 #endif
11137
11138 #ifdef __LITTLE_ENDIAN__
11139 #define vld4_dup_f16(__p0) __extension__ ({ \
11140 float16x4x4_t __ret; \
11141 __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
11142 __ret; \
11143 })
11144 #else
11145 #define vld4_dup_f16(__p0) __extension__ ({ \
11146 float16x4x4_t __ret; \
11147 __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
11148 \
11149 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11150 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11151 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11152 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11153 __ret; \
11154 })
11155 #endif
11156
11157 #ifdef __LITTLE_ENDIAN__
11158 #define vld4_dup_s32(__p0) __extension__ ({ \
11159 int32x2x4_t __ret; \
11160 __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
11161 __ret; \
11162 })
11163 #else
11164 #define vld4_dup_s32(__p0) __extension__ ({ \
11165 int32x2x4_t __ret; \
11166 __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
11167 \
11168 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11169 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11170 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11171 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11172 __ret; \
11173 })
11174 #endif
11175
11176 #ifdef __LITTLE_ENDIAN__
11177 #define vld4_dup_s64(__p0) __extension__ ({ \
11178 int64x1x4_t __ret; \
11179 __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
11180 __ret; \
11181 })
11182 #else
11183 #define vld4_dup_s64(__p0) __extension__ ({ \
11184 int64x1x4_t __ret; \
11185 __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
11186 __ret; \
11187 })
11188 #endif
11189
11190 #ifdef __LITTLE_ENDIAN__
11191 #define vld4_dup_s16(__p0) __extension__ ({ \
11192 int16x4x4_t __ret; \
11193 __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
11194 __ret; \
11195 })
11196 #else
11197 #define vld4_dup_s16(__p0) __extension__ ({ \
11198 int16x4x4_t __ret; \
11199 __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
11200 \
11201 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11202 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11203 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11204 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11205 __ret; \
11206 })
11207 #endif
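/*
 * Note: the vld4_dup_* macros load a single four-element structure and
 * broadcast ("dup") element i across every lane of result vector val[i]; for
 * vld4_dup_f32 that means reading four floats and producing a float32x2x4_t
 * whose val[i] is {p[i], p[i]}. As with vld4 above, only the multi-lane types
 * need the big-endian lane reversal.
 *
 * Illustrative usage sketch only; the names `coeffs` and `splat` are
 * hypothetical:
 *
 *   static inline float32x2_t splat_second_coeff(const float32_t coeffs[4]) {
 *       float32x2x4_t splat = vld4_dup_f32(coeffs); // splat.val[i] = {coeffs[i], coeffs[i]}
 *       return splat.val[1];
 *   }
 */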
11208
11209 #ifdef __LITTLE_ENDIAN__
11210 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
11211 poly8x8x4_t __s1 = __p1; \
11212 poly8x8x4_t __ret; \
11213 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
11214 __ret; \
11215 })
11216 #else
11217 #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
11218 poly8x8x4_t __s1 = __p1; \
11219 poly8x8x4_t __rev1; \
11220 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11221 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11222 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11223 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11224 poly8x8x4_t __ret; \
11225 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
11226 \
11227 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11228 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11229 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11230 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11231 __ret; \
11232 })
11233 #endif
11234
11235 #ifdef __LITTLE_ENDIAN__
11236 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11237 poly16x4x4_t __s1 = __p1; \
11238 poly16x4x4_t __ret; \
11239 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
11240 __ret; \
11241 })
11242 #else
11243 #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11244 poly16x4x4_t __s1 = __p1; \
11245 poly16x4x4_t __rev1; \
11246 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11247 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11248 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11249 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11250 poly16x4x4_t __ret; \
11251 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
11252 \
11253 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11254 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11255 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11256 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11257 __ret; \
11258 })
11259 #endif
11260
11261 #ifdef __LITTLE_ENDIAN__
11262 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11263 poly16x8x4_t __s1 = __p1; \
11264 poly16x8x4_t __ret; \
11265 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
11266 __ret; \
11267 })
11268 #else
11269 #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
11270 poly16x8x4_t __s1 = __p1; \
11271 poly16x8x4_t __rev1; \
11272 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11273 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11274 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11275 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11276 poly16x8x4_t __ret; \
11277 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
11278 \
11279 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11280 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11281 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11282 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11283 __ret; \
11284 })
11285 #endif
11286
11287 #ifdef __LITTLE_ENDIAN__
11288 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11289 uint32x4x4_t __s1 = __p1; \
11290 uint32x4x4_t __ret; \
11291 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
11292 __ret; \
11293 })
11294 #else
11295 #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11296 uint32x4x4_t __s1 = __p1; \
11297 uint32x4x4_t __rev1; \
11298 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11299 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11300 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11301 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11302 uint32x4x4_t __ret; \
11303 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
11304 \
11305 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11306 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11307 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11308 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11309 __ret; \
11310 })
11311 #endif
11312
11313 #ifdef __LITTLE_ENDIAN__
11314 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11315 uint16x8x4_t __s1 = __p1; \
11316 uint16x8x4_t __ret; \
11317 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
11318 __ret; \
11319 })
11320 #else
11321 #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11322 uint16x8x4_t __s1 = __p1; \
11323 uint16x8x4_t __rev1; \
11324 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11325 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11326 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11327 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11328 uint16x8x4_t __ret; \
11329 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
11330 \
11331 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11332 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11333 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11334 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11335 __ret; \
11336 })
11337 #endif
11338
11339 #ifdef __LITTLE_ENDIAN__
11340 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11341 float32x4x4_t __s1 = __p1; \
11342 float32x4x4_t __ret; \
11343 __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
11344 __ret; \
11345 })
11346 #else
11347 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11348 float32x4x4_t __s1 = __p1; \
11349 float32x4x4_t __rev1; \
11350 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11351 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11352 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11353 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11354 float32x4x4_t __ret; \
11355 __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
11356 \
11357 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11358 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11359 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11360 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11361 __ret; \
11362 })
11363 #endif
11364
11365 #ifdef __LITTLE_ENDIAN__
11366 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11367 float16x8x4_t __s1 = __p1; \
11368 float16x8x4_t __ret; \
11369 __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
11370 __ret; \
11371 })
11372 #else
11373 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11374 float16x8x4_t __s1 = __p1; \
11375 float16x8x4_t __rev1; \
11376 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11377 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11378 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11379 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11380 float16x8x4_t __ret; \
11381 __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
11382 \
11383 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11384 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11385 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11386 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11387 __ret; \
11388 })
11389 #endif
11390
11391 #ifdef __LITTLE_ENDIAN__
11392 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11393 int32x4x4_t __s1 = __p1; \
11394 int32x4x4_t __ret; \
11395 __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
11396 __ret; \
11397 })
11398 #else
11399 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11400 int32x4x4_t __s1 = __p1; \
11401 int32x4x4_t __rev1; \
11402 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11403 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11404 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11405 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11406 int32x4x4_t __ret; \
11407 __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
11408 \
11409 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11410 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11411 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11412 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11413 __ret; \
11414 })
11415 #endif
11416
11417 #ifdef __LITTLE_ENDIAN__
11418 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11419 int16x8x4_t __s1 = __p1; \
11420 int16x8x4_t __ret; \
11421 __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
11422 __ret; \
11423 })
11424 #else
11425 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11426 int16x8x4_t __s1 = __p1; \
11427 int16x8x4_t __rev1; \
11428 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11429 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11430 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11431 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11432 int16x8x4_t __ret; \
11433 __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
11434 \
11435 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11436 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11437 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11438 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11439 __ret; \
11440 })
11441 #endif
11442
11443 #ifdef __LITTLE_ENDIAN__
11444 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
11445 uint8x8x4_t __s1 = __p1; \
11446 uint8x8x4_t __ret; \
11447 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
11448 __ret; \
11449 })
11450 #else
11451 #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
11452 uint8x8x4_t __s1 = __p1; \
11453 uint8x8x4_t __rev1; \
11454 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11455 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11456 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11457 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11458 uint8x8x4_t __ret; \
11459 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
11460 \
11461 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11462 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11463 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11464 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11465 __ret; \
11466 })
11467 #endif
11468
11469 #ifdef __LITTLE_ENDIAN__
11470 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11471 uint32x2x4_t __s1 = __p1; \
11472 uint32x2x4_t __ret; \
11473 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
11474 __ret; \
11475 })
11476 #else
11477 #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
11478 uint32x2x4_t __s1 = __p1; \
11479 uint32x2x4_t __rev1; \
11480 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
11481 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
11482 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
11483 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
11484 uint32x2x4_t __ret; \
11485 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
11486 \
11487 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11488 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11489 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11490 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11491 __ret; \
11492 })
11493 #endif
11494
11495 #ifdef __LITTLE_ENDIAN__
11496 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11497 uint16x4x4_t __s1 = __p1; \
11498 uint16x4x4_t __ret; \
11499 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
11500 __ret; \
11501 })
11502 #else
11503 #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
11504 uint16x4x4_t __s1 = __p1; \
11505 uint16x4x4_t __rev1; \
11506 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11507 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11508 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11509 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11510 uint16x4x4_t __ret; \
11511 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
11512 \
11513 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11514 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11515 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11516 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11517 __ret; \
11518 })
11519 #endif
11520
11521 #ifdef __LITTLE_ENDIAN__
11522 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
11523 int8x8x4_t __s1 = __p1; \
11524 int8x8x4_t __ret; \
11525 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
11526 __ret; \
11527 })
11528 #else
11529 #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
11530 int8x8x4_t __s1 = __p1; \
11531 int8x8x4_t __rev1; \
11532 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11533 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11534 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11535 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11536 int8x8x4_t __ret; \
11537 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
11538 \
11539 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
11540 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
11541 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
11542 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
11543 __ret; \
11544 })
11545 #endif
11546
11547 #ifdef __LITTLE_ENDIAN__
11548 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11549 float32x2x4_t __s1 = __p1; \
11550 float32x2x4_t __ret; \
11551 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
11552 __ret; \
11553 })
11554 #else
11555 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
11556 float32x2x4_t __s1 = __p1; \
11557 float32x2x4_t __rev1; \
11558 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
11559 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
11560 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
11561 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
11562 float32x2x4_t __ret; \
11563 __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
11564 \
11565 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11566 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11567 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11568 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11569 __ret; \
11570 })
11571 #endif
11572
11573 #ifdef __LITTLE_ENDIAN__
11574 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11575 float16x4x4_t __s1 = __p1; \
11576 float16x4x4_t __ret; \
11577 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
11578 __ret; \
11579 })
11580 #else
11581 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
11582 float16x4x4_t __s1 = __p1; \
11583 float16x4x4_t __rev1; \
11584 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11585 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11586 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11587 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11588 float16x4x4_t __ret; \
11589 __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
11590 \
11591 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11592 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11593 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11594 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11595 __ret; \
11596 })
11597 #endif
11598
11599 #ifdef __LITTLE_ENDIAN__
11600 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11601 int32x2x4_t __s1 = __p1; \
11602 int32x2x4_t __ret; \
11603 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
11604 __ret; \
11605 })
11606 #else
11607 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
11608 int32x2x4_t __s1 = __p1; \
11609 int32x2x4_t __rev1; \
11610 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
11611 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
11612 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
11613 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
11614 int32x2x4_t __ret; \
11615 __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
11616 \
11617 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
11618 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
11619 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
11620 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
11621 __ret; \
11622 })
11623 #endif
11624
11625 #ifdef __LITTLE_ENDIAN__
11626 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11627 int16x4x4_t __s1 = __p1; \
11628 int16x4x4_t __ret; \
11629 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
11630 __ret; \
11631 })
11632 #else
11633 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
11634 int16x4x4_t __s1 = __p1; \
11635 int16x4x4_t __rev1; \
11636 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
11637 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
11638 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
11639 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
11640 int16x4x4_t __ret; \
11641 __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
11642 \
11643 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
11644 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
11645 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
11646 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
11647 __ret; \
11648 })
11649 #endif
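/*
 * Note: the vld4_lane_* macros load one four-element structure into lane
 * __p2 of each vector of an existing x4 tuple (__p1), leaving the remaining
 * lanes untouched; the big-endian forms lane-reverse both the incoming tuple
 * and the result around the builtin call. The lane index must be a constant
 * expression.
 *
 * Illustrative usage sketch only; the names `acc` and `p` are hypothetical:
 *
 *   static inline int16x4x4_t refresh_lane0(int16x4x4_t acc, const int16_t *p) {
 *       return vld4_lane_s16(p, acc, 0); // rewrite lane 0 of acc.val[0..3] from p[0..3]
 *   }
 */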
11650
11651 #ifdef __LITTLE_ENDIAN__
11652 __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11653 uint8x16_t __ret;
11654 __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
11655 return __ret;
11656 }
11657 #else
11658 __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11659 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11660 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11661 uint8x16_t __ret;
11662 __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
11663 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11664 return __ret;
11665 }
11666 #endif
11667
11668 #ifdef __LITTLE_ENDIAN__
11669 __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11670 uint32x4_t __ret;
11671 __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
11672 return __ret;
11673 }
11674 #else
11675 __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11676 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11677 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11678 uint32x4_t __ret;
11679 __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
11680 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11681 return __ret;
11682 }
11683 #endif
11684
11685 #ifdef __LITTLE_ENDIAN__
11686 __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11687 uint16x8_t __ret;
11688 __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
11689 return __ret;
11690 }
11691 #else
11692 __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11693 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11694 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11695 uint16x8_t __ret;
11696 __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
11697 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11698 return __ret;
11699 }
11700 #endif
11701
11702 #ifdef __LITTLE_ENDIAN__
11703 __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
11704 int8x16_t __ret;
11705 __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
11706 return __ret;
11707 }
11708 #else
11709 __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
11710 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11711 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11712 int8x16_t __ret;
11713 __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
11714 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11715 return __ret;
11716 }
11717 #endif
11718
11719 #ifdef __LITTLE_ENDIAN__
11720 __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
11721 float32x4_t __ret;
11722 __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
11723 return __ret;
11724 }
11725 #else
11726 __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
11727 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11728 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11729 float32x4_t __ret;
11730 __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
11731 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11732 return __ret;
11733 }
11734 #endif
11735
11736 #ifdef __LITTLE_ENDIAN__
11737 __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
11738 int32x4_t __ret;
11739 __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
11740 return __ret;
11741 }
11742 #else
11743 __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
11744 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11745 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11746 int32x4_t __ret;
11747 __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
11748 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11749 return __ret;
11750 }
11751 #endif
11752
11753 #ifdef __LITTLE_ENDIAN__
11754 __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
11755 int16x8_t __ret;
11756 __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
11757 return __ret;
11758 }
11759 #else
11760 __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
11761 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11762 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11763 int16x8_t __ret;
11764 __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
11765 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11766 return __ret;
11767 }
11768 #endif
11769
11770 #ifdef __LITTLE_ENDIAN__
11771 __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
11772 uint8x8_t __ret;
11773 __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
11774 return __ret;
11775 }
11776 #else
11777 __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
11778 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11779 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11780 uint8x8_t __ret;
11781 __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
11782 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11783 return __ret;
11784 }
11785 #endif
11786
11787 #ifdef __LITTLE_ENDIAN__
11788 __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
11789 uint32x2_t __ret;
11790 __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
11791 return __ret;
11792 }
11793 #else
11794 __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
11795 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
11796 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
11797 uint32x2_t __ret;
11798 __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
11799 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
11800 return __ret;
11801 }
11802 #endif
11803
11804 #ifdef __LITTLE_ENDIAN__
11805 __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
11806 uint16x4_t __ret;
11807 __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
11808 return __ret;
11809 }
11810 #else
11811 __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
11812 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11813 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11814 uint16x4_t __ret;
11815 __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
11816 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11817 return __ret;
11818 }
11819 #endif
11820
11821 #ifdef __LITTLE_ENDIAN__
11822 __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
11823 int8x8_t __ret;
11824 __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
11825 return __ret;
11826 }
11827 #else
11828 __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
11829 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11830 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11831 int8x8_t __ret;
11832 __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
11833 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11834 return __ret;
11835 }
11836 #endif
11837
11838 #ifdef __LITTLE_ENDIAN__
11839 __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
11840 float32x2_t __ret;
11841 __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
11842 return __ret;
11843 }
11844 #else
11845 __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
11846 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
11847 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
11848 float32x2_t __ret;
11849 __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
11850 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
11851 return __ret;
11852 }
11853 #endif
11854
11855 #ifdef __LITTLE_ENDIAN__
11856 __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
11857 int32x2_t __ret;
11858 __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
11859 return __ret;
11860 }
11861 #else
11862 __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
11863 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
11864 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
11865 int32x2_t __ret;
11866 __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
11867 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
11868 return __ret;
11869 }
11870 #endif
11871
11872 #ifdef __LITTLE_ENDIAN__
11873 __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
11874 int16x4_t __ret;
11875 __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
11876 return __ret;
11877 }
11878 #else
11879 __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
11880 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11881 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11882 int16x4_t __ret;
11883 __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
11884 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11885 return __ret;
11886 }
11887 #endif
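/*
 * Note: vmaxq_* and vmax_* return the lane-wise maximum of their two
 * operands; as with the loads above, the big-endian definitions reverse the
 * operands, call the same builtin, and reverse the result back.
 *
 * Illustrative usage sketch only; `clamp_low` is a hypothetical helper:
 *
 *   static inline float32x4_t clamp_low(float32x4_t v, float32x4_t floor_v) {
 *       return vmaxq_f32(v, floor_v); // per-lane max(v, floor_v)
 *   }
 */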
11888
11889 #ifdef __LITTLE_ENDIAN__
11890 __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11891 uint8x16_t __ret;
11892 __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
11893 return __ret;
11894 }
11895 #else
11896 __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
11897 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11898 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11899 uint8x16_t __ret;
11900 __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
11901 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11902 return __ret;
11903 }
11904 #endif
11905
11906 #ifdef __LITTLE_ENDIAN__
11907 __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11908 uint32x4_t __ret;
11909 __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
11910 return __ret;
11911 }
11912 #else
11913 __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
11914 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11915 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11916 uint32x4_t __ret;
11917 __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
11918 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11919 return __ret;
11920 }
11921 #endif
11922
11923 #ifdef __LITTLE_ENDIAN__
11924 __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11925 uint16x8_t __ret;
11926 __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
11927 return __ret;
11928 }
11929 #else
11930 __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
11931 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
11932 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
11933 uint16x8_t __ret;
11934 __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
11935 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
11936 return __ret;
11937 }
11938 #endif
11939
11940 #ifdef __LITTLE_ENDIAN__
11941 __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
11942 int8x16_t __ret;
11943 __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
11944 return __ret;
11945 }
11946 #else
11947 __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
11948 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11949 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11950 int8x16_t __ret;
11951 __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
11952 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
11953 return __ret;
11954 }
11955 #endif
11956
11957 #ifdef __LITTLE_ENDIAN__
11958 __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
11959 float32x4_t __ret;
11960 __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
11961 return __ret;
11962 }
11963 #else
11964 __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
11965 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11966 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11967 float32x4_t __ret;
11968 __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
11969 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11970 return __ret;
11971 }
11972 #endif
11973
11974 #ifdef __LITTLE_ENDIAN__
11975 __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
11976 int32x4_t __ret;
11977 __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
11978 return __ret;
11979 }
11980 #else
11981 __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
11982 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
11983 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
11984 int32x4_t __ret;
11985 __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
11986 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
11987 return __ret;
11988 }
11989 #endif
11990
11991 #ifdef __LITTLE_ENDIAN__
11992 __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
11993 int16x8_t __ret;
11994 __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
11995 return __ret;
11996 }
11997 #else
11998 __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
11999 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12000 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12001 int16x8_t __ret;
12002 __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
12003 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12004 return __ret;
12005 }
12006 #endif
12007
12008 #ifdef __LITTLE_ENDIAN__
12009 __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
12010 uint8x8_t __ret;
12011 __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
12012 return __ret;
12013 }
12014 #else
12015 __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
12016 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12017 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12018 uint8x8_t __ret;
12019 __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
12020 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12021 return __ret;
12022 }
12023 #endif
12024
12025 #ifdef __LITTLE_ENDIAN__
12026 __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
12027 uint32x2_t __ret;
12028 __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
12029 return __ret;
12030 }
12031 #else
12032 __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
12033 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12034 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12035 uint32x2_t __ret;
12036 __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
12037 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12038 return __ret;
12039 }
12040 #endif
12041
12042 #ifdef __LITTLE_ENDIAN__
12043 __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
12044 uint16x4_t __ret;
12045 __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
12046 return __ret;
12047 }
12048 #else
12049 __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
12050 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12051 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12052 uint16x4_t __ret;
12053 __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
12054 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12055 return __ret;
12056 }
12057 #endif
12058
12059 #ifdef __LITTLE_ENDIAN__
12060 __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
12061 int8x8_t __ret;
12062 __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
12063 return __ret;
12064 }
12065 #else
12066 __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
12067 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12068 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12069 int8x8_t __ret;
12070 __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
12071 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12072 return __ret;
12073 }
12074 #endif
12075
12076 #ifdef __LITTLE_ENDIAN__
12077 __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
12078 float32x2_t __ret;
12079 __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
12080 return __ret;
12081 }
12082 #else
12083 __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
12084 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12085 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12086 float32x2_t __ret;
12087 __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
12088 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12089 return __ret;
12090 }
12091 #endif
12092
12093 #ifdef __LITTLE_ENDIAN__
12094 __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
12095 int32x2_t __ret;
12096 __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
12097 return __ret;
12098 }
12099 #else
12100 __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
12101 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12102 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12103 int32x2_t __ret;
12104 __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
12105 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12106 return __ret;
12107 }
12108 #endif
12109
12110 #ifdef __LITTLE_ENDIAN__
12111 __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
12112 int16x4_t __ret;
12113 __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
12114 return __ret;
12115 }
12116 #else
12117 __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
12118 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12119 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12120 int16x4_t __ret;
12121 __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
12122 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12123 return __ret;
12124 }
12125 #endif
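/*
 * Note: vminq_* and vmin_* are the lane-wise minimum counterparts of vmax
 * above; combining the two gives a per-lane clamp.
 *
 * Illustrative usage sketch only; `clamp_s16q` is a hypothetical helper:
 *
 *   static inline int16x8_t clamp_s16q(int16x8_t v, int16x8_t lo, int16x8_t hi) {
 *       return vminq_s16(vmaxq_s16(v, lo), hi); // per-lane clamp of v into [lo, hi]
 *   }
 */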
12126
12127 #ifdef __LITTLE_ENDIAN__
12128 __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12129 uint8x16_t __ret;
12130 __ret = __p0 + __p1 * __p2;
12131 return __ret;
12132 }
12133 #else
12134 __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12135 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12136 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12137 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12138 uint8x16_t __ret;
12139 __ret = __rev0 + __rev1 * __rev2;
12140 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12141 return __ret;
12142 }
12143 #endif
12144
12145 #ifdef __LITTLE_ENDIAN__
12146 __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12147 uint32x4_t __ret;
12148 __ret = __p0 + __p1 * __p2;
12149 return __ret;
12150 }
12151 #else
12152 __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12153 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12154 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12155 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12156 uint32x4_t __ret;
12157 __ret = __rev0 + __rev1 * __rev2;
12158 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12159 return __ret;
12160 }
12161 #endif
12162
12163 #ifdef __LITTLE_ENDIAN__
12164 __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12165 uint16x8_t __ret;
12166 __ret = __p0 + __p1 * __p2;
12167 return __ret;
12168 }
12169 #else
12170 __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12171 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12172 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12173 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12174 uint16x8_t __ret;
12175 __ret = __rev0 + __rev1 * __rev2;
12176 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12177 return __ret;
12178 }
12179 #endif
12180
12181 #ifdef __LITTLE_ENDIAN__
12182 __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12183 int8x16_t __ret;
12184 __ret = __p0 + __p1 * __p2;
12185 return __ret;
12186 }
12187 #else
12188 __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12189 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12190 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12191 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12192 int8x16_t __ret;
12193 __ret = __rev0 + __rev1 * __rev2;
12194 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12195 return __ret;
12196 }
12197 #endif
12198
12199 #ifdef __LITTLE_ENDIAN__
12200 __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12201 float32x4_t __ret;
12202 __ret = __p0 + __p1 * __p2;
12203 return __ret;
12204 }
12205 #else
12206 __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12207 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12208 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12209 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12210 float32x4_t __ret;
12211 __ret = __rev0 + __rev1 * __rev2;
12212 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12213 return __ret;
12214 }
12215 #endif
12216
12217 #ifdef __LITTLE_ENDIAN__
12218 __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12219 int32x4_t __ret;
12220 __ret = __p0 + __p1 * __p2;
12221 return __ret;
12222 }
12223 #else
12224 __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12225 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12226 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12227 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12228 int32x4_t __ret;
12229 __ret = __rev0 + __rev1 * __rev2;
12230 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12231 return __ret;
12232 }
12233 #endif
12234
12235 #ifdef __LITTLE_ENDIAN__
12236 __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12237 int16x8_t __ret;
12238 __ret = __p0 + __p1 * __p2;
12239 return __ret;
12240 }
12241 #else
12242 __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12243 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12244 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12245 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12246 int16x8_t __ret;
12247 __ret = __rev0 + __rev1 * __rev2;
12248 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12249 return __ret;
12250 }
12251 #endif
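/*
 * Note: vmlaq_* (and the 64-bit-vector vmla_* forms that follow) are
 * multiply-accumulate operations expressed as plain vector arithmetic,
 * __p0 + __p1 * __p2, leaving the compiler free to select a fused VMLA
 * instruction where profitable.
 *
 * Illustrative usage sketch only; `axpy4` is a hypothetical helper:
 *
 *   static inline float32x4_t axpy4(float32x4_t acc, float32x4_t a, float32x4_t x) {
 *       return vmlaq_f32(acc, a, x); // acc + a * x, lane-wise
 *   }
 */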
12252
12253 #ifdef __LITTLE_ENDIAN__
12254 __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12255 uint8x8_t __ret;
12256 __ret = __p0 + __p1 * __p2;
12257 return __ret;
12258 }
12259 #else
12260 __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12261 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12262 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12263 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12264 uint8x8_t __ret;
12265 __ret = __rev0 + __rev1 * __rev2;
12266 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12267 return __ret;
12268 }
12269 #endif
12270
12271 #ifdef __LITTLE_ENDIAN__
12272 __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12273 uint32x2_t __ret;
12274 __ret = __p0 + __p1 * __p2;
12275 return __ret;
12276 }
12277 #else
12278 __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12279 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12280 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12281 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12282 uint32x2_t __ret;
12283 __ret = __rev0 + __rev1 * __rev2;
12284 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12285 return __ret;
12286 }
12287 #endif
12288
12289 #ifdef __LITTLE_ENDIAN__
12290 __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12291 uint16x4_t __ret;
12292 __ret = __p0 + __p1 * __p2;
12293 return __ret;
12294 }
12295 #else
12296 __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12297 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12298 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12299 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12300 uint16x4_t __ret;
12301 __ret = __rev0 + __rev1 * __rev2;
12302 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12303 return __ret;
12304 }
12305 #endif
12306
12307 #ifdef __LITTLE_ENDIAN__
12308 __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
12309 int8x8_t __ret;
12310 __ret = __p0 + __p1 * __p2;
12311 return __ret;
12312 }
12313 #else
12314 __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
12315 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12316 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12317 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12318 int8x8_t __ret;
12319 __ret = __rev0 + __rev1 * __rev2;
12320 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12321 return __ret;
12322 }
12323 #endif
12324
12325 #ifdef __LITTLE_ENDIAN__
12326 __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
12327 float32x2_t __ret;
12328 __ret = __p0 + __p1 * __p2;
12329 return __ret;
12330 }
12331 #else
12332 __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
12333 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12334 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12335 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12336 float32x2_t __ret;
12337 __ret = __rev0 + __rev1 * __rev2;
12338 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12339 return __ret;
12340 }
12341 #endif
12342
12343 #ifdef __LITTLE_ENDIAN__
12344 __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
12345 int32x2_t __ret;
12346 __ret = __p0 + __p1 * __p2;
12347 return __ret;
12348 }
12349 #else
12350 __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
12351 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12352 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12353 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12354 int32x2_t __ret;
12355 __ret = __rev0 + __rev1 * __rev2;
12356 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12357 return __ret;
12358 }
12359 #endif
12360
12361 #ifdef __LITTLE_ENDIAN__
12362 __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
12363 int16x4_t __ret;
12364 __ret = __p0 + __p1 * __p2;
12365 return __ret;
12366 }
12367 #else
12368 __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
12369 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12370 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12371 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12372 int16x4_t __ret;
12373 __ret = __rev0 + __rev1 * __rev2;
12374 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12375 return __ret;
12376 }
12377 #endif
12378
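/*
 * vmla(q)_lane_*: multiply-accumulate against a single lane of __p2. The
 * macros splat lane __p3 across a full vector with __builtin_shufflevector
 * and reuse the __s0 + __s1 * splat form; the big-endian variants also
 * reverse and restore lane order as above.
 */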
12379 #ifdef __LITTLE_ENDIAN__
12380 #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12381 uint32x4_t __s0 = __p0; \
12382 uint32x4_t __s1 = __p1; \
12383 uint32x2_t __s2 = __p2; \
12384 uint32x4_t __ret; \
12385 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12386 __ret; \
12387 })
12388 #else
12389 #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12390 uint32x4_t __s0 = __p0; \
12391 uint32x4_t __s1 = __p1; \
12392 uint32x2_t __s2 = __p2; \
12393 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12394 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12395 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12396 uint32x4_t __ret; \
12397 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12398 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12399 __ret; \
12400 })
12401 #endif
12402
12403 #ifdef __LITTLE_ENDIAN__
12404 #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12405 uint16x8_t __s0 = __p0; \
12406 uint16x8_t __s1 = __p1; \
12407 uint16x4_t __s2 = __p2; \
12408 uint16x8_t __ret; \
12409 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12410 __ret; \
12411 })
12412 #else
12413 #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12414 uint16x8_t __s0 = __p0; \
12415 uint16x8_t __s1 = __p1; \
12416 uint16x4_t __s2 = __p2; \
12417 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
12418 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
12419 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12420 uint16x8_t __ret; \
12421 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12422 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
12423 __ret; \
12424 })
12425 #endif
12426
12427 #ifdef __LITTLE_ENDIAN__
12428 #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12429 float32x4_t __s0 = __p0; \
12430 float32x4_t __s1 = __p1; \
12431 float32x2_t __s2 = __p2; \
12432 float32x4_t __ret; \
12433 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12434 __ret; \
12435 })
12436 #else
12437 #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12438 float32x4_t __s0 = __p0; \
12439 float32x4_t __s1 = __p1; \
12440 float32x2_t __s2 = __p2; \
12441 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12442 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12443 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12444 float32x4_t __ret; \
12445 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12446 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12447 __ret; \
12448 })
12449 #endif
12450
12451 #ifdef __LITTLE_ENDIAN__
12452 #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12453 int32x4_t __s0 = __p0; \
12454 int32x4_t __s1 = __p1; \
12455 int32x2_t __s2 = __p2; \
12456 int32x4_t __ret; \
12457 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12458 __ret; \
12459 })
12460 #else
12461 #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12462 int32x4_t __s0 = __p0; \
12463 int32x4_t __s1 = __p1; \
12464 int32x2_t __s2 = __p2; \
12465 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12466 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12467 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12468 int32x4_t __ret; \
12469 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12470 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12471 __ret; \
12472 })
12473 #endif
12474
12475 #ifdef __LITTLE_ENDIAN__
12476 #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12477 int16x8_t __s0 = __p0; \
12478 int16x8_t __s1 = __p1; \
12479 int16x4_t __s2 = __p2; \
12480 int16x8_t __ret; \
12481 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12482 __ret; \
12483 })
12484 #else
12485 #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12486 int16x8_t __s0 = __p0; \
12487 int16x8_t __s1 = __p1; \
12488 int16x4_t __s2 = __p2; \
12489 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
12490 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
12491 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12492 int16x8_t __ret; \
12493 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
12494 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
12495 __ret; \
12496 })
12497 #endif
12498
12499 #ifdef __LITTLE_ENDIAN__
12500 #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12501 uint32x2_t __s0 = __p0; \
12502 uint32x2_t __s1 = __p1; \
12503 uint32x2_t __s2 = __p2; \
12504 uint32x2_t __ret; \
12505 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
12506 __ret; \
12507 })
12508 #else
12509 #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
12510 uint32x2_t __s0 = __p0; \
12511 uint32x2_t __s1 = __p1; \
12512 uint32x2_t __s2 = __p2; \
12513 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
12514 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
12515 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12516 uint32x2_t __ret; \
12517 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
12518 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
12519 __ret; \
12520 })
12521 #endif
12522
12523 #ifdef __LITTLE_ENDIAN__
12524 #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12525 uint16x4_t __s0 = __p0; \
12526 uint16x4_t __s1 = __p1; \
12527 uint16x4_t __s2 = __p2; \
12528 uint16x4_t __ret; \
12529 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12530 __ret; \
12531 })
12532 #else
12533 #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
12534 uint16x4_t __s0 = __p0; \
12535 uint16x4_t __s1 = __p1; \
12536 uint16x4_t __s2 = __p2; \
12537 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12538 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12539 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12540 uint16x4_t __ret; \
12541 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12542 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12543 __ret; \
12544 })
12545 #endif
12546
12547 #ifdef __LITTLE_ENDIAN__
12548 #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12549 float32x2_t __s0 = __p0; \
12550 float32x2_t __s1 = __p1; \
12551 float32x2_t __s2 = __p2; \
12552 float32x2_t __ret; \
12553 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
12554 __ret; \
12555 })
12556 #else
12557 #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
12558 float32x2_t __s0 = __p0; \
12559 float32x2_t __s1 = __p1; \
12560 float32x2_t __s2 = __p2; \
12561 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
12562 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
12563 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12564 float32x2_t __ret; \
12565 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
12566 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
12567 __ret; \
12568 })
12569 #endif
12570
12571 #ifdef __LITTLE_ENDIAN__
12572 #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12573 int32x2_t __s0 = __p0; \
12574 int32x2_t __s1 = __p1; \
12575 int32x2_t __s2 = __p2; \
12576 int32x2_t __ret; \
12577 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
12578 __ret; \
12579 })
12580 #else
12581 #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
12582 int32x2_t __s0 = __p0; \
12583 int32x2_t __s1 = __p1; \
12584 int32x2_t __s2 = __p2; \
12585 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
12586 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
12587 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
12588 int32x2_t __ret; \
12589 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
12590 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
12591 __ret; \
12592 })
12593 #endif
12594
12595 #ifdef __LITTLE_ENDIAN__
12596 #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12597 int16x4_t __s0 = __p0; \
12598 int16x4_t __s1 = __p1; \
12599 int16x4_t __s2 = __p2; \
12600 int16x4_t __ret; \
12601 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
12602 __ret; \
12603 })
12604 #else
12605 #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
12606 int16x4_t __s0 = __p0; \
12607 int16x4_t __s1 = __p1; \
12608 int16x4_t __s2 = __p2; \
12609 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
12610 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
12611 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
12612 int16x4_t __ret; \
12613 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
12614 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
12615 __ret; \
12616 })
12617 #endif
12618
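/*
 * vmla(q)_n_*: multiply-accumulate by a scalar. The scalar __p2 is broadcast
 * into a vector with a compound literal, e.g. (uint32x4_t) {__p2, __p2, __p2,
 * __p2}, and the result is __p0 + __p1 * broadcast.
 */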
12619 #ifdef __LITTLE_ENDIAN__
12620 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
12621 uint32x4_t __ret;
12622 __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
12623 return __ret;
12624 }
12625 #else
12626 __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
12627 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12628 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12629 uint32x4_t __ret;
12630 __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
12631 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12632 return __ret;
12633 }
12634 #endif
12635
12636 #ifdef __LITTLE_ENDIAN__
12637 __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
12638 uint16x8_t __ret;
12639 __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12640 return __ret;
12641 }
12642 #else
12643 __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
12644 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12645 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12646 uint16x8_t __ret;
12647 __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12648 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12649 return __ret;
12650 }
12651 #endif
12652
12653 #ifdef __LITTLE_ENDIAN__
12654 __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
12655 float32x4_t __ret;
12656 __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
12657 return __ret;
12658 }
12659 #else
12660 __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
12661 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12662 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12663 float32x4_t __ret;
12664 __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
12665 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12666 return __ret;
12667 }
12668 #endif
12669
12670 #ifdef __LITTLE_ENDIAN__
12671 __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
12672 int32x4_t __ret;
12673 __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
12674 return __ret;
12675 }
12676 #else
12677 __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
12678 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12679 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12680 int32x4_t __ret;
12681 __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
12682 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12683 return __ret;
12684 }
12685 #endif
12686
12687 #ifdef __LITTLE_ENDIAN__
12688 __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
12689 int16x8_t __ret;
12690 __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12691 return __ret;
12692 }
12693 #else
12694 __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
12695 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12696 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12697 int16x8_t __ret;
12698 __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
12699 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12700 return __ret;
12701 }
12702 #endif
12703
12704 #ifdef __LITTLE_ENDIAN__
12705 __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
12706 uint32x2_t __ret;
12707 __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
12708 return __ret;
12709 }
12710 #else
12711 __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
12712 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12713 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12714 uint32x2_t __ret;
12715 __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
12716 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12717 return __ret;
12718 }
12719 #endif
12720
12721 #ifdef __LITTLE_ENDIAN__
12722 __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
12723 uint16x4_t __ret;
12724 __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
12725 return __ret;
12726 }
12727 #else
12728 __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
12729 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12730 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12731 uint16x4_t __ret;
12732 __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
12733 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12734 return __ret;
12735 }
12736 #endif
12737
12738 #ifdef __LITTLE_ENDIAN__
12739 __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
12740 float32x2_t __ret;
12741 __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
12742 return __ret;
12743 }
12744 #else
12745 __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
12746 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12747 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12748 float32x2_t __ret;
12749 __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
12750 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12751 return __ret;
12752 }
12753 #endif
12754
12755 #ifdef __LITTLE_ENDIAN__
12756 __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
12757 int32x2_t __ret;
12758 __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
12759 return __ret;
12760 }
12761 #else
12762 __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
12763 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12764 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12765 int32x2_t __ret;
12766 __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
12767 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12768 return __ret;
12769 }
12770 #endif
12771
12772 #ifdef __LITTLE_ENDIAN__
12773 __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
12774 int16x4_t __ret;
12775 __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
12776 return __ret;
12777 }
12778 #else
12779 __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
12780 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12781 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12782 int16x4_t __ret;
12783 __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
12784 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12785 return __ret;
12786 }
12787 #endif
12788
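/*
 * vmls / vmlsq: vector multiply-subtract. Identical in structure to the vmla
 * family above, but each intrinsic computes __p0 - __p1 * __p2 lane-wise.
 */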
12789 #ifdef __LITTLE_ENDIAN__
12790 __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12791 uint8x16_t __ret;
12792 __ret = __p0 - __p1 * __p2;
12793 return __ret;
12794 }
12795 #else
12796 __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
12797 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12798 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12799 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12800 uint8x16_t __ret;
12801 __ret = __rev0 - __rev1 * __rev2;
12802 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12803 return __ret;
12804 }
12805 #endif
12806
12807 #ifdef __LITTLE_ENDIAN__
12808 __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12809 uint32x4_t __ret;
12810 __ret = __p0 - __p1 * __p2;
12811 return __ret;
12812 }
12813 #else
12814 __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
12815 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12816 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12817 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12818 uint32x4_t __ret;
12819 __ret = __rev0 - __rev1 * __rev2;
12820 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12821 return __ret;
12822 }
12823 #endif
12824
12825 #ifdef __LITTLE_ENDIAN__
12826 __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12827 uint16x8_t __ret;
12828 __ret = __p0 - __p1 * __p2;
12829 return __ret;
12830 }
12831 #else
12832 __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
12833 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12834 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12835 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12836 uint16x8_t __ret;
12837 __ret = __rev0 - __rev1 * __rev2;
12838 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12839 return __ret;
12840 }
12841 #endif
12842
12843 #ifdef __LITTLE_ENDIAN__
12844 __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12845 int8x16_t __ret;
12846 __ret = __p0 - __p1 * __p2;
12847 return __ret;
12848 }
12849 #else
12850 __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
12851 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12852 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12853 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12854 int8x16_t __ret;
12855 __ret = __rev0 - __rev1 * __rev2;
12856 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
12857 return __ret;
12858 }
12859 #endif
12860
12861 #ifdef __LITTLE_ENDIAN__
12862 __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12863 float32x4_t __ret;
12864 __ret = __p0 - __p1 * __p2;
12865 return __ret;
12866 }
12867 #else
12868 __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
12869 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12870 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12871 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12872 float32x4_t __ret;
12873 __ret = __rev0 - __rev1 * __rev2;
12874 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12875 return __ret;
12876 }
12877 #endif
12878
12879 #ifdef __LITTLE_ENDIAN__
12880 __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12881 int32x4_t __ret;
12882 __ret = __p0 - __p1 * __p2;
12883 return __ret;
12884 }
12885 #else
12886 __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
12887 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12888 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12889 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12890 int32x4_t __ret;
12891 __ret = __rev0 - __rev1 * __rev2;
12892 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12893 return __ret;
12894 }
12895 #endif
12896
12897 #ifdef __LITTLE_ENDIAN__
12898 __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12899 int16x8_t __ret;
12900 __ret = __p0 - __p1 * __p2;
12901 return __ret;
12902 }
12903 #else
12904 __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
12905 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12906 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12907 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12908 int16x8_t __ret;
12909 __ret = __rev0 - __rev1 * __rev2;
12910 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12911 return __ret;
12912 }
12913 #endif
12914
12915 #ifdef __LITTLE_ENDIAN__
12916 __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12917 uint8x8_t __ret;
12918 __ret = __p0 - __p1 * __p2;
12919 return __ret;
12920 }
12921 #else
12922 __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
12923 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12924 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12925 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12926 uint8x8_t __ret;
12927 __ret = __rev0 - __rev1 * __rev2;
12928 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12929 return __ret;
12930 }
12931 #endif
12932
12933 #ifdef __LITTLE_ENDIAN__
12934 __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12935 uint32x2_t __ret;
12936 __ret = __p0 - __p1 * __p2;
12937 return __ret;
12938 }
12939 #else
12940 __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
12941 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12942 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12943 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12944 uint32x2_t __ret;
12945 __ret = __rev0 - __rev1 * __rev2;
12946 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
12947 return __ret;
12948 }
12949 #endif
12950
12951 #ifdef __LITTLE_ENDIAN__
12952 __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12953 uint16x4_t __ret;
12954 __ret = __p0 - __p1 * __p2;
12955 return __ret;
12956 }
12957 #else
12958 __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
12959 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
12960 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
12961 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
12962 uint16x4_t __ret;
12963 __ret = __rev0 - __rev1 * __rev2;
12964 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
12965 return __ret;
12966 }
12967 #endif
12968
12969 #ifdef __LITTLE_ENDIAN__
12970 __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
12971 int8x8_t __ret;
12972 __ret = __p0 - __p1 * __p2;
12973 return __ret;
12974 }
12975 #else
12976 __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
12977 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
12978 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
12979 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
12980 int8x8_t __ret;
12981 __ret = __rev0 - __rev1 * __rev2;
12982 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
12983 return __ret;
12984 }
12985 #endif
12986
12987 #ifdef __LITTLE_ENDIAN__
12988 __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
12989 float32x2_t __ret;
12990 __ret = __p0 - __p1 * __p2;
12991 return __ret;
12992 }
12993 #else
12994 __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
12995 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
12996 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
12997 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
12998 float32x2_t __ret;
12999 __ret = __rev0 - __rev1 * __rev2;
13000 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13001 return __ret;
13002 }
13003 #endif
13004
13005 #ifdef __LITTLE_ENDIAN__
13006 __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
13007 int32x2_t __ret;
13008 __ret = __p0 - __p1 * __p2;
13009 return __ret;
13010 }
13011 #else
13012 __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
13013 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13014 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13015 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
13016 int32x2_t __ret;
13017 __ret = __rev0 - __rev1 * __rev2;
13018 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13019 return __ret;
13020 }
13021 #endif
13022
13023 #ifdef __LITTLE_ENDIAN__
13024 __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
13025 int16x4_t __ret;
13026 __ret = __p0 - __p1 * __p2;
13027 return __ret;
13028 }
13029 #else
13030 __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
13031 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13032 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13033 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
13034 int16x4_t __ret;
13035 __ret = __rev0 - __rev1 * __rev2;
13036 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13037 return __ret;
13038 }
13039 #endif
13040
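/*
 * vmls(q)_lane_*: multiply-subtract against a single lane of __p2, splatted
 * with __builtin_shufflevector; the subtraction counterpart of the
 * vmla(q)_lane_* macros above.
 */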
13041 #ifdef __LITTLE_ENDIAN__
13042 #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13043 uint32x4_t __s0 = __p0; \
13044 uint32x4_t __s1 = __p1; \
13045 uint32x2_t __s2 = __p2; \
13046 uint32x4_t __ret; \
13047 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13048 __ret; \
13049 })
13050 #else
13051 #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13052 uint32x4_t __s0 = __p0; \
13053 uint32x4_t __s1 = __p1; \
13054 uint32x2_t __s2 = __p2; \
13055 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13056 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13057 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13058 uint32x4_t __ret; \
13059 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13060 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13061 __ret; \
13062 })
13063 #endif
13064
13065 #ifdef __LITTLE_ENDIAN__
13066 #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13067 uint16x8_t __s0 = __p0; \
13068 uint16x8_t __s1 = __p1; \
13069 uint16x4_t __s2 = __p2; \
13070 uint16x8_t __ret; \
13071 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13072 __ret; \
13073 })
13074 #else
13075 #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13076 uint16x8_t __s0 = __p0; \
13077 uint16x8_t __s1 = __p1; \
13078 uint16x4_t __s2 = __p2; \
13079 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
13080 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
13081 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13082 uint16x8_t __ret; \
13083 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13084 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
13085 __ret; \
13086 })
13087 #endif
13088
13089 #ifdef __LITTLE_ENDIAN__
13090 #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13091 float32x4_t __s0 = __p0; \
13092 float32x4_t __s1 = __p1; \
13093 float32x2_t __s2 = __p2; \
13094 float32x4_t __ret; \
13095 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13096 __ret; \
13097 })
13098 #else
13099 #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13100 float32x4_t __s0 = __p0; \
13101 float32x4_t __s1 = __p1; \
13102 float32x2_t __s2 = __p2; \
13103 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13104 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13105 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13106 float32x4_t __ret; \
13107 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13108 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13109 __ret; \
13110 })
13111 #endif
13112
13113 #ifdef __LITTLE_ENDIAN__
13114 #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13115 int32x4_t __s0 = __p0; \
13116 int32x4_t __s1 = __p1; \
13117 int32x2_t __s2 = __p2; \
13118 int32x4_t __ret; \
13119 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13120 __ret; \
13121 })
13122 #else
13123 #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13124 int32x4_t __s0 = __p0; \
13125 int32x4_t __s1 = __p1; \
13126 int32x2_t __s2 = __p2; \
13127 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13128 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13129 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13130 int32x4_t __ret; \
13131 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13132 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13133 __ret; \
13134 })
13135 #endif
13136
13137 #ifdef __LITTLE_ENDIAN__
13138 #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13139 int16x8_t __s0 = __p0; \
13140 int16x8_t __s1 = __p1; \
13141 int16x4_t __s2 = __p2; \
13142 int16x8_t __ret; \
13143 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13144 __ret; \
13145 })
13146 #else
13147 #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13148 int16x8_t __s0 = __p0; \
13149 int16x8_t __s1 = __p1; \
13150 int16x4_t __s2 = __p2; \
13151 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
13152 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
13153 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13154 int16x8_t __ret; \
13155 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
13156 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
13157 __ret; \
13158 })
13159 #endif
13160
13161 #ifdef __LITTLE_ENDIAN__
13162 #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13163 uint32x2_t __s0 = __p0; \
13164 uint32x2_t __s1 = __p1; \
13165 uint32x2_t __s2 = __p2; \
13166 uint32x2_t __ret; \
13167 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
13168 __ret; \
13169 })
13170 #else
13171 #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
13172 uint32x2_t __s0 = __p0; \
13173 uint32x2_t __s1 = __p1; \
13174 uint32x2_t __s2 = __p2; \
13175 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
13176 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
13177 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13178 uint32x2_t __ret; \
13179 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
13180 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
13181 __ret; \
13182 })
13183 #endif
13184
13185 #ifdef __LITTLE_ENDIAN__
13186 #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13187 uint16x4_t __s0 = __p0; \
13188 uint16x4_t __s1 = __p1; \
13189 uint16x4_t __s2 = __p2; \
13190 uint16x4_t __ret; \
13191 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13192 __ret; \
13193 })
13194 #else
13195 #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
13196 uint16x4_t __s0 = __p0; \
13197 uint16x4_t __s1 = __p1; \
13198 uint16x4_t __s2 = __p2; \
13199 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13200 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13201 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13202 uint16x4_t __ret; \
13203 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13204 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13205 __ret; \
13206 })
13207 #endif
13208
13209 #ifdef __LITTLE_ENDIAN__
13210 #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13211 float32x2_t __s0 = __p0; \
13212 float32x2_t __s1 = __p1; \
13213 float32x2_t __s2 = __p2; \
13214 float32x2_t __ret; \
13215 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
13216 __ret; \
13217 })
13218 #else
13219 #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
13220 float32x2_t __s0 = __p0; \
13221 float32x2_t __s1 = __p1; \
13222 float32x2_t __s2 = __p2; \
13223 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
13224 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
13225 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13226 float32x2_t __ret; \
13227 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
13228 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
13229 __ret; \
13230 })
13231 #endif
13232
13233 #ifdef __LITTLE_ENDIAN__
13234 #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13235 int32x2_t __s0 = __p0; \
13236 int32x2_t __s1 = __p1; \
13237 int32x2_t __s2 = __p2; \
13238 int32x2_t __ret; \
13239 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
13240 __ret; \
13241 })
13242 #else
13243 #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
13244 int32x2_t __s0 = __p0; \
13245 int32x2_t __s1 = __p1; \
13246 int32x2_t __s2 = __p2; \
13247 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
13248 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
13249 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
13250 int32x2_t __ret; \
13251 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
13252 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
13253 __ret; \
13254 })
13255 #endif
13256
13257 #ifdef __LITTLE_ENDIAN__
13258 #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13259 int16x4_t __s0 = __p0; \
13260 int16x4_t __s1 = __p1; \
13261 int16x4_t __s2 = __p2; \
13262 int16x4_t __ret; \
13263 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
13264 __ret; \
13265 })
13266 #else
13267 #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
13268 int16x4_t __s0 = __p0; \
13269 int16x4_t __s1 = __p1; \
13270 int16x4_t __s2 = __p2; \
13271 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
13272 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
13273 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
13274 int16x4_t __ret; \
13275 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
13276 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13277 __ret; \
13278 })
13279 #endif
13280
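/*
 * vmls(q)_n_*: multiply-subtract by a scalar broadcast through a compound
 * literal, mirroring vmla(q)_n_* with __p0 - __p1 * {__p2, ...}.
 */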
13281 #ifdef __LITTLE_ENDIAN__
13282 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
13283 uint32x4_t __ret;
13284 __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
13285 return __ret;
13286 }
13287 #else
13288 __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
13289 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13290 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13291 uint32x4_t __ret;
13292 __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
13293 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13294 return __ret;
13295 }
13296 #endif
13297
13298 #ifdef __LITTLE_ENDIAN__
13299 __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
13300 uint16x8_t __ret;
13301 __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13302 return __ret;
13303 }
13304 #else
13305 __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
13306 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13307 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
13308 uint16x8_t __ret;
13309 __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13310 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13311 return __ret;
13312 }
13313 #endif
13314
13315 #ifdef __LITTLE_ENDIAN__
13316 __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
13317 float32x4_t __ret;
13318 __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
13319 return __ret;
13320 }
13321 #else
13322 __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
13323 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13324 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13325 float32x4_t __ret;
13326 __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
13327 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13328 return __ret;
13329 }
13330 #endif
13331
13332 #ifdef __LITTLE_ENDIAN__
13333 __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
13334 int32x4_t __ret;
13335 __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
13336 return __ret;
13337 }
13338 #else
13339 __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
13340 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13341 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13342 int32x4_t __ret;
13343 __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
13344 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13345 return __ret;
13346 }
13347 #endif
13348
13349 #ifdef __LITTLE_ENDIAN__
13350 __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
13351 int16x8_t __ret;
13352 __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13353 return __ret;
13354 }
13355 #else
13356 __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
13357 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13358 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
13359 int16x8_t __ret;
13360 __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
13361 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13362 return __ret;
13363 }
13364 #endif
13365
13366 #ifdef __LITTLE_ENDIAN__
13367 __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
13368 uint32x2_t __ret;
13369 __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
13370 return __ret;
13371 }
13372 #else
13373 __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
13374 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13375 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13376 uint32x2_t __ret;
13377 __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
13378 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13379 return __ret;
13380 }
13381 #endif
13382
13383 #ifdef __LITTLE_ENDIAN__
13384 __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
13385 uint16x4_t __ret;
13386 __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
13387 return __ret;
13388 }
13389 #else
13390 __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
13391 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13392 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13393 uint16x4_t __ret;
13394 __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
13395 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13396 return __ret;
13397 }
13398 #endif
13399
13400 #ifdef __LITTLE_ENDIAN__
13401 __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
13402 float32x2_t __ret;
13403 __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
13404 return __ret;
13405 }
13406 #else
13407 __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
13408 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13409 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13410 float32x2_t __ret;
13411 __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
13412 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13413 return __ret;
13414 }
13415 #endif
13416
13417 #ifdef __LITTLE_ENDIAN__
13418 __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
13419 int32x2_t __ret;
13420 __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
13421 return __ret;
13422 }
13423 #else
13424 __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
13425 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13426 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
13427 int32x2_t __ret;
13428 __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
13429 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13430 return __ret;
13431 }
13432 #endif
13433
13434 #ifdef __LITTLE_ENDIAN__
13435 __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
13436 int16x4_t __ret;
13437 __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
13438 return __ret;
13439 }
13440 #else
13441 __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
13442 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13443 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
13444 int16x4_t __ret;
13445 __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
13446 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13447 return __ret;
13448 }
13449 #endif
13450
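/*
 * vmov_n / vmovq_n: duplicate a scalar into every lane of a 64-bit or 128-bit
 * vector via a compound literal. For single-lane vectors (uint64x1_t,
 * int64x1_t) the little- and big-endian bodies are identical, since there is
 * no lane order to correct.
 */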
13451 #ifdef __LITTLE_ENDIAN__
13452 __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
13453 poly8x8_t __ret;
13454 __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13455 return __ret;
13456 }
13457 #else
13458 __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
13459 poly8x8_t __ret;
13460 __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13461 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13462 return __ret;
13463 }
13464 #endif
13465
13466 #ifdef __LITTLE_ENDIAN__
13467 __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
13468 poly16x4_t __ret;
13469 __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
13470 return __ret;
13471 }
13472 #else
13473 __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
13474 poly16x4_t __ret;
13475 __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
13476 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13477 return __ret;
13478 }
13479 #endif
13480
13481 #ifdef __LITTLE_ENDIAN__
13482 __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
13483 poly8x16_t __ret;
13484 __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13485 return __ret;
13486 }
13487 #else
13488 __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
13489 poly8x16_t __ret;
13490 __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13491 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
13492 return __ret;
13493 }
13494 #endif
13495
13496 #ifdef __LITTLE_ENDIAN__
13497 __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
13498 poly16x8_t __ret;
13499 __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13500 return __ret;
13501 }
13502 #else
13503 __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
13504 poly16x8_t __ret;
13505 __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13506 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13507 return __ret;
13508 }
13509 #endif
13510
13511 #ifdef __LITTLE_ENDIAN__
13512 __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
13513 uint8x16_t __ret;
13514 __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13515 return __ret;
13516 }
13517 #else
13518 __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
13519 uint8x16_t __ret;
13520 __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13521 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
13522 return __ret;
13523 }
13524 #endif
13525
13526 #ifdef __LITTLE_ENDIAN__
13527 __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
13528 uint32x4_t __ret;
13529 __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
13530 return __ret;
13531 }
13532 #else
13533 __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
13534 uint32x4_t __ret;
13535 __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
13536 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13537 return __ret;
13538 }
13539 #endif
13540
13541 #ifdef __LITTLE_ENDIAN__
13542 __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
13543 uint64x2_t __ret;
13544 __ret = (uint64x2_t) {__p0, __p0};
13545 return __ret;
13546 }
13547 #else
13548 __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
13549 uint64x2_t __ret;
13550 __ret = (uint64x2_t) {__p0, __p0};
13551 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13552 return __ret;
13553 }
13554 #endif
13555
13556 #ifdef __LITTLE_ENDIAN__
13557 __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
13558 uint16x8_t __ret;
13559 __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13560 return __ret;
13561 }
13562 #else
13563 __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
13564 uint16x8_t __ret;
13565 __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13566 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13567 return __ret;
13568 }
13569 #endif
13570
13571 #ifdef __LITTLE_ENDIAN__
13572 __ai int8x16_t vmovq_n_s8(int8_t __p0) {
13573 int8x16_t __ret;
13574 __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13575 return __ret;
13576 }
13577 #else
13578 __ai int8x16_t vmovq_n_s8(int8_t __p0) {
13579 int8x16_t __ret;
13580 __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13581 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
13582 return __ret;
13583 }
13584 #endif
13585
13586 #ifdef __LITTLE_ENDIAN__
13587 __ai float32x4_t vmovq_n_f32(float32_t __p0) {
13588 float32x4_t __ret;
13589 __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
13590 return __ret;
13591 }
13592 #else
13593 __ai float32x4_t vmovq_n_f32(float32_t __p0) {
13594 float32x4_t __ret;
13595 __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
13596 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13597 return __ret;
13598 }
13599 #endif
13600
13601 #ifdef __LITTLE_ENDIAN__
13602 #define vmovq_n_f16(__p0) __extension__ ({ \
13603 float16_t __s0 = __p0; \
13604 float16x8_t __ret; \
13605 __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
13606 __ret; \
13607 })
13608 #else
13609 #define vmovq_n_f16(__p0) __extension__ ({ \
13610 float16_t __s0 = __p0; \
13611 float16x8_t __ret; \
13612 __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
13613 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
13614 __ret; \
13615 })
13616 #endif
13617
13618 #ifdef __LITTLE_ENDIAN__
13619 __ai int32x4_t vmovq_n_s32(int32_t __p0) {
13620 int32x4_t __ret;
13621 __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
13622 return __ret;
13623 }
13624 #else
13625 __ai int32x4_t vmovq_n_s32(int32_t __p0) {
13626 int32x4_t __ret;
13627 __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
13628 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13629 return __ret;
13630 }
13631 #endif
13632
13633 #ifdef __LITTLE_ENDIAN__
13634 __ai int64x2_t vmovq_n_s64(int64_t __p0) {
13635 int64x2_t __ret;
13636 __ret = (int64x2_t) {__p0, __p0};
13637 return __ret;
13638 }
13639 #else
13640 __ai int64x2_t vmovq_n_s64(int64_t __p0) {
13641 int64x2_t __ret;
13642 __ret = (int64x2_t) {__p0, __p0};
13643 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13644 return __ret;
13645 }
13646 #endif
13647
13648 #ifdef __LITTLE_ENDIAN__
13649 __ai int16x8_t vmovq_n_s16(int16_t __p0) {
13650 int16x8_t __ret;
13651 __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13652 return __ret;
13653 }
13654 #else
13655 __ai int16x8_t vmovq_n_s16(int16_t __p0) {
13656 int16x8_t __ret;
13657 __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13658 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13659 return __ret;
13660 }
13661 #endif
13662
13663 #ifdef __LITTLE_ENDIAN__
13664 __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
13665 uint8x8_t __ret;
13666 __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13667 return __ret;
13668 }
13669 #else
13670 __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
13671 uint8x8_t __ret;
13672 __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13673 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13674 return __ret;
13675 }
13676 #endif
13677
13678 #ifdef __LITTLE_ENDIAN__
13679 __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
13680 uint32x2_t __ret;
13681 __ret = (uint32x2_t) {__p0, __p0};
13682 return __ret;
13683 }
13684 #else
13685 __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
13686 uint32x2_t __ret;
13687 __ret = (uint32x2_t) {__p0, __p0};
13688 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13689 return __ret;
13690 }
13691 #endif
13692
13693 #ifdef __LITTLE_ENDIAN__
13694 __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
13695 uint64x1_t __ret;
13696 __ret = (uint64x1_t) {__p0};
13697 return __ret;
13698 }
13699 #else
13700 __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
13701 uint64x1_t __ret;
13702 __ret = (uint64x1_t) {__p0};
13703 return __ret;
13704 }
13705 #endif
13706
13707 #ifdef __LITTLE_ENDIAN__
13708 __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
13709 uint16x4_t __ret;
13710 __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
13711 return __ret;
13712 }
13713 #else
13714 __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
13715 uint16x4_t __ret;
13716 __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
13717 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13718 return __ret;
13719 }
13720 #endif
13721
13722 #ifdef __LITTLE_ENDIAN__
13723 __ai int8x8_t vmov_n_s8(int8_t __p0) {
13724 int8x8_t __ret;
13725 __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13726 return __ret;
13727 }
13728 #else
13729 __ai int8x8_t vmov_n_s8(int8_t __p0) {
13730 int8x8_t __ret;
13731 __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
13732 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13733 return __ret;
13734 }
13735 #endif
13736
13737 #ifdef __LITTLE_ENDIAN__
13738 __ai float32x2_t vmov_n_f32(float32_t __p0) {
13739 float32x2_t __ret;
13740 __ret = (float32x2_t) {__p0, __p0};
13741 return __ret;
13742 }
13743 #else
13744 __ai float32x2_t vmov_n_f32(float32_t __p0) {
13745 float32x2_t __ret;
13746 __ret = (float32x2_t) {__p0, __p0};
13747 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13748 return __ret;
13749 }
13750 #endif
13751
13752 #ifdef __LITTLE_ENDIAN__
13753 #define vmov_n_f16(__p0) __extension__ ({ \
13754 float16_t __s0 = __p0; \
13755 float16x4_t __ret; \
13756 __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
13757 __ret; \
13758 })
13759 #else
13760 #define vmov_n_f16(__p0) __extension__ ({ \
13761 float16_t __s0 = __p0; \
13762 float16x4_t __ret; \
13763 __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
13764 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
13765 __ret; \
13766 })
13767 #endif
13768
13769 #ifdef __LITTLE_ENDIAN__
13770 __ai int32x2_t vmov_n_s32(int32_t __p0) {
13771 int32x2_t __ret;
13772 __ret = (int32x2_t) {__p0, __p0};
13773 return __ret;
13774 }
13775 #else
13776 __ai int32x2_t vmov_n_s32(int32_t __p0) {
13777 int32x2_t __ret;
13778 __ret = (int32x2_t) {__p0, __p0};
13779 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13780 return __ret;
13781 }
13782 #endif
13783
13784 #ifdef __LITTLE_ENDIAN__
13785 __ai int64x1_t vmov_n_s64(int64_t __p0) {
13786 int64x1_t __ret;
13787 __ret = (int64x1_t) {__p0};
13788 return __ret;
13789 }
13790 #else
13791 __ai int64x1_t vmov_n_s64(int64_t __p0) {
13792 int64x1_t __ret;
13793 __ret = (int64x1_t) {__p0};
13794 return __ret;
13795 }
13796 #endif
13797
13798 #ifdef __LITTLE_ENDIAN__
13799 __ai int16x4_t vmov_n_s16(int16_t __p0) {
13800 int16x4_t __ret;
13801 __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
13802 return __ret;
13803 }
13804 #else
13805 __ai int16x4_t vmov_n_s16(int16_t __p0) {
13806 int16x4_t __ret;
13807 __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
13808 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13809 return __ret;
13810 }
13811 #endif
13812
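/* vmovl_*: vector move long -- sign- or zero-extend each element of a 64-bit
 * vector into the double-width element of a 128-bit result. */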
13813 #ifdef __LITTLE_ENDIAN__
13814 __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
13815 uint16x8_t __ret;
13816 __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
13817 return __ret;
13818 }
13819 #else
13820 __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
13821 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13822 uint16x8_t __ret;
13823 __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
13824 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13825 return __ret;
13826 }
13827 __ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
13828 uint16x8_t __ret;
13829 __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
13830 return __ret;
13831 }
13832 #endif
13833
13834 #ifdef __LITTLE_ENDIAN__
13835 __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
13836 uint64x2_t __ret;
13837 __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
13838 return __ret;
13839 }
13840 #else
13841 __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
13842 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13843 uint64x2_t __ret;
13844 __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
13845 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13846 return __ret;
13847 }
13848 __ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
13849 uint64x2_t __ret;
13850 __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
13851 return __ret;
13852 }
13853 #endif
13854
13855 #ifdef __LITTLE_ENDIAN__
13856 __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
13857 uint32x4_t __ret;
13858 __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
13859 return __ret;
13860 }
13861 #else
13862 __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
13863 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13864 uint32x4_t __ret;
13865 __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
13866 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13867 return __ret;
13868 }
13869 __ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
13870 uint32x4_t __ret;
13871 __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
13872 return __ret;
13873 }
13874 #endif
13875
13876 #ifdef __LITTLE_ENDIAN__
13877 __ai int16x8_t vmovl_s8(int8x8_t __p0) {
13878 int16x8_t __ret;
13879 __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
13880 return __ret;
13881 }
13882 #else
13883 __ai int16x8_t vmovl_s8(int8x8_t __p0) {
13884 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13885 int16x8_t __ret;
13886 __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
13887 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13888 return __ret;
13889 }
13890 __ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
13891 int16x8_t __ret;
13892 __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
13893 return __ret;
13894 }
13895 #endif
13896
13897 #ifdef __LITTLE_ENDIAN__
13898 __ai int64x2_t vmovl_s32(int32x2_t __p0) {
13899 int64x2_t __ret;
13900 __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
13901 return __ret;
13902 }
13903 #else
13904 __ai int64x2_t vmovl_s32(int32x2_t __p0) {
13905 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13906 int64x2_t __ret;
13907 __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
13908 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13909 return __ret;
13910 }
13911 __ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
13912 int64x2_t __ret;
13913 __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
13914 return __ret;
13915 }
13916 #endif
13917
13918 #ifdef __LITTLE_ENDIAN__
13919 __ai int32x4_t vmovl_s16(int16x4_t __p0) {
13920 int32x4_t __ret;
13921 __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
13922 return __ret;
13923 }
13924 #else
13925 __ai int32x4_t vmovl_s16(int16x4_t __p0) {
13926 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13927 int32x4_t __ret;
13928 __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
13929 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13930 return __ret;
13931 }
13932 __ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
13933 int32x4_t __ret;
13934 __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
13935 return __ret;
13936 }
13937 #endif
13938
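/* vmovn_*: vector move narrow -- truncate each element of a 128-bit vector to
 * half width, producing a 64-bit result. */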
13939 #ifdef __LITTLE_ENDIAN__
13940 __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
13941 uint16x4_t __ret;
13942 __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
13943 return __ret;
13944 }
13945 #else
13946 __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
13947 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
13948 uint16x4_t __ret;
13949 __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
13950 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
13951 return __ret;
13952 }
13953 __ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
13954 uint16x4_t __ret;
13955 __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
13956 return __ret;
13957 }
13958 #endif
13959
13960 #ifdef __LITTLE_ENDIAN__
13961 __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
13962 uint32x2_t __ret;
13963 __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
13964 return __ret;
13965 }
13966 #else
13967 __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
13968 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
13969 uint32x2_t __ret;
13970 __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
13971 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
13972 return __ret;
13973 }
13974 __ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
13975 uint32x2_t __ret;
13976 __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
13977 return __ret;
13978 }
13979 #endif
13980
13981 #ifdef __LITTLE_ENDIAN__
13982 __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
13983 uint8x8_t __ret;
13984 __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
13985 return __ret;
13986 }
13987 #else
13988 __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
13989 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
13990 uint8x8_t __ret;
13991 __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
13992 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
13993 return __ret;
13994 }
13995 __ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
13996 uint8x8_t __ret;
13997 __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
13998 return __ret;
13999 }
14000 #endif
14001
14002 #ifdef __LITTLE_ENDIAN__
14003 __ai int16x4_t vmovn_s32(int32x4_t __p0) {
14004 int16x4_t __ret;
14005 __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
14006 return __ret;
14007 }
14008 #else
14009 __ai int16x4_t vmovn_s32(int32x4_t __p0) {
14010 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14011 int16x4_t __ret;
14012 __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
14013 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14014 return __ret;
14015 }
14016 __ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
14017 int16x4_t __ret;
14018 __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
14019 return __ret;
14020 }
14021 #endif
14022
14023 #ifdef __LITTLE_ENDIAN__
14024 __ai int32x2_t vmovn_s64(int64x2_t __p0) {
14025 int32x2_t __ret;
14026 __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
14027 return __ret;
14028 }
14029 #else
14030 __ai int32x2_t vmovn_s64(int64x2_t __p0) {
14031 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14032 int32x2_t __ret;
14033 __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
14034 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14035 return __ret;
14036 }
14037 __ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
14038 int32x2_t __ret;
14039 __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
14040 return __ret;
14041 }
14042 #endif
14043
14044 #ifdef __LITTLE_ENDIAN__
14045 __ai int8x8_t vmovn_s16(int16x8_t __p0) {
14046 int8x8_t __ret;
14047 __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
14048 return __ret;
14049 }
14050 #else
14051 __ai int8x8_t vmovn_s16(int16x8_t __p0) {
14052 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14053 int8x8_t __ret;
14054 __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
14055 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14056 return __ret;
14057 }
14058 __ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
14059 int8x8_t __ret;
14060 __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
14061 return __ret;
14062 }
14063 #endif
14064
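/* vmul[q]_*: lane-wise multiply of two vectors, expressed with the C `*`
 * operator on the vector types. */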
14065 #ifdef __LITTLE_ENDIAN__
14066 __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
14067 uint8x16_t __ret;
14068 __ret = __p0 * __p1;
14069 return __ret;
14070 }
14071 #else
14072 __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
14073 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14074 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14075 uint8x16_t __ret;
14076 __ret = __rev0 * __rev1;
14077 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14078 return __ret;
14079 }
14080 #endif
14081
14082 #ifdef __LITTLE_ENDIAN__
14083 __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
14084 uint32x4_t __ret;
14085 __ret = __p0 * __p1;
14086 return __ret;
14087 }
14088 #else
14089 __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
14090 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14091 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
14092 uint32x4_t __ret;
14093 __ret = __rev0 * __rev1;
14094 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14095 return __ret;
14096 }
14097 #endif
14098
14099 #ifdef __LITTLE_ENDIAN__
14100 __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
14101 uint16x8_t __ret;
14102 __ret = __p0 * __p1;
14103 return __ret;
14104 }
14105 #else
14106 __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
14107 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14108 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14109 uint16x8_t __ret;
14110 __ret = __rev0 * __rev1;
14111 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14112 return __ret;
14113 }
14114 #endif
14115
14116 #ifdef __LITTLE_ENDIAN__
14117 __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
14118 int8x16_t __ret;
14119 __ret = __p0 * __p1;
14120 return __ret;
14121 }
14122 #else
14123 __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
14124 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14125 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14126 int8x16_t __ret;
14127 __ret = __rev0 * __rev1;
14128 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14129 return __ret;
14130 }
14131 #endif
14132
14133 #ifdef __LITTLE_ENDIAN__
14134 __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
14135 float32x4_t __ret;
14136 __ret = __p0 * __p1;
14137 return __ret;
14138 }
14139 #else
14140 __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
14141 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14142 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
14143 float32x4_t __ret;
14144 __ret = __rev0 * __rev1;
14145 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14146 return __ret;
14147 }
14148 #endif
14149
14150 #ifdef __LITTLE_ENDIAN__
14151 __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
14152 int32x4_t __ret;
14153 __ret = __p0 * __p1;
14154 return __ret;
14155 }
14156 #else
14157 __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
14158 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14159 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
14160 int32x4_t __ret;
14161 __ret = __rev0 * __rev1;
14162 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14163 return __ret;
14164 }
14165 #endif
14166
14167 #ifdef __LITTLE_ENDIAN__
14168 __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
14169 int16x8_t __ret;
14170 __ret = __p0 * __p1;
14171 return __ret;
14172 }
14173 #else
14174 __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
14175 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14176 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14177 int16x8_t __ret;
14178 __ret = __rev0 * __rev1;
14179 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14180 return __ret;
14181 }
14182 #endif
14183
14184 #ifdef __LITTLE_ENDIAN__
14185 __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
14186 uint8x8_t __ret;
14187 __ret = __p0 * __p1;
14188 return __ret;
14189 }
14190 #else
14191 __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
14192 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14193 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14194 uint8x8_t __ret;
14195 __ret = __rev0 * __rev1;
14196 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14197 return __ret;
14198 }
14199 #endif
14200
14201 #ifdef __LITTLE_ENDIAN__
14202 __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
14203 uint32x2_t __ret;
14204 __ret = __p0 * __p1;
14205 return __ret;
14206 }
14207 #else
14208 __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
14209 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14210 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
14211 uint32x2_t __ret;
14212 __ret = __rev0 * __rev1;
14213 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14214 return __ret;
14215 }
14216 #endif
14217
14218 #ifdef __LITTLE_ENDIAN__
14219 __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
14220 uint16x4_t __ret;
14221 __ret = __p0 * __p1;
14222 return __ret;
14223 }
14224 #else
14225 __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
14226 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14227 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
14228 uint16x4_t __ret;
14229 __ret = __rev0 * __rev1;
14230 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14231 return __ret;
14232 }
14233 #endif
14234
14235 #ifdef __LITTLE_ENDIAN__
14236 __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
14237 int8x8_t __ret;
14238 __ret = __p0 * __p1;
14239 return __ret;
14240 }
14241 #else
14242 __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
14243 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14244 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14245 int8x8_t __ret;
14246 __ret = __rev0 * __rev1;
14247 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14248 return __ret;
14249 }
14250 #endif
14251
14252 #ifdef __LITTLE_ENDIAN__
14253 __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
14254 float32x2_t __ret;
14255 __ret = __p0 * __p1;
14256 return __ret;
14257 }
14258 #else
14259 __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
14260 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14261 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
14262 float32x2_t __ret;
14263 __ret = __rev0 * __rev1;
14264 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14265 return __ret;
14266 }
14267 #endif
14268
14269 #ifdef __LITTLE_ENDIAN__
14270 __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
14271 int32x2_t __ret;
14272 __ret = __p0 * __p1;
14273 return __ret;
14274 }
14275 #else
14276 __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
14277 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14278 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
14279 int32x2_t __ret;
14280 __ret = __rev0 * __rev1;
14281 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14282 return __ret;
14283 }
14284 #endif
14285
14286 #ifdef __LITTLE_ENDIAN__
14287 __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
14288 int16x4_t __ret;
14289 __ret = __p0 * __p1;
14290 return __ret;
14291 }
14292 #else
14293 __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
14294 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14295 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
14296 int16x4_t __ret;
14297 __ret = __rev0 * __rev1;
14298 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14299 return __ret;
14300 }
14301 #endif
14302
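/* vmul[q]_p8: polynomial (carry-less) multiply of poly8 vectors; this has no
 * C operator form, so it goes through the NEON builtin. */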
14303 #ifdef __LITTLE_ENDIAN__
14304 __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
14305 poly8x8_t __ret;
14306 __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
14307 return __ret;
14308 }
14309 #else
14310 __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
14311 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14312 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14313 poly8x8_t __ret;
14314 __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
14315 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14316 return __ret;
14317 }
14318 #endif
14319
14320 #ifdef __LITTLE_ENDIAN__
14321 __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
14322 poly8x16_t __ret;
14323 __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
14324 return __ret;
14325 }
14326 #else
14327 __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
14328 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14329 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14330 poly8x16_t __ret;
14331 __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
14332 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
14333 return __ret;
14334 }
14335 #endif
14336
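/* vmul[q]_lane_*: multiply a vector by one lane of a second vector; the lane
 * index must be a compile-time constant, hence the macro form. */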
14337 #ifdef __LITTLE_ENDIAN__
14338 #define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
14339 uint32x4_t __s0 = __p0; \
14340 uint32x2_t __s1 = __p1; \
14341 uint32x4_t __ret; \
14342 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
14343 __ret; \
14344 })
14345 #else
14346 #define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
14347 uint32x4_t __s0 = __p0; \
14348 uint32x2_t __s1 = __p1; \
14349 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
14350 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14351 uint32x4_t __ret; \
14352 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
14353 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
14354 __ret; \
14355 })
14356 #endif
14357
14358 #ifdef __LITTLE_ENDIAN__
14359 #define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
14360 uint16x8_t __s0 = __p0; \
14361 uint16x4_t __s1 = __p1; \
14362 uint16x8_t __ret; \
14363 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
14364 __ret; \
14365 })
14366 #else
14367 #define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
14368 uint16x8_t __s0 = __p0; \
14369 uint16x4_t __s1 = __p1; \
14370 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
14371 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
14372 uint16x8_t __ret; \
14373 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
14374 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
14375 __ret; \
14376 })
14377 #endif
14378
14379 #ifdef __LITTLE_ENDIAN__
14380 #define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
14381 float32x4_t __s0 = __p0; \
14382 float32x2_t __s1 = __p1; \
14383 float32x4_t __ret; \
14384 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
14385 __ret; \
14386 })
14387 #else
14388 #define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
14389 float32x4_t __s0 = __p0; \
14390 float32x2_t __s1 = __p1; \
14391 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
14392 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14393 float32x4_t __ret; \
14394 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
14395 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
14396 __ret; \
14397 })
14398 #endif
14399
14400 #ifdef __LITTLE_ENDIAN__
14401 #define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
14402 int32x4_t __s0 = __p0; \
14403 int32x2_t __s1 = __p1; \
14404 int32x4_t __ret; \
14405 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
14406 __ret; \
14407 })
14408 #else
14409 #define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
14410 int32x4_t __s0 = __p0; \
14411 int32x2_t __s1 = __p1; \
14412 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
14413 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14414 int32x4_t __ret; \
14415 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
14416 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
14417 __ret; \
14418 })
14419 #endif
14420
14421 #ifdef __LITTLE_ENDIAN__
14422 #define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
14423 int16x8_t __s0 = __p0; \
14424 int16x4_t __s1 = __p1; \
14425 int16x8_t __ret; \
14426 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
14427 __ret; \
14428 })
14429 #else
14430 #define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
14431 int16x8_t __s0 = __p0; \
14432 int16x4_t __s1 = __p1; \
14433 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
14434 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
14435 int16x8_t __ret; \
14436 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
14437 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
14438 __ret; \
14439 })
14440 #endif
14441
14442 #ifdef __LITTLE_ENDIAN__
14443 #define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
14444 uint32x2_t __s0 = __p0; \
14445 uint32x2_t __s1 = __p1; \
14446 uint32x2_t __ret; \
14447 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
14448 __ret; \
14449 })
14450 #else
14451 #define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
14452 uint32x2_t __s0 = __p0; \
14453 uint32x2_t __s1 = __p1; \
14454 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
14455 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14456 uint32x2_t __ret; \
14457 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
14458 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
14459 __ret; \
14460 })
14461 #endif
14462
14463 #ifdef __LITTLE_ENDIAN__
14464 #define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
14465 uint16x4_t __s0 = __p0; \
14466 uint16x4_t __s1 = __p1; \
14467 uint16x4_t __ret; \
14468 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
14469 __ret; \
14470 })
14471 #else
14472 #define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
14473 uint16x4_t __s0 = __p0; \
14474 uint16x4_t __s1 = __p1; \
14475 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
14476 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
14477 uint16x4_t __ret; \
14478 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
14479 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
14480 __ret; \
14481 })
14482 #endif
14483
14484 #ifdef __LITTLE_ENDIAN__
14485 #define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
14486 float32x2_t __s0 = __p0; \
14487 float32x2_t __s1 = __p1; \
14488 float32x2_t __ret; \
14489 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
14490 __ret; \
14491 })
14492 #else
14493 #define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
14494 float32x2_t __s0 = __p0; \
14495 float32x2_t __s1 = __p1; \
14496 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
14497 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14498 float32x2_t __ret; \
14499 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
14500 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
14501 __ret; \
14502 })
14503 #endif
14504
14505 #ifdef __LITTLE_ENDIAN__
14506 #define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
14507 int32x2_t __s0 = __p0; \
14508 int32x2_t __s1 = __p1; \
14509 int32x2_t __ret; \
14510 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
14511 __ret; \
14512 })
14513 #else
14514 #define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
14515 int32x2_t __s0 = __p0; \
14516 int32x2_t __s1 = __p1; \
14517 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
14518 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14519 int32x2_t __ret; \
14520 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
14521 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
14522 __ret; \
14523 })
14524 #endif
14525
14526 #ifdef __LITTLE_ENDIAN__
14527 #define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
14528 int16x4_t __s0 = __p0; \
14529 int16x4_t __s1 = __p1; \
14530 int16x4_t __ret; \
14531 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
14532 __ret; \
14533 })
14534 #else
14535 #define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
14536 int16x4_t __s0 = __p0; \
14537 int16x4_t __s1 = __p1; \
14538 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
14539 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
14540 int16x4_t __ret; \
14541 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
14542 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
14543 __ret; \
14544 })
14545 #endif
14546
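/* vmul[q]_n_*: multiply every lane of a vector by a scalar, implemented by
 * broadcasting the scalar and reusing the lane-wise multiply. */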
14547 #ifdef __LITTLE_ENDIAN__
14548 __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
14549 uint32x4_t __ret;
14550 __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
14551 return __ret;
14552 }
14553 #else
14554 __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
14555 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14556 uint32x4_t __ret;
14557 __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
14558 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14559 return __ret;
14560 }
14561 #endif
14562
14563 #ifdef __LITTLE_ENDIAN__
14564 __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
14565 uint16x8_t __ret;
14566 __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
14567 return __ret;
14568 }
14569 #else
14570 __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
14571 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14572 uint16x8_t __ret;
14573 __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
14574 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14575 return __ret;
14576 }
14577 #endif
14578
14579 #ifdef __LITTLE_ENDIAN__
14580 __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
14581 float32x4_t __ret;
14582 __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
14583 return __ret;
14584 }
14585 #else
14586 __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
14587 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14588 float32x4_t __ret;
14589 __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
14590 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14591 return __ret;
14592 }
14593 #endif
14594
14595 #ifdef __LITTLE_ENDIAN__
14596 __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
14597 int32x4_t __ret;
14598 __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
14599 return __ret;
14600 }
14601 #else
14602 __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
14603 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14604 int32x4_t __ret;
14605 __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
14606 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14607 return __ret;
14608 }
14609 #endif
14610
14611 #ifdef __LITTLE_ENDIAN__
14612 __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
14613 int16x8_t __ret;
14614 __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
14615 return __ret;
14616 }
14617 #else
14618 __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
14619 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14620 int16x8_t __ret;
14621 __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
14622 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14623 return __ret;
14624 }
14625 #endif
14626
14627 #ifdef __LITTLE_ENDIAN__
14628 __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
14629 uint32x2_t __ret;
14630 __ret = __p0 * (uint32x2_t) {__p1, __p1};
14631 return __ret;
14632 }
14633 #else
14634 __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
14635 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14636 uint32x2_t __ret;
14637 __ret = __rev0 * (uint32x2_t) {__p1, __p1};
14638 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14639 return __ret;
14640 }
14641 #endif
14642
14643 #ifdef __LITTLE_ENDIAN__
14644 __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
14645 uint16x4_t __ret;
14646 __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
14647 return __ret;
14648 }
14649 #else
14650 __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
14651 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14652 uint16x4_t __ret;
14653 __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
14654 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14655 return __ret;
14656 }
14657 #endif
14658
14659 #ifdef __LITTLE_ENDIAN__
14660 __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
14661 float32x2_t __ret;
14662 __ret = __p0 * (float32x2_t) {__p1, __p1};
14663 return __ret;
14664 }
14665 #else
14666 __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
14667 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14668 float32x2_t __ret;
14669 __ret = __rev0 * (float32x2_t) {__p1, __p1};
14670 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14671 return __ret;
14672 }
14673 #endif
14674
14675 #ifdef __LITTLE_ENDIAN__
14676 __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
14677 int32x2_t __ret;
14678 __ret = __p0 * (int32x2_t) {__p1, __p1};
14679 return __ret;
14680 }
14681 #else
14682 __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
14683 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14684 int32x2_t __ret;
14685 __ret = __rev0 * (int32x2_t) {__p1, __p1};
14686 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14687 return __ret;
14688 }
14689 #endif
14690
14691 #ifdef __LITTLE_ENDIAN__
14692 __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
14693 int16x4_t __ret;
14694 __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
14695 return __ret;
14696 }
14697 #else
14698 __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
14699 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14700 int16x4_t __ret;
14701 __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
14702 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14703 return __ret;
14704 }
14705 #endif
14706
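/* vmull_*: widening multiply -- multiply two 64-bit vectors and produce a
 * 128-bit result with double-width elements (vmull_p8 is a polynomial
 * multiply yielding poly16 lanes). */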
14707 #ifdef __LITTLE_ENDIAN__
14708 __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
14709 poly16x8_t __ret;
14710 __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
14711 return __ret;
14712 }
14713 #else
14714 __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
14715 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14716 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14717 poly16x8_t __ret;
14718 __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
14719 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14720 return __ret;
14721 }
14722 __ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
14723 poly16x8_t __ret;
14724 __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
14725 return __ret;
14726 }
14727 #endif
14728
14729 #ifdef __LITTLE_ENDIAN__
14730 __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
14731 uint16x8_t __ret;
14732 __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
14733 return __ret;
14734 }
14735 #else
14736 __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
14737 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14738 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14739 uint16x8_t __ret;
14740 __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
14741 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14742 return __ret;
14743 }
14744 __ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
14745 uint16x8_t __ret;
14746 __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
14747 return __ret;
14748 }
14749 #endif
14750
14751 #ifdef __LITTLE_ENDIAN__
14752 __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
14753 uint64x2_t __ret;
14754 __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
14755 return __ret;
14756 }
14757 #else
14758 __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
14759 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14760 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
14761 uint64x2_t __ret;
14762 __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
14763 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14764 return __ret;
14765 }
14766 __ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
14767 uint64x2_t __ret;
14768 __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
14769 return __ret;
14770 }
14771 #endif
14772
14773 #ifdef __LITTLE_ENDIAN__
14774 __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
14775 uint32x4_t __ret;
14776 __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
14777 return __ret;
14778 }
14779 #else
14780 __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
14781 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14782 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
14783 uint32x4_t __ret;
14784 __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
14785 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14786 return __ret;
14787 }
14788 __ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
14789 uint32x4_t __ret;
14790 __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
14791 return __ret;
14792 }
14793 #endif
14794
14795 #ifdef __LITTLE_ENDIAN__
14796 __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
14797 int16x8_t __ret;
14798 __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
14799 return __ret;
14800 }
14801 #else
14802 __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
14803 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
14804 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
14805 int16x8_t __ret;
14806 __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
14807 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
14808 return __ret;
14809 }
14810 __ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
14811 int16x8_t __ret;
14812 __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
14813 return __ret;
14814 }
14815 #endif
14816
14817 #ifdef __LITTLE_ENDIAN__
14818 __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
14819 int64x2_t __ret;
14820 __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
14821 return __ret;
14822 }
14823 #else
14824 __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
14825 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14826 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
14827 int64x2_t __ret;
14828 __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
14829 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14830 return __ret;
14831 }
14832 __ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
14833 int64x2_t __ret;
14834 __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
14835 return __ret;
14836 }
14837 #endif
14838
14839 #ifdef __LITTLE_ENDIAN__
14840 __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
14841 int32x4_t __ret;
14842 __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
14843 return __ret;
14844 }
14845 #else
14846 __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
14847 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14848 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
14849 int32x4_t __ret;
14850 __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
14851 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14852 return __ret;
14853 }
14854 __ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
14855 int32x4_t __ret;
14856 __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
14857 return __ret;
14858 }
14859 #endif
14860
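/* vmull_lane_*: widening multiply by a selected lane of the second operand. */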
14861 #ifdef __LITTLE_ENDIAN__
14862 #define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
14863 uint32x2_t __s0 = __p0; \
14864 uint32x2_t __s1 = __p1; \
14865 uint64x2_t __ret; \
14866 __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
14867 __ret; \
14868 })
14869 #else
14870 #define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
14871 uint32x2_t __s0 = __p0; \
14872 uint32x2_t __s1 = __p1; \
14873 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
14874 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14875 uint64x2_t __ret; \
14876 __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
14877 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
14878 __ret; \
14879 })
14880 #endif
14881
14882 #ifdef __LITTLE_ENDIAN__
14883 #define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
14884 uint16x4_t __s0 = __p0; \
14885 uint16x4_t __s1 = __p1; \
14886 uint32x4_t __ret; \
14887 __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
14888 __ret; \
14889 })
14890 #else
14891 #define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
14892 uint16x4_t __s0 = __p0; \
14893 uint16x4_t __s1 = __p1; \
14894 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
14895 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
14896 uint32x4_t __ret; \
14897 __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
14898 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
14899 __ret; \
14900 })
14901 #endif
14902
14903 #ifdef __LITTLE_ENDIAN__
14904 #define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
14905 int32x2_t __s0 = __p0; \
14906 int32x2_t __s1 = __p1; \
14907 int64x2_t __ret; \
14908 __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
14909 __ret; \
14910 })
14911 #else
14912 #define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
14913 int32x2_t __s0 = __p0; \
14914 int32x2_t __s1 = __p1; \
14915 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
14916 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
14917 int64x2_t __ret; \
14918 __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
14919 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
14920 __ret; \
14921 })
14922 #endif
14923
14924 #ifdef __LITTLE_ENDIAN__
14925 #define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
14926 int16x4_t __s0 = __p0; \
14927 int16x4_t __s1 = __p1; \
14928 int32x4_t __ret; \
14929 __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
14930 __ret; \
14931 })
14932 #else
14933 #define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
14934 int16x4_t __s0 = __p0; \
14935 int16x4_t __s1 = __p1; \
14936 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
14937 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
14938 int32x4_t __ret; \
14939 __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
14940 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
14941 __ret; \
14942 })
14943 #endif
14944
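/* vmull_n_*: widening multiply of a vector by a broadcast scalar. */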
14945 #ifdef __LITTLE_ENDIAN__
14946 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
14947 uint64x2_t __ret;
14948 __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
14949 return __ret;
14950 }
14951 #else
14952 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
14953 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14954 uint64x2_t __ret;
14955 __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
14956 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14957 return __ret;
14958 }
14959 __ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
14960 uint64x2_t __ret;
14961 __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
14962 return __ret;
14963 }
14964 #endif
14965
14966 #ifdef __LITTLE_ENDIAN__
14967 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
14968 uint32x4_t __ret;
14969 __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
14970 return __ret;
14971 }
14972 #else
14973 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
14974 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
14975 uint32x4_t __ret;
14976 __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
14977 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
14978 return __ret;
14979 }
14980 __ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
14981 uint32x4_t __ret;
14982 __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
14983 return __ret;
14984 }
14985 #endif
14986
14987 #ifdef __LITTLE_ENDIAN__
14988 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
14989 int64x2_t __ret;
14990 __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
14991 return __ret;
14992 }
14993 #else
14994 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
14995 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
14996 int64x2_t __ret;
14997 __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
14998 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
14999 return __ret;
15000 }
15001 __ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
15002 int64x2_t __ret;
15003 __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
15004 return __ret;
15005 }
15006 #endif
15007
15008 #ifdef __LITTLE_ENDIAN__
15009 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
15010 int32x4_t __ret;
15011 __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
15012 return __ret;
15013 }
15014 #else
15015 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
15016 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15017 int32x4_t __ret;
15018 __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
15019 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15020 return __ret;
15021 }
15022 __ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
15023 int32x4_t __ret;
15024 __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
15025 return __ret;
15026 }
15027 #endif
15028
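/* vmvn[q]_*: bitwise NOT of every lane. */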
15029 #ifdef __LITTLE_ENDIAN__
15030 __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
15031 poly8x8_t __ret;
15032 __ret = ~__p0;
15033 return __ret;
15034 }
15035 #else
15036 __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
15037 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15038 poly8x8_t __ret;
15039 __ret = ~__rev0;
15040 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15041 return __ret;
15042 }
15043 #endif
15044
15045 #ifdef __LITTLE_ENDIAN__
15046 __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
15047 poly8x16_t __ret;
15048 __ret = ~__p0;
15049 return __ret;
15050 }
15051 #else
15052 __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
15053 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15054 poly8x16_t __ret;
15055 __ret = ~__rev0;
15056 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15057 return __ret;
15058 }
15059 #endif
15060
15061 #ifdef __LITTLE_ENDIAN__
15062 __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
15063 uint8x16_t __ret;
15064 __ret = ~__p0;
15065 return __ret;
15066 }
15067 #else
15068 __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
15069 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15070 uint8x16_t __ret;
15071 __ret = ~__rev0;
15072 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15073 return __ret;
15074 }
15075 #endif
15076
15077 #ifdef __LITTLE_ENDIAN__
15078 __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
15079 uint32x4_t __ret;
15080 __ret = ~__p0;
15081 return __ret;
15082 }
15083 #else
15084 __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
15085 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15086 uint32x4_t __ret;
15087 __ret = ~__rev0;
15088 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15089 return __ret;
15090 }
15091 #endif
15092
15093 #ifdef __LITTLE_ENDIAN__
15094 __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
15095 uint16x8_t __ret;
15096 __ret = ~__p0;
15097 return __ret;
15098 }
15099 #else
15100 __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
15101 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15102 uint16x8_t __ret;
15103 __ret = ~__rev0;
15104 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15105 return __ret;
15106 }
15107 #endif
15108
15109 #ifdef __LITTLE_ENDIAN__
15110 __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
15111 int8x16_t __ret;
15112 __ret = ~__p0;
15113 return __ret;
15114 }
15115 #else
15116 __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
15117 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15118 int8x16_t __ret;
15119 __ret = ~__rev0;
15120 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15121 return __ret;
15122 }
15123 #endif
15124
15125 #ifdef __LITTLE_ENDIAN__
15126 __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
15127 int32x4_t __ret;
15128 __ret = ~__p0;
15129 return __ret;
15130 }
15131 #else
15132 __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
15133 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15134 int32x4_t __ret;
15135 __ret = ~__rev0;
15136 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15137 return __ret;
15138 }
15139 #endif
15140
15141 #ifdef __LITTLE_ENDIAN__
15142 __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
15143 int16x8_t __ret;
15144 __ret = ~__p0;
15145 return __ret;
15146 }
15147 #else
15148 __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
15149 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15150 int16x8_t __ret;
15151 __ret = ~__rev0;
15152 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15153 return __ret;
15154 }
15155 #endif
15156
15157 #ifdef __LITTLE_ENDIAN__
15158 __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
15159 uint8x8_t __ret;
15160 __ret = ~__p0;
15161 return __ret;
15162 }
15163 #else
15164 __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
15165 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15166 uint8x8_t __ret;
15167 __ret = ~__rev0;
15168 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15169 return __ret;
15170 }
15171 #endif
15172
15173 #ifdef __LITTLE_ENDIAN__
15174 __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
15175 uint32x2_t __ret;
15176 __ret = ~__p0;
15177 return __ret;
15178 }
15179 #else
15180 __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
15181 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15182 uint32x2_t __ret;
15183 __ret = ~__rev0;
15184 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15185 return __ret;
15186 }
15187 #endif
15188
15189 #ifdef __LITTLE_ENDIAN__
15190 __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
15191 uint16x4_t __ret;
15192 __ret = ~__p0;
15193 return __ret;
15194 }
15195 #else
15196 __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
15197 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15198 uint16x4_t __ret;
15199 __ret = ~__rev0;
15200 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15201 return __ret;
15202 }
15203 #endif
15204
15205 #ifdef __LITTLE_ENDIAN__
15206 __ai int8x8_t vmvn_s8(int8x8_t __p0) {
15207 int8x8_t __ret;
15208 __ret = ~__p0;
15209 return __ret;
15210 }
15211 #else
15212 __ai int8x8_t vmvn_s8(int8x8_t __p0) {
15213 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15214 int8x8_t __ret;
15215 __ret = ~__rev0;
15216 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15217 return __ret;
15218 }
15219 #endif
15220
15221 #ifdef __LITTLE_ENDIAN__
15222 __ai int32x2_t vmvn_s32(int32x2_t __p0) {
15223 int32x2_t __ret;
15224 __ret = ~__p0;
15225 return __ret;
15226 }
15227 #else
15228 __ai int32x2_t vmvn_s32(int32x2_t __p0) {
15229 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15230 int32x2_t __ret;
15231 __ret = ~__rev0;
15232 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15233 return __ret;
15234 }
15235 #endif
15236
15237 #ifdef __LITTLE_ENDIAN__
15238 __ai int16x4_t vmvn_s16(int16x4_t __p0) {
15239 int16x4_t __ret;
15240 __ret = ~__p0;
15241 return __ret;
15242 }
15243 #else
15244 __ai int16x4_t vmvn_s16(int16x4_t __p0) {
15245 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15246 int16x4_t __ret;
15247 __ret = ~__rev0;
15248 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15249 return __ret;
15250 }
15251 #endif
15252
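/* vneg[q]_*: negate every lane (integer and float variants). */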
15253 #ifdef __LITTLE_ENDIAN__
15254 __ai int8x16_t vnegq_s8(int8x16_t __p0) {
15255 int8x16_t __ret;
15256 __ret = -__p0;
15257 return __ret;
15258 }
15259 #else
15260 __ai int8x16_t vnegq_s8(int8x16_t __p0) {
15261 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15262 int8x16_t __ret;
15263 __ret = -__rev0;
15264 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15265 return __ret;
15266 }
15267 #endif
15268
15269 #ifdef __LITTLE_ENDIAN__
15270 __ai float32x4_t vnegq_f32(float32x4_t __p0) {
15271 float32x4_t __ret;
15272 __ret = -__p0;
15273 return __ret;
15274 }
15275 #else
15276 __ai float32x4_t vnegq_f32(float32x4_t __p0) {
15277 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15278 float32x4_t __ret;
15279 __ret = -__rev0;
15280 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15281 return __ret;
15282 }
15283 #endif
15284
15285 #ifdef __LITTLE_ENDIAN__
15286 __ai int32x4_t vnegq_s32(int32x4_t __p0) {
15287 int32x4_t __ret;
15288 __ret = -__p0;
15289 return __ret;
15290 }
15291 #else
15292 __ai int32x4_t vnegq_s32(int32x4_t __p0) {
15293 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15294 int32x4_t __ret;
15295 __ret = -__rev0;
15296 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15297 return __ret;
15298 }
15299 #endif
15300
15301 #ifdef __LITTLE_ENDIAN__
15302 __ai int16x8_t vnegq_s16(int16x8_t __p0) {
15303 int16x8_t __ret;
15304 __ret = -__p0;
15305 return __ret;
15306 }
15307 #else
15308 __ai int16x8_t vnegq_s16(int16x8_t __p0) {
15309 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15310 int16x8_t __ret;
15311 __ret = -__rev0;
15312 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15313 return __ret;
15314 }
15315 #endif
15316
15317 #ifdef __LITTLE_ENDIAN__
15318 __ai int8x8_t vneg_s8(int8x8_t __p0) {
15319 int8x8_t __ret;
15320 __ret = -__p0;
15321 return __ret;
15322 }
15323 #else
15324 __ai int8x8_t vneg_s8(int8x8_t __p0) {
15325 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15326 int8x8_t __ret;
15327 __ret = -__rev0;
15328 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15329 return __ret;
15330 }
15331 #endif
15332
15333 #ifdef __LITTLE_ENDIAN__
15334 __ai float32x2_t vneg_f32(float32x2_t __p0) {
15335 float32x2_t __ret;
15336 __ret = -__p0;
15337 return __ret;
15338 }
15339 #else
15340 __ai float32x2_t vneg_f32(float32x2_t __p0) {
15341 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15342 float32x2_t __ret;
15343 __ret = -__rev0;
15344 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15345 return __ret;
15346 }
15347 #endif
15348
15349 #ifdef __LITTLE_ENDIAN__
15350 __ai int32x2_t vneg_s32(int32x2_t __p0) {
15351 int32x2_t __ret;
15352 __ret = -__p0;
15353 return __ret;
15354 }
15355 #else
15356 __ai int32x2_t vneg_s32(int32x2_t __p0) {
15357 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15358 int32x2_t __ret;
15359 __ret = -__rev0;
15360 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15361 return __ret;
15362 }
15363 #endif
15364
15365 #ifdef __LITTLE_ENDIAN__
15366 __ai int16x4_t vneg_s16(int16x4_t __p0) {
15367 int16x4_t __ret;
15368 __ret = -__p0;
15369 return __ret;
15370 }
15371 #else
15372 __ai int16x4_t vneg_s16(int16x4_t __p0) {
15373 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15374 int16x4_t __ret;
15375 __ret = -__rev0;
15376 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15377 return __ret;
15378 }
15379 #endif
15380
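/*
 * vorn/vornq: bitwise OR-NOT, __ret[i] = __p0[i] | ~__p1[i]. The 64x1
 * variants carry a single lane, so no reversal is needed and the two
 * endianness branches are identical for them.
 */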
15381 #ifdef __LITTLE_ENDIAN__
15382 __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15383 uint8x16_t __ret;
15384 __ret = __p0 | ~__p1;
15385 return __ret;
15386 }
15387 #else
15388 __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15389 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15390 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15391 uint8x16_t __ret;
15392 __ret = __rev0 | ~__rev1;
15393 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15394 return __ret;
15395 }
15396 #endif
15397
15398 #ifdef __LITTLE_ENDIAN__
15399 __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15400 uint32x4_t __ret;
15401 __ret = __p0 | ~__p1;
15402 return __ret;
15403 }
15404 #else
15405 __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15406 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15407 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15408 uint32x4_t __ret;
15409 __ret = __rev0 | ~__rev1;
15410 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15411 return __ret;
15412 }
15413 #endif
15414
15415 #ifdef __LITTLE_ENDIAN__
15416 __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15417 uint64x2_t __ret;
15418 __ret = __p0 | ~__p1;
15419 return __ret;
15420 }
15421 #else
15422 __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15423 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15424 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15425 uint64x2_t __ret;
15426 __ret = __rev0 | ~__rev1;
15427 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15428 return __ret;
15429 }
15430 #endif
15431
15432 #ifdef __LITTLE_ENDIAN__
15433 __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15434 uint16x8_t __ret;
15435 __ret = __p0 | ~__p1;
15436 return __ret;
15437 }
15438 #else
15439 __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15440 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15441 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15442 uint16x8_t __ret;
15443 __ret = __rev0 | ~__rev1;
15444 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15445 return __ret;
15446 }
15447 #endif
15448
15449 #ifdef __LITTLE_ENDIAN__
15450 __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
15451 int8x16_t __ret;
15452 __ret = __p0 | ~__p1;
15453 return __ret;
15454 }
15455 #else
15456 __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
15457 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15458 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15459 int8x16_t __ret;
15460 __ret = __rev0 | ~__rev1;
15461 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15462 return __ret;
15463 }
15464 #endif
15465
15466 #ifdef __LITTLE_ENDIAN__
15467 __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
15468 int32x4_t __ret;
15469 __ret = __p0 | ~__p1;
15470 return __ret;
15471 }
15472 #else
15473 __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
15474 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15475 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15476 int32x4_t __ret;
15477 __ret = __rev0 | ~__rev1;
15478 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15479 return __ret;
15480 }
15481 #endif
15482
15483 #ifdef __LITTLE_ENDIAN__
15484 __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
15485 int64x2_t __ret;
15486 __ret = __p0 | ~__p1;
15487 return __ret;
15488 }
15489 #else
15490 __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
15491 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15492 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15493 int64x2_t __ret;
15494 __ret = __rev0 | ~__rev1;
15495 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15496 return __ret;
15497 }
15498 #endif
15499
15500 #ifdef __LITTLE_ENDIAN__
15501 __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
15502 int16x8_t __ret;
15503 __ret = __p0 | ~__p1;
15504 return __ret;
15505 }
15506 #else
15507 __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
15508 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15509 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15510 int16x8_t __ret;
15511 __ret = __rev0 | ~__rev1;
15512 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15513 return __ret;
15514 }
15515 #endif
15516
15517 #ifdef __LITTLE_ENDIAN__
15518 __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
15519 uint8x8_t __ret;
15520 __ret = __p0 | ~__p1;
15521 return __ret;
15522 }
15523 #else
15524 __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
15525 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15526 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15527 uint8x8_t __ret;
15528 __ret = __rev0 | ~__rev1;
15529 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15530 return __ret;
15531 }
15532 #endif
15533
15534 #ifdef __LITTLE_ENDIAN__
15535 __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
15536 uint32x2_t __ret;
15537 __ret = __p0 | ~__p1;
15538 return __ret;
15539 }
15540 #else
15541 __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
15542 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15543 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15544 uint32x2_t __ret;
15545 __ret = __rev0 | ~__rev1;
15546 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15547 return __ret;
15548 }
15549 #endif
15550
15551 #ifdef __LITTLE_ENDIAN__
15552 __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
15553 uint64x1_t __ret;
15554 __ret = __p0 | ~__p1;
15555 return __ret;
15556 }
15557 #else
15558 __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
15559 uint64x1_t __ret;
15560 __ret = __p0 | ~__p1;
15561 return __ret;
15562 }
15563 #endif
15564
15565 #ifdef __LITTLE_ENDIAN__
15566 __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
15567 uint16x4_t __ret;
15568 __ret = __p0 | ~__p1;
15569 return __ret;
15570 }
15571 #else
15572 __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
15573 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15574 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15575 uint16x4_t __ret;
15576 __ret = __rev0 | ~__rev1;
15577 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15578 return __ret;
15579 }
15580 #endif
15581
15582 #ifdef __LITTLE_ENDIAN__
15583 __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
15584 int8x8_t __ret;
15585 __ret = __p0 | ~__p1;
15586 return __ret;
15587 }
15588 #else
15589 __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
15590 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15591 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15592 int8x8_t __ret;
15593 __ret = __rev0 | ~__rev1;
15594 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15595 return __ret;
15596 }
15597 #endif
15598
15599 #ifdef __LITTLE_ENDIAN__
15600 __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
15601 int32x2_t __ret;
15602 __ret = __p0 | ~__p1;
15603 return __ret;
15604 }
15605 #else
15606 __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
15607 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15608 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15609 int32x2_t __ret;
15610 __ret = __rev0 | ~__rev1;
15611 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15612 return __ret;
15613 }
15614 #endif
15615
15616 #ifdef __LITTLE_ENDIAN__
15617 __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
15618 int64x1_t __ret;
15619 __ret = __p0 | ~__p1;
15620 return __ret;
15621 }
15622 #else
15623 __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
15624 int64x1_t __ret;
15625 __ret = __p0 | ~__p1;
15626 return __ret;
15627 }
15628 #endif
15629
15630 #ifdef __LITTLE_ENDIAN__
15631 __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
15632 int16x4_t __ret;
15633 __ret = __p0 | ~__p1;
15634 return __ret;
15635 }
15636 #else
15637 __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
15638 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15639 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15640 int16x4_t __ret;
15641 __ret = __rev0 | ~__rev1;
15642 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15643 return __ret;
15644 }
15645 #endif
15646
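/*
 * vorr/vorrq: lanewise bitwise OR, __ret[i] = __p0[i] | __p1[i].
 */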
15647 #ifdef __LITTLE_ENDIAN__
15648 __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15649 uint8x16_t __ret;
15650 __ret = __p0 | __p1;
15651 return __ret;
15652 }
15653 #else
15654 __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
15655 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15656 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15657 uint8x16_t __ret;
15658 __ret = __rev0 | __rev1;
15659 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15660 return __ret;
15661 }
15662 #endif
15663
15664 #ifdef __LITTLE_ENDIAN__
15665 __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15666 uint32x4_t __ret;
15667 __ret = __p0 | __p1;
15668 return __ret;
15669 }
15670 #else
15671 __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
15672 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15673 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15674 uint32x4_t __ret;
15675 __ret = __rev0 | __rev1;
15676 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15677 return __ret;
15678 }
15679 #endif
15680
15681 #ifdef __LITTLE_ENDIAN__
15682 __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15683 uint64x2_t __ret;
15684 __ret = __p0 | __p1;
15685 return __ret;
15686 }
15687 #else
15688 __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
15689 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15690 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15691 uint64x2_t __ret;
15692 __ret = __rev0 | __rev1;
15693 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15694 return __ret;
15695 }
15696 #endif
15697
15698 #ifdef __LITTLE_ENDIAN__
15699 __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15700 uint16x8_t __ret;
15701 __ret = __p0 | __p1;
15702 return __ret;
15703 }
15704 #else
15705 __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
15706 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15707 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15708 uint16x8_t __ret;
15709 __ret = __rev0 | __rev1;
15710 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15711 return __ret;
15712 }
15713 #endif
15714
15715 #ifdef __LITTLE_ENDIAN__
15716 __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
15717 int8x16_t __ret;
15718 __ret = __p0 | __p1;
15719 return __ret;
15720 }
15721 #else
15722 __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
15723 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15724 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15725 int8x16_t __ret;
15726 __ret = __rev0 | __rev1;
15727 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15728 return __ret;
15729 }
15730 #endif
15731
15732 #ifdef __LITTLE_ENDIAN__
15733 __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
15734 int32x4_t __ret;
15735 __ret = __p0 | __p1;
15736 return __ret;
15737 }
15738 #else
15739 __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
15740 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15741 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15742 int32x4_t __ret;
15743 __ret = __rev0 | __rev1;
15744 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15745 return __ret;
15746 }
15747 #endif
15748
15749 #ifdef __LITTLE_ENDIAN__
15750 __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
15751 int64x2_t __ret;
15752 __ret = __p0 | __p1;
15753 return __ret;
15754 }
15755 #else
15756 __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
15757 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15758 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15759 int64x2_t __ret;
15760 __ret = __rev0 | __rev1;
15761 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15762 return __ret;
15763 }
15764 #endif
15765
15766 #ifdef __LITTLE_ENDIAN__
15767 __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
15768 int16x8_t __ret;
15769 __ret = __p0 | __p1;
15770 return __ret;
15771 }
15772 #else
15773 __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
15774 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15775 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15776 int16x8_t __ret;
15777 __ret = __rev0 | __rev1;
15778 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15779 return __ret;
15780 }
15781 #endif
15782
15783 #ifdef __LITTLE_ENDIAN__
15784 __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
15785 uint8x8_t __ret;
15786 __ret = __p0 | __p1;
15787 return __ret;
15788 }
15789 #else
15790 __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
15791 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15792 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15793 uint8x8_t __ret;
15794 __ret = __rev0 | __rev1;
15795 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15796 return __ret;
15797 }
15798 #endif
15799
15800 #ifdef __LITTLE_ENDIAN__
15801 __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
15802 uint32x2_t __ret;
15803 __ret = __p0 | __p1;
15804 return __ret;
15805 }
15806 #else
15807 __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
15808 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15809 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15810 uint32x2_t __ret;
15811 __ret = __rev0 | __rev1;
15812 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15813 return __ret;
15814 }
15815 #endif
15816
15817 #ifdef __LITTLE_ENDIAN__
15818 __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
15819 uint64x1_t __ret;
15820 __ret = __p0 | __p1;
15821 return __ret;
15822 }
15823 #else
15824 __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
15825 uint64x1_t __ret;
15826 __ret = __p0 | __p1;
15827 return __ret;
15828 }
15829 #endif
15830
15831 #ifdef __LITTLE_ENDIAN__
15832 __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
15833 uint16x4_t __ret;
15834 __ret = __p0 | __p1;
15835 return __ret;
15836 }
15837 #else
15838 __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
15839 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15840 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15841 uint16x4_t __ret;
15842 __ret = __rev0 | __rev1;
15843 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15844 return __ret;
15845 }
15846 #endif
15847
15848 #ifdef __LITTLE_ENDIAN__
15849 __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
15850 int8x8_t __ret;
15851 __ret = __p0 | __p1;
15852 return __ret;
15853 }
15854 #else
15855 __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
15856 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15857 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15858 int8x8_t __ret;
15859 __ret = __rev0 | __rev1;
15860 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15861 return __ret;
15862 }
15863 #endif
15864
15865 #ifdef __LITTLE_ENDIAN__
15866 __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
15867 int32x2_t __ret;
15868 __ret = __p0 | __p1;
15869 return __ret;
15870 }
15871 #else
15872 __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
15873 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15874 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
15875 int32x2_t __ret;
15876 __ret = __rev0 | __rev1;
15877 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15878 return __ret;
15879 }
15880 #endif
15881
15882 #ifdef __LITTLE_ENDIAN__
15883 __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
15884 int64x1_t __ret;
15885 __ret = __p0 | __p1;
15886 return __ret;
15887 }
15888 #else
15889 __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
15890 int64x1_t __ret;
15891 __ret = __p0 | __p1;
15892 return __ret;
15893 }
15894 #endif
15895
15896 #ifdef __LITTLE_ENDIAN__
15897 __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
15898 int16x4_t __ret;
15899 __ret = __p0 | __p1;
15900 return __ret;
15901 }
15902 #else
15903 __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
15904 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15905 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15906 int16x4_t __ret;
15907 __ret = __rev0 | __rev1;
15908 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15909 return __ret;
15910 }
15911 #endif
15912
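/*
 * vpadal/vpadalq: pairwise add and accumulate long. Adjacent lane pairs of
 * the narrow second operand are summed and accumulated into the wider lanes
 * of the first operand. The trailing constant passed to the builtin is an
 * internal type code that selects the element type.
 */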
15913 #ifdef __LITTLE_ENDIAN__
15914 __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
15915 uint16x8_t __ret;
15916 __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
15917 return __ret;
15918 }
15919 #else
15920 __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
15921 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15922 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15923 uint16x8_t __ret;
15924 __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
15925 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15926 return __ret;
15927 }
15928 #endif
15929
15930 #ifdef __LITTLE_ENDIAN__
15931 __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
15932 uint64x2_t __ret;
15933 __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
15934 return __ret;
15935 }
15936 #else
15937 __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
15938 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15939 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15940 uint64x2_t __ret;
15941 __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
15942 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15943 return __ret;
15944 }
15945 #endif
15946
15947 #ifdef __LITTLE_ENDIAN__
15948 __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
15949 uint32x4_t __ret;
15950 __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
15951 return __ret;
15952 }
15953 #else
15954 __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
15955 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
15956 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
15957 uint32x4_t __ret;
15958 __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
15959 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
15960 return __ret;
15961 }
15962 #endif
15963
15964 #ifdef __LITTLE_ENDIAN__
15965 __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
15966 int16x8_t __ret;
15967 __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
15968 return __ret;
15969 }
15970 #else
15971 __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
15972 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
15973 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
15974 int16x8_t __ret;
15975 __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
15976 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
15977 return __ret;
15978 }
15979 #endif
15980
15981 #ifdef __LITTLE_ENDIAN__
15982 __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
15983 int64x2_t __ret;
15984 __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
15985 return __ret;
15986 }
15987 #else
15988 __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
15989 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
15990 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
15991 int64x2_t __ret;
15992 __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
15993 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
15994 return __ret;
15995 }
15996 #endif
15997
15998 #ifdef __LITTLE_ENDIAN__
15999 __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
16000 int32x4_t __ret;
16001 __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
16002 return __ret;
16003 }
16004 #else
16005 __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
16006 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16007 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16008 int32x4_t __ret;
16009 __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
16010 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16011 return __ret;
16012 }
16013 #endif
16014
16015 #ifdef __LITTLE_ENDIAN__
16016 __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
16017 uint16x4_t __ret;
16018 __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16019 return __ret;
16020 }
16021 #else
16022 __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
16023 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16024 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16025 uint16x4_t __ret;
16026 __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16027 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16028 return __ret;
16029 }
16030 #endif
16031
16032 #ifdef __LITTLE_ENDIAN__
16033 __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
16034 uint64x1_t __ret;
16035 __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
16036 return __ret;
16037 }
16038 #else
16039 __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
16040 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16041 uint64x1_t __ret;
16042 __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
16043 return __ret;
16044 }
16045 #endif
16046
16047 #ifdef __LITTLE_ENDIAN__
16048 __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
16049 uint32x2_t __ret;
16050 __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16051 return __ret;
16052 }
16053 #else
16054 __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
16055 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16056 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16057 uint32x2_t __ret;
16058 __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16059 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16060 return __ret;
16061 }
16062 #endif
16063
16064 #ifdef __LITTLE_ENDIAN__
16065 __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
16066 int16x4_t __ret;
16067 __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16068 return __ret;
16069 }
16070 #else
16071 __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
16072 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16073 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16074 int16x4_t __ret;
16075 __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16076 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16077 return __ret;
16078 }
16079 #endif
16080
16081 #ifdef __LITTLE_ENDIAN__
16082 __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
16083 int64x1_t __ret;
16084 __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
16085 return __ret;
16086 }
16087 #else
16088 __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
16089 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16090 int64x1_t __ret;
16091 __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
16092 return __ret;
16093 }
16094 #endif
16095
16096 #ifdef __LITTLE_ENDIAN__
16097 __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
16098 int32x2_t __ret;
16099 __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16100 return __ret;
16101 }
16102 #else
16103 __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
16104 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16105 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16106 int32x2_t __ret;
16107 __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16108 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16109 return __ret;
16110 }
16111 #endif
16112
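/*
 * vpadd: pairwise add. Adjacent pairs from __p0 fill the low half of the
 * result and adjacent pairs from __p1 fill the high half. Illustrative
 * values: vpadd_s16({1,2,3,4}, {5,6,7,8}) yields {3, 7, 11, 15}.
 */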
16113 #ifdef __LITTLE_ENDIAN__
16114 __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16115 uint8x8_t __ret;
16116 __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16117 return __ret;
16118 }
16119 #else
16120 __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16121 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16122 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16123 uint8x8_t __ret;
16124 __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16125 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16126 return __ret;
16127 }
16128 #endif
16129
16130 #ifdef __LITTLE_ENDIAN__
16131 __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16132 uint32x2_t __ret;
16133 __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16134 return __ret;
16135 }
16136 #else
16137 __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16138 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16139 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16140 uint32x2_t __ret;
16141 __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16142 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16143 return __ret;
16144 }
16145 #endif
16146
16147 #ifdef __LITTLE_ENDIAN__
16148 __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16149 uint16x4_t __ret;
16150 __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16151 return __ret;
16152 }
16153 #else
16154 __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16155 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16156 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16157 uint16x4_t __ret;
16158 __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16159 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16160 return __ret;
16161 }
16162 #endif
16163
16164 #ifdef __LITTLE_ENDIAN__
16165 __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
16166 int8x8_t __ret;
16167 __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16168 return __ret;
16169 }
16170 #else
16171 __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
16172 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16173 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16174 int8x8_t __ret;
16175 __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
16176 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16177 return __ret;
16178 }
16179 #endif
16180
16181 #ifdef __LITTLE_ENDIAN__
16182 __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
16183 float32x2_t __ret;
16184 __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
16185 return __ret;
16186 }
16187 #else
16188 __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
16189 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16190 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16191 float32x2_t __ret;
16192 __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
16193 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16194 return __ret;
16195 }
16196 #endif
16197
16198 #ifdef __LITTLE_ENDIAN__
16199 __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
16200 int32x2_t __ret;
16201 __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16202 return __ret;
16203 }
16204 #else
16205 __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
16206 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16207 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16208 int32x2_t __ret;
16209 __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16210 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16211 return __ret;
16212 }
16213 #endif
16214
16215 #ifdef __LITTLE_ENDIAN__
16216 __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
16217 int16x4_t __ret;
16218 __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16219 return __ret;
16220 }
16221 #else
16222 __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
16223 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16224 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16225 int16x4_t __ret;
16226 __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16227 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16228 return __ret;
16229 }
16230 #endif
16231
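/*
 * vpaddl/vpaddlq: pairwise add long. Adjacent lane pairs of the single
 * operand are summed into a vector with half as many lanes of twice the
 * width (e.g. uint8x8_t widens to uint16x4_t).
 */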
16232 #ifdef __LITTLE_ENDIAN__
16233 __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
16234 uint16x8_t __ret;
16235 __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
16236 return __ret;
16237 }
16238 #else
16239 __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
16240 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16241 uint16x8_t __ret;
16242 __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
16243 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16244 return __ret;
16245 }
16246 #endif
16247
16248 #ifdef __LITTLE_ENDIAN__
16249 __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
16250 uint64x2_t __ret;
16251 __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
16252 return __ret;
16253 }
16254 #else
16255 __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
16256 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16257 uint64x2_t __ret;
16258 __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
16259 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16260 return __ret;
16261 }
16262 #endif
16263
16264 #ifdef __LITTLE_ENDIAN__
16265 __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
16266 uint32x4_t __ret;
16267 __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
16268 return __ret;
16269 }
16270 #else
16271 __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
16272 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16273 uint32x4_t __ret;
16274 __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
16275 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16276 return __ret;
16277 }
16278 #endif
16279
16280 #ifdef __LITTLE_ENDIAN__
16281 __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
16282 int16x8_t __ret;
16283 __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
16284 return __ret;
16285 }
16286 #else
16287 __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
16288 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16289 int16x8_t __ret;
16290 __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
16291 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16292 return __ret;
16293 }
16294 #endif
16295
16296 #ifdef __LITTLE_ENDIAN__
16297 __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
16298 int64x2_t __ret;
16299 __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
16300 return __ret;
16301 }
16302 #else
16303 __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
16304 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16305 int64x2_t __ret;
16306 __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
16307 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16308 return __ret;
16309 }
16310 #endif
16311
16312 #ifdef __LITTLE_ENDIAN__
16313 __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
16314 int32x4_t __ret;
16315 __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
16316 return __ret;
16317 }
16318 #else
16319 __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
16320 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16321 int32x4_t __ret;
16322 __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
16323 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16324 return __ret;
16325 }
16326 #endif
16327
16328 #ifdef __LITTLE_ENDIAN__
16329 __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
16330 uint16x4_t __ret;
16331 __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
16332 return __ret;
16333 }
16334 #else
16335 __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
16336 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16337 uint16x4_t __ret;
16338 __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
16339 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16340 return __ret;
16341 }
16342 #endif
16343
16344 #ifdef __LITTLE_ENDIAN__
16345 __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
16346 uint64x1_t __ret;
16347 __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
16348 return __ret;
16349 }
16350 #else
16351 __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
16352 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16353 uint64x1_t __ret;
16354 __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
16355 return __ret;
16356 }
16357 #endif
16358
16359 #ifdef __LITTLE_ENDIAN__
16360 __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
16361 uint32x2_t __ret;
16362 __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
16363 return __ret;
16364 }
16365 #else
16366 __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
16367 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16368 uint32x2_t __ret;
16369 __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
16370 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16371 return __ret;
16372 }
16373 #endif
16374
16375 #ifdef __LITTLE_ENDIAN__
16376 __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
16377 int16x4_t __ret;
16378 __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
16379 return __ret;
16380 }
16381 #else
16382 __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
16383 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16384 int16x4_t __ret;
16385 __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
16386 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16387 return __ret;
16388 }
16389 #endif
16390
16391 #ifdef __LITTLE_ENDIAN__
16392 __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
16393 int64x1_t __ret;
16394 __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
16395 return __ret;
16396 }
16397 #else
16398 __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
16399 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16400 int64x1_t __ret;
16401 __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
16402 return __ret;
16403 }
16404 #endif
16405
16406 #ifdef __LITTLE_ENDIAN__
16407 __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
16408 int32x2_t __ret;
16409 __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
16410 return __ret;
16411 }
16412 #else
16413 __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
16414 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16415 int32x2_t __ret;
16416 __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
16417 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16418 return __ret;
16419 }
16420 #endif
16421
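/*
 * vpmax: pairwise maximum. Each result lane is the larger element of an
 * adjacent pair taken from __p0 (low half) or __p1 (high half).
 */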
16422 #ifdef __LITTLE_ENDIAN__
16423 __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
16424 uint8x8_t __ret;
16425 __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16426 return __ret;
16427 }
16428 #else
16429 __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
16430 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16431 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16432 uint8x8_t __ret;
16433 __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16434 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16435 return __ret;
16436 }
16437 #endif
16438
16439 #ifdef __LITTLE_ENDIAN__
16440 __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
16441 uint32x2_t __ret;
16442 __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16443 return __ret;
16444 }
16445 #else
16446 __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
16447 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16448 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16449 uint32x2_t __ret;
16450 __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16451 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16452 return __ret;
16453 }
16454 #endif
16455
16456 #ifdef __LITTLE_ENDIAN__
16457 __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
16458 uint16x4_t __ret;
16459 __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16460 return __ret;
16461 }
16462 #else
16463 __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
16464 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16465 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16466 uint16x4_t __ret;
16467 __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16468 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16469 return __ret;
16470 }
16471 #endif
16472
16473 #ifdef __LITTLE_ENDIAN__
16474 __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
16475 int8x8_t __ret;
16476 __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16477 return __ret;
16478 }
16479 #else
16480 __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
16481 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16482 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16483 int8x8_t __ret;
16484 __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
16485 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16486 return __ret;
16487 }
16488 #endif
16489
16490 #ifdef __LITTLE_ENDIAN__
16491 __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
16492 float32x2_t __ret;
16493 __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
16494 return __ret;
16495 }
16496 #else
16497 __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
16498 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16499 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16500 float32x2_t __ret;
16501 __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
16502 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16503 return __ret;
16504 }
16505 #endif
16506
16507 #ifdef __LITTLE_ENDIAN__
16508 __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
16509 int32x2_t __ret;
16510 __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16511 return __ret;
16512 }
16513 #else
16514 __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
16515 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16516 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16517 int32x2_t __ret;
16518 __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16519 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16520 return __ret;
16521 }
16522 #endif
16523
16524 #ifdef __LITTLE_ENDIAN__
16525 __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
16526 int16x4_t __ret;
16527 __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16528 return __ret;
16529 }
16530 #else
16531 __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
16532 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16533 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16534 int16x4_t __ret;
16535 __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16536 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16537 return __ret;
16538 }
16539 #endif
16540
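/*
 * vpmin: pairwise minimum, the counterpart of vpmax: each result lane is the
 * smaller element of an adjacent pair from __p0 or __p1.
 */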
16541 #ifdef __LITTLE_ENDIAN__
16542 __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
16543 uint8x8_t __ret;
16544 __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16545 return __ret;
16546 }
16547 #else
16548 __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
16549 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16550 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16551 uint8x8_t __ret;
16552 __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16553 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16554 return __ret;
16555 }
16556 #endif
16557
16558 #ifdef __LITTLE_ENDIAN__
16559 __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
16560 uint32x2_t __ret;
16561 __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16562 return __ret;
16563 }
16564 #else
16565 __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
16566 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16567 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16568 uint32x2_t __ret;
16569 __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16570 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16571 return __ret;
16572 }
16573 #endif
16574
16575 #ifdef __LITTLE_ENDIAN__
16576 __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
16577 uint16x4_t __ret;
16578 __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16579 return __ret;
16580 }
16581 #else
16582 __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
16583 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16584 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16585 uint16x4_t __ret;
16586 __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16587 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16588 return __ret;
16589 }
16590 #endif
16591
16592 #ifdef __LITTLE_ENDIAN__
16593 __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
16594 int8x8_t __ret;
16595 __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16596 return __ret;
16597 }
16598 #else
16599 __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
16600 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16601 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16602 int8x8_t __ret;
16603 __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
16604 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16605 return __ret;
16606 }
16607 #endif
16608
16609 #ifdef __LITTLE_ENDIAN__
16610 __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
16611 float32x2_t __ret;
16612 __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
16613 return __ret;
16614 }
16615 #else
16616 __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
16617 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16618 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16619 float32x2_t __ret;
16620 __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
16621 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16622 return __ret;
16623 }
16624 #endif
16625
16626 #ifdef __LITTLE_ENDIAN__
16627 __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
16628 int32x2_t __ret;
16629 __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16630 return __ret;
16631 }
16632 #else
16633 __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
16634 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16635 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16636 int32x2_t __ret;
16637 __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16638 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16639 return __ret;
16640 }
16641 #endif
16642
16643 #ifdef __LITTLE_ENDIAN__
16644 __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
16645 int16x4_t __ret;
16646 __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
16647 return __ret;
16648 }
16649 #else
16650 __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
16651 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16652 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16653 int16x4_t __ret;
16654 __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
16655 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16656 return __ret;
16657 }
16658 #endif
16659
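/*
 * vqabs/vqabsq: saturating absolute value. The most negative lane value
 * saturates to the maximum positive value instead of wrapping (e.g.
 * vqabs_s8 maps -128 to 127).
 */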
16660 #ifdef __LITTLE_ENDIAN__
16661 __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
16662 int8x16_t __ret;
16663 __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
16664 return __ret;
16665 }
16666 #else
16667 __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
16668 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16669 int8x16_t __ret;
16670 __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
16671 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16672 return __ret;
16673 }
16674 #endif
16675
16676 #ifdef __LITTLE_ENDIAN__
16677 __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
16678 int32x4_t __ret;
16679 __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
16680 return __ret;
16681 }
16682 #else
16683 __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
16684 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16685 int32x4_t __ret;
16686 __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
16687 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16688 return __ret;
16689 }
16690 #endif
16691
16692 #ifdef __LITTLE_ENDIAN__
16693 __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
16694 int16x8_t __ret;
16695 __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
16696 return __ret;
16697 }
16698 #else
16699 __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
16700 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16701 int16x8_t __ret;
16702 __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
16703 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16704 return __ret;
16705 }
16706 #endif
16707
16708 #ifdef __LITTLE_ENDIAN__
16709 __ai int8x8_t vqabs_s8(int8x8_t __p0) {
16710 int8x8_t __ret;
16711 __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
16712 return __ret;
16713 }
16714 #else
16715 __ai int8x8_t vqabs_s8(int8x8_t __p0) {
16716 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16717 int8x8_t __ret;
16718 __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
16719 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16720 return __ret;
16721 }
16722 #endif
16723
16724 #ifdef __LITTLE_ENDIAN__
16725 __ai int32x2_t vqabs_s32(int32x2_t __p0) {
16726 int32x2_t __ret;
16727 __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
16728 return __ret;
16729 }
16730 #else
16731 __ai int32x2_t vqabs_s32(int32x2_t __p0) {
16732 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16733 int32x2_t __ret;
16734 __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
16735 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16736 return __ret;
16737 }
16738 #endif
16739
16740 #ifdef __LITTLE_ENDIAN__
16741 __ai int16x4_t vqabs_s16(int16x4_t __p0) {
16742 int16x4_t __ret;
16743 __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
16744 return __ret;
16745 }
16746 #else
16747 __ai int16x4_t vqabs_s16(int16x4_t __p0) {
16748 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16749 int16x4_t __ret;
16750 __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
16751 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16752 return __ret;
16753 }
16754 #endif
16755
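/*
 * vqadd/vqaddq: saturating addition; sums that would overflow are clamped to
 * the element type's limits (e.g. vqadd_u8 of 200 and 100 yields 255). The
 * big-endian branch also defines a __noswap_ helper that skips lane reversal
 * so other big-endian wrappers can call it on already-reversed operands.
 */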
16756 #ifdef __LITTLE_ENDIAN__
16757 __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
16758 uint8x16_t __ret;
16759 __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
16760 return __ret;
16761 }
16762 #else
16763 __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
16764 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16765 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16766 uint8x16_t __ret;
16767 __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
16768 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16769 return __ret;
16770 }
16771 #endif
16772
16773 #ifdef __LITTLE_ENDIAN__
16774 __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
16775 uint32x4_t __ret;
16776 __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
16777 return __ret;
16778 }
16779 #else
16780 __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
16781 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16782 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16783 uint32x4_t __ret;
16784 __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
16785 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16786 return __ret;
16787 }
16788 #endif
16789
16790 #ifdef __LITTLE_ENDIAN__
16791 __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
16792 uint64x2_t __ret;
16793 __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
16794 return __ret;
16795 }
16796 #else
16797 __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
16798 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16799 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16800 uint64x2_t __ret;
16801 __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
16802 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16803 return __ret;
16804 }
16805 #endif
16806
16807 #ifdef __LITTLE_ENDIAN__
16808 __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
16809 uint16x8_t __ret;
16810 __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
16811 return __ret;
16812 }
16813 #else
16814 __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
16815 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16816 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16817 uint16x8_t __ret;
16818 __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
16819 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16820 return __ret;
16821 }
16822 #endif
16823
16824 #ifdef __LITTLE_ENDIAN__
16825 __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
16826 int8x16_t __ret;
16827 __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
16828 return __ret;
16829 }
16830 #else
16831 __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
16832 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16833 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16834 int8x16_t __ret;
16835 __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
16836 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
16837 return __ret;
16838 }
16839 #endif
16840
16841 #ifdef __LITTLE_ENDIAN__
16842 __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
16843 int32x4_t __ret;
16844 __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
16845 return __ret;
16846 }
16847 #else
16848 __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
16849 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16850 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16851 int32x4_t __ret;
16852 __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
16853 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16854 return __ret;
16855 }
16856 __ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
16857 int32x4_t __ret;
16858 __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
16859 return __ret;
16860 }
16861 #endif
16862
16863 #ifdef __LITTLE_ENDIAN__
16864 __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
16865 int64x2_t __ret;
16866 __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
16867 return __ret;
16868 }
16869 #else
16870 __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
16871 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16872 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16873 int64x2_t __ret;
16874 __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
16875 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16876 return __ret;
16877 }
16878 #endif
16879
16880 #ifdef __LITTLE_ENDIAN__
16881 __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
16882 int16x8_t __ret;
16883 __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
16884 return __ret;
16885 }
16886 #else
16887 __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
16888 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16889 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16890 int16x8_t __ret;
16891 __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
16892 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16893 return __ret;
16894 }
16895 __ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
16896 int16x8_t __ret;
16897 __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
16898 return __ret;
16899 }
16900 #endif
16901
16902 #ifdef __LITTLE_ENDIAN__
16903 __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16904 uint8x8_t __ret;
16905 __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
16906 return __ret;
16907 }
16908 #else
16909 __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
16910 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16911 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16912 uint8x8_t __ret;
16913 __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
16914 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16915 return __ret;
16916 }
16917 #endif
16918
16919 #ifdef __LITTLE_ENDIAN__
16920 __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16921 uint32x2_t __ret;
16922 __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
16923 return __ret;
16924 }
16925 #else
16926 __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
16927 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16928 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16929 uint32x2_t __ret;
16930 __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
16931 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16932 return __ret;
16933 }
16934 #endif
16935
16936 #ifdef __LITTLE_ENDIAN__
16937 __ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
16938 uint64x1_t __ret;
16939 __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
16940 return __ret;
16941 }
16942 #else
16943 __ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
16944 uint64x1_t __ret;
16945 __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
16946 return __ret;
16947 }
16948 #endif
16949
16950 #ifdef __LITTLE_ENDIAN__
16951 __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16952 uint16x4_t __ret;
16953 __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
16954 return __ret;
16955 }
16956 #else
16957 __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
16958 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
16959 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
16960 uint16x4_t __ret;
16961 __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
16962 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
16963 return __ret;
16964 }
16965 #endif
16966
16967 #ifdef __LITTLE_ENDIAN__
16968 __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
16969 int8x8_t __ret;
16970 __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
16971 return __ret;
16972 }
16973 #else
16974 __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
16975 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
16976 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
16977 int8x8_t __ret;
16978 __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
16979 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
16980 return __ret;
16981 }
16982 #endif
16983
16984 #ifdef __LITTLE_ENDIAN__
16985 __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
16986 int32x2_t __ret;
16987 __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
16988 return __ret;
16989 }
16990 #else
16991 __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
16992 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
16993 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
16994 int32x2_t __ret;
16995 __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
16996 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
16997 return __ret;
16998 }
16999 __ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
17000 int32x2_t __ret;
17001 __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
17002 return __ret;
17003 }
17004 #endif
17005
17006 #ifdef __LITTLE_ENDIAN__
17007 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
17008 int64x1_t __ret;
17009 __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
17010 return __ret;
17011 }
17012 #else
17013 __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
17014 int64x1_t __ret;
17015 __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
17016 return __ret;
17017 }
17018 #endif
17019
17020 #ifdef __LITTLE_ENDIAN__
17021 __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
17022 int16x4_t __ret;
17023 __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
17024 return __ret;
17025 }
17026 #else
17027 __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
17028 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17029 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17030 int16x4_t __ret;
17031 __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
17032 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17033 return __ret;
17034 }
17035 __ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
17036 int16x4_t __ret;
17037 __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
17038 return __ret;
17039 }
17040 #endif
17041
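/* vqdmlal: signed saturating doubling multiply-accumulate long. Each
 * product 2*__p1[i]*__p2[i] is formed in the double-width type and added
 * to the accumulator __p0 with saturation. The _lane variants below use a
 * single lane of the second multiplicand; the _n variants broadcast a
 * scalar. */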
17042 #ifdef __LITTLE_ENDIAN__
17043 __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17044 int64x2_t __ret;
17045 __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17046 return __ret;
17047 }
17048 #else
17049 __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17050 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17051 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17052 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
17053 int64x2_t __ret;
17054 __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
17055 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17056 return __ret;
17057 }
17058 __ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17059 int64x2_t __ret;
17060 __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17061 return __ret;
17062 }
17063 #endif
17064
17065 #ifdef __LITTLE_ENDIAN__
17066 __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17067 int32x4_t __ret;
17068 __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17069 return __ret;
17070 }
17071 #else
17072 __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17073 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17074 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17075 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
17076 int32x4_t __ret;
17077 __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
17078 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17079 return __ret;
17080 }
17081 __ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17082 int32x4_t __ret;
17083 __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17084 return __ret;
17085 }
17086 #endif
17087
17088 #ifdef __LITTLE_ENDIAN__
17089 #define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17090 int64x2_t __s0 = __p0; \
17091 int32x2_t __s1 = __p1; \
17092 int32x2_t __s2 = __p2; \
17093 int64x2_t __ret; \
17094 __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
17095 __ret; \
17096 })
17097 #else
17098 #define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17099 int64x2_t __s0 = __p0; \
17100 int32x2_t __s1 = __p1; \
17101 int32x2_t __s2 = __p2; \
17102 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17103 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17104 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
17105 int64x2_t __ret; \
17106 __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
17107 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17108 __ret; \
17109 })
17110 #endif
17111
17112 #ifdef __LITTLE_ENDIAN__
17113 #define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17114 int32x4_t __s0 = __p0; \
17115 int16x4_t __s1 = __p1; \
17116 int16x4_t __s2 = __p2; \
17117 int32x4_t __ret; \
17118 __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
17119 __ret; \
17120 })
17121 #else
17122 #define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17123 int32x4_t __s0 = __p0; \
17124 int16x4_t __s1 = __p1; \
17125 int16x4_t __s2 = __p2; \
17126 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17127 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17128 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
17129 int32x4_t __ret; \
17130 __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
17131 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17132 __ret; \
17133 })
17134 #endif
17135
17136 #ifdef __LITTLE_ENDIAN__
17137 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17138 int64x2_t __ret;
17139 __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17140 return __ret;
17141 }
17142 #else
17143 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17144 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17145 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17146 int64x2_t __ret;
17147 __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17148 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17149 return __ret;
17150 }
17151 __ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17152 int64x2_t __ret;
17153 __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17154 return __ret;
17155 }
17156 #endif
17157
17158 #ifdef __LITTLE_ENDIAN__
17159 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17160 int32x4_t __ret;
17161 __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17162 return __ret;
17163 }
17164 #else
17165 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17166 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17167 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17168 int32x4_t __ret;
17169 __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17170 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17171 return __ret;
17172 }
17173 __ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17174 int32x4_t __ret;
17175 __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17176 return __ret;
17177 }
17178 #endif
17179
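/* vqdmlsl: signed saturating doubling multiply-subtract long. Identical to
 * vqdmlal except that the doubled, widened product is subtracted from the
 * accumulator with saturation. */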
17180 #ifdef __LITTLE_ENDIAN__
17181 __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17182 int64x2_t __ret;
17183 __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17184 return __ret;
17185 }
17186 #else
17187 __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17188 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17189 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17190 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
17191 int64x2_t __ret;
17192 __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
17193 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17194 return __ret;
17195 }
17196 __ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
17197 int64x2_t __ret;
17198 __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
17199 return __ret;
17200 }
17201 #endif
17202
17203 #ifdef __LITTLE_ENDIAN__
17204 __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17205 int32x4_t __ret;
17206 __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17207 return __ret;
17208 }
17209 #else
17210 __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17211 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17212 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17213 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
17214 int32x4_t __ret;
17215 __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
17216 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17217 return __ret;
17218 }
17219 __ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
17220 int32x4_t __ret;
17221 __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
17222 return __ret;
17223 }
17224 #endif
17225
17226 #ifdef __LITTLE_ENDIAN__
17227 #define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17228 int64x2_t __s0 = __p0; \
17229 int32x2_t __s1 = __p1; \
17230 int32x2_t __s2 = __p2; \
17231 int64x2_t __ret; \
17232 __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
17233 __ret; \
17234 })
17235 #else
17236 #define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
17237 int64x2_t __s0 = __p0; \
17238 int32x2_t __s1 = __p1; \
17239 int32x2_t __s2 = __p2; \
17240 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17241 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17242 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
17243 int64x2_t __ret; \
17244 __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
17245 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17246 __ret; \
17247 })
17248 #endif
17249
17250 #ifdef __LITTLE_ENDIAN__
17251 #define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17252 int32x4_t __s0 = __p0; \
17253 int16x4_t __s1 = __p1; \
17254 int16x4_t __s2 = __p2; \
17255 int32x4_t __ret; \
17256 __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
17257 __ret; \
17258 })
17259 #else
17260 #define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
17261 int32x4_t __s0 = __p0; \
17262 int16x4_t __s1 = __p1; \
17263 int16x4_t __s2 = __p2; \
17264 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17265 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17266 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
17267 int32x4_t __ret; \
17268 __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
17269 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17270 __ret; \
17271 })
17272 #endif
17273
17274 #ifdef __LITTLE_ENDIAN__
17275 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17276 int64x2_t __ret;
17277 __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17278 return __ret;
17279 }
17280 #else
17281 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17282 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17283 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17284 int64x2_t __ret;
17285 __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17286 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17287 return __ret;
17288 }
17289 __ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
17290 int64x2_t __ret;
17291 __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
17292 return __ret;
17293 }
17294 #endif
17295
17296 #ifdef __LITTLE_ENDIAN__
17297 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17298 int32x4_t __ret;
17299 __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17300 return __ret;
17301 }
17302 #else
17303 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17304 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17305 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17306 int32x4_t __ret;
17307 __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17308 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17309 return __ret;
17310 }
17311 __ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
17312 int32x4_t __ret;
17313 __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
17314 return __ret;
17315 }
17316 #endif
17317
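/* vqdmulh / vqdmulhq: signed saturating doubling multiply returning the
 * high half. The doubled product 2*a*b is computed in the double-width
 * type and its upper half is returned; saturation occurs only when both
 * inputs are the most negative value. */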
17318 #ifdef __LITTLE_ENDIAN__
17319 __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17320 int32x4_t __ret;
17321 __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
17322 return __ret;
17323 }
17324 #else
17325 __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17326 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17327 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17328 int32x4_t __ret;
17329 __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
17330 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17331 return __ret;
17332 }
17333 __ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17334 int32x4_t __ret;
17335 __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
17336 return __ret;
17337 }
17338 #endif
17339
17340 #ifdef __LITTLE_ENDIAN__
17341 __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17342 int16x8_t __ret;
17343 __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
17344 return __ret;
17345 }
17346 #else
17347 __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17348 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17349 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
17350 int16x8_t __ret;
17351 __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
17352 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17353 return __ret;
17354 }
17355 __ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17356 int16x8_t __ret;
17357 __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
17358 return __ret;
17359 }
17360 #endif
17361
17362 #ifdef __LITTLE_ENDIAN__
17363 __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
17364 int32x2_t __ret;
17365 __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
17366 return __ret;
17367 }
17368 #else
17369 __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
17370 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17371 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17372 int32x2_t __ret;
17373 __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
17374 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17375 return __ret;
17376 }
17377 __ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
17378 int32x2_t __ret;
17379 __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
17380 return __ret;
17381 }
17382 #endif
17383
17384 #ifdef __LITTLE_ENDIAN__
17385 __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
17386 int16x4_t __ret;
17387 __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
17388 return __ret;
17389 }
17390 #else
17391 __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
17392 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17393 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17394 int16x4_t __ret;
17395 __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
17396 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17397 return __ret;
17398 }
17399 __ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
17400 int16x4_t __ret;
17401 __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
17402 return __ret;
17403 }
17404 #endif
17405
17406 #ifdef __LITTLE_ENDIAN__
17407 #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17408 int32x4_t __s0 = __p0; \
17409 int32x2_t __s1 = __p1; \
17410 int32x4_t __ret; \
17411 __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
17412 __ret; \
17413 })
17414 #else
17415 #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17416 int32x4_t __s0 = __p0; \
17417 int32x2_t __s1 = __p1; \
17418 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17419 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17420 int32x4_t __ret; \
17421 __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
17422 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17423 __ret; \
17424 })
17425 #endif
17426
17427 #ifdef __LITTLE_ENDIAN__
17428 #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17429 int16x8_t __s0 = __p0; \
17430 int16x4_t __s1 = __p1; \
17431 int16x8_t __ret; \
17432 __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
17433 __ret; \
17434 })
17435 #else
17436 #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17437 int16x8_t __s0 = __p0; \
17438 int16x4_t __s1 = __p1; \
17439 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
17440 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17441 int16x8_t __ret; \
17442 __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
17443 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
17444 __ret; \
17445 })
17446 #endif
17447
17448 #ifdef __LITTLE_ENDIAN__
17449 #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17450 int32x2_t __s0 = __p0; \
17451 int32x2_t __s1 = __p1; \
17452 int32x2_t __ret; \
17453 __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
17454 __ret; \
17455 })
17456 #else
17457 #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17458 int32x2_t __s0 = __p0; \
17459 int32x2_t __s1 = __p1; \
17460 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17461 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17462 int32x2_t __ret; \
17463 __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
17464 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17465 __ret; \
17466 })
17467 #endif
17468
17469 #ifdef __LITTLE_ENDIAN__
17470 #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17471 int16x4_t __s0 = __p0; \
17472 int16x4_t __s1 = __p1; \
17473 int16x4_t __ret; \
17474 __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
17475 __ret; \
17476 })
17477 #else
17478 #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17479 int16x4_t __s0 = __p0; \
17480 int16x4_t __s1 = __p1; \
17481 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17482 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17483 int16x4_t __ret; \
17484 __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
17485 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17486 __ret; \
17487 })
17488 #endif
17489
17490 #ifdef __LITTLE_ENDIAN__
17491 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
17492 int32x4_t __ret;
17493 __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
17494 return __ret;
17495 }
17496 #else
17497 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
17498 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17499 int32x4_t __ret;
17500 __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
17501 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17502 return __ret;
17503 }
17504 #endif
17505
17506 #ifdef __LITTLE_ENDIAN__
17507 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
17508 int16x8_t __ret;
17509 __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
17510 return __ret;
17511 }
17512 #else
17513 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
17514 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17515 int16x8_t __ret;
17516 __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
17517 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17518 return __ret;
17519 }
17520 #endif
17521
17522 #ifdef __LITTLE_ENDIAN__
17523 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
17524 int32x2_t __ret;
17525 __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
17526 return __ret;
17527 }
17528 #else
17529 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
17530 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17531 int32x2_t __ret;
17532 __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
17533 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17534 return __ret;
17535 }
17536 #endif
17537
17538 #ifdef __LITTLE_ENDIAN__
17539 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
17540 int16x4_t __ret;
17541 __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
17542 return __ret;
17543 }
17544 #else
17545 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
17546 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17547 int16x4_t __ret;
17548 __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
17549 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17550 return __ret;
17551 }
17552 #endif
17553
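/* vqdmull: signed saturating doubling multiply long. Each pair of lanes is
 * multiplied, doubled, and returned in a double-width result vector with
 * saturation. */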
17554 #ifdef __LITTLE_ENDIAN__
17555 __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
17556 int64x2_t __ret;
17557 __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
17558 return __ret;
17559 }
17560 #else
17561 __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
17562 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17563 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
17564 int64x2_t __ret;
17565 __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
17566 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17567 return __ret;
17568 }
17569 __ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
17570 int64x2_t __ret;
17571 __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
17572 return __ret;
17573 }
17574 #endif
17575
17576 #ifdef __LITTLE_ENDIAN__
17577 __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
17578 int32x4_t __ret;
17579 __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
17580 return __ret;
17581 }
17582 #else
17583 __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
17584 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17585 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17586 int32x4_t __ret;
17587 __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
17588 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17589 return __ret;
17590 }
17591 __ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
17592 int32x4_t __ret;
17593 __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
17594 return __ret;
17595 }
17596 #endif
17597
17598 #ifdef __LITTLE_ENDIAN__
17599 #define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17600 int32x2_t __s0 = __p0; \
17601 int32x2_t __s1 = __p1; \
17602 int64x2_t __ret; \
17603 __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
17604 __ret; \
17605 })
17606 #else
17607 #define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
17608 int32x2_t __s0 = __p0; \
17609 int32x2_t __s1 = __p1; \
17610 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
17611 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
17612 int64x2_t __ret; \
17613 __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
17614 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
17615 __ret; \
17616 })
17617 #endif
17618
17619 #ifdef __LITTLE_ENDIAN__
17620 #define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17621 int16x4_t __s0 = __p0; \
17622 int16x4_t __s1 = __p1; \
17623 int32x4_t __ret; \
17624 __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
17625 __ret; \
17626 })
17627 #else
17628 #define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
17629 int16x4_t __s0 = __p0; \
17630 int16x4_t __s1 = __p1; \
17631 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
17632 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
17633 int32x4_t __ret; \
17634 __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
17635 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
17636 __ret; \
17637 })
17638 #endif
17639
17640 #ifdef __LITTLE_ENDIAN__
17641 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
17642 int64x2_t __ret;
17643 __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
17644 return __ret;
17645 }
17646 #else
17647 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
17648 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17649 int64x2_t __ret;
17650 __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
17651 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17652 return __ret;
17653 }
17654 __ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
17655 int64x2_t __ret;
17656 __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
17657 return __ret;
17658 }
17659 #endif
17660
17661 #ifdef __LITTLE_ENDIAN__
17662 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
17663 int32x4_t __ret;
17664 __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
17665 return __ret;
17666 }
17667 #else
17668 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
17669 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17670 int32x4_t __ret;
17671 __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
17672 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17673 return __ret;
17674 }
17675 __ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
17676 int32x4_t __ret;
17677 __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
17678 return __ret;
17679 }
17680 #endif
17681
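/* vqmovn: saturating narrow. Each lane is narrowed to half its width, with
 * values outside the narrower type's range clamped to its minimum or
 * maximum. */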
17682 #ifdef __LITTLE_ENDIAN__
17683 __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
17684 uint16x4_t __ret;
17685 __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
17686 return __ret;
17687 }
17688 #else
17689 __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
17690 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17691 uint16x4_t __ret;
17692 __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
17693 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17694 return __ret;
17695 }
17696 __ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
17697 uint16x4_t __ret;
17698 __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
17699 return __ret;
17700 }
17701 #endif
17702
17703 #ifdef __LITTLE_ENDIAN__
17704 __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
17705 uint32x2_t __ret;
17706 __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
17707 return __ret;
17708 }
17709 #else
17710 __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
17711 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17712 uint32x2_t __ret;
17713 __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
17714 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17715 return __ret;
17716 }
17717 __ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
17718 uint32x2_t __ret;
17719 __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
17720 return __ret;
17721 }
17722 #endif
17723
17724 #ifdef __LITTLE_ENDIAN__
17725 __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
17726 uint8x8_t __ret;
17727 __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
17728 return __ret;
17729 }
17730 #else
17731 __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
17732 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17733 uint8x8_t __ret;
17734 __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
17735 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17736 return __ret;
17737 }
17738 __ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
17739 uint8x8_t __ret;
17740 __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
17741 return __ret;
17742 }
17743 #endif
17744
17745 #ifdef __LITTLE_ENDIAN__
17746 __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
17747 int16x4_t __ret;
17748 __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
17749 return __ret;
17750 }
17751 #else
17752 __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
17753 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17754 int16x4_t __ret;
17755 __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
17756 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17757 return __ret;
17758 }
17759 __ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
17760 int16x4_t __ret;
17761 __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
17762 return __ret;
17763 }
17764 #endif
17765
17766 #ifdef __LITTLE_ENDIAN__
17767 __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
17768 int32x2_t __ret;
17769 __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
17770 return __ret;
17771 }
17772 #else
17773 __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
17774 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17775 int32x2_t __ret;
17776 __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
17777 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17778 return __ret;
17779 }
17780 __ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
17781 int32x2_t __ret;
17782 __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
17783 return __ret;
17784 }
17785 #endif
17786
17787 #ifdef __LITTLE_ENDIAN__
17788 __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
17789 int8x8_t __ret;
17790 __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
17791 return __ret;
17792 }
17793 #else
17794 __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
17795 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17796 int8x8_t __ret;
17797 __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
17798 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17799 return __ret;
17800 }
17801 __ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
17802 int8x8_t __ret;
17803 __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
17804 return __ret;
17805 }
17806 #endif
17807
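/* vqmovun: saturating narrow from signed to unsigned. Negative lanes clamp
 * to zero and lanes above the unsigned maximum clamp to that maximum. */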
17808 #ifdef __LITTLE_ENDIAN__
17809 __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
17810 uint16x4_t __ret;
17811 __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
17812 return __ret;
17813 }
17814 #else
17815 __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
17816 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17817 uint16x4_t __ret;
17818 __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
17819 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17820 return __ret;
17821 }
17822 __ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
17823 uint16x4_t __ret;
17824 __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
17825 return __ret;
17826 }
17827 #endif
17828
17829 #ifdef __LITTLE_ENDIAN__
17830 __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
17831 uint32x2_t __ret;
17832 __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
17833 return __ret;
17834 }
17835 #else
17836 __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
17837 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17838 uint32x2_t __ret;
17839 __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
17840 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17841 return __ret;
17842 }
17843 __ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
17844 uint32x2_t __ret;
17845 __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
17846 return __ret;
17847 }
17848 #endif
17849
17850 #ifdef __LITTLE_ENDIAN__
17851 __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
17852 uint8x8_t __ret;
17853 __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
17854 return __ret;
17855 }
17856 #else
17857 __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
17858 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17859 uint8x8_t __ret;
17860 __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
17861 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17862 return __ret;
17863 }
17864 __ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
17865 uint8x8_t __ret;
17866 __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
17867 return __ret;
17868 }
17869 #endif
17870
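/* vqneg / vqnegq: lane-wise saturating negation. Negating the most
 * negative representable value saturates to the most positive value. */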
17871 #ifdef __LITTLE_ENDIAN__
17872 __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
17873 int8x16_t __ret;
17874 __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
17875 return __ret;
17876 }
17877 #else
17878 __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
17879 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
17880 int8x16_t __ret;
17881 __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
17882 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
17883 return __ret;
17884 }
17885 #endif
17886
17887 #ifdef __LITTLE_ENDIAN__
17888 __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
17889 int32x4_t __ret;
17890 __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
17891 return __ret;
17892 }
17893 #else
17894 __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
17895 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17896 int32x4_t __ret;
17897 __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
17898 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17899 return __ret;
17900 }
17901 #endif
17902
17903 #ifdef __LITTLE_ENDIAN__
17904 __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
17905 int16x8_t __ret;
17906 __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
17907 return __ret;
17908 }
17909 #else
17910 __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
17911 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17912 int16x8_t __ret;
17913 __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
17914 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17915 return __ret;
17916 }
17917 #endif
17918
17919 #ifdef __LITTLE_ENDIAN__
17920 __ai int8x8_t vqneg_s8(int8x8_t __p0) {
17921 int8x8_t __ret;
17922 __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
17923 return __ret;
17924 }
17925 #else
17926 __ai int8x8_t vqneg_s8(int8x8_t __p0) {
17927 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17928 int8x8_t __ret;
17929 __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
17930 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
17931 return __ret;
17932 }
17933 #endif
17934
17935 #ifdef __LITTLE_ENDIAN__
17936 __ai int32x2_t vqneg_s32(int32x2_t __p0) {
17937 int32x2_t __ret;
17938 __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
17939 return __ret;
17940 }
17941 #else
17942 __ai int32x2_t vqneg_s32(int32x2_t __p0) {
17943 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
17944 int32x2_t __ret;
17945 __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
17946 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
17947 return __ret;
17948 }
17949 #endif
17950
17951 #ifdef __LITTLE_ENDIAN__
17952 __ai int16x4_t vqneg_s16(int16x4_t __p0) {
17953 int16x4_t __ret;
17954 __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
17955 return __ret;
17956 }
17957 #else
17958 __ai int16x4_t vqneg_s16(int16x4_t __p0) {
17959 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17960 int16x4_t __ret;
17961 __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
17962 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17963 return __ret;
17964 }
17965 #endif
17966
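/* vqrdmulh / vqrdmulhq: signed saturating rounding doubling multiply
 * returning the high half. Like vqdmulh, but a rounding constant is added
 * to the doubled product before the upper half is taken. */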
17967 #ifdef __LITTLE_ENDIAN__
17968 __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17969 int32x4_t __ret;
17970 __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
17971 return __ret;
17972 }
17973 #else
17974 __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17975 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
17976 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
17977 int32x4_t __ret;
17978 __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
17979 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
17980 return __ret;
17981 }
17982 __ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
17983 int32x4_t __ret;
17984 __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
17985 return __ret;
17986 }
17987 #endif
17988
17989 #ifdef __LITTLE_ENDIAN__
17990 __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17991 int16x8_t __ret;
17992 __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
17993 return __ret;
17994 }
17995 #else
17996 __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
17997 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
17998 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
17999 int16x8_t __ret;
18000 __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
18001 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18002 return __ret;
18003 }
18004 __ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
18005 int16x8_t __ret;
18006 __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
18007 return __ret;
18008 }
18009 #endif
18010
18011 #ifdef __LITTLE_ENDIAN__
18012 __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
18013 int32x2_t __ret;
18014 __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18015 return __ret;
18016 }
18017 #else
18018 __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
18019 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18020 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18021 int32x2_t __ret;
18022 __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
18023 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18024 return __ret;
18025 }
18026 __ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
18027 int32x2_t __ret;
18028 __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18029 return __ret;
18030 }
18031 #endif
18032
18033 #ifdef __LITTLE_ENDIAN__
18034 __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
18035 int16x4_t __ret;
18036 __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18037 return __ret;
18038 }
18039 #else
18040 __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
18041 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18042 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18043 int16x4_t __ret;
18044 __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
18045 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18046 return __ret;
18047 }
18048 __ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
18049 int16x4_t __ret;
18050 __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18051 return __ret;
18052 }
18053 #endif
18054
18055 #ifdef __LITTLE_ENDIAN__
18056 #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18057 int32x4_t __s0 = __p0; \
18058 int32x2_t __s1 = __p1; \
18059 int32x4_t __ret; \
18060 __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
18061 __ret; \
18062 })
18063 #else
18064 #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18065 int32x4_t __s0 = __p0; \
18066 int32x2_t __s1 = __p1; \
18067 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18068 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
18069 int32x4_t __ret; \
18070 __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
18071 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18072 __ret; \
18073 })
18074 #endif
18075
18076 #ifdef __LITTLE_ENDIAN__
18077 #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18078 int16x8_t __s0 = __p0; \
18079 int16x4_t __s1 = __p1; \
18080 int16x8_t __ret; \
18081 __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
18082 __ret; \
18083 })
18084 #else
18085 #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18086 int16x8_t __s0 = __p0; \
18087 int16x4_t __s1 = __p1; \
18088 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18089 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
18090 int16x8_t __ret; \
18091 __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
18092 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18093 __ret; \
18094 })
18095 #endif
18096
18097 #ifdef __LITTLE_ENDIAN__
18098 #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18099 int32x2_t __s0 = __p0; \
18100 int32x2_t __s1 = __p1; \
18101 int32x2_t __ret; \
18102 __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
18103 __ret; \
18104 })
18105 #else
18106 #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
18107 int32x2_t __s0 = __p0; \
18108 int32x2_t __s1 = __p1; \
18109 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18110 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
18111 int32x2_t __ret; \
18112 __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
18113 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18114 __ret; \
18115 })
18116 #endif
18117
18118 #ifdef __LITTLE_ENDIAN__
18119 #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18120 int16x4_t __s0 = __p0; \
18121 int16x4_t __s1 = __p1; \
18122 int16x4_t __ret; \
18123 __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
18124 __ret; \
18125 })
18126 #else
18127 #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
18128 int16x4_t __s0 = __p0; \
18129 int16x4_t __s1 = __p1; \
18130 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18131 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
18132 int16x4_t __ret; \
18133 __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
18134 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18135 __ret; \
18136 })
18137 #endif
18138
18139 #ifdef __LITTLE_ENDIAN__
18140 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
18141 int32x4_t __ret;
18142 __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
18143 return __ret;
18144 }
18145 #else
18146 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
18147 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18148 int32x4_t __ret;
18149 __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
18150 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18151 return __ret;
18152 }
18153 #endif
18154
18155 #ifdef __LITTLE_ENDIAN__
18156 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
18157 int16x8_t __ret;
18158 __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
18159 return __ret;
18160 }
18161 #else
18162 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
18163 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18164 int16x8_t __ret;
18165 __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
18166 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18167 return __ret;
18168 }
18169 #endif
18170
18171 #ifdef __LITTLE_ENDIAN__
18172 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
18173 int32x2_t __ret;
18174 __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
18175 return __ret;
18176 }
18177 #else
18178 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
18179 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18180 int32x2_t __ret;
18181 __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
18182 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18183 return __ret;
18184 }
18185 #endif
18186
18187 #ifdef __LITTLE_ENDIAN__
18188 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
18189 int16x4_t __ret;
18190 __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
18191 return __ret;
18192 }
18193 #else
18194 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
18195 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18196 int16x4_t __ret;
18197 __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
18198 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18199 return __ret;
18200 }
18201 #endif
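/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqrdmulh[q]_n_* forms broadcast a scalar across the second operand before
 * the saturating rounding doubling multiply-high. The helper name and the
 * Q15 gain interpretation are assumptions.
 *
 *   int16x8_t apply_gain_q15(int16x8_t samples, int16_t gain_q15) {
 *     // each lane becomes sat((2 * samples[i] * gain_q15 + 0x8000) >> 16)
 *     return vqrdmulhq_n_s16(samples, gain_q15);
 *   }
 */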
18202
18203 #ifdef __LITTLE_ENDIAN__
18204 __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18205 uint8x16_t __ret;
18206 __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
18207 return __ret;
18208 }
18209 #else
18210 __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18211 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18212 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18213 uint8x16_t __ret;
18214 __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
18215 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18216 return __ret;
18217 }
18218 #endif
18219
18220 #ifdef __LITTLE_ENDIAN__
18221 __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18222 uint32x4_t __ret;
18223 __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
18224 return __ret;
18225 }
18226 #else
18227 __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18228 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18229 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18230 uint32x4_t __ret;
18231 __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
18232 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18233 return __ret;
18234 }
18235 #endif
18236
18237 #ifdef __LITTLE_ENDIAN__
18238 __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18239 uint64x2_t __ret;
18240 __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
18241 return __ret;
18242 }
18243 #else
18244 __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18245 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18246 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18247 uint64x2_t __ret;
18248 __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
18249 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18250 return __ret;
18251 }
18252 #endif
18253
18254 #ifdef __LITTLE_ENDIAN__
18255 __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18256 uint16x8_t __ret;
18257 __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
18258 return __ret;
18259 }
18260 #else
18261 __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18262 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18263 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18264 uint16x8_t __ret;
18265 __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
18266 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18267 return __ret;
18268 }
18269 #endif
18270
18271 #ifdef __LITTLE_ENDIAN__
18272 __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18273 int8x16_t __ret;
18274 __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
18275 return __ret;
18276 }
18277 #else
18278 __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18279 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18280 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18281 int8x16_t __ret;
18282 __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
18283 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18284 return __ret;
18285 }
18286 #endif
18287
18288 #ifdef __LITTLE_ENDIAN__
18289 __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18290 int32x4_t __ret;
18291 __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
18292 return __ret;
18293 }
18294 #else
18295 __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18296 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18297 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18298 int32x4_t __ret;
18299 __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
18300 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18301 return __ret;
18302 }
18303 #endif
18304
18305 #ifdef __LITTLE_ENDIAN__
18306 __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18307 int64x2_t __ret;
18308 __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
18309 return __ret;
18310 }
18311 #else
18312 __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18313 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18314 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18315 int64x2_t __ret;
18316 __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
18317 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18318 return __ret;
18319 }
18320 #endif
18321
18322 #ifdef __LITTLE_ENDIAN__
18323 __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18324 int16x8_t __ret;
18325 __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
18326 return __ret;
18327 }
18328 #else
18329 __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18330 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18331 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18332 int16x8_t __ret;
18333 __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
18334 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18335 return __ret;
18336 }
18337 #endif
18338
18339 #ifdef __LITTLE_ENDIAN__
18340 __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18341 uint8x8_t __ret;
18342 __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
18343 return __ret;
18344 }
18345 #else
18346 __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18347 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18348 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18349 uint8x8_t __ret;
18350 __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
18351 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18352 return __ret;
18353 }
18354 #endif
18355
18356 #ifdef __LITTLE_ENDIAN__
18357 __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18358 uint32x2_t __ret;
18359 __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
18360 return __ret;
18361 }
18362 #else
18363 __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18364 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18365 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18366 uint32x2_t __ret;
18367 __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
18368 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18369 return __ret;
18370 }
18371 #endif
18372
18373 #ifdef __LITTLE_ENDIAN__
18374 __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18375 uint64x1_t __ret;
18376 __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18377 return __ret;
18378 }
18379 #else
18380 __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18381 uint64x1_t __ret;
18382 __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18383 return __ret;
18384 }
18385 #endif
18386
18387 #ifdef __LITTLE_ENDIAN__
18388 __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18389 uint16x4_t __ret;
18390 __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
18391 return __ret;
18392 }
18393 #else
18394 __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18395 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18396 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18397 uint16x4_t __ret;
18398 __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
18399 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18400 return __ret;
18401 }
18402 #endif
18403
18404 #ifdef __LITTLE_ENDIAN__
18405 __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
18406 int8x8_t __ret;
18407 __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
18408 return __ret;
18409 }
18410 #else
18411 __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
18412 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18413 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18414 int8x8_t __ret;
18415 __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
18416 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18417 return __ret;
18418 }
18419 #endif
18420
18421 #ifdef __LITTLE_ENDIAN__
18422 __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
18423 int32x2_t __ret;
18424 __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18425 return __ret;
18426 }
18427 #else
18428 __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
18429 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18430 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18431 int32x2_t __ret;
18432 __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
18433 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18434 return __ret;
18435 }
18436 #endif
18437
18438 #ifdef __LITTLE_ENDIAN__
18439 __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
18440 int64x1_t __ret;
18441 __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18442 return __ret;
18443 }
18444 #else
18445 __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
18446 int64x1_t __ret;
18447 __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18448 return __ret;
18449 }
18450 #endif
18451
18452 #ifdef __LITTLE_ENDIAN__
18453 __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
18454 int16x4_t __ret;
18455 __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18456 return __ret;
18457 }
18458 #else
18459 __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
18460 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18461 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18462 int16x4_t __ret;
18463 __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
18464 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18465 return __ret;
18466 }
18467 #endif
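/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqrshl[q]_* forms shift each lane left by the signed count held in the
 * matching lane of the second operand, rounding and saturating; a negative
 * count is a rounding shift right. The helper name is hypothetical.
 *
 *   uint8x16_t shift_per_lane(uint8x16_t v, int8x16_t counts) {
 *     return vqrshlq_u8(v, counts); // counts may mix left (+) and right (-) shifts
 *   }
 */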
18468
18469 #ifdef __LITTLE_ENDIAN__
18470 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
18471 uint32x4_t __s0 = __p0; \
18472 uint16x4_t __ret; \
18473 __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
18474 __ret; \
18475 })
18476 #else
18477 #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
18478 uint32x4_t __s0 = __p0; \
18479 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18480 uint16x4_t __ret; \
18481 __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
18482 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18483 __ret; \
18484 })
18485 #define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
18486 uint32x4_t __s0 = __p0; \
18487 uint16x4_t __ret; \
18488 __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
18489 __ret; \
18490 })
18491 #endif
18492
18493 #ifdef __LITTLE_ENDIAN__
18494 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
18495 uint64x2_t __s0 = __p0; \
18496 uint32x2_t __ret; \
18497 __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
18498 __ret; \
18499 })
18500 #else
18501 #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
18502 uint64x2_t __s0 = __p0; \
18503 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18504 uint32x2_t __ret; \
18505 __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
18506 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18507 __ret; \
18508 })
18509 #define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
18510 uint64x2_t __s0 = __p0; \
18511 uint32x2_t __ret; \
18512 __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
18513 __ret; \
18514 })
18515 #endif
18516
18517 #ifdef __LITTLE_ENDIAN__
18518 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
18519 uint16x8_t __s0 = __p0; \
18520 uint8x8_t __ret; \
18521 __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
18522 __ret; \
18523 })
18524 #else
18525 #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
18526 uint16x8_t __s0 = __p0; \
18527 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18528 uint8x8_t __ret; \
18529 __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
18530 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18531 __ret; \
18532 })
18533 #define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
18534 uint16x8_t __s0 = __p0; \
18535 uint8x8_t __ret; \
18536 __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
18537 __ret; \
18538 })
18539 #endif
18540
18541 #ifdef __LITTLE_ENDIAN__
18542 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
18543 int32x4_t __s0 = __p0; \
18544 int16x4_t __ret; \
18545 __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
18546 __ret; \
18547 })
18548 #else
18549 #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
18550 int32x4_t __s0 = __p0; \
18551 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18552 int16x4_t __ret; \
18553 __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
18554 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18555 __ret; \
18556 })
18557 #define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
18558 int32x4_t __s0 = __p0; \
18559 int16x4_t __ret; \
18560 __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
18561 __ret; \
18562 })
18563 #endif
18564
18565 #ifdef __LITTLE_ENDIAN__
18566 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
18567 int64x2_t __s0 = __p0; \
18568 int32x2_t __ret; \
18569 __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
18570 __ret; \
18571 })
18572 #else
18573 #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
18574 int64x2_t __s0 = __p0; \
18575 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18576 int32x2_t __ret; \
18577 __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
18578 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18579 __ret; \
18580 })
18581 #define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
18582 int64x2_t __s0 = __p0; \
18583 int32x2_t __ret; \
18584 __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
18585 __ret; \
18586 })
18587 #endif
18588
18589 #ifdef __LITTLE_ENDIAN__
18590 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
18591 int16x8_t __s0 = __p0; \
18592 int8x8_t __ret; \
18593 __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
18594 __ret; \
18595 })
18596 #else
18597 #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
18598 int16x8_t __s0 = __p0; \
18599 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18600 int8x8_t __ret; \
18601 __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
18602 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18603 __ret; \
18604 })
18605 #define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
18606 int16x8_t __s0 = __p0; \
18607 int8x8_t __ret; \
18608 __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
18609 __ret; \
18610 })
18611 #endif
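/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqrshrn_n_* forms shift each lane right by an immediate with rounding,
 * saturate to the half-width element type, and narrow. The Q16.16 -> Q8.8
 * conversion below is an assumed use case.
 *
 *   int16x4_t narrow_q16_to_q8(int32x4_t fixed_q16) {
 *     return vqrshrn_n_s32(fixed_q16, 8); // round-shift right by 8, saturate to int16
 *   }
 */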
18612
18613 #ifdef __LITTLE_ENDIAN__
18614 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
18615 int32x4_t __s0 = __p0; \
18616 uint16x4_t __ret; \
18617 __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
18618 __ret; \
18619 })
18620 #else
18621 #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
18622 int32x4_t __s0 = __p0; \
18623 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18624 uint16x4_t __ret; \
18625 __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
18626 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18627 __ret; \
18628 })
18629 #define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
18630 int32x4_t __s0 = __p0; \
18631 uint16x4_t __ret; \
18632 __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
18633 __ret; \
18634 })
18635 #endif
18636
18637 #ifdef __LITTLE_ENDIAN__
18638 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
18639 int64x2_t __s0 = __p0; \
18640 uint32x2_t __ret; \
18641 __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
18642 __ret; \
18643 })
18644 #else
18645 #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
18646 int64x2_t __s0 = __p0; \
18647 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18648 uint32x2_t __ret; \
18649 __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
18650 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
18651 __ret; \
18652 })
18653 #define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
18654 int64x2_t __s0 = __p0; \
18655 uint32x2_t __ret; \
18656 __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
18657 __ret; \
18658 })
18659 #endif
18660
18661 #ifdef __LITTLE_ENDIAN__
18662 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
18663 int16x8_t __s0 = __p0; \
18664 uint8x8_t __ret; \
18665 __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
18666 __ret; \
18667 })
18668 #else
18669 #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
18670 int16x8_t __s0 = __p0; \
18671 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
18672 uint8x8_t __ret; \
18673 __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
18674 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
18675 __ret; \
18676 })
18677 #define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
18678 int16x8_t __s0 = __p0; \
18679 uint8x8_t __ret; \
18680 __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
18681 __ret; \
18682 })
18683 #endif
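/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqrshrun_n_* forms are the signed-to-unsigned variant of the rounding
 * narrowing shift, clamping negative results to zero. The pixel-packing
 * helper below is hypothetical.
 *
 *   uint8x8_t pack_pixels(int16x8_t acc) {
 *     return vqrshrun_n_s16(acc, 6); // round-shift right by 6, clamp to 0..255
 *   }
 */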
18684
18685 #ifdef __LITTLE_ENDIAN__
18686 __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18687 uint8x16_t __ret;
18688 __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
18689 return __ret;
18690 }
18691 #else
18692 __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
18693 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18694 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18695 uint8x16_t __ret;
18696 __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
18697 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18698 return __ret;
18699 }
18700 #endif
18701
18702 #ifdef __LITTLE_ENDIAN__
18703 __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18704 uint32x4_t __ret;
18705 __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
18706 return __ret;
18707 }
18708 #else
18709 __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
18710 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18711 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18712 uint32x4_t __ret;
18713 __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
18714 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18715 return __ret;
18716 }
18717 #endif
18718
18719 #ifdef __LITTLE_ENDIAN__
18720 __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18721 uint64x2_t __ret;
18722 __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
18723 return __ret;
18724 }
18725 #else
18726 __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
18727 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18728 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18729 uint64x2_t __ret;
18730 __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
18731 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18732 return __ret;
18733 }
18734 #endif
18735
18736 #ifdef __LITTLE_ENDIAN__
18737 __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18738 uint16x8_t __ret;
18739 __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
18740 return __ret;
18741 }
18742 #else
18743 __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
18744 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18745 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18746 uint16x8_t __ret;
18747 __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
18748 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18749 return __ret;
18750 }
18751 #endif
18752
18753 #ifdef __LITTLE_ENDIAN__
18754 __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18755 int8x16_t __ret;
18756 __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
18757 return __ret;
18758 }
18759 #else
18760 __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
18761 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18762 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18763 int8x16_t __ret;
18764 __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
18765 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
18766 return __ret;
18767 }
18768 #endif
18769
18770 #ifdef __LITTLE_ENDIAN__
18771 __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18772 int32x4_t __ret;
18773 __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
18774 return __ret;
18775 }
18776 #else
18777 __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
18778 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18779 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18780 int32x4_t __ret;
18781 __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
18782 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18783 return __ret;
18784 }
18785 #endif
18786
18787 #ifdef __LITTLE_ENDIAN__
18788 __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18789 int64x2_t __ret;
18790 __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
18791 return __ret;
18792 }
18793 #else
18794 __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
18795 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18796 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18797 int64x2_t __ret;
18798 __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
18799 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18800 return __ret;
18801 }
18802 #endif
18803
18804 #ifdef __LITTLE_ENDIAN__
18805 __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18806 int16x8_t __ret;
18807 __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
18808 return __ret;
18809 }
18810 #else
18811 __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
18812 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18813 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18814 int16x8_t __ret;
18815 __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
18816 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18817 return __ret;
18818 }
18819 #endif
18820
18821 #ifdef __LITTLE_ENDIAN__
18822 __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18823 uint8x8_t __ret;
18824 __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
18825 return __ret;
18826 }
18827 #else
18828 __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
18829 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18830 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18831 uint8x8_t __ret;
18832 __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
18833 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18834 return __ret;
18835 }
18836 #endif
18837
18838 #ifdef __LITTLE_ENDIAN__
18839 __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18840 uint32x2_t __ret;
18841 __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
18842 return __ret;
18843 }
18844 #else
18845 __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
18846 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18847 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18848 uint32x2_t __ret;
18849 __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
18850 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18851 return __ret;
18852 }
18853 #endif
18854
18855 #ifdef __LITTLE_ENDIAN__
18856 __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18857 uint64x1_t __ret;
18858 __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18859 return __ret;
18860 }
18861 #else
18862 __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
18863 uint64x1_t __ret;
18864 __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
18865 return __ret;
18866 }
18867 #endif
18868
18869 #ifdef __LITTLE_ENDIAN__
18870 __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18871 uint16x4_t __ret;
18872 __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
18873 return __ret;
18874 }
18875 #else
18876 __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
18877 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18878 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18879 uint16x4_t __ret;
18880 __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
18881 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18882 return __ret;
18883 }
18884 #endif
18885
18886 #ifdef __LITTLE_ENDIAN__
18887 __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
18888 int8x8_t __ret;
18889 __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
18890 return __ret;
18891 }
18892 #else
18893 __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
18894 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
18895 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
18896 int8x8_t __ret;
18897 __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
18898 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
18899 return __ret;
18900 }
18901 #endif
18902
18903 #ifdef __LITTLE_ENDIAN__
18904 __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
18905 int32x2_t __ret;
18906 __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
18907 return __ret;
18908 }
18909 #else
18910 __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
18911 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
18912 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
18913 int32x2_t __ret;
18914 __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
18915 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
18916 return __ret;
18917 }
18918 #endif
18919
18920 #ifdef __LITTLE_ENDIAN__
18921 __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
18922 int64x1_t __ret;
18923 __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18924 return __ret;
18925 }
18926 #else
18927 __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
18928 int64x1_t __ret;
18929 __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
18930 return __ret;
18931 }
18932 #endif
18933
18934 #ifdef __LITTLE_ENDIAN__
18935 __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
18936 int16x4_t __ret;
18937 __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
18938 return __ret;
18939 }
18940 #else
18941 __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
18942 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
18943 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
18944 int16x4_t __ret;
18945 __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
18946 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
18947 return __ret;
18948 }
18949 #endif
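/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * register-form vqshl[q]_* is the non-rounding counterpart of vqrshl[q]_*:
 * per-lane signed counts, saturating on the left shift, with negative counts
 * shifting right without the rounding increment. The helper name is
 * hypothetical.
 *
 *   int32x4_t scale_lanes(int32x4_t v, int32x4_t counts) {
 *     return vqshlq_s32(v, counts); // saturating per-lane variable shift
 *   }
 */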
18950
18951 #ifdef __LITTLE_ENDIAN__
18952 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
18953 uint8x16_t __s0 = __p0; \
18954 uint8x16_t __ret; \
18955 __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
18956 __ret; \
18957 })
18958 #else
18959 #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
18960 uint8x16_t __s0 = __p0; \
18961 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
18962 uint8x16_t __ret; \
18963 __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
18964 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
18965 __ret; \
18966 })
18967 #endif
18968
18969 #ifdef __LITTLE_ENDIAN__
18970 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
18971 uint32x4_t __s0 = __p0; \
18972 uint32x4_t __ret; \
18973 __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
18974 __ret; \
18975 })
18976 #else
18977 #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
18978 uint32x4_t __s0 = __p0; \
18979 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
18980 uint32x4_t __ret; \
18981 __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
18982 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
18983 __ret; \
18984 })
18985 #endif
18986
18987 #ifdef __LITTLE_ENDIAN__
18988 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
18989 uint64x2_t __s0 = __p0; \
18990 uint64x2_t __ret; \
18991 __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
18992 __ret; \
18993 })
18994 #else
18995 #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
18996 uint64x2_t __s0 = __p0; \
18997 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
18998 uint64x2_t __ret; \
18999 __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
19000 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19001 __ret; \
19002 })
19003 #endif
19004
19005 #ifdef __LITTLE_ENDIAN__
19006 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
19007 uint16x8_t __s0 = __p0; \
19008 uint16x8_t __ret; \
19009 __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
19010 __ret; \
19011 })
19012 #else
19013 #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
19014 uint16x8_t __s0 = __p0; \
19015 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19016 uint16x8_t __ret; \
19017 __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
19018 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19019 __ret; \
19020 })
19021 #endif
19022
19023 #ifdef __LITTLE_ENDIAN__
19024 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
19025 int8x16_t __s0 = __p0; \
19026 int8x16_t __ret; \
19027 __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
19028 __ret; \
19029 })
19030 #else
19031 #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
19032 int8x16_t __s0 = __p0; \
19033 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19034 int8x16_t __ret; \
19035 __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
19036 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19037 __ret; \
19038 })
19039 #endif
19040
19041 #ifdef __LITTLE_ENDIAN__
19042 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
19043 int32x4_t __s0 = __p0; \
19044 int32x4_t __ret; \
19045 __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
19046 __ret; \
19047 })
19048 #else
19049 #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
19050 int32x4_t __s0 = __p0; \
19051 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19052 int32x4_t __ret; \
19053 __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
19054 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19055 __ret; \
19056 })
19057 #endif
19058
19059 #ifdef __LITTLE_ENDIAN__
19060 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
19061 int64x2_t __s0 = __p0; \
19062 int64x2_t __ret; \
19063 __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
19064 __ret; \
19065 })
19066 #else
19067 #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
19068 int64x2_t __s0 = __p0; \
19069 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19070 int64x2_t __ret; \
19071 __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
19072 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19073 __ret; \
19074 })
19075 #endif
19076
19077 #ifdef __LITTLE_ENDIAN__
19078 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
19079 int16x8_t __s0 = __p0; \
19080 int16x8_t __ret; \
19081 __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
19082 __ret; \
19083 })
19084 #else
19085 #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
19086 int16x8_t __s0 = __p0; \
19087 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19088 int16x8_t __ret; \
19089 __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
19090 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19091 __ret; \
19092 })
19093 #endif
19094
19095 #ifdef __LITTLE_ENDIAN__
19096 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
19097 uint8x8_t __s0 = __p0; \
19098 uint8x8_t __ret; \
19099 __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
19100 __ret; \
19101 })
19102 #else
19103 #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
19104 uint8x8_t __s0 = __p0; \
19105 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19106 uint8x8_t __ret; \
19107 __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
19108 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19109 __ret; \
19110 })
19111 #endif
19112
19113 #ifdef __LITTLE_ENDIAN__
19114 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
19115 uint32x2_t __s0 = __p0; \
19116 uint32x2_t __ret; \
19117 __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
19118 __ret; \
19119 })
19120 #else
19121 #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
19122 uint32x2_t __s0 = __p0; \
19123 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19124 uint32x2_t __ret; \
19125 __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
19126 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19127 __ret; \
19128 })
19129 #endif
19130
19131 #ifdef __LITTLE_ENDIAN__
19132 #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
19133 uint64x1_t __s0 = __p0; \
19134 uint64x1_t __ret; \
19135 __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
19136 __ret; \
19137 })
19138 #else
19139 #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
19140 uint64x1_t __s0 = __p0; \
19141 uint64x1_t __ret; \
19142 __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
19143 __ret; \
19144 })
19145 #endif
19146
19147 #ifdef __LITTLE_ENDIAN__
19148 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
19149 uint16x4_t __s0 = __p0; \
19150 uint16x4_t __ret; \
19151 __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
19152 __ret; \
19153 })
19154 #else
19155 #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
19156 uint16x4_t __s0 = __p0; \
19157 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19158 uint16x4_t __ret; \
19159 __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
19160 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19161 __ret; \
19162 })
19163 #endif
19164
19165 #ifdef __LITTLE_ENDIAN__
19166 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
19167 int8x8_t __s0 = __p0; \
19168 int8x8_t __ret; \
19169 __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
19170 __ret; \
19171 })
19172 #else
19173 #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
19174 int8x8_t __s0 = __p0; \
19175 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19176 int8x8_t __ret; \
19177 __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
19178 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19179 __ret; \
19180 })
19181 #endif
19182
19183 #ifdef __LITTLE_ENDIAN__
19184 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
19185 int32x2_t __s0 = __p0; \
19186 int32x2_t __ret; \
19187 __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
19188 __ret; \
19189 })
19190 #else
19191 #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
19192 int32x2_t __s0 = __p0; \
19193 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19194 int32x2_t __ret; \
19195 __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
19196 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19197 __ret; \
19198 })
19199 #endif
19200
19201 #ifdef __LITTLE_ENDIAN__
19202 #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
19203 int64x1_t __s0 = __p0; \
19204 int64x1_t __ret; \
19205 __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
19206 __ret; \
19207 })
19208 #else
19209 #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
19210 int64x1_t __s0 = __p0; \
19211 int64x1_t __ret; \
19212 __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
19213 __ret; \
19214 })
19215 #endif
19216
19217 #ifdef __LITTLE_ENDIAN__
19218 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
19219 int16x4_t __s0 = __p0; \
19220 int16x4_t __ret; \
19221 __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
19222 __ret; \
19223 })
19224 #else
19225 #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
19226 int16x4_t __s0 = __p0; \
19227 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19228 int16x4_t __ret; \
19229 __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
19230 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19231 __ret; \
19232 })
19233 #endif
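/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqshl[q]_n_* forms shift every lane left by a compile-time immediate and
 * saturate on overflow. The scale factor below is an arbitrary example.
 *
 *   uint16x8_t scale_by_8(uint16x8_t v) {
 *     return vqshlq_n_u16(v, 3); // v * 8 per lane, saturating at 0xFFFF
 *   }
 */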
19234
19235 #ifdef __LITTLE_ENDIAN__
19236 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
19237 int8x16_t __s0 = __p0; \
19238 uint8x16_t __ret; \
19239 __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
19240 __ret; \
19241 })
19242 #else
19243 #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
19244 int8x16_t __s0 = __p0; \
19245 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19246 uint8x16_t __ret; \
19247 __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
19248 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
19249 __ret; \
19250 })
19251 #endif
19252
19253 #ifdef __LITTLE_ENDIAN__
19254 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
19255 int32x4_t __s0 = __p0; \
19256 uint32x4_t __ret; \
19257 __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
19258 __ret; \
19259 })
19260 #else
19261 #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
19262 int32x4_t __s0 = __p0; \
19263 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19264 uint32x4_t __ret; \
19265 __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
19266 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19267 __ret; \
19268 })
19269 #endif
19270
19271 #ifdef __LITTLE_ENDIAN__
19272 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
19273 int64x2_t __s0 = __p0; \
19274 uint64x2_t __ret; \
19275 __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
19276 __ret; \
19277 })
19278 #else
19279 #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
19280 int64x2_t __s0 = __p0; \
19281 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19282 uint64x2_t __ret; \
19283 __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
19284 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19285 __ret; \
19286 })
19287 #endif
19288
19289 #ifdef __LITTLE_ENDIAN__
19290 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
19291 int16x8_t __s0 = __p0; \
19292 uint16x8_t __ret; \
19293 __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
19294 __ret; \
19295 })
19296 #else
19297 #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
19298 int16x8_t __s0 = __p0; \
19299 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19300 uint16x8_t __ret; \
19301 __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
19302 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19303 __ret; \
19304 })
19305 #endif
19306
19307 #ifdef __LITTLE_ENDIAN__
19308 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
19309 int8x8_t __s0 = __p0; \
19310 uint8x8_t __ret; \
19311 __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
19312 __ret; \
19313 })
19314 #else
19315 #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
19316 int8x8_t __s0 = __p0; \
19317 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19318 uint8x8_t __ret; \
19319 __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
19320 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19321 __ret; \
19322 })
19323 #endif
19324
19325 #ifdef __LITTLE_ENDIAN__
19326 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
19327 int32x2_t __s0 = __p0; \
19328 uint32x2_t __ret; \
19329 __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
19330 __ret; \
19331 })
19332 #else
19333 #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
19334 int32x2_t __s0 = __p0; \
19335 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19336 uint32x2_t __ret; \
19337 __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
19338 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19339 __ret; \
19340 })
19341 #endif
19342
19343 #ifdef __LITTLE_ENDIAN__
19344 #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
19345 int64x1_t __s0 = __p0; \
19346 uint64x1_t __ret; \
19347 __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
19348 __ret; \
19349 })
19350 #else
19351 #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
19352 int64x1_t __s0 = __p0; \
19353 uint64x1_t __ret; \
19354 __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
19355 __ret; \
19356 })
19357 #endif
19358
19359 #ifdef __LITTLE_ENDIAN__
19360 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
19361 int16x4_t __s0 = __p0; \
19362 uint16x4_t __ret; \
19363 __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
19364 __ret; \
19365 })
19366 #else
19367 #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
19368 int16x4_t __s0 = __p0; \
19369 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19370 uint16x4_t __ret; \
19371 __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
19372 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19373 __ret; \
19374 })
19375 #endif
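/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqshlu[q]_n_* forms take signed input, shift left by an immediate, and
 * saturate to the unsigned range, so negative lanes clamp to zero. The
 * helper name is hypothetical.
 *
 *   uint16x4_t to_unsigned_scaled(int16x4_t v) {
 *     return vqshlu_n_s16(v, 4); // v * 16 per lane, clamped to 0..0xFFFF
 *   }
 */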
19376
19377 #ifdef __LITTLE_ENDIAN__
19378 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
19379 uint32x4_t __s0 = __p0; \
19380 uint16x4_t __ret; \
19381 __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
19382 __ret; \
19383 })
19384 #else
19385 #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
19386 uint32x4_t __s0 = __p0; \
19387 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19388 uint16x4_t __ret; \
19389 __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
19390 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19391 __ret; \
19392 })
19393 #define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
19394 uint32x4_t __s0 = __p0; \
19395 uint16x4_t __ret; \
19396 __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
19397 __ret; \
19398 })
19399 #endif
19400
19401 #ifdef __LITTLE_ENDIAN__
19402 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
19403 uint64x2_t __s0 = __p0; \
19404 uint32x2_t __ret; \
19405 __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
19406 __ret; \
19407 })
19408 #else
19409 #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
19410 uint64x2_t __s0 = __p0; \
19411 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19412 uint32x2_t __ret; \
19413 __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
19414 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19415 __ret; \
19416 })
19417 #define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
19418 uint64x2_t __s0 = __p0; \
19419 uint32x2_t __ret; \
19420 __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
19421 __ret; \
19422 })
19423 #endif
19424
19425 #ifdef __LITTLE_ENDIAN__
19426 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
19427 uint16x8_t __s0 = __p0; \
19428 uint8x8_t __ret; \
19429 __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
19430 __ret; \
19431 })
19432 #else
19433 #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
19434 uint16x8_t __s0 = __p0; \
19435 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19436 uint8x8_t __ret; \
19437 __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
19438 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19439 __ret; \
19440 })
19441 #define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
19442 uint16x8_t __s0 = __p0; \
19443 uint8x8_t __ret; \
19444 __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
19445 __ret; \
19446 })
19447 #endif
19448
19449 #ifdef __LITTLE_ENDIAN__
19450 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
19451 int32x4_t __s0 = __p0; \
19452 int16x4_t __ret; \
19453 __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
19454 __ret; \
19455 })
19456 #else
19457 #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
19458 int32x4_t __s0 = __p0; \
19459 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19460 int16x4_t __ret; \
19461 __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
19462 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19463 __ret; \
19464 })
19465 #define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
19466 int32x4_t __s0 = __p0; \
19467 int16x4_t __ret; \
19468 __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
19469 __ret; \
19470 })
19471 #endif
19472
19473 #ifdef __LITTLE_ENDIAN__
19474 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
19475 int64x2_t __s0 = __p0; \
19476 int32x2_t __ret; \
19477 __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
19478 __ret; \
19479 })
19480 #else
19481 #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
19482 int64x2_t __s0 = __p0; \
19483 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19484 int32x2_t __ret; \
19485 __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
19486 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19487 __ret; \
19488 })
19489 #define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
19490 int64x2_t __s0 = __p0; \
19491 int32x2_t __ret; \
19492 __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
19493 __ret; \
19494 })
19495 #endif
19496
19497 #ifdef __LITTLE_ENDIAN__
19498 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
19499 int16x8_t __s0 = __p0; \
19500 int8x8_t __ret; \
19501 __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
19502 __ret; \
19503 })
19504 #else
19505 #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
19506 int16x8_t __s0 = __p0; \
19507 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19508 int8x8_t __ret; \
19509 __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
19510 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19511 __ret; \
19512 })
19513 #define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
19514 int16x8_t __s0 = __p0; \
19515 int8x8_t __ret; \
19516 __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
19517 __ret; \
19518 })
19519 #endif
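/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqshrn_n_* forms are the truncating (non-rounding) narrowing shift: shift
 * right by an immediate, then saturate to the half-width type. The helper
 * name is hypothetical.
 *
 *   uint16x4_t narrow_u32(uint32x4_t v) {
 *     return vqshrn_n_u32(v, 8); // v >> 8 per lane, saturated to 16 bits
 *   }
 */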
19520
19521 #ifdef __LITTLE_ENDIAN__
19522 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
19523 int32x4_t __s0 = __p0; \
19524 uint16x4_t __ret; \
19525 __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
19526 __ret; \
19527 })
19528 #else
19529 #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
19530 int32x4_t __s0 = __p0; \
19531 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
19532 uint16x4_t __ret; \
19533 __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
19534 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
19535 __ret; \
19536 })
19537 #define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
19538 int32x4_t __s0 = __p0; \
19539 uint16x4_t __ret; \
19540 __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
19541 __ret; \
19542 })
19543 #endif
19544
19545 #ifdef __LITTLE_ENDIAN__
19546 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
19547 int64x2_t __s0 = __p0; \
19548 uint32x2_t __ret; \
19549 __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
19550 __ret; \
19551 })
19552 #else
19553 #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
19554 int64x2_t __s0 = __p0; \
19555 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
19556 uint32x2_t __ret; \
19557 __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
19558 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
19559 __ret; \
19560 })
19561 #define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
19562 int64x2_t __s0 = __p0; \
19563 uint32x2_t __ret; \
19564 __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
19565 __ret; \
19566 })
19567 #endif
19568
19569 #ifdef __LITTLE_ENDIAN__
19570 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
19571 int16x8_t __s0 = __p0; \
19572 uint8x8_t __ret; \
19573 __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
19574 __ret; \
19575 })
19576 #else
19577 #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
19578 int16x8_t __s0 = __p0; \
19579 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
19580 uint8x8_t __ret; \
19581 __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
19582 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
19583 __ret; \
19584 })
19585 #define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
19586 int16x8_t __s0 = __p0; \
19587 uint8x8_t __ret; \
19588 __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
19589 __ret; \
19590 })
19591 #endif
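/*
 * Illustrative sketch, not part of the generated intrinsics above: the
 * vqshrun_n_* forms narrow signed input to an unsigned result after a
 * truncating right shift, clamping negative values to zero. The helper name
 * is hypothetical.
 *
 *   uint8x8_t to_bytes(int16x8_t v) {
 *     return vqshrun_n_s16(v, 4); // v >> 4 per lane, clamped to 0..255
 *   }
 */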
19592
19593 #ifdef __LITTLE_ENDIAN__
19594 __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
19595 uint8x16_t __ret;
19596 __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
19597 return __ret;
19598 }
19599 #else
19600 __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
19601 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19602 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19603 uint8x16_t __ret;
19604 __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
19605 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19606 return __ret;
19607 }
19608 #endif
19609
19610 #ifdef __LITTLE_ENDIAN__
19611 __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
19612 uint32x4_t __ret;
19613 __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
19614 return __ret;
19615 }
19616 #else
19617 __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
19618 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19619 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19620 uint32x4_t __ret;
19621 __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
19622 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19623 return __ret;
19624 }
19625 #endif
19626
19627 #ifdef __LITTLE_ENDIAN__
19628 __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
19629 uint64x2_t __ret;
19630 __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
19631 return __ret;
19632 }
19633 #else
19634 __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
19635 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19636 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19637 uint64x2_t __ret;
19638 __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
19639 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19640 return __ret;
19641 }
19642 #endif
19643
19644 #ifdef __LITTLE_ENDIAN__
19645 __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
19646 uint16x8_t __ret;
19647 __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
19648 return __ret;
19649 }
19650 #else
19651 __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
19652 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19653 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19654 uint16x8_t __ret;
19655 __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
19656 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19657 return __ret;
19658 }
19659 #endif
19660
19661 #ifdef __LITTLE_ENDIAN__
19662 __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
19663 int8x16_t __ret;
19664 __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
19665 return __ret;
19666 }
19667 #else
19668 __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
19669 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19670 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19671 int8x16_t __ret;
19672 __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
19673 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
19674 return __ret;
19675 }
19676 #endif
19677
19678 #ifdef __LITTLE_ENDIAN__
19679 __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
19680 int32x4_t __ret;
19681 __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
19682 return __ret;
19683 }
19684 #else
19685 __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
19686 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19687 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19688 int32x4_t __ret;
19689 __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
19690 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19691 return __ret;
19692 }
19693 __ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
19694 int32x4_t __ret;
19695 __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
19696 return __ret;
19697 }
19698 #endif
19699
19700 #ifdef __LITTLE_ENDIAN__
19701 __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
19702 int64x2_t __ret;
19703 __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
19704 return __ret;
19705 }
19706 #else
19707 __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
19708 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19709 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19710 int64x2_t __ret;
19711 __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
19712 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19713 return __ret;
19714 }
19715 #endif
19716
19717 #ifdef __LITTLE_ENDIAN__
19718 __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
19719 int16x8_t __ret;
19720 __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
19721 return __ret;
19722 }
19723 #else
19724 __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
19725 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19726 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19727 int16x8_t __ret;
19728 __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
19729 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19730 return __ret;
19731 }
19732 __ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
19733 int16x8_t __ret;
19734 __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
19735 return __ret;
19736 }
19737 #endif
19738
19739 #ifdef __LITTLE_ENDIAN__
19740 __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
19741 uint8x8_t __ret;
19742 __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
19743 return __ret;
19744 }
19745 #else
19746 __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
19747 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19748 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19749 uint8x8_t __ret;
19750 __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
19751 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19752 return __ret;
19753 }
19754 #endif
19755
19756 #ifdef __LITTLE_ENDIAN__
19757 __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
19758 uint32x2_t __ret;
19759 __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
19760 return __ret;
19761 }
19762 #else
19763 __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
19764 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19765 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19766 uint32x2_t __ret;
19767 __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
19768 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19769 return __ret;
19770 }
19771 #endif
19772
19773 #ifdef __LITTLE_ENDIAN__
19774 __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
19775 uint64x1_t __ret;
19776 __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
19777 return __ret;
19778 }
19779 #else
19780 __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
19781 uint64x1_t __ret;
19782 __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
19783 return __ret;
19784 }
19785 #endif
19786
19787 #ifdef __LITTLE_ENDIAN__
19788 __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
19789 uint16x4_t __ret;
19790 __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
19791 return __ret;
19792 }
19793 #else
19794 __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
19795 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19796 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19797 uint16x4_t __ret;
19798 __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
19799 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19800 return __ret;
19801 }
19802 #endif
19803
19804 #ifdef __LITTLE_ENDIAN__
19805 __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
19806 int8x8_t __ret;
19807 __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
19808 return __ret;
19809 }
19810 #else
19811 __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
19812 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19813 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19814 int8x8_t __ret;
19815 __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
19816 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19817 return __ret;
19818 }
19819 #endif
19820
19821 #ifdef __LITTLE_ENDIAN__
19822 __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
19823 int32x2_t __ret;
19824 __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
19825 return __ret;
19826 }
19827 #else
19828 __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
19829 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19830 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19831 int32x2_t __ret;
19832 __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
19833 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19834 return __ret;
19835 }
19836 __ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
19837 int32x2_t __ret;
19838 __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
19839 return __ret;
19840 }
19841 #endif
19842
19843 #ifdef __LITTLE_ENDIAN__
19844 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
19845 int64x1_t __ret;
19846 __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
19847 return __ret;
19848 }
19849 #else
19850 __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
19851 int64x1_t __ret;
19852 __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
19853 return __ret;
19854 }
19855 #endif
19856
19857 #ifdef __LITTLE_ENDIAN__
19858 __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
19859 int16x4_t __ret;
19860 __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
19861 return __ret;
19862 }
19863 #else
19864 __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
19865 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19866 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19867 int16x4_t __ret;
19868 __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
19869 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19870 return __ret;
19871 }
19872 __ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
19873 int16x4_t __ret;
19874 __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
19875 return __ret;
19876 }
19877 #endif
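/*
 * vqsubq_* and vqsub_* (saturating subtract): per-lane a - b, with each result
 * clamped to the range of the element type instead of wrapping.  A minimal
 * usage sketch with illustrative values (vdupq_n_u8 is defined elsewhere in
 * this header):
 *
 *   uint8x16_t a = vdupq_n_u8(10);
 *   uint8x16_t b = vdupq_n_u8(20);
 *   uint8x16_t d = vqsubq_u8(a, b);   // each lane saturates to 0, not 246
 */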
19878
19879 #ifdef __LITTLE_ENDIAN__
19880 __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
19881 uint16x4_t __ret;
19882 __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
19883 return __ret;
19884 }
19885 #else
19886 __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
19887 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19888 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19889 uint16x4_t __ret;
19890 __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
19891 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19892 return __ret;
19893 }
19894 __ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
19895 uint16x4_t __ret;
19896 __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
19897 return __ret;
19898 }
19899 #endif
19900
19901 #ifdef __LITTLE_ENDIAN__
19902 __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
19903 uint32x2_t __ret;
19904 __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
19905 return __ret;
19906 }
19907 #else
19908 __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
19909 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19910 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19911 uint32x2_t __ret;
19912 __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
19913 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19914 return __ret;
19915 }
19916 __ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
19917 uint32x2_t __ret;
19918 __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
19919 return __ret;
19920 }
19921 #endif
19922
19923 #ifdef __LITTLE_ENDIAN__
19924 __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
19925 uint8x8_t __ret;
19926 __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
19927 return __ret;
19928 }
19929 #else
19930 __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
19931 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19932 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19933 uint8x8_t __ret;
19934 __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
19935 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
19936 return __ret;
19937 }
19938 __ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
19939 uint8x8_t __ret;
19940 __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
19941 return __ret;
19942 }
19943 #endif
19944
19945 #ifdef __LITTLE_ENDIAN__
19946 __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
19947 int16x4_t __ret;
19948 __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
19949 return __ret;
19950 }
19951 #else
19952 __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
19953 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
19954 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
19955 int16x4_t __ret;
19956 __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
19957 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
19958 return __ret;
19959 }
19960 __ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
19961 int16x4_t __ret;
19962 __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
19963 return __ret;
19964 }
19965 #endif
19966
19967 #ifdef __LITTLE_ENDIAN__
19968 __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
19969 int32x2_t __ret;
19970 __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
19971 return __ret;
19972 }
19973 #else
19974 __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
19975 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
19976 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
19977 int32x2_t __ret;
19978 __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
19979 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
19980 return __ret;
19981 }
19982 __ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
19983 int32x2_t __ret;
19984 __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
19985 return __ret;
19986 }
19987 #endif
19988
19989 #ifdef __LITTLE_ENDIAN__
19990 __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
19991 int8x8_t __ret;
19992 __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
19993 return __ret;
19994 }
19995 #else
19996 __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
19997 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
19998 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
19999 int8x8_t __ret;
20000 __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
20001 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20002 return __ret;
20003 }
20004 __ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
20005 int8x8_t __ret;
20006 __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
20007 return __ret;
20008 }
20009 #endif
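/*
 * vraddhn_* (rounding add and narrow, high half): adds the two wide vectors
 * with rounding and returns the most significant half of each sum; for 32-bit
 * inputs each result lane is (a + b + 0x8000) >> 16.  A minimal usage sketch
 * with illustrative values (vdupq_n_u32 is defined elsewhere in this header):
 *
 *   uint32x4_t a = vdupq_n_u32(0x00018000);
 *   uint32x4_t b = vdupq_n_u32(0x00008000);
 *   uint16x4_t hi = vraddhn_u32(a, b);   // (0x20000 + 0x8000) >> 16 = 2 per lane
 */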
20010
20011 #ifdef __LITTLE_ENDIAN__
20012 __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
20013 uint32x4_t __ret;
20014 __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
20015 return __ret;
20016 }
20017 #else
20018 __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
20019 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20020 uint32x4_t __ret;
20021 __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
20022 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20023 return __ret;
20024 }
20025 #endif
20026
20027 #ifdef __LITTLE_ENDIAN__
20028 __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
20029 float32x4_t __ret;
20030 __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
20031 return __ret;
20032 }
20033 #else
20034 __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
20035 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20036 float32x4_t __ret;
20037 __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
20038 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20039 return __ret;
20040 }
20041 #endif
20042
20043 #ifdef __LITTLE_ENDIAN__
20044 __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
20045 uint32x2_t __ret;
20046 __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
20047 return __ret;
20048 }
20049 #else
20050 __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
20051 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20052 uint32x2_t __ret;
20053 __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
20054 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20055 return __ret;
20056 }
20057 #endif
20058
20059 #ifdef __LITTLE_ENDIAN__
20060 __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
20061 float32x2_t __ret;
20062 __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
20063 return __ret;
20064 }
20065 #else
20066 __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
20067 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20068 float32x2_t __ret;
20069 __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
20070 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20071 return __ret;
20072 }
20073 #endif
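/*
 * vrecpeq_f32 and vrecpe_f32 (reciprocal estimate): return a low-precision
 * per-lane approximation of 1/x (roughly 8 bits of precision); the u32 forms
 * produce a fixed-point reciprocal estimate.  The estimate is normally
 * refined with vrecps below.  A minimal usage sketch with an illustrative
 * value (vdup_n_f32 is defined elsewhere in this header):
 *
 *   float32x2_t d = vdup_n_f32(4.0f);
 *   float32x2_t est = vrecpe_f32(d);   // each lane is approximately 0.25f
 */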
20074
20075 #ifdef __LITTLE_ENDIAN__
20076 __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
20077 float32x4_t __ret;
20078 __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
20079 return __ret;
20080 }
20081 #else
20082 __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
20083 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20084 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20085 float32x4_t __ret;
20086 __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
20087 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20088 return __ret;
20089 }
20090 #endif
20091
20092 #ifdef __LITTLE_ENDIAN__
20093 __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
20094 float32x2_t __ret;
20095 __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
20096 return __ret;
20097 }
20098 #else
20099 __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
20100 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20101 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20102 float32x2_t __ret;
20103 __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
20104 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20105 return __ret;
20106 }
20107 #endif
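/*
 * vrecpsq_f32 and vrecps_f32 (reciprocal step): compute 2.0f - a * b per lane,
 * the correction factor for one Newton-Raphson refinement of a vrecpe
 * estimate.  A minimal usage sketch with an illustrative value (vdup_n_f32
 * and vmul_f32 are defined elsewhere in this header):
 *
 *   float32x2_t d = vdup_n_f32(3.0f);
 *   float32x2_t x = vrecpe_f32(d);        // coarse estimate of 1/3
 *   x = vmul_f32(x, vrecps_f32(d, x));    // x = x * (2 - d*x), first refinement
 *   x = vmul_f32(x, vrecps_f32(d, x));    // second refinement, near full precision
 */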
20108
20109 #ifdef __LITTLE_ENDIAN__
20110 __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
20111 poly8x8_t __ret;
20112 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20113 return __ret;
20114 }
20115 #else
20116 __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
20117 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20118 poly8x8_t __ret;
20119 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20120 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20121 return __ret;
20122 }
20123 #endif
20124
20125 #ifdef __LITTLE_ENDIAN__
20126 __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
20127 poly8x16_t __ret;
20128 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20129 return __ret;
20130 }
20131 #else
20132 __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
20133 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20134 poly8x16_t __ret;
20135 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20136 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20137 return __ret;
20138 }
20139 #endif
20140
20141 #ifdef __LITTLE_ENDIAN__
20142 __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
20143 uint8x16_t __ret;
20144 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20145 return __ret;
20146 }
20147 #else
20148 __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
20149 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20150 uint8x16_t __ret;
20151 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20152 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20153 return __ret;
20154 }
20155 #endif
20156
20157 #ifdef __LITTLE_ENDIAN__
20158 __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
20159 int8x16_t __ret;
20160 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20161 return __ret;
20162 }
20163 #else
20164 __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
20165 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20166 int8x16_t __ret;
20167 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
20168 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20169 return __ret;
20170 }
20171 #endif
20172
20173 #ifdef __LITTLE_ENDIAN__
20174 __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
20175 uint8x8_t __ret;
20176 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20177 return __ret;
20178 }
20179 #else
20180 __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
20181 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20182 uint8x8_t __ret;
20183 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20184 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20185 return __ret;
20186 }
20187 #endif
20188
20189 #ifdef __LITTLE_ENDIAN__
20190 __ai int8x8_t vrev16_s8(int8x8_t __p0) {
20191 int8x8_t __ret;
20192 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20193 return __ret;
20194 }
20195 #else
20196 __ai int8x8_t vrev16_s8(int8x8_t __p0) {
20197 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20198 int8x8_t __ret;
20199 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20200 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20201 return __ret;
20202 }
20203 #endif
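/*
 * vrev16q_* and vrev16_* (reverse within halfwords): reverse the order of the
 * 8-bit elements inside each 16-bit halfword, i.e. a per-lane 16-bit byte
 * swap.  A minimal usage sketch (vreinterpret_u8_u16 and vreinterpret_u16_u8
 * are defined elsewhere in this header; be16 stands for four big-endian
 * 16-bit values already loaded into a vector):
 *
 *   uint16x4_t le16 = vreinterpret_u16_u8(vrev16_u8(vreinterpret_u8_u16(be16)));
 */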
20204
20205 #ifdef __LITTLE_ENDIAN__
20206 __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
20207 poly8x8_t __ret;
20208 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20209 return __ret;
20210 }
20211 #else
20212 __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
20213 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20214 poly8x8_t __ret;
20215 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20216 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20217 return __ret;
20218 }
20219 #endif
20220
20221 #ifdef __LITTLE_ENDIAN__
20222 __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
20223 poly16x4_t __ret;
20224 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20225 return __ret;
20226 }
20227 #else
20228 __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
20229 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20230 poly16x4_t __ret;
20231 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20232 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20233 return __ret;
20234 }
20235 #endif
20236
20237 #ifdef __LITTLE_ENDIAN__
20238 __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
20239 poly8x16_t __ret;
20240 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20241 return __ret;
20242 }
20243 #else
20244 __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
20245 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20246 poly8x16_t __ret;
20247 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20248 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20249 return __ret;
20250 }
20251 #endif
20252
20253 #ifdef __LITTLE_ENDIAN__
20254 __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
20255 poly16x8_t __ret;
20256 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20257 return __ret;
20258 }
20259 #else
20260 __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
20261 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20262 poly16x8_t __ret;
20263 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20264 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20265 return __ret;
20266 }
20267 #endif
20268
20269 #ifdef __LITTLE_ENDIAN__
20270 __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
20271 uint8x16_t __ret;
20272 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20273 return __ret;
20274 }
20275 #else
20276 __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
20277 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20278 uint8x16_t __ret;
20279 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20280 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20281 return __ret;
20282 }
20283 #endif
20284
20285 #ifdef __LITTLE_ENDIAN__
20286 __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
20287 uint16x8_t __ret;
20288 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20289 return __ret;
20290 }
20291 #else
20292 __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
20293 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20294 uint16x8_t __ret;
20295 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20296 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20297 return __ret;
20298 }
20299 #endif
20300
20301 #ifdef __LITTLE_ENDIAN__
20302 __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
20303 int8x16_t __ret;
20304 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20305 return __ret;
20306 }
20307 #else
20308 __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
20309 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20310 int8x16_t __ret;
20311 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
20312 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20313 return __ret;
20314 }
20315 #endif
20316
20317 #ifdef __LITTLE_ENDIAN__
20318 __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
20319 int16x8_t __ret;
20320 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
20321 return __ret;
20322 }
20323 #else
20324 __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
20325 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20326 int16x8_t __ret;
20327 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
20328 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20329 return __ret;
20330 }
20331 #endif
20332
20333 #ifdef __LITTLE_ENDIAN__
20334 __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
20335 uint8x8_t __ret;
20336 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20337 return __ret;
20338 }
20339 #else
20340 __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
20341 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20342 uint8x8_t __ret;
20343 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20344 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20345 return __ret;
20346 }
20347 #endif
20348
20349 #ifdef __LITTLE_ENDIAN__
20350 __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
20351 uint16x4_t __ret;
20352 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20353 return __ret;
20354 }
20355 #else
20356 __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
20357 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20358 uint16x4_t __ret;
20359 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20360 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20361 return __ret;
20362 }
20363 #endif
20364
20365 #ifdef __LITTLE_ENDIAN__
20366 __ai int8x8_t vrev32_s8(int8x8_t __p0) {
20367 int8x8_t __ret;
20368 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20369 return __ret;
20370 }
20371 #else
20372 __ai int8x8_t vrev32_s8(int8x8_t __p0) {
20373 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20374 int8x8_t __ret;
20375 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20376 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20377 return __ret;
20378 }
20379 #endif
20380
20381 #ifdef __LITTLE_ENDIAN__
20382 __ai int16x4_t vrev32_s16(int16x4_t __p0) {
20383 int16x4_t __ret;
20384 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20385 return __ret;
20386 }
20387 #else
20388 __ai int16x4_t vrev32_s16(int16x4_t __p0) {
20389 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20390 int16x4_t __ret;
20391 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20392 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20393 return __ret;
20394 }
20395 #endif
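/*
 * vrev32q_* and vrev32_* (reverse within words): reverse the 8- or 16-bit
 * elements inside each 32-bit word; on byte vectors this is a per-lane 32-bit
 * byte swap.  A minimal usage sketch (vreinterpretq_u8_u32 and
 * vreinterpretq_u32_u8 are defined elsewhere in this header; be32 stands for
 * four big-endian 32-bit values already loaded into a vector):
 *
 *   uint32x4_t le32 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(be32)));
 */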
20396
20397 #ifdef __LITTLE_ENDIAN__
20398 __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
20399 poly8x8_t __ret;
20400 __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20401 return __ret;
20402 }
20403 #else
20404 __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
20405 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20406 poly8x8_t __ret;
20407 __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
20408 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20409 return __ret;
20410 }
20411 #endif
20412
20413 #ifdef __LITTLE_ENDIAN__
20414 __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
20415 poly16x4_t __ret;
20416 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20417 return __ret;
20418 }
20419 #else
20420 __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
20421 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20422 poly16x4_t __ret;
20423 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
20424 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20425 return __ret;
20426 }
20427 #endif
20428
20429 #ifdef __LITTLE_ENDIAN__
20430 __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
20431 poly8x16_t __ret;
20432 __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20433 return __ret;
20434 }
20435 #else
20436 __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
20437 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20438 poly8x16_t __ret;
20439 __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20440 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20441 return __ret;
20442 }
20443 #endif
20444
20445 #ifdef __LITTLE_ENDIAN__
20446 __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
20447 poly16x8_t __ret;
20448 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20449 return __ret;
20450 }
20451 #else
20452 __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
20453 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20454 poly16x8_t __ret;
20455 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20456 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20457 return __ret;
20458 }
20459 #endif
20460
20461 #ifdef __LITTLE_ENDIAN__
20462 __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
20463 uint8x16_t __ret;
20464 __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20465 return __ret;
20466 }
20467 #else
20468 __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
20469 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20470 uint8x16_t __ret;
20471 __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20472 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20473 return __ret;
20474 }
20475 #endif
20476
20477 #ifdef __LITTLE_ENDIAN__
20478 __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
20479 uint32x4_t __ret;
20480 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20481 return __ret;
20482 }
20483 #else
20484 __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
20485 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20486 uint32x4_t __ret;
20487 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20488 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20489 return __ret;
20490 }
20491 #endif
20492
20493 #ifdef __LITTLE_ENDIAN__
20494 __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
20495 uint16x8_t __ret;
20496 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20497 return __ret;
20498 }
20499 #else
20500 __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
20501 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20502 uint16x8_t __ret;
20503 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20504 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20505 return __ret;
20506 }
20507 #endif
20508
20509 #ifdef __LITTLE_ENDIAN__
20510 __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
20511 int8x16_t __ret;
20512 __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20513 return __ret;
20514 }
20515 #else
20516 __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
20517 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20518 int8x16_t __ret;
20519 __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
20520 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20521 return __ret;
20522 }
20523 #endif
20524
20525 #ifdef __LITTLE_ENDIAN__
20526 __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
20527 float32x4_t __ret;
20528 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20529 return __ret;
20530 }
20531 #else
20532 __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
20533 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20534 float32x4_t __ret;
20535 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20536 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20537 return __ret;
20538 }
20539 #endif
20540
20541 #ifdef __LITTLE_ENDIAN__
20542 __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
20543 int32x4_t __ret;
20544 __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
20545 return __ret;
20546 }
20547 #else
20548 __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
20549 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20550 int32x4_t __ret;
20551 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
20552 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20553 return __ret;
20554 }
20555 #endif
20556
20557 #ifdef __LITTLE_ENDIAN__
20558 __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
20559 int16x8_t __ret;
20560 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
20561 return __ret;
20562 }
20563 #else
20564 __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
20565 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20566 int16x8_t __ret;
20567 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
20568 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20569 return __ret;
20570 }
20571 #endif
20572
20573 #ifdef __LITTLE_ENDIAN__
20574 __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
20575 uint8x8_t __ret;
20576 __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20577 return __ret;
20578 }
20579 #else
20580 __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
20581 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20582 uint8x8_t __ret;
20583 __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
20584 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20585 return __ret;
20586 }
20587 #endif
20588
20589 #ifdef __LITTLE_ENDIAN__
20590 __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
20591 uint32x2_t __ret;
20592 __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
20593 return __ret;
20594 }
20595 #else
20596 __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
20597 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20598 uint32x2_t __ret;
20599 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
20600 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20601 return __ret;
20602 }
20603 #endif
20604
20605 #ifdef __LITTLE_ENDIAN__
20606 __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
20607 uint16x4_t __ret;
20608 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20609 return __ret;
20610 }
20611 #else
20612 __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
20613 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20614 uint16x4_t __ret;
20615 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
20616 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20617 return __ret;
20618 }
20619 #endif
20620
20621 #ifdef __LITTLE_ENDIAN__
20622 __ai int8x8_t vrev64_s8(int8x8_t __p0) {
20623 int8x8_t __ret;
20624 __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20625 return __ret;
20626 }
20627 #else
20628 __ai int8x8_t vrev64_s8(int8x8_t __p0) {
20629 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20630 int8x8_t __ret;
20631 __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
20632 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20633 return __ret;
20634 }
20635 #endif
20636
20637 #ifdef __LITTLE_ENDIAN__
20638 __ai float32x2_t vrev64_f32(float32x2_t __p0) {
20639 float32x2_t __ret;
20640 __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
20641 return __ret;
20642 }
20643 #else
20644 __ai float32x2_t vrev64_f32(float32x2_t __p0) {
20645 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20646 float32x2_t __ret;
20647 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
20648 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20649 return __ret;
20650 }
20651 #endif
20652
20653 #ifdef __LITTLE_ENDIAN__
20654 __ai int32x2_t vrev64_s32(int32x2_t __p0) {
20655 int32x2_t __ret;
20656 __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
20657 return __ret;
20658 }
20659 #else
20660 __ai int32x2_t vrev64_s32(int32x2_t __p0) {
20661 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20662 int32x2_t __ret;
20663 __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
20664 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20665 return __ret;
20666 }
20667 #endif
20668
20669 #ifdef __LITTLE_ENDIAN__
20670 __ai int16x4_t vrev64_s16(int16x4_t __p0) {
20671 int16x4_t __ret;
20672 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20673 return __ret;
20674 }
20675 #else
20676 __ai int16x4_t vrev64_s16(int16x4_t __p0) {
20677 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20678 int16x4_t __ret;
20679 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
20680 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20681 return __ret;
20682 }
20683 #endif
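/*
 * vrev64q_* and vrev64_* (reverse within doublewords): reverse the element
 * order inside each 64-bit doubleword, so on a 64-bit vector the whole vector
 * is reversed.  A minimal usage sketch with illustrative values (vdup_n_u32
 * and vset_lane_u32 are defined elsewhere in this header):
 *
 *   uint32x2_t v = vset_lane_u32(1, vdup_n_u32(0), 0);   // v = {1, 0}
 *   uint32x2_t r = vrev64_u32(v);                        // r = {0, 1}
 */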
20684
20685 #ifdef __LITTLE_ENDIAN__
20686 __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
20687 uint8x16_t __ret;
20688 __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
20689 return __ret;
20690 }
20691 #else
20692 __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
20693 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20694 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20695 uint8x16_t __ret;
20696 __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
20697 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20698 return __ret;
20699 }
20700 #endif
20701
20702 #ifdef __LITTLE_ENDIAN__
20703 __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
20704 uint32x4_t __ret;
20705 __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
20706 return __ret;
20707 }
20708 #else
20709 __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
20710 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20711 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20712 uint32x4_t __ret;
20713 __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
20714 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20715 return __ret;
20716 }
20717 #endif
20718
20719 #ifdef __LITTLE_ENDIAN__
20720 __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
20721 uint16x8_t __ret;
20722 __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
20723 return __ret;
20724 }
20725 #else
20726 __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
20727 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20728 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20729 uint16x8_t __ret;
20730 __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
20731 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20732 return __ret;
20733 }
20734 #endif
20735
20736 #ifdef __LITTLE_ENDIAN__
20737 __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
20738 int8x16_t __ret;
20739 __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
20740 return __ret;
20741 }
20742 #else
20743 __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
20744 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20745 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20746 int8x16_t __ret;
20747 __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
20748 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20749 return __ret;
20750 }
20751 #endif
20752
20753 #ifdef __LITTLE_ENDIAN__
20754 __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
20755 int32x4_t __ret;
20756 __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
20757 return __ret;
20758 }
20759 #else
20760 __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
20761 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20762 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20763 int32x4_t __ret;
20764 __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
20765 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20766 return __ret;
20767 }
20768 #endif
20769
20770 #ifdef __LITTLE_ENDIAN__
20771 __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
20772 int16x8_t __ret;
20773 __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
20774 return __ret;
20775 }
20776 #else
20777 __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
20778 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20779 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20780 int16x8_t __ret;
20781 __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
20782 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20783 return __ret;
20784 }
20785 #endif
20786
20787 #ifdef __LITTLE_ENDIAN__
20788 __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
20789 uint8x8_t __ret;
20790 __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
20791 return __ret;
20792 }
20793 #else
20794 __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
20795 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20796 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20797 uint8x8_t __ret;
20798 __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
20799 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20800 return __ret;
20801 }
20802 #endif
20803
20804 #ifdef __LITTLE_ENDIAN__
20805 __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
20806 uint32x2_t __ret;
20807 __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
20808 return __ret;
20809 }
20810 #else
20811 __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
20812 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20813 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20814 uint32x2_t __ret;
20815 __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
20816 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20817 return __ret;
20818 }
20819 #endif
20820
20821 #ifdef __LITTLE_ENDIAN__
20822 __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
20823 uint16x4_t __ret;
20824 __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
20825 return __ret;
20826 }
20827 #else
20828 __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
20829 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20830 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20831 uint16x4_t __ret;
20832 __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
20833 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20834 return __ret;
20835 }
20836 #endif
20837
20838 #ifdef __LITTLE_ENDIAN__
20839 __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
20840 int8x8_t __ret;
20841 __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
20842 return __ret;
20843 }
20844 #else
20845 __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
20846 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20847 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20848 int8x8_t __ret;
20849 __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
20850 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20851 return __ret;
20852 }
20853 #endif
20854
20855 #ifdef __LITTLE_ENDIAN__
20856 __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
20857 int32x2_t __ret;
20858 __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
20859 return __ret;
20860 }
20861 #else
20862 __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
20863 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20864 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20865 int32x2_t __ret;
20866 __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
20867 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20868 return __ret;
20869 }
20870 #endif
20871
20872 #ifdef __LITTLE_ENDIAN__
20873 __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
20874 int16x4_t __ret;
20875 __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
20876 return __ret;
20877 }
20878 #else
20879 __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
20880 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20881 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20882 int16x4_t __ret;
20883 __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
20884 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20885 return __ret;
20886 }
20887 #endif
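/*
 * vrhaddq_* and vrhadd_* (rounding halving add): per-lane (a + b + 1) >> 1,
 * computed in a wider intermediate so the sum cannot overflow; commonly used
 * for averaging.  A minimal usage sketch with illustrative values (vdupq_n_u8
 * is defined elsewhere in this header):
 *
 *   uint8x16_t a = vdupq_n_u8(3);
 *   uint8x16_t b = vdupq_n_u8(4);
 *   uint8x16_t avg = vrhaddq_u8(a, b);   // (3 + 4 + 1) >> 1 = 4 in each lane
 */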
20888
20889 #ifdef __LITTLE_ENDIAN__
20890 __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
20891 uint8x16_t __ret;
20892 __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
20893 return __ret;
20894 }
20895 #else
20896 __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
20897 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20898 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20899 uint8x16_t __ret;
20900 __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
20901 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20902 return __ret;
20903 }
20904 #endif
20905
20906 #ifdef __LITTLE_ENDIAN__
20907 __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
20908 uint32x4_t __ret;
20909 __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
20910 return __ret;
20911 }
20912 #else
20913 __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
20914 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20915 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20916 uint32x4_t __ret;
20917 __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
20918 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20919 return __ret;
20920 }
20921 #endif
20922
20923 #ifdef __LITTLE_ENDIAN__
20924 __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
20925 uint64x2_t __ret;
20926 __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
20927 return __ret;
20928 }
20929 #else
20930 __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
20931 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
20932 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
20933 uint64x2_t __ret;
20934 __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
20935 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
20936 return __ret;
20937 }
20938 #endif
20939
20940 #ifdef __LITTLE_ENDIAN__
20941 __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
20942 uint16x8_t __ret;
20943 __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
20944 return __ret;
20945 }
20946 #else
20947 __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
20948 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
20949 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
20950 uint16x8_t __ret;
20951 __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
20952 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
20953 return __ret;
20954 }
20955 #endif
20956
20957 #ifdef __LITTLE_ENDIAN__
20958 __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
20959 int8x16_t __ret;
20960 __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
20961 return __ret;
20962 }
20963 #else
20964 __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
20965 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20966 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20967 int8x16_t __ret;
20968 __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
20969 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
20970 return __ret;
20971 }
20972 #endif
20973
20974 #ifdef __LITTLE_ENDIAN__
20975 __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
20976 int32x4_t __ret;
20977 __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
20978 return __ret;
20979 }
20980 #else
20981 __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
20982 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
20983 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
20984 int32x4_t __ret;
20985 __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
20986 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
20987 return __ret;
20988 }
20989 #endif
20990
20991 #ifdef __LITTLE_ENDIAN__
20992 __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
20993 int64x2_t __ret;
20994 __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
20995 return __ret;
20996 }
20997 #else
20998 __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
20999 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21000 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
21001 int64x2_t __ret;
21002 __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
21003 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21004 return __ret;
21005 }
21006 #endif
21007
21008 #ifdef __LITTLE_ENDIAN__
21009 __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
21010 int16x8_t __ret;
21011 __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
21012 return __ret;
21013 }
21014 #else
21015 __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
21016 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
21017 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
21018 int16x8_t __ret;
21019 __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
21020 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
21021 return __ret;
21022 }
21023 #endif
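/*
 * vrshlq_* (rounding shift left, with the 64-bit vrshl_* forms following
 * below): shift each lane of the first operand left by the signed per-lane
 * count in the second operand; negative counts perform a rounding right
 * shift.  A minimal usage sketch with illustrative values (vdupq_n_s32 is
 * defined elsewhere in this header):
 *
 *   int32x4_t v = vdupq_n_s32(5);
 *   int32x4_t n = vdupq_n_s32(-1);
 *   int32x4_t r = vrshlq_s32(v, n);   // rounding shift right by 1: (5 + 1) >> 1 = 3
 */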
21024
21025 #ifdef __LITTLE_ENDIAN__
21026 __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
21027 uint8x8_t __ret;
21028 __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
21029 return __ret;
21030 }
21031 #else
21032 __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
21033 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
21034 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
21035 uint8x8_t __ret;
21036 __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
21037 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
21038 return __ret;
21039 }
21040 #endif
21041
21042 #ifdef __LITTLE_ENDIAN__
21043 __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
21044 uint32x2_t __ret;
21045 __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
21046 return __ret;
21047 }
21048 #else
21049 __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
21050 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21051 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
21052 uint32x2_t __ret;
21053 __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
21054 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21055 return __ret;
21056 }
21057 #endif
21058
21059 #ifdef __LITTLE_ENDIAN__
21060 __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
21061 uint64x1_t __ret;
21062 __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
21063 return __ret;
21064 }
21065 #else
21066 __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
21067 uint64x1_t __ret;
21068 __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
21069 return __ret;
21070 }
21071 #endif
21072
21073 #ifdef __LITTLE_ENDIAN__
21074 __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
21075 uint16x4_t __ret;
21076 __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
21077 return __ret;
21078 }
21079 #else
21080 __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
21081 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21082 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
21083 uint16x4_t __ret;
21084 __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
21085 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21086 return __ret;
21087 }
21088 #endif
21089
21090 #ifdef __LITTLE_ENDIAN__
21091 __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
21092 int8x8_t __ret;
21093 __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
21094 return __ret;
21095 }
21096 #else
21097 __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
21098 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
21099 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
21100 int8x8_t __ret;
21101 __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
21102 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
21103 return __ret;
21104 }
21105 #endif
21106
21107 #ifdef __LITTLE_ENDIAN__
21108 __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
21109 int32x2_t __ret;
21110 __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
21111 return __ret;
21112 }
21113 #else
21114 __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
21115 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21116 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
21117 int32x2_t __ret;
21118 __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
21119 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21120 return __ret;
21121 }
21122 #endif
21123
21124 #ifdef __LITTLE_ENDIAN__
21125 __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
21126 int64x1_t __ret;
21127 __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
21128 return __ret;
21129 }
21130 #else
21131 __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
21132 int64x1_t __ret;
21133 __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
21134 return __ret;
21135 }
21136 #endif
21137
21138 #ifdef __LITTLE_ENDIAN__
21139 __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
21140 int16x4_t __ret;
21141 __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
21142 return __ret;
21143 }
21144 #else
21145 __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
21146 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21147 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
21148 int16x4_t __ret;
21149 __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
21150 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21151 return __ret;
21152 }
21153 #endif
21154
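/* vrshr_n_* and vrshrq_n_* implement VRSHR (vector rounding shift right by an
 * immediate): each element is shifted right by the constant __p1 (1 up to the
 * element width in bits) with rounding, i.e. 2^(__p1 - 1) is added to the
 * element before the shift. A minimal usage sketch (q is an assumed input
 * vector): uint16x8_t half = vrshrq_n_u16(q, 1); divides each lane by two,
 * rounding halves upward. */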
21155 #ifdef __LITTLE_ENDIAN__
21156 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
21157 uint8x16_t __s0 = __p0; \
21158 uint8x16_t __ret; \
21159 __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
21160 __ret; \
21161 })
21162 #else
21163 #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
21164 uint8x16_t __s0 = __p0; \
21165 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21166 uint8x16_t __ret; \
21167 __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
21168 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21169 __ret; \
21170 })
21171 #endif
21172
21173 #ifdef __LITTLE_ENDIAN__
21174 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
21175 uint32x4_t __s0 = __p0; \
21176 uint32x4_t __ret; \
21177 __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
21178 __ret; \
21179 })
21180 #else
21181 #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
21182 uint32x4_t __s0 = __p0; \
21183 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21184 uint32x4_t __ret; \
21185 __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
21186 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21187 __ret; \
21188 })
21189 #endif
21190
21191 #ifdef __LITTLE_ENDIAN__
21192 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
21193 uint64x2_t __s0 = __p0; \
21194 uint64x2_t __ret; \
21195 __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
21196 __ret; \
21197 })
21198 #else
21199 #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
21200 uint64x2_t __s0 = __p0; \
21201 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21202 uint64x2_t __ret; \
21203 __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
21204 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21205 __ret; \
21206 })
21207 #endif
21208
21209 #ifdef __LITTLE_ENDIAN__
21210 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
21211 uint16x8_t __s0 = __p0; \
21212 uint16x8_t __ret; \
21213 __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
21214 __ret; \
21215 })
21216 #else
21217 #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
21218 uint16x8_t __s0 = __p0; \
21219 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21220 uint16x8_t __ret; \
21221 __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
21222 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21223 __ret; \
21224 })
21225 #endif
21226
21227 #ifdef __LITTLE_ENDIAN__
21228 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
21229 int8x16_t __s0 = __p0; \
21230 int8x16_t __ret; \
21231 __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
21232 __ret; \
21233 })
21234 #else
21235 #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
21236 int8x16_t __s0 = __p0; \
21237 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21238 int8x16_t __ret; \
21239 __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
21240 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21241 __ret; \
21242 })
21243 #endif
21244
21245 #ifdef __LITTLE_ENDIAN__
21246 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
21247 int32x4_t __s0 = __p0; \
21248 int32x4_t __ret; \
21249 __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
21250 __ret; \
21251 })
21252 #else
21253 #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
21254 int32x4_t __s0 = __p0; \
21255 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21256 int32x4_t __ret; \
21257 __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
21258 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21259 __ret; \
21260 })
21261 #endif
21262
21263 #ifdef __LITTLE_ENDIAN__
21264 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
21265 int64x2_t __s0 = __p0; \
21266 int64x2_t __ret; \
21267 __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
21268 __ret; \
21269 })
21270 #else
21271 #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
21272 int64x2_t __s0 = __p0; \
21273 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21274 int64x2_t __ret; \
21275 __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
21276 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21277 __ret; \
21278 })
21279 #endif
21280
21281 #ifdef __LITTLE_ENDIAN__
21282 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
21283 int16x8_t __s0 = __p0; \
21284 int16x8_t __ret; \
21285 __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
21286 __ret; \
21287 })
21288 #else
21289 #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
21290 int16x8_t __s0 = __p0; \
21291 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21292 int16x8_t __ret; \
21293 __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
21294 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21295 __ret; \
21296 })
21297 #endif
21298
21299 #ifdef __LITTLE_ENDIAN__
21300 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
21301 uint8x8_t __s0 = __p0; \
21302 uint8x8_t __ret; \
21303 __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
21304 __ret; \
21305 })
21306 #else
21307 #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
21308 uint8x8_t __s0 = __p0; \
21309 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21310 uint8x8_t __ret; \
21311 __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
21312 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21313 __ret; \
21314 })
21315 #endif
21316
21317 #ifdef __LITTLE_ENDIAN__
21318 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
21319 uint32x2_t __s0 = __p0; \
21320 uint32x2_t __ret; \
21321 __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
21322 __ret; \
21323 })
21324 #else
21325 #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
21326 uint32x2_t __s0 = __p0; \
21327 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21328 uint32x2_t __ret; \
21329 __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
21330 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21331 __ret; \
21332 })
21333 #endif
21334
21335 #ifdef __LITTLE_ENDIAN__
21336 #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
21337 uint64x1_t __s0 = __p0; \
21338 uint64x1_t __ret; \
21339 __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
21340 __ret; \
21341 })
21342 #else
21343 #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
21344 uint64x1_t __s0 = __p0; \
21345 uint64x1_t __ret; \
21346 __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
21347 __ret; \
21348 })
21349 #endif
21350
21351 #ifdef __LITTLE_ENDIAN__
21352 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
21353 uint16x4_t __s0 = __p0; \
21354 uint16x4_t __ret; \
21355 __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
21356 __ret; \
21357 })
21358 #else
21359 #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
21360 uint16x4_t __s0 = __p0; \
21361 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21362 uint16x4_t __ret; \
21363 __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
21364 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21365 __ret; \
21366 })
21367 #endif
21368
21369 #ifdef __LITTLE_ENDIAN__
21370 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
21371 int8x8_t __s0 = __p0; \
21372 int8x8_t __ret; \
21373 __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
21374 __ret; \
21375 })
21376 #else
21377 #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
21378 int8x8_t __s0 = __p0; \
21379 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21380 int8x8_t __ret; \
21381 __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
21382 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21383 __ret; \
21384 })
21385 #endif
21386
21387 #ifdef __LITTLE_ENDIAN__
21388 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
21389 int32x2_t __s0 = __p0; \
21390 int32x2_t __ret; \
21391 __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
21392 __ret; \
21393 })
21394 #else
21395 #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
21396 int32x2_t __s0 = __p0; \
21397 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21398 int32x2_t __ret; \
21399 __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
21400 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21401 __ret; \
21402 })
21403 #endif
21404
21405 #ifdef __LITTLE_ENDIAN__
21406 #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
21407 int64x1_t __s0 = __p0; \
21408 int64x1_t __ret; \
21409 __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
21410 __ret; \
21411 })
21412 #else
21413 #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
21414 int64x1_t __s0 = __p0; \
21415 int64x1_t __ret; \
21416 __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
21417 __ret; \
21418 })
21419 #endif
21420
21421 #ifdef __LITTLE_ENDIAN__
21422 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
21423 int16x4_t __s0 = __p0; \
21424 int16x4_t __ret; \
21425 __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
21426 __ret; \
21427 })
21428 #else
21429 #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
21430 int16x4_t __s0 = __p0; \
21431 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21432 int16x4_t __ret; \
21433 __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
21434 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21435 __ret; \
21436 })
21437 #endif
21438
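/* vrshrn_n_* implement VRSHRN (rounding shift right by an immediate and
 * narrow): each element of the 128-bit source is shifted right by __p1 with
 * rounding, and the low half of the shifted value is returned in a vector with
 * half-width elements. The __noswap_* variants below are internal helpers:
 * they omit the lane reversal so that other big-endian wrappers in this header
 * can call them on values that are already in reversed order. */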
21439 #ifdef __LITTLE_ENDIAN__
21440 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
21441 uint32x4_t __s0 = __p0; \
21442 uint16x4_t __ret; \
21443 __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
21444 __ret; \
21445 })
21446 #else
21447 #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
21448 uint32x4_t __s0 = __p0; \
21449 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21450 uint16x4_t __ret; \
21451 __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
21452 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21453 __ret; \
21454 })
21455 #define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
21456 uint32x4_t __s0 = __p0; \
21457 uint16x4_t __ret; \
21458 __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
21459 __ret; \
21460 })
21461 #endif
21462
21463 #ifdef __LITTLE_ENDIAN__
21464 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
21465 uint64x2_t __s0 = __p0; \
21466 uint32x2_t __ret; \
21467 __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
21468 __ret; \
21469 })
21470 #else
21471 #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
21472 uint64x2_t __s0 = __p0; \
21473 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21474 uint32x2_t __ret; \
21475 __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
21476 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21477 __ret; \
21478 })
21479 #define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
21480 uint64x2_t __s0 = __p0; \
21481 uint32x2_t __ret; \
21482 __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
21483 __ret; \
21484 })
21485 #endif
21486
21487 #ifdef __LITTLE_ENDIAN__
21488 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
21489 uint16x8_t __s0 = __p0; \
21490 uint8x8_t __ret; \
21491 __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
21492 __ret; \
21493 })
21494 #else
21495 #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
21496 uint16x8_t __s0 = __p0; \
21497 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21498 uint8x8_t __ret; \
21499 __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
21500 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21501 __ret; \
21502 })
21503 #define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
21504 uint16x8_t __s0 = __p0; \
21505 uint8x8_t __ret; \
21506 __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
21507 __ret; \
21508 })
21509 #endif
21510
21511 #ifdef __LITTLE_ENDIAN__
21512 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
21513 int32x4_t __s0 = __p0; \
21514 int16x4_t __ret; \
21515 __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
21516 __ret; \
21517 })
21518 #else
21519 #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
21520 int32x4_t __s0 = __p0; \
21521 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21522 int16x4_t __ret; \
21523 __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
21524 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21525 __ret; \
21526 })
21527 #define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
21528 int32x4_t __s0 = __p0; \
21529 int16x4_t __ret; \
21530 __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
21531 __ret; \
21532 })
21533 #endif
21534
21535 #ifdef __LITTLE_ENDIAN__
21536 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
21537 int64x2_t __s0 = __p0; \
21538 int32x2_t __ret; \
21539 __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
21540 __ret; \
21541 })
21542 #else
21543 #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
21544 int64x2_t __s0 = __p0; \
21545 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21546 int32x2_t __ret; \
21547 __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
21548 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21549 __ret; \
21550 })
21551 #define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
21552 int64x2_t __s0 = __p0; \
21553 int32x2_t __ret; \
21554 __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
21555 __ret; \
21556 })
21557 #endif
21558
21559 #ifdef __LITTLE_ENDIAN__
21560 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
21561 int16x8_t __s0 = __p0; \
21562 int8x8_t __ret; \
21563 __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
21564 __ret; \
21565 })
21566 #else
21567 #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
21568 int16x8_t __s0 = __p0; \
21569 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21570 int8x8_t __ret; \
21571 __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
21572 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21573 __ret; \
21574 })
21575 #define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
21576 int16x8_t __s0 = __p0; \
21577 int8x8_t __ret; \
21578 __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
21579 __ret; \
21580 })
21581 #endif
21582
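/* vrsqrteq_* and vrsqrte_* return a per-element reciprocal square-root
 * estimate. The f32 variants approximate 1/sqrt(x) (VRSQRTE); the u32
 * variants follow the architectural unsigned fixed-point estimate (URSQRTE).
 * The estimate alone has limited precision and is normally refined with
 * vrsqrts; see below. */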
21583 #ifdef __LITTLE_ENDIAN__
21584 __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
21585 uint32x4_t __ret;
21586 __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
21587 return __ret;
21588 }
21589 #else
21590 __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
21591 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21592 uint32x4_t __ret;
21593 __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
21594 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21595 return __ret;
21596 }
21597 #endif
21598
21599 #ifdef __LITTLE_ENDIAN__
21600 __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
21601 float32x4_t __ret;
21602 __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
21603 return __ret;
21604 }
21605 #else
21606 __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
21607 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21608 float32x4_t __ret;
21609 __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
21610 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21611 return __ret;
21612 }
21613 #endif
21614
21615 #ifdef __LITTLE_ENDIAN__
21616 __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
21617 uint32x2_t __ret;
21618 __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
21619 return __ret;
21620 }
21621 #else
21622 __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
21623 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21624 uint32x2_t __ret;
21625 __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
21626 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21627 return __ret;
21628 }
21629 #endif
21630
21631 #ifdef __LITTLE_ENDIAN__
21632 __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
21633 float32x2_t __ret;
21634 __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
21635 return __ret;
21636 }
21637 #else
21638 __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
21639 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21640 float32x2_t __ret;
21641 __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
21642 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21643 return __ret;
21644 }
21645 #endif
21646
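/* vrsqrtsq_f32 and vrsqrts_f32 implement VRSQRTS, the reciprocal square-root
 * step: each result element is (3 - __p0 * __p1) / 2, which is the
 * Newton-Raphson correction factor for a reciprocal square-root estimate.
 * An illustrative refinement sequence (a sketch only, assuming x is the
 * float32x4_t input and using vmulq_f32 from this header for the multiplies):
 *
 *   float32x4_t e = vrsqrteq_f32(x);                      initial estimate
 *   e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));   one refinement step
 *   e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));   optional second step
 */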
21647 #ifdef __LITTLE_ENDIAN__
21648 __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
21649 float32x4_t __ret;
21650 __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
21651 return __ret;
21652 }
21653 #else
21654 __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
21655 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
21656 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
21657 float32x4_t __ret;
21658 __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
21659 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
21660 return __ret;
21661 }
21662 #endif
21663
21664 #ifdef __LITTLE_ENDIAN__
21665 __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
21666 float32x2_t __ret;
21667 __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
21668 return __ret;
21669 }
21670 #else
21671 __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
21672 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
21673 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
21674 float32x2_t __ret;
21675 __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
21676 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
21677 return __ret;
21678 }
21679 #endif
21680
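/* vrsra_n_* and vrsraq_n_* implement VRSRA (rounding shift right by an
 * immediate and accumulate): the second operand is shifted right by the
 * constant __p2 with rounding and the result is added to the first operand,
 * i.e. __ret = __p0 + rounding_shift_right(__p1, __p2). */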
21681 #ifdef __LITTLE_ENDIAN__
21682 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
21683 uint8x16_t __s0 = __p0; \
21684 uint8x16_t __s1 = __p1; \
21685 uint8x16_t __ret; \
21686 __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
21687 __ret; \
21688 })
21689 #else
21690 #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
21691 uint8x16_t __s0 = __p0; \
21692 uint8x16_t __s1 = __p1; \
21693 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21694 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21695 uint8x16_t __ret; \
21696 __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
21697 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21698 __ret; \
21699 })
21700 #endif
21701
21702 #ifdef __LITTLE_ENDIAN__
21703 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
21704 uint32x4_t __s0 = __p0; \
21705 uint32x4_t __s1 = __p1; \
21706 uint32x4_t __ret; \
21707 __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
21708 __ret; \
21709 })
21710 #else
21711 #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
21712 uint32x4_t __s0 = __p0; \
21713 uint32x4_t __s1 = __p1; \
21714 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21715 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
21716 uint32x4_t __ret; \
21717 __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
21718 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21719 __ret; \
21720 })
21721 #endif
21722
21723 #ifdef __LITTLE_ENDIAN__
21724 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
21725 uint64x2_t __s0 = __p0; \
21726 uint64x2_t __s1 = __p1; \
21727 uint64x2_t __ret; \
21728 __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
21729 __ret; \
21730 })
21731 #else
21732 #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
21733 uint64x2_t __s0 = __p0; \
21734 uint64x2_t __s1 = __p1; \
21735 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21736 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21737 uint64x2_t __ret; \
21738 __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
21739 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21740 __ret; \
21741 })
21742 #endif
21743
21744 #ifdef __LITTLE_ENDIAN__
21745 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
21746 uint16x8_t __s0 = __p0; \
21747 uint16x8_t __s1 = __p1; \
21748 uint16x8_t __ret; \
21749 __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
21750 __ret; \
21751 })
21752 #else
21753 #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
21754 uint16x8_t __s0 = __p0; \
21755 uint16x8_t __s1 = __p1; \
21756 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21757 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21758 uint16x8_t __ret; \
21759 __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
21760 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21761 __ret; \
21762 })
21763 #endif
21764
21765 #ifdef __LITTLE_ENDIAN__
21766 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
21767 int8x16_t __s0 = __p0; \
21768 int8x16_t __s1 = __p1; \
21769 int8x16_t __ret; \
21770 __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
21771 __ret; \
21772 })
21773 #else
21774 #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
21775 int8x16_t __s0 = __p0; \
21776 int8x16_t __s1 = __p1; \
21777 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21778 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21779 int8x16_t __ret; \
21780 __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
21781 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
21782 __ret; \
21783 })
21784 #endif
21785
21786 #ifdef __LITTLE_ENDIAN__
21787 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
21788 int32x4_t __s0 = __p0; \
21789 int32x4_t __s1 = __p1; \
21790 int32x4_t __ret; \
21791 __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
21792 __ret; \
21793 })
21794 #else
21795 #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
21796 int32x4_t __s0 = __p0; \
21797 int32x4_t __s1 = __p1; \
21798 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21799 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
21800 int32x4_t __ret; \
21801 __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
21802 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21803 __ret; \
21804 })
21805 #endif
21806
21807 #ifdef __LITTLE_ENDIAN__
21808 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
21809 int64x2_t __s0 = __p0; \
21810 int64x2_t __s1 = __p1; \
21811 int64x2_t __ret; \
21812 __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
21813 __ret; \
21814 })
21815 #else
21816 #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
21817 int64x2_t __s0 = __p0; \
21818 int64x2_t __s1 = __p1; \
21819 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21820 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21821 int64x2_t __ret; \
21822 __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
21823 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21824 __ret; \
21825 })
21826 #endif
21827
21828 #ifdef __LITTLE_ENDIAN__
21829 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
21830 int16x8_t __s0 = __p0; \
21831 int16x8_t __s1 = __p1; \
21832 int16x8_t __ret; \
21833 __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
21834 __ret; \
21835 })
21836 #else
21837 #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
21838 int16x8_t __s0 = __p0; \
21839 int16x8_t __s1 = __p1; \
21840 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21841 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21842 int16x8_t __ret; \
21843 __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
21844 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21845 __ret; \
21846 })
21847 #endif
21848
21849 #ifdef __LITTLE_ENDIAN__
21850 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
21851 uint8x8_t __s0 = __p0; \
21852 uint8x8_t __s1 = __p1; \
21853 uint8x8_t __ret; \
21854 __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
21855 __ret; \
21856 })
21857 #else
21858 #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
21859 uint8x8_t __s0 = __p0; \
21860 uint8x8_t __s1 = __p1; \
21861 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21862 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21863 uint8x8_t __ret; \
21864 __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
21865 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21866 __ret; \
21867 })
21868 #endif
21869
21870 #ifdef __LITTLE_ENDIAN__
21871 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
21872 uint32x2_t __s0 = __p0; \
21873 uint32x2_t __s1 = __p1; \
21874 uint32x2_t __ret; \
21875 __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
21876 __ret; \
21877 })
21878 #else
21879 #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
21880 uint32x2_t __s0 = __p0; \
21881 uint32x2_t __s1 = __p1; \
21882 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21883 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21884 uint32x2_t __ret; \
21885 __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
21886 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21887 __ret; \
21888 })
21889 #endif
21890
21891 #ifdef __LITTLE_ENDIAN__
21892 #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
21893 uint64x1_t __s0 = __p0; \
21894 uint64x1_t __s1 = __p1; \
21895 uint64x1_t __ret; \
21896 __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
21897 __ret; \
21898 })
21899 #else
21900 #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
21901 uint64x1_t __s0 = __p0; \
21902 uint64x1_t __s1 = __p1; \
21903 uint64x1_t __ret; \
21904 __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
21905 __ret; \
21906 })
21907 #endif
21908
21909 #ifdef __LITTLE_ENDIAN__
21910 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
21911 uint16x4_t __s0 = __p0; \
21912 uint16x4_t __s1 = __p1; \
21913 uint16x4_t __ret; \
21914 __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
21915 __ret; \
21916 })
21917 #else
21918 #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
21919 uint16x4_t __s0 = __p0; \
21920 uint16x4_t __s1 = __p1; \
21921 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
21922 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
21923 uint16x4_t __ret; \
21924 __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
21925 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
21926 __ret; \
21927 })
21928 #endif
21929
21930 #ifdef __LITTLE_ENDIAN__
21931 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
21932 int8x8_t __s0 = __p0; \
21933 int8x8_t __s1 = __p1; \
21934 int8x8_t __ret; \
21935 __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
21936 __ret; \
21937 })
21938 #else
21939 #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
21940 int8x8_t __s0 = __p0; \
21941 int8x8_t __s1 = __p1; \
21942 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
21943 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
21944 int8x8_t __ret; \
21945 __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
21946 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
21947 __ret; \
21948 })
21949 #endif
21950
21951 #ifdef __LITTLE_ENDIAN__
21952 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
21953 int32x2_t __s0 = __p0; \
21954 int32x2_t __s1 = __p1; \
21955 int32x2_t __ret; \
21956 __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
21957 __ret; \
21958 })
21959 #else
21960 #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
21961 int32x2_t __s0 = __p0; \
21962 int32x2_t __s1 = __p1; \
21963 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
21964 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
21965 int32x2_t __ret; \
21966 __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
21967 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
21968 __ret; \
21969 })
21970 #endif
21971
21972 #ifdef __LITTLE_ENDIAN__
21973 #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
21974 int64x1_t __s0 = __p0; \
21975 int64x1_t __s1 = __p1; \
21976 int64x1_t __ret; \
21977 __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
21978 __ret; \
21979 })
21980 #else
21981 #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
21982 int64x1_t __s0 = __p0; \
21983 int64x1_t __s1 = __p1; \
21984 int64x1_t __ret; \
21985 __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
21986 __ret; \
21987 })
21988 #endif
21989
21990 #ifdef __LITTLE_ENDIAN__
21991 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
21992 int16x4_t __s0 = __p0; \
21993 int16x4_t __s1 = __p1; \
21994 int16x4_t __ret; \
21995 __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
21996 __ret; \
21997 })
21998 #else
21999 #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
22000 int16x4_t __s0 = __p0; \
22001 int16x4_t __s1 = __p1; \
22002 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
22003 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22004 int16x4_t __ret; \
22005 __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
22006 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22007 __ret; \
22008 })
22009 #endif
22010
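/* vrsubhn_* implement VRSUBHN (rounding subtract and narrow, returning the
 * high half): the difference __p0 - __p1 is computed at full width, the
 * rounding constant 1 << (r - 1) is added (where r is the number of low bits
 * discarded by the narrowing), and the most significant half of each element
 * is returned in a half-width vector. */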
22011 #ifdef __LITTLE_ENDIAN__
22012 __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
22013 uint16x4_t __ret;
22014 __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
22015 return __ret;
22016 }
22017 #else
22018 __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
22019 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22020 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22021 uint16x4_t __ret;
22022 __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
22023 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22024 return __ret;
22025 }
22026 __ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
22027 uint16x4_t __ret;
22028 __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
22029 return __ret;
22030 }
22031 #endif
22032
22033 #ifdef __LITTLE_ENDIAN__
22034 __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
22035 uint32x2_t __ret;
22036 __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
22037 return __ret;
22038 }
22039 #else
22040 __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
22041 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22042 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22043 uint32x2_t __ret;
22044 __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
22045 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22046 return __ret;
22047 }
22048 __ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
22049 uint32x2_t __ret;
22050 __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
22051 return __ret;
22052 }
22053 #endif
22054
22055 #ifdef __LITTLE_ENDIAN__
22056 __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
22057 uint8x8_t __ret;
22058 __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
22059 return __ret;
22060 }
22061 #else
22062 __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
22063 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22064 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22065 uint8x8_t __ret;
22066 __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
22067 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22068 return __ret;
22069 }
22070 __ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
22071 uint8x8_t __ret;
22072 __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
22073 return __ret;
22074 }
22075 #endif
22076
22077 #ifdef __LITTLE_ENDIAN__
22078 __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
22079 int16x4_t __ret;
22080 __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
22081 return __ret;
22082 }
22083 #else
22084 __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
22085 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22086 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22087 int16x4_t __ret;
22088 __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
22089 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22090 return __ret;
22091 }
22092 __ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
22093 int16x4_t __ret;
22094 __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
22095 return __ret;
22096 }
22097 #endif
22098
22099 #ifdef __LITTLE_ENDIAN__
22100 __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
22101 int32x2_t __ret;
22102 __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
22103 return __ret;
22104 }
22105 #else
22106 __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
22107 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22108 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22109 int32x2_t __ret;
22110 __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
22111 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22112 return __ret;
22113 }
22114 __ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
22115 int32x2_t __ret;
22116 __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
22117 return __ret;
22118 }
22119 #endif
22120
22121 #ifdef __LITTLE_ENDIAN__
22122 __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
22123 int8x8_t __ret;
22124 __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
22125 return __ret;
22126 }
22127 #else
22128 __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
22129 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22130 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22131 int8x8_t __ret;
22132 __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
22133 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22134 return __ret;
22135 }
22136 __ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
22137 int8x8_t __ret;
22138 __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
22139 return __ret;
22140 }
22141 #endif
22142
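/* vset_lane_* and vsetq_lane_* return a copy of the vector __p1 in which lane
 * __p2 has been replaced by the scalar __p0; the lane index must be an integer
 * constant expression, which is why these are macros rather than functions.
 * The big-endian expansions reverse the lane order around the builtin so that
 * the index refers to the same architectural lane on either byte order. */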
22143 #ifdef __LITTLE_ENDIAN__
22144 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22145 poly8_t __s0 = __p0; \
22146 poly8x8_t __s1 = __p1; \
22147 poly8x8_t __ret; \
22148 __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22149 __ret; \
22150 })
22151 #else
22152 #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22153 poly8_t __s0 = __p0; \
22154 poly8x8_t __s1 = __p1; \
22155 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22156 poly8x8_t __ret; \
22157 __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
22158 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22159 __ret; \
22160 })
22161 #define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22162 poly8_t __s0 = __p0; \
22163 poly8x8_t __s1 = __p1; \
22164 poly8x8_t __ret; \
22165 __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22166 __ret; \
22167 })
22168 #endif
22169
22170 #ifdef __LITTLE_ENDIAN__
22171 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22172 poly16_t __s0 = __p0; \
22173 poly16x4_t __s1 = __p1; \
22174 poly16x4_t __ret; \
22175 __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22176 __ret; \
22177 })
22178 #else
22179 #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22180 poly16_t __s0 = __p0; \
22181 poly16x4_t __s1 = __p1; \
22182 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22183 poly16x4_t __ret; \
22184 __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
22185 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22186 __ret; \
22187 })
22188 #define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22189 poly16_t __s0 = __p0; \
22190 poly16x4_t __s1 = __p1; \
22191 poly16x4_t __ret; \
22192 __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22193 __ret; \
22194 })
22195 #endif
22196
22197 #ifdef __LITTLE_ENDIAN__
22198 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22199 poly8_t __s0 = __p0; \
22200 poly8x16_t __s1 = __p1; \
22201 poly8x16_t __ret; \
22202 __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22203 __ret; \
22204 })
22205 #else
22206 #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22207 poly8_t __s0 = __p0; \
22208 poly8x16_t __s1 = __p1; \
22209 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22210 poly8x16_t __ret; \
22211 __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
22212 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22213 __ret; \
22214 })
22215 #define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
22216 poly8_t __s0 = __p0; \
22217 poly8x16_t __s1 = __p1; \
22218 poly8x16_t __ret; \
22219 __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22220 __ret; \
22221 })
22222 #endif
22223
22224 #ifdef __LITTLE_ENDIAN__
22225 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22226 poly16_t __s0 = __p0; \
22227 poly16x8_t __s1 = __p1; \
22228 poly16x8_t __ret; \
22229 __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22230 __ret; \
22231 })
22232 #else
22233 #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22234 poly16_t __s0 = __p0; \
22235 poly16x8_t __s1 = __p1; \
22236 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22237 poly16x8_t __ret; \
22238 __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
22239 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22240 __ret; \
22241 })
22242 #define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
22243 poly16_t __s0 = __p0; \
22244 poly16x8_t __s1 = __p1; \
22245 poly16x8_t __ret; \
22246 __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22247 __ret; \
22248 })
22249 #endif
22250
22251 #ifdef __LITTLE_ENDIAN__
22252 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22253 uint8_t __s0 = __p0; \
22254 uint8x16_t __s1 = __p1; \
22255 uint8x16_t __ret; \
22256 __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22257 __ret; \
22258 })
22259 #else
22260 #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22261 uint8_t __s0 = __p0; \
22262 uint8x16_t __s1 = __p1; \
22263 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22264 uint8x16_t __ret; \
22265 __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
22266 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22267 __ret; \
22268 })
22269 #define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22270 uint8_t __s0 = __p0; \
22271 uint8x16_t __s1 = __p1; \
22272 uint8x16_t __ret; \
22273 __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22274 __ret; \
22275 })
22276 #endif
22277
22278 #ifdef __LITTLE_ENDIAN__
22279 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22280 uint32_t __s0 = __p0; \
22281 uint32x4_t __s1 = __p1; \
22282 uint32x4_t __ret; \
22283 __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22284 __ret; \
22285 })
22286 #else
22287 #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22288 uint32_t __s0 = __p0; \
22289 uint32x4_t __s1 = __p1; \
22290 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22291 uint32x4_t __ret; \
22292 __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
22293 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22294 __ret; \
22295 })
22296 #define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22297 uint32_t __s0 = __p0; \
22298 uint32x4_t __s1 = __p1; \
22299 uint32x4_t __ret; \
22300 __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22301 __ret; \
22302 })
22303 #endif
22304
22305 #ifdef __LITTLE_ENDIAN__
22306 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22307 uint64_t __s0 = __p0; \
22308 uint64x2_t __s1 = __p1; \
22309 uint64x2_t __ret; \
22310 __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22311 __ret; \
22312 })
22313 #else
22314 #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22315 uint64_t __s0 = __p0; \
22316 uint64x2_t __s1 = __p1; \
22317 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22318 uint64x2_t __ret; \
22319 __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
22320 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22321 __ret; \
22322 })
22323 #define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22324 uint64_t __s0 = __p0; \
22325 uint64x2_t __s1 = __p1; \
22326 uint64x2_t __ret; \
22327 __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22328 __ret; \
22329 })
22330 #endif
22331
22332 #ifdef __LITTLE_ENDIAN__
22333 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22334 uint16_t __s0 = __p0; \
22335 uint16x8_t __s1 = __p1; \
22336 uint16x8_t __ret; \
22337 __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22338 __ret; \
22339 })
22340 #else
22341 #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22342 uint16_t __s0 = __p0; \
22343 uint16x8_t __s1 = __p1; \
22344 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22345 uint16x8_t __ret; \
22346 __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
22347 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22348 __ret; \
22349 })
22350 #define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22351 uint16_t __s0 = __p0; \
22352 uint16x8_t __s1 = __p1; \
22353 uint16x8_t __ret; \
22354 __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22355 __ret; \
22356 })
22357 #endif
22358
22359 #ifdef __LITTLE_ENDIAN__
22360 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22361 int8_t __s0 = __p0; \
22362 int8x16_t __s1 = __p1; \
22363 int8x16_t __ret; \
22364 __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22365 __ret; \
22366 })
22367 #else
22368 #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22369 int8_t __s0 = __p0; \
22370 int8x16_t __s1 = __p1; \
22371 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22372 int8x16_t __ret; \
22373 __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
22374 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
22375 __ret; \
22376 })
22377 #define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22378 int8_t __s0 = __p0; \
22379 int8x16_t __s1 = __p1; \
22380 int8x16_t __ret; \
22381 __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
22382 __ret; \
22383 })
22384 #endif
22385
22386 #ifdef __LITTLE_ENDIAN__
22387 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22388 float32_t __s0 = __p0; \
22389 float32x4_t __s1 = __p1; \
22390 float32x4_t __ret; \
22391 __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
22392 __ret; \
22393 })
22394 #else
22395 #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22396 float32_t __s0 = __p0; \
22397 float32x4_t __s1 = __p1; \
22398 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22399 float32x4_t __ret; \
22400 __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
22401 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22402 __ret; \
22403 })
22404 #define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22405 float32_t __s0 = __p0; \
22406 float32x4_t __s1 = __p1; \
22407 float32x4_t __ret; \
22408 __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
22409 __ret; \
22410 })
22411 #endif
22412
22413 #ifdef __LITTLE_ENDIAN__
22414 #define vsetq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
22415 float16_t __s0 = __p0; \
22416 float16x8_t __s1 = __p1; \
22417 float16x8_t __ret; \
22418 __ret = (float16x8_t) __builtin_neon_vsetq_lane_f16(__s0, (int8x16_t)__s1, __p2); \
22419 __ret; \
22420 })
22421 #else
22422 #define vsetq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
22423 float16_t __s0 = __p0; \
22424 float16x8_t __s1 = __p1; \
22425 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22426 float16x8_t __ret; \
22427 __ret = (float16x8_t) __builtin_neon_vsetq_lane_f16(__s0, (int8x16_t)__rev1, __p2); \
22428 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22429 __ret; \
22430 })
22431 #endif
22432
22433 #ifdef __LITTLE_ENDIAN__
22434 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22435 int32_t __s0 = __p0; \
22436 int32x4_t __s1 = __p1; \
22437 int32x4_t __ret; \
22438 __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22439 __ret; \
22440 })
22441 #else
22442 #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22443 int32_t __s0 = __p0; \
22444 int32x4_t __s1 = __p1; \
22445 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22446 int32x4_t __ret; \
22447 __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
22448 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22449 __ret; \
22450 })
22451 #define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22452 int32_t __s0 = __p0; \
22453 int32x4_t __s1 = __p1; \
22454 int32x4_t __ret; \
22455 __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
22456 __ret; \
22457 })
22458 #endif
22459
22460 #ifdef __LITTLE_ENDIAN__
22461 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22462 int64_t __s0 = __p0; \
22463 int64x2_t __s1 = __p1; \
22464 int64x2_t __ret; \
22465 __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22466 __ret; \
22467 })
22468 #else
22469 #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22470 int64_t __s0 = __p0; \
22471 int64x2_t __s1 = __p1; \
22472 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22473 int64x2_t __ret; \
22474 __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
22475 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22476 __ret; \
22477 })
22478 #define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22479 int64_t __s0 = __p0; \
22480 int64x2_t __s1 = __p1; \
22481 int64x2_t __ret; \
22482 __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
22483 __ret; \
22484 })
22485 #endif
22486
22487 #ifdef __LITTLE_ENDIAN__
22488 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22489 int16_t __s0 = __p0; \
22490 int16x8_t __s1 = __p1; \
22491 int16x8_t __ret; \
22492 __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22493 __ret; \
22494 })
22495 #else
22496 #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22497 int16_t __s0 = __p0; \
22498 int16x8_t __s1 = __p1; \
22499 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22500 int16x8_t __ret; \
22501 __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
22502 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22503 __ret; \
22504 })
22505 #define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22506 int16_t __s0 = __p0; \
22507 int16x8_t __s1 = __p1; \
22508 int16x8_t __ret; \
22509 __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
22510 __ret; \
22511 })
22512 #endif
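/* Editorial note (not part of the generated header): the vsetq_lane_* and
 * vset_lane_* macros in this region return a copy of the vector __p1 with
 * lane __p2 replaced by the scalar __p0; the lane index must be a constant
 * expression.  On big-endian targets the operands are reversed with
 * __builtin_shufflevector before and after the builtin call so that the lane
 * index selects the same element as on little-endian targets; the __noswap_*
 * variants skip that reversal for internal callers whose data is already in
 * reversed order.  Illustrative use:
 *   int32x4_t v = vdupq_n_s32(0);
 *   v = vsetq_lane_s32(7, v, 2);    // lanes become {0, 0, 7, 0}
 */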
22513
22514 #ifdef __LITTLE_ENDIAN__
22515 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22516 uint8_t __s0 = __p0; \
22517 uint8x8_t __s1 = __p1; \
22518 uint8x8_t __ret; \
22519 __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22520 __ret; \
22521 })
22522 #else
22523 #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22524 uint8_t __s0 = __p0; \
22525 uint8x8_t __s1 = __p1; \
22526 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22527 uint8x8_t __ret; \
22528 __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
22529 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22530 __ret; \
22531 })
22532 #define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
22533 uint8_t __s0 = __p0; \
22534 uint8x8_t __s1 = __p1; \
22535 uint8x8_t __ret; \
22536 __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22537 __ret; \
22538 })
22539 #endif
22540
22541 #ifdef __LITTLE_ENDIAN__
22542 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22543 uint32_t __s0 = __p0; \
22544 uint32x2_t __s1 = __p1; \
22545 uint32x2_t __ret; \
22546 __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22547 __ret; \
22548 })
22549 #else
22550 #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22551 uint32_t __s0 = __p0; \
22552 uint32x2_t __s1 = __p1; \
22553 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22554 uint32x2_t __ret; \
22555 __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
22556 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22557 __ret; \
22558 })
22559 #define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
22560 uint32_t __s0 = __p0; \
22561 uint32x2_t __s1 = __p1; \
22562 uint32x2_t __ret; \
22563 __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22564 __ret; \
22565 })
22566 #endif
22567
22568 #ifdef __LITTLE_ENDIAN__
22569 #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22570 uint64_t __s0 = __p0; \
22571 uint64x1_t __s1 = __p1; \
22572 uint64x1_t __ret; \
22573 __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22574 __ret; \
22575 })
22576 #else
22577 #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22578 uint64_t __s0 = __p0; \
22579 uint64x1_t __s1 = __p1; \
22580 uint64x1_t __ret; \
22581 __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22582 __ret; \
22583 })
22584 #define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
22585 uint64_t __s0 = __p0; \
22586 uint64x1_t __s1 = __p1; \
22587 uint64x1_t __ret; \
22588 __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22589 __ret; \
22590 })
22591 #endif
22592
22593 #ifdef __LITTLE_ENDIAN__
22594 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22595 uint16_t __s0 = __p0; \
22596 uint16x4_t __s1 = __p1; \
22597 uint16x4_t __ret; \
22598 __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22599 __ret; \
22600 })
22601 #else
22602 #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22603 uint16_t __s0 = __p0; \
22604 uint16x4_t __s1 = __p1; \
22605 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22606 uint16x4_t __ret; \
22607 __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
22608 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22609 __ret; \
22610 })
22611 #define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
22612 uint16_t __s0 = __p0; \
22613 uint16x4_t __s1 = __p1; \
22614 uint16x4_t __ret; \
22615 __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22616 __ret; \
22617 })
22618 #endif
22619
22620 #ifdef __LITTLE_ENDIAN__
22621 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22622 int8_t __s0 = __p0; \
22623 int8x8_t __s1 = __p1; \
22624 int8x8_t __ret; \
22625 __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22626 __ret; \
22627 })
22628 #else
22629 #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22630 int8_t __s0 = __p0; \
22631 int8x8_t __s1 = __p1; \
22632 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
22633 int8x8_t __ret; \
22634 __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
22635 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
22636 __ret; \
22637 })
22638 #define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
22639 int8_t __s0 = __p0; \
22640 int8x8_t __s1 = __p1; \
22641 int8x8_t __ret; \
22642 __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
22643 __ret; \
22644 })
22645 #endif
22646
22647 #ifdef __LITTLE_ENDIAN__
22648 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22649 float32_t __s0 = __p0; \
22650 float32x2_t __s1 = __p1; \
22651 float32x2_t __ret; \
22652 __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
22653 __ret; \
22654 })
22655 #else
22656 #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22657 float32_t __s0 = __p0; \
22658 float32x2_t __s1 = __p1; \
22659 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22660 float32x2_t __ret; \
22661 __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
22662 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22663 __ret; \
22664 })
22665 #define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
22666 float32_t __s0 = __p0; \
22667 float32x2_t __s1 = __p1; \
22668 float32x2_t __ret; \
22669 __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
22670 __ret; \
22671 })
22672 #endif
22673
22674 #ifdef __LITTLE_ENDIAN__
22675 #define vset_lane_f16(__p0, __p1, __p2) __extension__ ({ \
22676 float16_t __s0 = __p0; \
22677 float16x4_t __s1 = __p1; \
22678 float16x4_t __ret; \
22679 __ret = (float16x4_t) __builtin_neon_vset_lane_f16(__s0, (int8x8_t)__s1, __p2); \
22680 __ret; \
22681 })
22682 #else
22683 #define vset_lane_f16(__p0, __p1, __p2) __extension__ ({ \
22684 float16_t __s0 = __p0; \
22685 float16x4_t __s1 = __p1; \
22686 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22687 float16x4_t __ret; \
22688 __ret = (float16x4_t) __builtin_neon_vset_lane_f16(__s0, (int8x8_t)__rev1, __p2); \
22689 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22690 __ret; \
22691 })
22692 #endif
22693
22694 #ifdef __LITTLE_ENDIAN__
22695 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22696 int32_t __s0 = __p0; \
22697 int32x2_t __s1 = __p1; \
22698 int32x2_t __ret; \
22699 __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22700 __ret; \
22701 })
22702 #else
22703 #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22704 int32_t __s0 = __p0; \
22705 int32x2_t __s1 = __p1; \
22706 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
22707 int32x2_t __ret; \
22708 __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
22709 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
22710 __ret; \
22711 })
22712 #define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
22713 int32_t __s0 = __p0; \
22714 int32x2_t __s1 = __p1; \
22715 int32x2_t __ret; \
22716 __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
22717 __ret; \
22718 })
22719 #endif
22720
22721 #ifdef __LITTLE_ENDIAN__
22722 #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22723 int64_t __s0 = __p0; \
22724 int64x1_t __s1 = __p1; \
22725 int64x1_t __ret; \
22726 __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22727 __ret; \
22728 })
22729 #else
22730 #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22731 int64_t __s0 = __p0; \
22732 int64x1_t __s1 = __p1; \
22733 int64x1_t __ret; \
22734 __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22735 __ret; \
22736 })
22737 #define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
22738 int64_t __s0 = __p0; \
22739 int64x1_t __s1 = __p1; \
22740 int64x1_t __ret; \
22741 __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
22742 __ret; \
22743 })
22744 #endif
22745
22746 #ifdef __LITTLE_ENDIAN__
22747 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22748 int16_t __s0 = __p0; \
22749 int16x4_t __s1 = __p1; \
22750 int16x4_t __ret; \
22751 __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22752 __ret; \
22753 })
22754 #else
22755 #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22756 int16_t __s0 = __p0; \
22757 int16x4_t __s1 = __p1; \
22758 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
22759 int16x4_t __ret; \
22760 __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
22761 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
22762 __ret; \
22763 })
22764 #define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
22765 int16_t __s0 = __p0; \
22766 int16x4_t __s1 = __p1; \
22767 int16x4_t __ret; \
22768 __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
22769 __ret; \
22770 })
22771 #endif
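/* Illustrative use of the 64-bit (d-register) lane-set forms above; editorial
 * example, not part of the generated header:
 *   float32x2_t f = vdup_n_f32(1.0f);   // {1.0f, 1.0f}
 *   f = vset_lane_f32(2.5f, f, 1);      // {1.0f, 2.5f}
 */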
22772
22773 #ifdef __LITTLE_ENDIAN__
22774 __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
22775 uint8x16_t __ret;
22776 __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
22777 return __ret;
22778 }
22779 #else
22780 __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
22781 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22782 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22783 uint8x16_t __ret;
22784 __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
22785 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22786 return __ret;
22787 }
22788 #endif
22789
22790 #ifdef __LITTLE_ENDIAN__
22791 __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
22792 uint32x4_t __ret;
22793 __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
22794 return __ret;
22795 }
22796 #else
22797 __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
22798 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22799 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22800 uint32x4_t __ret;
22801 __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
22802 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22803 return __ret;
22804 }
22805 #endif
22806
22807 #ifdef __LITTLE_ENDIAN__
22808 __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
22809 uint64x2_t __ret;
22810 __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
22811 return __ret;
22812 }
22813 #else
22814 __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
22815 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22816 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22817 uint64x2_t __ret;
22818 __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
22819 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22820 return __ret;
22821 }
22822 #endif
22823
22824 #ifdef __LITTLE_ENDIAN__
22825 __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
22826 uint16x8_t __ret;
22827 __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
22828 return __ret;
22829 }
22830 #else
22831 __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
22832 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22833 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22834 uint16x8_t __ret;
22835 __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
22836 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22837 return __ret;
22838 }
22839 #endif
22840
22841 #ifdef __LITTLE_ENDIAN__
22842 __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
22843 int8x16_t __ret;
22844 __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
22845 return __ret;
22846 }
22847 #else
22848 __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
22849 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22850 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22851 int8x16_t __ret;
22852 __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
22853 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
22854 return __ret;
22855 }
22856 #endif
22857
22858 #ifdef __LITTLE_ENDIAN__
22859 __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
22860 int32x4_t __ret;
22861 __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
22862 return __ret;
22863 }
22864 #else
22865 __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
22866 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22867 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22868 int32x4_t __ret;
22869 __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
22870 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22871 return __ret;
22872 }
22873 #endif
22874
22875 #ifdef __LITTLE_ENDIAN__
22876 __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
22877 int64x2_t __ret;
22878 __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
22879 return __ret;
22880 }
22881 #else
22882 __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
22883 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22884 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22885 int64x2_t __ret;
22886 __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
22887 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22888 return __ret;
22889 }
22890 #endif
22891
22892 #ifdef __LITTLE_ENDIAN__
22893 __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
22894 int16x8_t __ret;
22895 __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
22896 return __ret;
22897 }
22898 #else
22899 __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
22900 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22901 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22902 int16x8_t __ret;
22903 __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
22904 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22905 return __ret;
22906 }
22907 #endif
22908
22909 #ifdef __LITTLE_ENDIAN__
22910 __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
22911 uint8x8_t __ret;
22912 __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
22913 return __ret;
22914 }
22915 #else
22916 __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
22917 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22918 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22919 uint8x8_t __ret;
22920 __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
22921 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22922 return __ret;
22923 }
22924 #endif
22925
22926 #ifdef __LITTLE_ENDIAN__
22927 __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
22928 uint32x2_t __ret;
22929 __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
22930 return __ret;
22931 }
22932 #else
22933 __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
22934 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
22935 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
22936 uint32x2_t __ret;
22937 __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
22938 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
22939 return __ret;
22940 }
22941 #endif
22942
22943 #ifdef __LITTLE_ENDIAN__
22944 __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
22945 uint64x1_t __ret;
22946 __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
22947 return __ret;
22948 }
22949 #else
22950 __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
22951 uint64x1_t __ret;
22952 __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
22953 return __ret;
22954 }
22955 #endif
22956
22957 #ifdef __LITTLE_ENDIAN__
22958 __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
22959 uint16x4_t __ret;
22960 __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
22961 return __ret;
22962 }
22963 #else
22964 __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
22965 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
22966 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
22967 uint16x4_t __ret;
22968 __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
22969 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
22970 return __ret;
22971 }
22972 #endif
22973
22974 #ifdef __LITTLE_ENDIAN__
22975 __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
22976 int8x8_t __ret;
22977 __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
22978 return __ret;
22979 }
22980 #else
22981 __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
22982 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
22983 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
22984 int8x8_t __ret;
22985 __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
22986 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
22987 return __ret;
22988 }
22989 #endif
22990
22991 #ifdef __LITTLE_ENDIAN__
22992 __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
22993 int32x2_t __ret;
22994 __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
22995 return __ret;
22996 }
22997 #else
22998 __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
22999 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
23000 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
23001 int32x2_t __ret;
23002 __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
23003 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
23004 return __ret;
23005 }
23006 #endif
23007
23008 #ifdef __LITTLE_ENDIAN__
23009 __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
23010 int64x1_t __ret;
23011 __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
23012 return __ret;
23013 }
23014 #else
23015 __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
23016 int64x1_t __ret;
23017 __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
23018 return __ret;
23019 }
23020 #endif
23021
23022 #ifdef __LITTLE_ENDIAN__
23023 __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
23024 int16x4_t __ret;
23025 __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
23026 return __ret;
23027 }
23028 #else
23029 __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
23030 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
23031 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
23032 int16x4_t __ret;
23033 __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
23034 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
23035 return __ret;
23036 }
23037 #endif
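/* Editorial note: vshlq_* / vshl_* above shift each element of __p0 left by
 * the signed count in the corresponding element of __p1 (VSHL); negative
 * counts shift right.  Illustrative example, not part of the generated header:
 *   int32x2_t a = vdup_n_s32(4);
 *   int32x2_t n = vdup_n_s32(-1);
 *   int32x2_t r = vshl_s32(a, n);       // each lane: 4 >> 1 == 2
 */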
23038
23039 #ifdef __LITTLE_ENDIAN__
23040 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
23041 uint8x16_t __s0 = __p0; \
23042 uint8x16_t __ret; \
23043 __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
23044 __ret; \
23045 })
23046 #else
23047 #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
23048 uint8x16_t __s0 = __p0; \
23049 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23050 uint8x16_t __ret; \
23051 __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
23052 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23053 __ret; \
23054 })
23055 #endif
23056
23057 #ifdef __LITTLE_ENDIAN__
23058 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
23059 uint32x4_t __s0 = __p0; \
23060 uint32x4_t __ret; \
23061 __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
23062 __ret; \
23063 })
23064 #else
23065 #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
23066 uint32x4_t __s0 = __p0; \
23067 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23068 uint32x4_t __ret; \
23069 __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
23070 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23071 __ret; \
23072 })
23073 #endif
23074
23075 #ifdef __LITTLE_ENDIAN__
23076 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
23077 uint64x2_t __s0 = __p0; \
23078 uint64x2_t __ret; \
23079 __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
23080 __ret; \
23081 })
23082 #else
23083 #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
23084 uint64x2_t __s0 = __p0; \
23085 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23086 uint64x2_t __ret; \
23087 __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
23088 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23089 __ret; \
23090 })
23091 #endif
23092
23093 #ifdef __LITTLE_ENDIAN__
23094 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
23095 uint16x8_t __s0 = __p0; \
23096 uint16x8_t __ret; \
23097 __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
23098 __ret; \
23099 })
23100 #else
23101 #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
23102 uint16x8_t __s0 = __p0; \
23103 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23104 uint16x8_t __ret; \
23105 __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
23106 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23107 __ret; \
23108 })
23109 #endif
23110
23111 #ifdef __LITTLE_ENDIAN__
23112 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
23113 int8x16_t __s0 = __p0; \
23114 int8x16_t __ret; \
23115 __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
23116 __ret; \
23117 })
23118 #else
23119 #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
23120 int8x16_t __s0 = __p0; \
23121 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23122 int8x16_t __ret; \
23123 __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
23124 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23125 __ret; \
23126 })
23127 #endif
23128
23129 #ifdef __LITTLE_ENDIAN__
23130 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
23131 int32x4_t __s0 = __p0; \
23132 int32x4_t __ret; \
23133 __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
23134 __ret; \
23135 })
23136 #else
23137 #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
23138 int32x4_t __s0 = __p0; \
23139 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23140 int32x4_t __ret; \
23141 __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
23142 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23143 __ret; \
23144 })
23145 #endif
23146
23147 #ifdef __LITTLE_ENDIAN__
23148 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
23149 int64x2_t __s0 = __p0; \
23150 int64x2_t __ret; \
23151 __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
23152 __ret; \
23153 })
23154 #else
23155 #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
23156 int64x2_t __s0 = __p0; \
23157 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23158 int64x2_t __ret; \
23159 __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
23160 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23161 __ret; \
23162 })
23163 #endif
23164
23165 #ifdef __LITTLE_ENDIAN__
23166 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
23167 int16x8_t __s0 = __p0; \
23168 int16x8_t __ret; \
23169 __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
23170 __ret; \
23171 })
23172 #else
23173 #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
23174 int16x8_t __s0 = __p0; \
23175 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23176 int16x8_t __ret; \
23177 __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
23178 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23179 __ret; \
23180 })
23181 #endif
23182
23183 #ifdef __LITTLE_ENDIAN__
23184 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
23185 uint8x8_t __s0 = __p0; \
23186 uint8x8_t __ret; \
23187 __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
23188 __ret; \
23189 })
23190 #else
23191 #define vshl_n_u8(__p0, __p1) __extension__ ({ \
23192 uint8x8_t __s0 = __p0; \
23193 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23194 uint8x8_t __ret; \
23195 __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
23196 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23197 __ret; \
23198 })
23199 #endif
23200
23201 #ifdef __LITTLE_ENDIAN__
23202 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
23203 uint32x2_t __s0 = __p0; \
23204 uint32x2_t __ret; \
23205 __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
23206 __ret; \
23207 })
23208 #else
23209 #define vshl_n_u32(__p0, __p1) __extension__ ({ \
23210 uint32x2_t __s0 = __p0; \
23211 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23212 uint32x2_t __ret; \
23213 __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
23214 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23215 __ret; \
23216 })
23217 #endif
23218
23219 #ifdef __LITTLE_ENDIAN__
23220 #define vshl_n_u64(__p0, __p1) __extension__ ({ \
23221 uint64x1_t __s0 = __p0; \
23222 uint64x1_t __ret; \
23223 __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
23224 __ret; \
23225 })
23226 #else
23227 #define vshl_n_u64(__p0, __p1) __extension__ ({ \
23228 uint64x1_t __s0 = __p0; \
23229 uint64x1_t __ret; \
23230 __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
23231 __ret; \
23232 })
23233 #endif
23234
23235 #ifdef __LITTLE_ENDIAN__
23236 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
23237 uint16x4_t __s0 = __p0; \
23238 uint16x4_t __ret; \
23239 __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
23240 __ret; \
23241 })
23242 #else
23243 #define vshl_n_u16(__p0, __p1) __extension__ ({ \
23244 uint16x4_t __s0 = __p0; \
23245 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23246 uint16x4_t __ret; \
23247 __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
23248 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23249 __ret; \
23250 })
23251 #endif
23252
23253 #ifdef __LITTLE_ENDIAN__
23254 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
23255 int8x8_t __s0 = __p0; \
23256 int8x8_t __ret; \
23257 __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
23258 __ret; \
23259 })
23260 #else
23261 #define vshl_n_s8(__p0, __p1) __extension__ ({ \
23262 int8x8_t __s0 = __p0; \
23263 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23264 int8x8_t __ret; \
23265 __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
23266 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23267 __ret; \
23268 })
23269 #endif
23270
23271 #ifdef __LITTLE_ENDIAN__
23272 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
23273 int32x2_t __s0 = __p0; \
23274 int32x2_t __ret; \
23275 __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
23276 __ret; \
23277 })
23278 #else
23279 #define vshl_n_s32(__p0, __p1) __extension__ ({ \
23280 int32x2_t __s0 = __p0; \
23281 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23282 int32x2_t __ret; \
23283 __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
23284 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23285 __ret; \
23286 })
23287 #endif
23288
23289 #ifdef __LITTLE_ENDIAN__
23290 #define vshl_n_s64(__p0, __p1) __extension__ ({ \
23291 int64x1_t __s0 = __p0; \
23292 int64x1_t __ret; \
23293 __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
23294 __ret; \
23295 })
23296 #else
23297 #define vshl_n_s64(__p0, __p1) __extension__ ({ \
23298 int64x1_t __s0 = __p0; \
23299 int64x1_t __ret; \
23300 __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
23301 __ret; \
23302 })
23303 #endif
23304
23305 #ifdef __LITTLE_ENDIAN__
23306 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
23307 int16x4_t __s0 = __p0; \
23308 int16x4_t __ret; \
23309 __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
23310 __ret; \
23311 })
23312 #else
23313 #define vshl_n_s16(__p0, __p1) __extension__ ({ \
23314 int16x4_t __s0 = __p0; \
23315 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23316 int16x4_t __ret; \
23317 __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
23318 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23319 __ret; \
23320 })
23321 #endif
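/* Editorial note: vshlq_n_* / vshl_n_* above shift every element left by the
 * immediate __p1, which must be a constant in [0, element width - 1].
 * Illustrative example, not part of the generated header:
 *   uint32x4_t v = vdupq_n_u32(1);
 *   uint32x4_t r = vshlq_n_u32(v, 3);   // every lane becomes 8
 */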
23322
23323 #ifdef __LITTLE_ENDIAN__
23324 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
23325 uint8x8_t __s0 = __p0; \
23326 uint16x8_t __ret; \
23327 __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
23328 __ret; \
23329 })
23330 #else
23331 #define vshll_n_u8(__p0, __p1) __extension__ ({ \
23332 uint8x8_t __s0 = __p0; \
23333 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23334 uint16x8_t __ret; \
23335 __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
23336 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23337 __ret; \
23338 })
23339 #define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
23340 uint8x8_t __s0 = __p0; \
23341 uint16x8_t __ret; \
23342 __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
23343 __ret; \
23344 })
23345 #endif
23346
23347 #ifdef __LITTLE_ENDIAN__
23348 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
23349 uint32x2_t __s0 = __p0; \
23350 uint64x2_t __ret; \
23351 __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
23352 __ret; \
23353 })
23354 #else
23355 #define vshll_n_u32(__p0, __p1) __extension__ ({ \
23356 uint32x2_t __s0 = __p0; \
23357 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23358 uint64x2_t __ret; \
23359 __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
23360 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23361 __ret; \
23362 })
23363 #define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
23364 uint32x2_t __s0 = __p0; \
23365 uint64x2_t __ret; \
23366 __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
23367 __ret; \
23368 })
23369 #endif
23370
23371 #ifdef __LITTLE_ENDIAN__
23372 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
23373 uint16x4_t __s0 = __p0; \
23374 uint32x4_t __ret; \
23375 __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
23376 __ret; \
23377 })
23378 #else
23379 #define vshll_n_u16(__p0, __p1) __extension__ ({ \
23380 uint16x4_t __s0 = __p0; \
23381 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23382 uint32x4_t __ret; \
23383 __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
23384 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23385 __ret; \
23386 })
23387 #define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
23388 uint16x4_t __s0 = __p0; \
23389 uint32x4_t __ret; \
23390 __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
23391 __ret; \
23392 })
23393 #endif
23394
23395 #ifdef __LITTLE_ENDIAN__
23396 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
23397 int8x8_t __s0 = __p0; \
23398 int16x8_t __ret; \
23399 __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
23400 __ret; \
23401 })
23402 #else
23403 #define vshll_n_s8(__p0, __p1) __extension__ ({ \
23404 int8x8_t __s0 = __p0; \
23405 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23406 int16x8_t __ret; \
23407 __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
23408 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23409 __ret; \
23410 })
23411 #define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
23412 int8x8_t __s0 = __p0; \
23413 int16x8_t __ret; \
23414 __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
23415 __ret; \
23416 })
23417 #endif
23418
23419 #ifdef __LITTLE_ENDIAN__
23420 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
23421 int32x2_t __s0 = __p0; \
23422 int64x2_t __ret; \
23423 __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
23424 __ret; \
23425 })
23426 #else
23427 #define vshll_n_s32(__p0, __p1) __extension__ ({ \
23428 int32x2_t __s0 = __p0; \
23429 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23430 int64x2_t __ret; \
23431 __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
23432 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23433 __ret; \
23434 })
23435 #define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
23436 int32x2_t __s0 = __p0; \
23437 int64x2_t __ret; \
23438 __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
23439 __ret; \
23440 })
23441 #endif
23442
23443 #ifdef __LITTLE_ENDIAN__
23444 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
23445 int16x4_t __s0 = __p0; \
23446 int32x4_t __ret; \
23447 __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
23448 __ret; \
23449 })
23450 #else
23451 #define vshll_n_s16(__p0, __p1) __extension__ ({ \
23452 int16x4_t __s0 = __p0; \
23453 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23454 int32x4_t __ret; \
23455 __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
23456 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23457 __ret; \
23458 })
23459 #define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
23460 int16x4_t __s0 = __p0; \
23461 int32x4_t __ret; \
23462 __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
23463 __ret; \
23464 })
23465 #endif
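/* Editorial note: vshll_n_* above widens each element to twice its width and
 * then shifts left by the immediate (VSHLL).  Illustrative example, not part
 * of the generated header:
 *   uint8x8_t b = vdup_n_u8(3);
 *   uint16x8_t w = vshll_n_u8(b, 4);    // widen to 16 bits, then << 4: lanes are 48
 */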
23466
23467 #ifdef __LITTLE_ENDIAN__
23468 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
23469 uint8x16_t __s0 = __p0; \
23470 uint8x16_t __ret; \
23471 __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
23472 __ret; \
23473 })
23474 #else
23475 #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
23476 uint8x16_t __s0 = __p0; \
23477 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23478 uint8x16_t __ret; \
23479 __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
23480 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23481 __ret; \
23482 })
23483 #endif
23484
23485 #ifdef __LITTLE_ENDIAN__
23486 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
23487 uint32x4_t __s0 = __p0; \
23488 uint32x4_t __ret; \
23489 __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
23490 __ret; \
23491 })
23492 #else
23493 #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
23494 uint32x4_t __s0 = __p0; \
23495 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23496 uint32x4_t __ret; \
23497 __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
23498 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23499 __ret; \
23500 })
23501 #endif
23502
23503 #ifdef __LITTLE_ENDIAN__
23504 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
23505 uint64x2_t __s0 = __p0; \
23506 uint64x2_t __ret; \
23507 __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
23508 __ret; \
23509 })
23510 #else
23511 #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
23512 uint64x2_t __s0 = __p0; \
23513 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23514 uint64x2_t __ret; \
23515 __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
23516 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23517 __ret; \
23518 })
23519 #endif
23520
23521 #ifdef __LITTLE_ENDIAN__
23522 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
23523 uint16x8_t __s0 = __p0; \
23524 uint16x8_t __ret; \
23525 __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
23526 __ret; \
23527 })
23528 #else
23529 #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
23530 uint16x8_t __s0 = __p0; \
23531 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23532 uint16x8_t __ret; \
23533 __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
23534 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23535 __ret; \
23536 })
23537 #endif
23538
23539 #ifdef __LITTLE_ENDIAN__
23540 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
23541 int8x16_t __s0 = __p0; \
23542 int8x16_t __ret; \
23543 __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
23544 __ret; \
23545 })
23546 #else
23547 #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
23548 int8x16_t __s0 = __p0; \
23549 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23550 int8x16_t __ret; \
23551 __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
23552 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23553 __ret; \
23554 })
23555 #endif
23556
23557 #ifdef __LITTLE_ENDIAN__
23558 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
23559 int32x4_t __s0 = __p0; \
23560 int32x4_t __ret; \
23561 __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
23562 __ret; \
23563 })
23564 #else
23565 #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
23566 int32x4_t __s0 = __p0; \
23567 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23568 int32x4_t __ret; \
23569 __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
23570 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23571 __ret; \
23572 })
23573 #endif
23574
23575 #ifdef __LITTLE_ENDIAN__
23576 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
23577 int64x2_t __s0 = __p0; \
23578 int64x2_t __ret; \
23579 __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
23580 __ret; \
23581 })
23582 #else
23583 #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
23584 int64x2_t __s0 = __p0; \
23585 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23586 int64x2_t __ret; \
23587 __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
23588 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23589 __ret; \
23590 })
23591 #endif
23592
23593 #ifdef __LITTLE_ENDIAN__
23594 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
23595 int16x8_t __s0 = __p0; \
23596 int16x8_t __ret; \
23597 __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
23598 __ret; \
23599 })
23600 #else
23601 #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
23602 int16x8_t __s0 = __p0; \
23603 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23604 int16x8_t __ret; \
23605 __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
23606 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23607 __ret; \
23608 })
23609 #endif
23610
23611 #ifdef __LITTLE_ENDIAN__
23612 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
23613 uint8x8_t __s0 = __p0; \
23614 uint8x8_t __ret; \
23615 __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
23616 __ret; \
23617 })
23618 #else
23619 #define vshr_n_u8(__p0, __p1) __extension__ ({ \
23620 uint8x8_t __s0 = __p0; \
23621 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23622 uint8x8_t __ret; \
23623 __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
23624 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23625 __ret; \
23626 })
23627 #endif
23628
23629 #ifdef __LITTLE_ENDIAN__
23630 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
23631 uint32x2_t __s0 = __p0; \
23632 uint32x2_t __ret; \
23633 __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
23634 __ret; \
23635 })
23636 #else
23637 #define vshr_n_u32(__p0, __p1) __extension__ ({ \
23638 uint32x2_t __s0 = __p0; \
23639 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23640 uint32x2_t __ret; \
23641 __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
23642 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23643 __ret; \
23644 })
23645 #endif
23646
23647 #ifdef __LITTLE_ENDIAN__
23648 #define vshr_n_u64(__p0, __p1) __extension__ ({ \
23649 uint64x1_t __s0 = __p0; \
23650 uint64x1_t __ret; \
23651 __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
23652 __ret; \
23653 })
23654 #else
23655 #define vshr_n_u64(__p0, __p1) __extension__ ({ \
23656 uint64x1_t __s0 = __p0; \
23657 uint64x1_t __ret; \
23658 __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
23659 __ret; \
23660 })
23661 #endif
23662
23663 #ifdef __LITTLE_ENDIAN__
23664 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
23665 uint16x4_t __s0 = __p0; \
23666 uint16x4_t __ret; \
23667 __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
23668 __ret; \
23669 })
23670 #else
23671 #define vshr_n_u16(__p0, __p1) __extension__ ({ \
23672 uint16x4_t __s0 = __p0; \
23673 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23674 uint16x4_t __ret; \
23675 __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
23676 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23677 __ret; \
23678 })
23679 #endif
23680
23681 #ifdef __LITTLE_ENDIAN__
23682 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
23683 int8x8_t __s0 = __p0; \
23684 int8x8_t __ret; \
23685 __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
23686 __ret; \
23687 })
23688 #else
23689 #define vshr_n_s8(__p0, __p1) __extension__ ({ \
23690 int8x8_t __s0 = __p0; \
23691 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23692 int8x8_t __ret; \
23693 __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
23694 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23695 __ret; \
23696 })
23697 #endif
23698
23699 #ifdef __LITTLE_ENDIAN__
23700 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
23701 int32x2_t __s0 = __p0; \
23702 int32x2_t __ret; \
23703 __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
23704 __ret; \
23705 })
23706 #else
23707 #define vshr_n_s32(__p0, __p1) __extension__ ({ \
23708 int32x2_t __s0 = __p0; \
23709 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23710 int32x2_t __ret; \
23711 __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
23712 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23713 __ret; \
23714 })
23715 #endif
23716
23717 #ifdef __LITTLE_ENDIAN__
23718 #define vshr_n_s64(__p0, __p1) __extension__ ({ \
23719 int64x1_t __s0 = __p0; \
23720 int64x1_t __ret; \
23721 __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
23722 __ret; \
23723 })
23724 #else
23725 #define vshr_n_s64(__p0, __p1) __extension__ ({ \
23726 int64x1_t __s0 = __p0; \
23727 int64x1_t __ret; \
23728 __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
23729 __ret; \
23730 })
23731 #endif
23732
23733 #ifdef __LITTLE_ENDIAN__
23734 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
23735 int16x4_t __s0 = __p0; \
23736 int16x4_t __ret; \
23737 __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
23738 __ret; \
23739 })
23740 #else
23741 #define vshr_n_s16(__p0, __p1) __extension__ ({ \
23742 int16x4_t __s0 = __p0; \
23743 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23744 int16x4_t __ret; \
23745 __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
23746 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23747 __ret; \
23748 })
23749 #endif
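/* Editorial note: vshrq_n_* / vshr_n_* above shift every element right by the
 * immediate __p1 (logical shift for unsigned types, arithmetic for signed);
 * the immediate must be a constant in [1, element width].  Illustrative
 * example, not part of the generated header:
 *   int16x4_t v = vdup_n_s16(-16);
 *   int16x4_t r = vshr_n_s16(v, 2);     // arithmetic shift: every lane is -4
 */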
23750
23751 #ifdef __LITTLE_ENDIAN__
23752 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
23753 uint32x4_t __s0 = __p0; \
23754 uint16x4_t __ret; \
23755 __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
23756 __ret; \
23757 })
23758 #else
23759 #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
23760 uint32x4_t __s0 = __p0; \
23761 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23762 uint16x4_t __ret; \
23763 __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
23764 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23765 __ret; \
23766 })
23767 #define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
23768 uint32x4_t __s0 = __p0; \
23769 uint16x4_t __ret; \
23770 __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
23771 __ret; \
23772 })
23773 #endif
23774
23775 #ifdef __LITTLE_ENDIAN__
23776 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
23777 uint64x2_t __s0 = __p0; \
23778 uint32x2_t __ret; \
23779 __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
23780 __ret; \
23781 })
23782 #else
23783 #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
23784 uint64x2_t __s0 = __p0; \
23785 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23786 uint32x2_t __ret; \
23787 __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
23788 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23789 __ret; \
23790 })
23791 #define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
23792 uint64x2_t __s0 = __p0; \
23793 uint32x2_t __ret; \
23794 __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
23795 __ret; \
23796 })
23797 #endif
23798
23799 #ifdef __LITTLE_ENDIAN__
23800 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
23801 uint16x8_t __s0 = __p0; \
23802 uint8x8_t __ret; \
23803 __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
23804 __ret; \
23805 })
23806 #else
23807 #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
23808 uint16x8_t __s0 = __p0; \
23809 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23810 uint8x8_t __ret; \
23811 __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
23812 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23813 __ret; \
23814 })
23815 #define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
23816 uint16x8_t __s0 = __p0; \
23817 uint8x8_t __ret; \
23818 __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
23819 __ret; \
23820 })
23821 #endif
23822
23823 #ifdef __LITTLE_ENDIAN__
23824 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
23825 int32x4_t __s0 = __p0; \
23826 int16x4_t __ret; \
23827 __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
23828 __ret; \
23829 })
23830 #else
23831 #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
23832 int32x4_t __s0 = __p0; \
23833 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23834 int16x4_t __ret; \
23835 __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
23836 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23837 __ret; \
23838 })
23839 #define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
23840 int32x4_t __s0 = __p0; \
23841 int16x4_t __ret; \
23842 __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
23843 __ret; \
23844 })
23845 #endif
23846
23847 #ifdef __LITTLE_ENDIAN__
23848 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
23849 int64x2_t __s0 = __p0; \
23850 int32x2_t __ret; \
23851 __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
23852 __ret; \
23853 })
23854 #else
23855 #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
23856 int64x2_t __s0 = __p0; \
23857 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
23858 int32x2_t __ret; \
23859 __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
23860 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
23861 __ret; \
23862 })
23863 #define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
23864 int64x2_t __s0 = __p0; \
23865 int32x2_t __ret; \
23866 __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
23867 __ret; \
23868 })
23869 #endif
23870
23871 #ifdef __LITTLE_ENDIAN__
23872 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
23873 int16x8_t __s0 = __p0; \
23874 int8x8_t __ret; \
23875 __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
23876 __ret; \
23877 })
23878 #else
23879 #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
23880 int16x8_t __s0 = __p0; \
23881 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23882 int8x8_t __ret; \
23883 __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
23884 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23885 __ret; \
23886 })
23887 #define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
23888 int16x8_t __s0 = __p0; \
23889 int8x8_t __ret; \
23890 __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
23891 __ret; \
23892 })
23893 #endif
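/* Editorial note: vshrn_n_* above shifts each element right by the immediate
 * and narrows the result to half the width (VSHRN).  Illustrative example,
 * not part of the generated header:
 *   uint32x4_t v = vdupq_n_u32(0x12345);
 *   uint16x4_t r = vshrn_n_u32(v, 8);   // (0x12345 >> 8) truncated to 16 bits == 0x0123
 */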
23894
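/* Editorial note: the vsli_n_* / vsliq_n_* macros that follow implement
 * "shift left and insert" (VSLI): each result lane is (__p1 << __p2) with the
 * low __p2 bits taken from __p0.  Comment added for readability; not part of
 * the generated header.
 */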
23895 #ifdef __LITTLE_ENDIAN__
23896 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
23897 poly8x8_t __s0 = __p0; \
23898 poly8x8_t __s1 = __p1; \
23899 poly8x8_t __ret; \
23900 __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
23901 __ret; \
23902 })
23903 #else
23904 #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
23905 poly8x8_t __s0 = __p0; \
23906 poly8x8_t __s1 = __p1; \
23907 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23908 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
23909 poly8x8_t __ret; \
23910 __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
23911 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23912 __ret; \
23913 })
23914 #endif
23915
23916 #ifdef __LITTLE_ENDIAN__
23917 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
23918 poly16x4_t __s0 = __p0; \
23919 poly16x4_t __s1 = __p1; \
23920 poly16x4_t __ret; \
23921 __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
23922 __ret; \
23923 })
23924 #else
23925 #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
23926 poly16x4_t __s0 = __p0; \
23927 poly16x4_t __s1 = __p1; \
23928 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
23929 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
23930 poly16x4_t __ret; \
23931 __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
23932 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
23933 __ret; \
23934 })
23935 #endif
23936
23937 #ifdef __LITTLE_ENDIAN__
23938 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
23939 poly8x16_t __s0 = __p0; \
23940 poly8x16_t __s1 = __p1; \
23941 poly8x16_t __ret; \
23942 __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
23943 __ret; \
23944 })
23945 #else
23946 #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
23947 poly8x16_t __s0 = __p0; \
23948 poly8x16_t __s1 = __p1; \
23949 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23950 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23951 poly8x16_t __ret; \
23952 __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
23953 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23954 __ret; \
23955 })
23956 #endif
23957
23958 #ifdef __LITTLE_ENDIAN__
23959 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
23960 poly16x8_t __s0 = __p0; \
23961 poly16x8_t __s1 = __p1; \
23962 poly16x8_t __ret; \
23963 __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
23964 __ret; \
23965 })
23966 #else
23967 #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
23968 poly16x8_t __s0 = __p0; \
23969 poly16x8_t __s1 = __p1; \
23970 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
23971 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
23972 poly16x8_t __ret; \
23973 __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
23974 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
23975 __ret; \
23976 })
23977 #endif
23978
23979 #ifdef __LITTLE_ENDIAN__
23980 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
23981 uint8x16_t __s0 = __p0; \
23982 uint8x16_t __s1 = __p1; \
23983 uint8x16_t __ret; \
23984 __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
23985 __ret; \
23986 })
23987 #else
23988 #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
23989 uint8x16_t __s0 = __p0; \
23990 uint8x16_t __s1 = __p1; \
23991 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23992 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23993 uint8x16_t __ret; \
23994 __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
23995 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
23996 __ret; \
23997 })
23998 #endif
23999
24000 #ifdef __LITTLE_ENDIAN__
24001 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24002 uint32x4_t __s0 = __p0; \
24003 uint32x4_t __s1 = __p1; \
24004 uint32x4_t __ret; \
24005 __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
24006 __ret; \
24007 })
24008 #else
24009 #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24010 uint32x4_t __s0 = __p0; \
24011 uint32x4_t __s1 = __p1; \
24012 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24013 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24014 uint32x4_t __ret; \
24015 __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
24016 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24017 __ret; \
24018 })
24019 #endif
24020
24021 #ifdef __LITTLE_ENDIAN__
24022 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24023 uint64x2_t __s0 = __p0; \
24024 uint64x2_t __s1 = __p1; \
24025 uint64x2_t __ret; \
24026 __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
24027 __ret; \
24028 })
24029 #else
24030 #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24031 uint64x2_t __s0 = __p0; \
24032 uint64x2_t __s1 = __p1; \
24033 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24034 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24035 uint64x2_t __ret; \
24036 __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
24037 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24038 __ret; \
24039 })
24040 #endif
24041
24042 #ifdef __LITTLE_ENDIAN__
24043 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24044 uint16x8_t __s0 = __p0; \
24045 uint16x8_t __s1 = __p1; \
24046 uint16x8_t __ret; \
24047 __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
24048 __ret; \
24049 })
24050 #else
24051 #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24052 uint16x8_t __s0 = __p0; \
24053 uint16x8_t __s1 = __p1; \
24054 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24055 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24056 uint16x8_t __ret; \
24057 __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
24058 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24059 __ret; \
24060 })
24061 #endif
24062
24063 #ifdef __LITTLE_ENDIAN__
24064 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24065 int8x16_t __s0 = __p0; \
24066 int8x16_t __s1 = __p1; \
24067 int8x16_t __ret; \
24068 __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
24069 __ret; \
24070 })
24071 #else
24072 #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24073 int8x16_t __s0 = __p0; \
24074 int8x16_t __s1 = __p1; \
24075 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24076 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24077 int8x16_t __ret; \
24078 __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
24079 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24080 __ret; \
24081 })
24082 #endif
24083
24084 #ifdef __LITTLE_ENDIAN__
24085 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24086 int32x4_t __s0 = __p0; \
24087 int32x4_t __s1 = __p1; \
24088 int32x4_t __ret; \
24089 __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
24090 __ret; \
24091 })
24092 #else
24093 #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24094 int32x4_t __s0 = __p0; \
24095 int32x4_t __s1 = __p1; \
24096 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24097 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24098 int32x4_t __ret; \
24099 __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
24100 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24101 __ret; \
24102 })
24103 #endif
24104
24105 #ifdef __LITTLE_ENDIAN__
24106 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24107 int64x2_t __s0 = __p0; \
24108 int64x2_t __s1 = __p1; \
24109 int64x2_t __ret; \
24110 __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
24111 __ret; \
24112 })
24113 #else
24114 #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24115 int64x2_t __s0 = __p0; \
24116 int64x2_t __s1 = __p1; \
24117 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24118 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24119 int64x2_t __ret; \
24120 __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
24121 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24122 __ret; \
24123 })
24124 #endif
24125
24126 #ifdef __LITTLE_ENDIAN__
24127 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24128 int16x8_t __s0 = __p0; \
24129 int16x8_t __s1 = __p1; \
24130 int16x8_t __ret; \
24131 __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
24132 __ret; \
24133 })
24134 #else
24135 #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24136 int16x8_t __s0 = __p0; \
24137 int16x8_t __s1 = __p1; \
24138 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24139 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24140 int16x8_t __ret; \
24141 __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
24142 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24143 __ret; \
24144 })
24145 #endif
24146
24147 #ifdef __LITTLE_ENDIAN__
24148 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
24149 uint8x8_t __s0 = __p0; \
24150 uint8x8_t __s1 = __p1; \
24151 uint8x8_t __ret; \
24152 __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
24153 __ret; \
24154 })
24155 #else
24156 #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
24157 uint8x8_t __s0 = __p0; \
24158 uint8x8_t __s1 = __p1; \
24159 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24160 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24161 uint8x8_t __ret; \
24162 __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
24163 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24164 __ret; \
24165 })
24166 #endif
24167
24168 #ifdef __LITTLE_ENDIAN__
24169 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
24170 uint32x2_t __s0 = __p0; \
24171 uint32x2_t __s1 = __p1; \
24172 uint32x2_t __ret; \
24173 __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
24174 __ret; \
24175 })
24176 #else
24177 #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
24178 uint32x2_t __s0 = __p0; \
24179 uint32x2_t __s1 = __p1; \
24180 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24181 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24182 uint32x2_t __ret; \
24183 __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
24184 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24185 __ret; \
24186 })
24187 #endif
24188
24189 #ifdef __LITTLE_ENDIAN__
24190 #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
24191 uint64x1_t __s0 = __p0; \
24192 uint64x1_t __s1 = __p1; \
24193 uint64x1_t __ret; \
24194 __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24195 __ret; \
24196 })
24197 #else
24198 #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
24199 uint64x1_t __s0 = __p0; \
24200 uint64x1_t __s1 = __p1; \
24201 uint64x1_t __ret; \
24202 __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24203 __ret; \
24204 })
24205 #endif
24206
24207 #ifdef __LITTLE_ENDIAN__
24208 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
24209 uint16x4_t __s0 = __p0; \
24210 uint16x4_t __s1 = __p1; \
24211 uint16x4_t __ret; \
24212 __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
24213 __ret; \
24214 })
24215 #else
24216 #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
24217 uint16x4_t __s0 = __p0; \
24218 uint16x4_t __s1 = __p1; \
24219 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24220 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24221 uint16x4_t __ret; \
24222 __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
24223 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24224 __ret; \
24225 })
24226 #endif
24227
24228 #ifdef __LITTLE_ENDIAN__
24229 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
24230 int8x8_t __s0 = __p0; \
24231 int8x8_t __s1 = __p1; \
24232 int8x8_t __ret; \
24233 __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
24234 __ret; \
24235 })
24236 #else
24237 #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
24238 int8x8_t __s0 = __p0; \
24239 int8x8_t __s1 = __p1; \
24240 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24241 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24242 int8x8_t __ret; \
24243 __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
24244 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24245 __ret; \
24246 })
24247 #endif
24248
24249 #ifdef __LITTLE_ENDIAN__
24250 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
24251 int32x2_t __s0 = __p0; \
24252 int32x2_t __s1 = __p1; \
24253 int32x2_t __ret; \
24254 __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
24255 __ret; \
24256 })
24257 #else
24258 #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
24259 int32x2_t __s0 = __p0; \
24260 int32x2_t __s1 = __p1; \
24261 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24262 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24263 int32x2_t __ret; \
24264 __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
24265 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24266 __ret; \
24267 })
24268 #endif
24269
24270 #ifdef __LITTLE_ENDIAN__
24271 #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
24272 int64x1_t __s0 = __p0; \
24273 int64x1_t __s1 = __p1; \
24274 int64x1_t __ret; \
24275 __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24276 __ret; \
24277 })
24278 #else
24279 #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
24280 int64x1_t __s0 = __p0; \
24281 int64x1_t __s1 = __p1; \
24282 int64x1_t __ret; \
24283 __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24284 __ret; \
24285 })
24286 #endif
24287
24288 #ifdef __LITTLE_ENDIAN__
24289 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
24290 int16x4_t __s0 = __p0; \
24291 int16x4_t __s1 = __p1; \
24292 int16x4_t __ret; \
24293 __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
24294 __ret; \
24295 })
24296 #else
24297 #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
24298 int16x4_t __s0 = __p0; \
24299 int16x4_t __s1 = __p1; \
24300 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24301 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24302 int16x4_t __ret; \
24303 __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
24304 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24305 __ret; \
24306 })
24307 #endif
24308
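/*
 * vsra_n_* / vsraq_n_*: shift right and accumulate (SRA).  Each lane shifts
 * the second operand right by the immediate (arithmetic shift for signed
 * element types, logical for unsigned) and adds it to the first operand:
 *
 *     result[i] = __p0[i] + (__p1[i] >> __p2)
 *
 * A minimal usage sketch (illustrative; assumes a NEON-enabled target):
 *
 *     uint32x4_t acc = vdupq_n_u32(100);
 *     uint32x4_t x   = vdupq_n_u32(64);
 *     uint32x4_t r   = vsraq_n_u32(acc, x, 4);   // every lane == 104
 */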
24309 #ifdef __LITTLE_ENDIAN__
24310 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24311 uint8x16_t __s0 = __p0; \
24312 uint8x16_t __s1 = __p1; \
24313 uint8x16_t __ret; \
24314 __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
24315 __ret; \
24316 })
24317 #else
24318 #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24319 uint8x16_t __s0 = __p0; \
24320 uint8x16_t __s1 = __p1; \
24321 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24322 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24323 uint8x16_t __ret; \
24324 __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
24325 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24326 __ret; \
24327 })
24328 #endif
24329
24330 #ifdef __LITTLE_ENDIAN__
24331 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24332 uint32x4_t __s0 = __p0; \
24333 uint32x4_t __s1 = __p1; \
24334 uint32x4_t __ret; \
24335 __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
24336 __ret; \
24337 })
24338 #else
24339 #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24340 uint32x4_t __s0 = __p0; \
24341 uint32x4_t __s1 = __p1; \
24342 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24343 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24344 uint32x4_t __ret; \
24345 __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
24346 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24347 __ret; \
24348 })
24349 #endif
24350
24351 #ifdef __LITTLE_ENDIAN__
24352 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24353 uint64x2_t __s0 = __p0; \
24354 uint64x2_t __s1 = __p1; \
24355 uint64x2_t __ret; \
24356 __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
24357 __ret; \
24358 })
24359 #else
24360 #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24361 uint64x2_t __s0 = __p0; \
24362 uint64x2_t __s1 = __p1; \
24363 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24364 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24365 uint64x2_t __ret; \
24366 __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
24367 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24368 __ret; \
24369 })
24370 #endif
24371
24372 #ifdef __LITTLE_ENDIAN__
24373 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24374 uint16x8_t __s0 = __p0; \
24375 uint16x8_t __s1 = __p1; \
24376 uint16x8_t __ret; \
24377 __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
24378 __ret; \
24379 })
24380 #else
24381 #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24382 uint16x8_t __s0 = __p0; \
24383 uint16x8_t __s1 = __p1; \
24384 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24385 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24386 uint16x8_t __ret; \
24387 __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
24388 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24389 __ret; \
24390 })
24391 #endif
24392
24393 #ifdef __LITTLE_ENDIAN__
24394 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24395 int8x16_t __s0 = __p0; \
24396 int8x16_t __s1 = __p1; \
24397 int8x16_t __ret; \
24398 __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
24399 __ret; \
24400 })
24401 #else
24402 #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24403 int8x16_t __s0 = __p0; \
24404 int8x16_t __s1 = __p1; \
24405 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24406 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24407 int8x16_t __ret; \
24408 __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
24409 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24410 __ret; \
24411 })
24412 #endif
24413
24414 #ifdef __LITTLE_ENDIAN__
24415 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24416 int32x4_t __s0 = __p0; \
24417 int32x4_t __s1 = __p1; \
24418 int32x4_t __ret; \
24419 __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
24420 __ret; \
24421 })
24422 #else
24423 #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24424 int32x4_t __s0 = __p0; \
24425 int32x4_t __s1 = __p1; \
24426 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24427 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24428 int32x4_t __ret; \
24429 __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
24430 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24431 __ret; \
24432 })
24433 #endif
24434
24435 #ifdef __LITTLE_ENDIAN__
24436 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24437 int64x2_t __s0 = __p0; \
24438 int64x2_t __s1 = __p1; \
24439 int64x2_t __ret; \
24440 __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
24441 __ret; \
24442 })
24443 #else
24444 #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24445 int64x2_t __s0 = __p0; \
24446 int64x2_t __s1 = __p1; \
24447 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24448 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24449 int64x2_t __ret; \
24450 __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
24451 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24452 __ret; \
24453 })
24454 #endif
24455
24456 #ifdef __LITTLE_ENDIAN__
24457 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24458 int16x8_t __s0 = __p0; \
24459 int16x8_t __s1 = __p1; \
24460 int16x8_t __ret; \
24461 __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
24462 __ret; \
24463 })
24464 #else
24465 #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24466 int16x8_t __s0 = __p0; \
24467 int16x8_t __s1 = __p1; \
24468 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24469 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24470 int16x8_t __ret; \
24471 __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
24472 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24473 __ret; \
24474 })
24475 #endif
24476
24477 #ifdef __LITTLE_ENDIAN__
24478 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
24479 uint8x8_t __s0 = __p0; \
24480 uint8x8_t __s1 = __p1; \
24481 uint8x8_t __ret; \
24482 __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
24483 __ret; \
24484 })
24485 #else
24486 #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
24487 uint8x8_t __s0 = __p0; \
24488 uint8x8_t __s1 = __p1; \
24489 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24490 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24491 uint8x8_t __ret; \
24492 __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
24493 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24494 __ret; \
24495 })
24496 #endif
24497
24498 #ifdef __LITTLE_ENDIAN__
24499 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
24500 uint32x2_t __s0 = __p0; \
24501 uint32x2_t __s1 = __p1; \
24502 uint32x2_t __ret; \
24503 __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
24504 __ret; \
24505 })
24506 #else
24507 #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
24508 uint32x2_t __s0 = __p0; \
24509 uint32x2_t __s1 = __p1; \
24510 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24511 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24512 uint32x2_t __ret; \
24513 __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
24514 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24515 __ret; \
24516 })
24517 #endif
24518
24519 #ifdef __LITTLE_ENDIAN__
24520 #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
24521 uint64x1_t __s0 = __p0; \
24522 uint64x1_t __s1 = __p1; \
24523 uint64x1_t __ret; \
24524 __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24525 __ret; \
24526 })
24527 #else
24528 #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
24529 uint64x1_t __s0 = __p0; \
24530 uint64x1_t __s1 = __p1; \
24531 uint64x1_t __ret; \
24532 __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24533 __ret; \
24534 })
24535 #endif
24536
24537 #ifdef __LITTLE_ENDIAN__
24538 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
24539 uint16x4_t __s0 = __p0; \
24540 uint16x4_t __s1 = __p1; \
24541 uint16x4_t __ret; \
24542 __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
24543 __ret; \
24544 })
24545 #else
24546 #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
24547 uint16x4_t __s0 = __p0; \
24548 uint16x4_t __s1 = __p1; \
24549 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24550 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24551 uint16x4_t __ret; \
24552 __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
24553 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24554 __ret; \
24555 })
24556 #endif
24557
24558 #ifdef __LITTLE_ENDIAN__
24559 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
24560 int8x8_t __s0 = __p0; \
24561 int8x8_t __s1 = __p1; \
24562 int8x8_t __ret; \
24563 __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
24564 __ret; \
24565 })
24566 #else
24567 #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
24568 int8x8_t __s0 = __p0; \
24569 int8x8_t __s1 = __p1; \
24570 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24571 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24572 int8x8_t __ret; \
24573 __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
24574 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24575 __ret; \
24576 })
24577 #endif
24578
24579 #ifdef __LITTLE_ENDIAN__
24580 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
24581 int32x2_t __s0 = __p0; \
24582 int32x2_t __s1 = __p1; \
24583 int32x2_t __ret; \
24584 __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
24585 __ret; \
24586 })
24587 #else
24588 #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
24589 int32x2_t __s0 = __p0; \
24590 int32x2_t __s1 = __p1; \
24591 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24592 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24593 int32x2_t __ret; \
24594 __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
24595 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24596 __ret; \
24597 })
24598 #endif
24599
24600 #ifdef __LITTLE_ENDIAN__
24601 #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
24602 int64x1_t __s0 = __p0; \
24603 int64x1_t __s1 = __p1; \
24604 int64x1_t __ret; \
24605 __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24606 __ret; \
24607 })
24608 #else
24609 #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
24610 int64x1_t __s0 = __p0; \
24611 int64x1_t __s1 = __p1; \
24612 int64x1_t __ret; \
24613 __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
24614 __ret; \
24615 })
24616 #endif
24617
24618 #ifdef __LITTLE_ENDIAN__
24619 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
24620 int16x4_t __s0 = __p0; \
24621 int16x4_t __s1 = __p1; \
24622 int16x4_t __ret; \
24623 __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
24624 __ret; \
24625 })
24626 #else
24627 #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
24628 int16x4_t __s0 = __p0; \
24629 int16x4_t __s1 = __p1; \
24630 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24631 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24632 int16x4_t __ret; \
24633 __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
24634 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24635 __ret; \
24636 })
24637 #endif
24638
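/*
 * vsri_n_* / vsriq_n_*: shift right and insert (SRI).  Each lane shifts the
 * second operand right by the immediate and inserts it into the first
 * operand, leaving the top __p2 bits of each destination lane unchanged.
 * For 8-bit lanes, for example:
 *
 *     result[i] = (__p1[i] >> __p2) | (__p0[i] & ~(0xFFu >> __p2))
 *
 * A minimal usage sketch (illustrative; assumes a NEON-enabled target):
 *
 *     uint8x8_t dst = vdup_n_u8(0xF0);
 *     uint8x8_t src = vdup_n_u8(0xC0);
 *     uint8x8_t r   = vsri_n_u8(dst, src, 4);   // every lane == 0xFC
 */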
24639 #ifdef __LITTLE_ENDIAN__
24640 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
24641 poly8x8_t __s0 = __p0; \
24642 poly8x8_t __s1 = __p1; \
24643 poly8x8_t __ret; \
24644 __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
24645 __ret; \
24646 })
24647 #else
24648 #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
24649 poly8x8_t __s0 = __p0; \
24650 poly8x8_t __s1 = __p1; \
24651 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24652 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24653 poly8x8_t __ret; \
24654 __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
24655 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24656 __ret; \
24657 })
24658 #endif
24659
24660 #ifdef __LITTLE_ENDIAN__
24661 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
24662 poly16x4_t __s0 = __p0; \
24663 poly16x4_t __s1 = __p1; \
24664 poly16x4_t __ret; \
24665 __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
24666 __ret; \
24667 })
24668 #else
24669 #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
24670 poly16x4_t __s0 = __p0; \
24671 poly16x4_t __s1 = __p1; \
24672 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24673 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24674 poly16x4_t __ret; \
24675 __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
24676 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24677 __ret; \
24678 })
24679 #endif
24680
24681 #ifdef __LITTLE_ENDIAN__
24682 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
24683 poly8x16_t __s0 = __p0; \
24684 poly8x16_t __s1 = __p1; \
24685 poly8x16_t __ret; \
24686 __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
24687 __ret; \
24688 })
24689 #else
24690 #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
24691 poly8x16_t __s0 = __p0; \
24692 poly8x16_t __s1 = __p1; \
24693 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24694 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24695 poly8x16_t __ret; \
24696 __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
24697 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24698 __ret; \
24699 })
24700 #endif
24701
24702 #ifdef __LITTLE_ENDIAN__
24703 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
24704 poly16x8_t __s0 = __p0; \
24705 poly16x8_t __s1 = __p1; \
24706 poly16x8_t __ret; \
24707 __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
24708 __ret; \
24709 })
24710 #else
24711 #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
24712 poly16x8_t __s0 = __p0; \
24713 poly16x8_t __s1 = __p1; \
24714 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24715 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24716 poly16x8_t __ret; \
24717 __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
24718 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24719 __ret; \
24720 })
24721 #endif
24722
24723 #ifdef __LITTLE_ENDIAN__
24724 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24725 uint8x16_t __s0 = __p0; \
24726 uint8x16_t __s1 = __p1; \
24727 uint8x16_t __ret; \
24728 __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
24729 __ret; \
24730 })
24731 #else
24732 #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
24733 uint8x16_t __s0 = __p0; \
24734 uint8x16_t __s1 = __p1; \
24735 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24736 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24737 uint8x16_t __ret; \
24738 __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
24739 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24740 __ret; \
24741 })
24742 #endif
24743
24744 #ifdef __LITTLE_ENDIAN__
24745 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24746 uint32x4_t __s0 = __p0; \
24747 uint32x4_t __s1 = __p1; \
24748 uint32x4_t __ret; \
24749 __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
24750 __ret; \
24751 })
24752 #else
24753 #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
24754 uint32x4_t __s0 = __p0; \
24755 uint32x4_t __s1 = __p1; \
24756 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24757 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24758 uint32x4_t __ret; \
24759 __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
24760 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24761 __ret; \
24762 })
24763 #endif
24764
24765 #ifdef __LITTLE_ENDIAN__
24766 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24767 uint64x2_t __s0 = __p0; \
24768 uint64x2_t __s1 = __p1; \
24769 uint64x2_t __ret; \
24770 __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
24771 __ret; \
24772 })
24773 #else
24774 #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
24775 uint64x2_t __s0 = __p0; \
24776 uint64x2_t __s1 = __p1; \
24777 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24778 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24779 uint64x2_t __ret; \
24780 __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
24781 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24782 __ret; \
24783 })
24784 #endif
24785
24786 #ifdef __LITTLE_ENDIAN__
24787 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24788 uint16x8_t __s0 = __p0; \
24789 uint16x8_t __s1 = __p1; \
24790 uint16x8_t __ret; \
24791 __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
24792 __ret; \
24793 })
24794 #else
24795 #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
24796 uint16x8_t __s0 = __p0; \
24797 uint16x8_t __s1 = __p1; \
24798 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24799 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24800 uint16x8_t __ret; \
24801 __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
24802 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24803 __ret; \
24804 })
24805 #endif
24806
24807 #ifdef __LITTLE_ENDIAN__
24808 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24809 int8x16_t __s0 = __p0; \
24810 int8x16_t __s1 = __p1; \
24811 int8x16_t __ret; \
24812 __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
24813 __ret; \
24814 })
24815 #else
24816 #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
24817 int8x16_t __s0 = __p0; \
24818 int8x16_t __s1 = __p1; \
24819 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24820 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24821 int8x16_t __ret; \
24822 __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
24823 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
24824 __ret; \
24825 })
24826 #endif
24827
24828 #ifdef __LITTLE_ENDIAN__
24829 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24830 int32x4_t __s0 = __p0; \
24831 int32x4_t __s1 = __p1; \
24832 int32x4_t __ret; \
24833 __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
24834 __ret; \
24835 })
24836 #else
24837 #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
24838 int32x4_t __s0 = __p0; \
24839 int32x4_t __s1 = __p1; \
24840 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24841 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24842 int32x4_t __ret; \
24843 __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
24844 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24845 __ret; \
24846 })
24847 #endif
24848
24849 #ifdef __LITTLE_ENDIAN__
24850 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24851 int64x2_t __s0 = __p0; \
24852 int64x2_t __s1 = __p1; \
24853 int64x2_t __ret; \
24854 __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
24855 __ret; \
24856 })
24857 #else
24858 #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
24859 int64x2_t __s0 = __p0; \
24860 int64x2_t __s1 = __p1; \
24861 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24862 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24863 int64x2_t __ret; \
24864 __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
24865 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24866 __ret; \
24867 })
24868 #endif
24869
24870 #ifdef __LITTLE_ENDIAN__
24871 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24872 int16x8_t __s0 = __p0; \
24873 int16x8_t __s1 = __p1; \
24874 int16x8_t __ret; \
24875 __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
24876 __ret; \
24877 })
24878 #else
24879 #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
24880 int16x8_t __s0 = __p0; \
24881 int16x8_t __s1 = __p1; \
24882 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24883 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24884 int16x8_t __ret; \
24885 __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
24886 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24887 __ret; \
24888 })
24889 #endif
24890
24891 #ifdef __LITTLE_ENDIAN__
24892 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
24893 uint8x8_t __s0 = __p0; \
24894 uint8x8_t __s1 = __p1; \
24895 uint8x8_t __ret; \
24896 __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
24897 __ret; \
24898 })
24899 #else
24900 #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
24901 uint8x8_t __s0 = __p0; \
24902 uint8x8_t __s1 = __p1; \
24903 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24904 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24905 uint8x8_t __ret; \
24906 __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
24907 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24908 __ret; \
24909 })
24910 #endif
24911
24912 #ifdef __LITTLE_ENDIAN__
24913 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
24914 uint32x2_t __s0 = __p0; \
24915 uint32x2_t __s1 = __p1; \
24916 uint32x2_t __ret; \
24917 __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
24918 __ret; \
24919 })
24920 #else
24921 #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
24922 uint32x2_t __s0 = __p0; \
24923 uint32x2_t __s1 = __p1; \
24924 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
24925 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
24926 uint32x2_t __ret; \
24927 __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
24928 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
24929 __ret; \
24930 })
24931 #endif
24932
24933 #ifdef __LITTLE_ENDIAN__
24934 #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
24935 uint64x1_t __s0 = __p0; \
24936 uint64x1_t __s1 = __p1; \
24937 uint64x1_t __ret; \
24938 __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24939 __ret; \
24940 })
24941 #else
24942 #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
24943 uint64x1_t __s0 = __p0; \
24944 uint64x1_t __s1 = __p1; \
24945 uint64x1_t __ret; \
24946 __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
24947 __ret; \
24948 })
24949 #endif
24950
24951 #ifdef __LITTLE_ENDIAN__
24952 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
24953 uint16x4_t __s0 = __p0; \
24954 uint16x4_t __s1 = __p1; \
24955 uint16x4_t __ret; \
24956 __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
24957 __ret; \
24958 })
24959 #else
24960 #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
24961 uint16x4_t __s0 = __p0; \
24962 uint16x4_t __s1 = __p1; \
24963 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
24964 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
24965 uint16x4_t __ret; \
24966 __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
24967 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
24968 __ret; \
24969 })
24970 #endif
24971
24972 #ifdef __LITTLE_ENDIAN__
24973 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
24974 int8x8_t __s0 = __p0; \
24975 int8x8_t __s1 = __p1; \
24976 int8x8_t __ret; \
24977 __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
24978 __ret; \
24979 })
24980 #else
24981 #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
24982 int8x8_t __s0 = __p0; \
24983 int8x8_t __s1 = __p1; \
24984 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
24985 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
24986 int8x8_t __ret; \
24987 __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
24988 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
24989 __ret; \
24990 })
24991 #endif
24992
24993 #ifdef __LITTLE_ENDIAN__
24994 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
24995 int32x2_t __s0 = __p0; \
24996 int32x2_t __s1 = __p1; \
24997 int32x2_t __ret; \
24998 __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
24999 __ret; \
25000 })
25001 #else
25002 #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
25003 int32x2_t __s0 = __p0; \
25004 int32x2_t __s1 = __p1; \
25005 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
25006 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25007 int32x2_t __ret; \
25008 __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
25009 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
25010 __ret; \
25011 })
25012 #endif
25013
25014 #ifdef __LITTLE_ENDIAN__
25015 #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
25016 int64x1_t __s0 = __p0; \
25017 int64x1_t __s1 = __p1; \
25018 int64x1_t __ret; \
25019 __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
25020 __ret; \
25021 })
25022 #else
25023 #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
25024 int64x1_t __s0 = __p0; \
25025 int64x1_t __s1 = __p1; \
25026 int64x1_t __ret; \
25027 __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
25028 __ret; \
25029 })
25030 #endif
25031
25032 #ifdef __LITTLE_ENDIAN__
25033 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
25034 int16x4_t __s0 = __p0; \
25035 int16x4_t __s1 = __p1; \
25036 int16x4_t __ret; \
25037 __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
25038 __ret; \
25039 })
25040 #else
25041 #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
25042 int16x4_t __s0 = __p0; \
25043 int16x4_t __s1 = __p1; \
25044 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
25045 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25046 int16x4_t __ret; \
25047 __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
25048 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
25049 __ret; \
25050 })
25051 #endif
25052
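/*
 * vst1_* / vst1q_*: store a whole 64-bit (vst1_*) or 128-bit (vst1q_*) vector
 * to the address in __p0 (ST1).  The big-endian variants lane-reverse the
 * vector before the store, using the same __builtin_shufflevector convention
 * as the rest of this header.
 *
 * A minimal usage sketch (illustrative; assumes a NEON-enabled target):
 *
 *     uint8_t buf[16];
 *     uint8x16_t v = vdupq_n_u8(0xAB);
 *     vst1q_u8(buf, v);   // buf[0] .. buf[15] all hold 0xAB
 */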
25053 #ifdef __LITTLE_ENDIAN__
25054 #define vst1_p8(__p0, __p1) __extension__ ({ \
25055 poly8x8_t __s1 = __p1; \
25056 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
25057 })
25058 #else
25059 #define vst1_p8(__p0, __p1) __extension__ ({ \
25060 poly8x8_t __s1 = __p1; \
25061 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25062 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
25063 })
25064 #endif
25065
25066 #ifdef __LITTLE_ENDIAN__
25067 #define vst1_p16(__p0, __p1) __extension__ ({ \
25068 poly16x4_t __s1 = __p1; \
25069 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
25070 })
25071 #else
25072 #define vst1_p16(__p0, __p1) __extension__ ({ \
25073 poly16x4_t __s1 = __p1; \
25074 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25075 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
25076 })
25077 #endif
25078
25079 #ifdef __LITTLE_ENDIAN__
25080 #define vst1q_p8(__p0, __p1) __extension__ ({ \
25081 poly8x16_t __s1 = __p1; \
25082 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
25083 })
25084 #else
25085 #define vst1q_p8(__p0, __p1) __extension__ ({ \
25086 poly8x16_t __s1 = __p1; \
25087 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25088 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
25089 })
25090 #endif
25091
25092 #ifdef __LITTLE_ENDIAN__
25093 #define vst1q_p16(__p0, __p1) __extension__ ({ \
25094 poly16x8_t __s1 = __p1; \
25095 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
25096 })
25097 #else
25098 #define vst1q_p16(__p0, __p1) __extension__ ({ \
25099 poly16x8_t __s1 = __p1; \
25100 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25101 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
25102 })
25103 #endif
25104
25105 #ifdef __LITTLE_ENDIAN__
25106 #define vst1q_u8(__p0, __p1) __extension__ ({ \
25107 uint8x16_t __s1 = __p1; \
25108 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
25109 })
25110 #else
25111 #define vst1q_u8(__p0, __p1) __extension__ ({ \
25112 uint8x16_t __s1 = __p1; \
25113 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25114 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
25115 })
25116 #endif
25117
25118 #ifdef __LITTLE_ENDIAN__
25119 #define vst1q_u32(__p0, __p1) __extension__ ({ \
25120 uint32x4_t __s1 = __p1; \
25121 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
25122 })
25123 #else
25124 #define vst1q_u32(__p0, __p1) __extension__ ({ \
25125 uint32x4_t __s1 = __p1; \
25126 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25127 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
25128 })
25129 #endif
25130
25131 #ifdef __LITTLE_ENDIAN__
25132 #define vst1q_u64(__p0, __p1) __extension__ ({ \
25133 uint64x2_t __s1 = __p1; \
25134 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
25135 })
25136 #else
25137 #define vst1q_u64(__p0, __p1) __extension__ ({ \
25138 uint64x2_t __s1 = __p1; \
25139 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25140 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
25141 })
25142 #endif
25143
25144 #ifdef __LITTLE_ENDIAN__
25145 #define vst1q_u16(__p0, __p1) __extension__ ({ \
25146 uint16x8_t __s1 = __p1; \
25147 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
25148 })
25149 #else
25150 #define vst1q_u16(__p0, __p1) __extension__ ({ \
25151 uint16x8_t __s1 = __p1; \
25152 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25153 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
25154 })
25155 #endif
25156
25157 #ifdef __LITTLE_ENDIAN__
25158 #define vst1q_s8(__p0, __p1) __extension__ ({ \
25159 int8x16_t __s1 = __p1; \
25160 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
25161 })
25162 #else
25163 #define vst1q_s8(__p0, __p1) __extension__ ({ \
25164 int8x16_t __s1 = __p1; \
25165 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25166 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
25167 })
25168 #endif
25169
25170 #ifdef __LITTLE_ENDIAN__
25171 #define vst1q_f32(__p0, __p1) __extension__ ({ \
25172 float32x4_t __s1 = __p1; \
25173 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
25174 })
25175 #else
25176 #define vst1q_f32(__p0, __p1) __extension__ ({ \
25177 float32x4_t __s1 = __p1; \
25178 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25179 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
25180 })
25181 #endif
25182
25183 #ifdef __LITTLE_ENDIAN__
25184 #define vst1q_f16(__p0, __p1) __extension__ ({ \
25185 float16x8_t __s1 = __p1; \
25186 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
25187 })
25188 #else
25189 #define vst1q_f16(__p0, __p1) __extension__ ({ \
25190 float16x8_t __s1 = __p1; \
25191 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25192 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
25193 })
25194 #endif
25195
25196 #ifdef __LITTLE_ENDIAN__
25197 #define vst1q_s32(__p0, __p1) __extension__ ({ \
25198 int32x4_t __s1 = __p1; \
25199 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
25200 })
25201 #else
25202 #define vst1q_s32(__p0, __p1) __extension__ ({ \
25203 int32x4_t __s1 = __p1; \
25204 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25205 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
25206 })
25207 #endif
25208
25209 #ifdef __LITTLE_ENDIAN__
25210 #define vst1q_s64(__p0, __p1) __extension__ ({ \
25211 int64x2_t __s1 = __p1; \
25212 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
25213 })
25214 #else
25215 #define vst1q_s64(__p0, __p1) __extension__ ({ \
25216 int64x2_t __s1 = __p1; \
25217 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25218 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
25219 })
25220 #endif
25221
25222 #ifdef __LITTLE_ENDIAN__
25223 #define vst1q_s16(__p0, __p1) __extension__ ({ \
25224 int16x8_t __s1 = __p1; \
25225 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
25226 })
25227 #else
25228 #define vst1q_s16(__p0, __p1) __extension__ ({ \
25229 int16x8_t __s1 = __p1; \
25230 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25231 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
25232 })
25233 #endif
25234
25235 #ifdef __LITTLE_ENDIAN__
25236 #define vst1_u8(__p0, __p1) __extension__ ({ \
25237 uint8x8_t __s1 = __p1; \
25238 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
25239 })
25240 #else
25241 #define vst1_u8(__p0, __p1) __extension__ ({ \
25242 uint8x8_t __s1 = __p1; \
25243 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25244 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
25245 })
25246 #endif
25247
25248 #ifdef __LITTLE_ENDIAN__
25249 #define vst1_u32(__p0, __p1) __extension__ ({ \
25250 uint32x2_t __s1 = __p1; \
25251 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
25252 })
25253 #else
25254 #define vst1_u32(__p0, __p1) __extension__ ({ \
25255 uint32x2_t __s1 = __p1; \
25256 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25257 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
25258 })
25259 #endif
25260
25261 #ifdef __LITTLE_ENDIAN__
25262 #define vst1_u64(__p0, __p1) __extension__ ({ \
25263 uint64x1_t __s1 = __p1; \
25264 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
25265 })
25266 #else
25267 #define vst1_u64(__p0, __p1) __extension__ ({ \
25268 uint64x1_t __s1 = __p1; \
25269 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
25270 })
25271 #endif
25272
25273 #ifdef __LITTLE_ENDIAN__
25274 #define vst1_u16(__p0, __p1) __extension__ ({ \
25275 uint16x4_t __s1 = __p1; \
25276 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
25277 })
25278 #else
25279 #define vst1_u16(__p0, __p1) __extension__ ({ \
25280 uint16x4_t __s1 = __p1; \
25281 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25282 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
25283 })
25284 #endif
25285
25286 #ifdef __LITTLE_ENDIAN__
25287 #define vst1_s8(__p0, __p1) __extension__ ({ \
25288 int8x8_t __s1 = __p1; \
25289 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
25290 })
25291 #else
25292 #define vst1_s8(__p0, __p1) __extension__ ({ \
25293 int8x8_t __s1 = __p1; \
25294 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25295 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
25296 })
25297 #endif
25298
25299 #ifdef __LITTLE_ENDIAN__
25300 #define vst1_f32(__p0, __p1) __extension__ ({ \
25301 float32x2_t __s1 = __p1; \
25302 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
25303 })
25304 #else
25305 #define vst1_f32(__p0, __p1) __extension__ ({ \
25306 float32x2_t __s1 = __p1; \
25307 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25308 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
25309 })
25310 #endif
25311
25312 #ifdef __LITTLE_ENDIAN__
25313 #define vst1_f16(__p0, __p1) __extension__ ({ \
25314 float16x4_t __s1 = __p1; \
25315 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
25316 })
25317 #else
25318 #define vst1_f16(__p0, __p1) __extension__ ({ \
25319 float16x4_t __s1 = __p1; \
25320 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25321 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
25322 })
25323 #endif
25324
25325 #ifdef __LITTLE_ENDIAN__
25326 #define vst1_s32(__p0, __p1) __extension__ ({ \
25327 int32x2_t __s1 = __p1; \
25328 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
25329 })
25330 #else
25331 #define vst1_s32(__p0, __p1) __extension__ ({ \
25332 int32x2_t __s1 = __p1; \
25333 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25334 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
25335 })
25336 #endif
25337
25338 #ifdef __LITTLE_ENDIAN__
25339 #define vst1_s64(__p0, __p1) __extension__ ({ \
25340 int64x1_t __s1 = __p1; \
25341 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
25342 })
25343 #else
25344 #define vst1_s64(__p0, __p1) __extension__ ({ \
25345 int64x1_t __s1 = __p1; \
25346 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
25347 })
25348 #endif
25349
25350 #ifdef __LITTLE_ENDIAN__
25351 #define vst1_s16(__p0, __p1) __extension__ ({ \
25352 int16x4_t __s1 = __p1; \
25353 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
25354 })
25355 #else
25356 #define vst1_s16(__p0, __p1) __extension__ ({ \
25357 int16x4_t __s1 = __p1; \
25358 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25359 __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
25360 })
25361 #endif
25362
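/*
 * vst1_lane_* / vst1q_lane_*: store the single lane __p2 of a vector to the
 * address in __p0.  The lane index must be a compile-time constant; the
 * big-endian variants lane-reverse the vector before passing it to the
 * builtin, following the same convention as the rest of this header.
 *
 * A minimal usage sketch (illustrative; assumes a NEON-enabled target):
 *
 *     uint32_t out;
 *     uint32x4_t v = vdupq_n_u32(7);
 *     vst1q_lane_u32(&out, v, 2);   // out == 7
 */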
25363 #ifdef __LITTLE_ENDIAN__
25364 #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25365 poly8x8_t __s1 = __p1; \
25366 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
25367 })
25368 #else
25369 #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25370 poly8x8_t __s1 = __p1; \
25371 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25372 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
25373 })
25374 #endif
25375
25376 #ifdef __LITTLE_ENDIAN__
25377 #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25378 poly16x4_t __s1 = __p1; \
25379 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
25380 })
25381 #else
25382 #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25383 poly16x4_t __s1 = __p1; \
25384 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25385 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
25386 })
25387 #endif
25388
25389 #ifdef __LITTLE_ENDIAN__
25390 #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25391 poly8x16_t __s1 = __p1; \
25392 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
25393 })
25394 #else
25395 #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25396 poly8x16_t __s1 = __p1; \
25397 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25398 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
25399 })
25400 #endif
25401
25402 #ifdef __LITTLE_ENDIAN__
25403 #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25404 poly16x8_t __s1 = __p1; \
25405 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
25406 })
25407 #else
25408 #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
25409 poly16x8_t __s1 = __p1; \
25410 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25411 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
25412 })
25413 #endif
25414
25415 #ifdef __LITTLE_ENDIAN__
25416 #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25417 uint8x16_t __s1 = __p1; \
25418 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
25419 })
25420 #else
25421 #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25422 uint8x16_t __s1 = __p1; \
25423 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25424 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
25425 })
25426 #endif
25427
25428 #ifdef __LITTLE_ENDIAN__
25429 #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25430 uint32x4_t __s1 = __p1; \
25431 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
25432 })
25433 #else
25434 #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25435 uint32x4_t __s1 = __p1; \
25436 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25437 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
25438 })
25439 #endif
25440
25441 #ifdef __LITTLE_ENDIAN__
25442 #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25443 uint64x2_t __s1 = __p1; \
25444 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
25445 })
25446 #else
25447 #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25448 uint64x2_t __s1 = __p1; \
25449 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25450 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
25451 })
25452 #endif
25453
25454 #ifdef __LITTLE_ENDIAN__
25455 #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25456 uint16x8_t __s1 = __p1; \
25457 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
25458 })
25459 #else
25460 #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25461 uint16x8_t __s1 = __p1; \
25462 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25463 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
25464 })
25465 #endif
25466
25467 #ifdef __LITTLE_ENDIAN__
25468 #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25469 int8x16_t __s1 = __p1; \
25470 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
25471 })
25472 #else
25473 #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25474 int8x16_t __s1 = __p1; \
25475 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25476 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
25477 })
25478 #endif
25479
25480 #ifdef __LITTLE_ENDIAN__
25481 #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25482 float32x4_t __s1 = __p1; \
25483 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
25484 })
25485 #else
25486 #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25487 float32x4_t __s1 = __p1; \
25488 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25489 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
25490 })
25491 #endif
25492
25493 #ifdef __LITTLE_ENDIAN__
25494 #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25495 float16x8_t __s1 = __p1; \
25496 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
25497 })
25498 #else
25499 #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25500 float16x8_t __s1 = __p1; \
25501 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25502 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
25503 })
25504 #endif
25505
25506 #ifdef __LITTLE_ENDIAN__
25507 #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25508 int32x4_t __s1 = __p1; \
25509 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
25510 })
25511 #else
25512 #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25513 int32x4_t __s1 = __p1; \
25514 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25515 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
25516 })
25517 #endif
25518
25519 #ifdef __LITTLE_ENDIAN__
25520 #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25521 int64x2_t __s1 = __p1; \
25522 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
25523 })
25524 #else
25525 #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25526 int64x2_t __s1 = __p1; \
25527 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25528 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
25529 })
25530 #endif
25531
25532 #ifdef __LITTLE_ENDIAN__
25533 #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25534 int16x8_t __s1 = __p1; \
25535 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
25536 })
25537 #else
25538 #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25539 int16x8_t __s1 = __p1; \
25540 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25541 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
25542 })
25543 #endif
25544
25545 #ifdef __LITTLE_ENDIAN__
25546 #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25547 uint8x8_t __s1 = __p1; \
25548 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
25549 })
25550 #else
25551 #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
25552 uint8x8_t __s1 = __p1; \
25553 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25554 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
25555 })
25556 #endif
25557
25558 #ifdef __LITTLE_ENDIAN__
25559 #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25560 uint32x2_t __s1 = __p1; \
25561 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
25562 })
25563 #else
25564 #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
25565 uint32x2_t __s1 = __p1; \
25566 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25567 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
25568 })
25569 #endif
25570
25571 #ifdef __LITTLE_ENDIAN__
25572 #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25573 uint64x1_t __s1 = __p1; \
25574 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
25575 })
25576 #else
25577 #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
25578 uint64x1_t __s1 = __p1; \
25579 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
25580 })
25581 #endif
25582
25583 #ifdef __LITTLE_ENDIAN__
25584 #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25585 uint16x4_t __s1 = __p1; \
25586 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
25587 })
25588 #else
25589 #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
25590 uint16x4_t __s1 = __p1; \
25591 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25592 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
25593 })
25594 #endif
25595
25596 #ifdef __LITTLE_ENDIAN__
25597 #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25598 int8x8_t __s1 = __p1; \
25599 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
25600 })
25601 #else
25602 #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
25603 int8x8_t __s1 = __p1; \
25604 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
25605 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
25606 })
25607 #endif
25608
25609 #ifdef __LITTLE_ENDIAN__
25610 #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25611 float32x2_t __s1 = __p1; \
25612 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
25613 })
25614 #else
25615 #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
25616 float32x2_t __s1 = __p1; \
25617 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25618 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
25619 })
25620 #endif
25621
25622 #ifdef __LITTLE_ENDIAN__
25623 #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25624 float16x4_t __s1 = __p1; \
25625 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
25626 })
25627 #else
25628 #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
25629 float16x4_t __s1 = __p1; \
25630 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25631 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
25632 })
25633 #endif
25634
25635 #ifdef __LITTLE_ENDIAN__
25636 #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25637 int32x2_t __s1 = __p1; \
25638 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
25639 })
25640 #else
25641 #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
25642 int32x2_t __s1 = __p1; \
25643 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
25644 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
25645 })
25646 #endif
25647
25648 #ifdef __LITTLE_ENDIAN__
25649 #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25650 int64x1_t __s1 = __p1; \
25651 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
25652 })
25653 #else
25654 #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
25655 int64x1_t __s1 = __p1; \
25656 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
25657 })
25658 #endif
25659
25660 #ifdef __LITTLE_ENDIAN__
25661 #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25662 int16x4_t __s1 = __p1; \
25663 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
25664 })
25665 #else
25666 #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
25667 int16x4_t __s1 = __p1; \
25668 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
25669 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
25670 })
25671 #endif
25672
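/*
 * Illustrative sketch (not part of the generated header): the vst1_lane
 * macros above store a single element; the lane index must be an integer
 * constant expression.  On big-endian targets the lane-reversal shuffle
 * keeps the index selecting the same logical element as on little-endian.
 * The helper below is hypothetical.
 */
static __inline__ __attribute__((__always_inline__))
void __example_vst1q_lane_f32(float32_t *__dst, float32x4_t __v) {
  vst1q_lane_f32(__dst, __v, 2);   /* store element 2 of __v to *__dst */
}
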
25673 #ifdef __LITTLE_ENDIAN__
25674 #define vst2_p8(__p0, __p1) __extension__ ({ \
25675 poly8x8x2_t __s1 = __p1; \
25676 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
25677 })
25678 #else
25679 #define vst2_p8(__p0, __p1) __extension__ ({ \
25680 poly8x8x2_t __s1 = __p1; \
25681 poly8x8x2_t __rev1; \
25682 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25683 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25684 __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
25685 })
25686 #endif
25687
25688 #ifdef __LITTLE_ENDIAN__
25689 #define vst2_p16(__p0, __p1) __extension__ ({ \
25690 poly16x4x2_t __s1 = __p1; \
25691 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
25692 })
25693 #else
25694 #define vst2_p16(__p0, __p1) __extension__ ({ \
25695 poly16x4x2_t __s1 = __p1; \
25696 poly16x4x2_t __rev1; \
25697 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25698 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25699 __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
25700 })
25701 #endif
25702
25703 #ifdef __LITTLE_ENDIAN__
25704 #define vst2q_p8(__p0, __p1) __extension__ ({ \
25705 poly8x16x2_t __s1 = __p1; \
25706 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
25707 })
25708 #else
25709 #define vst2q_p8(__p0, __p1) __extension__ ({ \
25710 poly8x16x2_t __s1 = __p1; \
25711 poly8x16x2_t __rev1; \
25712 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25713 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25714 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
25715 })
25716 #endif
25717
25718 #ifdef __LITTLE_ENDIAN__
25719 #define vst2q_p16(__p0, __p1) __extension__ ({ \
25720 poly16x8x2_t __s1 = __p1; \
25721 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
25722 })
25723 #else
25724 #define vst2q_p16(__p0, __p1) __extension__ ({ \
25725 poly16x8x2_t __s1 = __p1; \
25726 poly16x8x2_t __rev1; \
25727 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25728 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25729 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
25730 })
25731 #endif
25732
25733 #ifdef __LITTLE_ENDIAN__
25734 #define vst2q_u8(__p0, __p1) __extension__ ({ \
25735 uint8x16x2_t __s1 = __p1; \
25736 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
25737 })
25738 #else
25739 #define vst2q_u8(__p0, __p1) __extension__ ({ \
25740 uint8x16x2_t __s1 = __p1; \
25741 uint8x16x2_t __rev1; \
25742 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25743 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25744 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
25745 })
25746 #endif
25747
25748 #ifdef __LITTLE_ENDIAN__
25749 #define vst2q_u32(__p0, __p1) __extension__ ({ \
25750 uint32x4x2_t __s1 = __p1; \
25751 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
25752 })
25753 #else
25754 #define vst2q_u32(__p0, __p1) __extension__ ({ \
25755 uint32x4x2_t __s1 = __p1; \
25756 uint32x4x2_t __rev1; \
25757 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25758 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25759 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
25760 })
25761 #endif
25762
25763 #ifdef __LITTLE_ENDIAN__
25764 #define vst2q_u16(__p0, __p1) __extension__ ({ \
25765 uint16x8x2_t __s1 = __p1; \
25766 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
25767 })
25768 #else
25769 #define vst2q_u16(__p0, __p1) __extension__ ({ \
25770 uint16x8x2_t __s1 = __p1; \
25771 uint16x8x2_t __rev1; \
25772 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25773 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25774 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
25775 })
25776 #endif
25777
25778 #ifdef __LITTLE_ENDIAN__
25779 #define vst2q_s8(__p0, __p1) __extension__ ({ \
25780 int8x16x2_t __s1 = __p1; \
25781 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
25782 })
25783 #else
25784 #define vst2q_s8(__p0, __p1) __extension__ ({ \
25785 int8x16x2_t __s1 = __p1; \
25786 int8x16x2_t __rev1; \
25787 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25788 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
25789 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
25790 })
25791 #endif
25792
25793 #ifdef __LITTLE_ENDIAN__
25794 #define vst2q_f32(__p0, __p1) __extension__ ({ \
25795 float32x4x2_t __s1 = __p1; \
25796 __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
25797 })
25798 #else
25799 #define vst2q_f32(__p0, __p1) __extension__ ({ \
25800 float32x4x2_t __s1 = __p1; \
25801 float32x4x2_t __rev1; \
25802 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25803 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25804 __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
25805 })
25806 #endif
25807
25808 #ifdef __LITTLE_ENDIAN__
25809 #define vst2q_f16(__p0, __p1) __extension__ ({ \
25810 float16x8x2_t __s1 = __p1; \
25811 __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
25812 })
25813 #else
25814 #define vst2q_f16(__p0, __p1) __extension__ ({ \
25815 float16x8x2_t __s1 = __p1; \
25816 float16x8x2_t __rev1; \
25817 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25818 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25819 __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
25820 })
25821 #endif
25822
25823 #ifdef __LITTLE_ENDIAN__
25824 #define vst2q_s32(__p0, __p1) __extension__ ({ \
25825 int32x4x2_t __s1 = __p1; \
25826 __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
25827 })
25828 #else
25829 #define vst2q_s32(__p0, __p1) __extension__ ({ \
25830 int32x4x2_t __s1 = __p1; \
25831 int32x4x2_t __rev1; \
25832 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25833 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25834 __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
25835 })
25836 #endif
25837
25838 #ifdef __LITTLE_ENDIAN__
25839 #define vst2q_s16(__p0, __p1) __extension__ ({ \
25840 int16x8x2_t __s1 = __p1; \
25841 __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
25842 })
25843 #else
25844 #define vst2q_s16(__p0, __p1) __extension__ ({ \
25845 int16x8x2_t __s1 = __p1; \
25846 int16x8x2_t __rev1; \
25847 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25848 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25849 __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
25850 })
25851 #endif
25852
25853 #ifdef __LITTLE_ENDIAN__
25854 #define vst2_u8(__p0, __p1) __extension__ ({ \
25855 uint8x8x2_t __s1 = __p1; \
25856 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
25857 })
25858 #else
25859 #define vst2_u8(__p0, __p1) __extension__ ({ \
25860 uint8x8x2_t __s1 = __p1; \
25861 uint8x8x2_t __rev1; \
25862 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25863 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25864 __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
25865 })
25866 #endif
25867
25868 #ifdef __LITTLE_ENDIAN__
25869 #define vst2_u32(__p0, __p1) __extension__ ({ \
25870 uint32x2x2_t __s1 = __p1; \
25871 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
25872 })
25873 #else
25874 #define vst2_u32(__p0, __p1) __extension__ ({ \
25875 uint32x2x2_t __s1 = __p1; \
25876 uint32x2x2_t __rev1; \
25877 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
25878 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
25879 __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
25880 })
25881 #endif
25882
25883 #ifdef __LITTLE_ENDIAN__
25884 #define vst2_u64(__p0, __p1) __extension__ ({ \
25885 uint64x1x2_t __s1 = __p1; \
25886 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
25887 })
25888 #else
25889 #define vst2_u64(__p0, __p1) __extension__ ({ \
25890 uint64x1x2_t __s1 = __p1; \
25891 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
25892 })
25893 #endif
25894
25895 #ifdef __LITTLE_ENDIAN__
25896 #define vst2_u16(__p0, __p1) __extension__ ({ \
25897 uint16x4x2_t __s1 = __p1; \
25898 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
25899 })
25900 #else
25901 #define vst2_u16(__p0, __p1) __extension__ ({ \
25902 uint16x4x2_t __s1 = __p1; \
25903 uint16x4x2_t __rev1; \
25904 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25905 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25906 __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
25907 })
25908 #endif
25909
25910 #ifdef __LITTLE_ENDIAN__
25911 #define vst2_s8(__p0, __p1) __extension__ ({ \
25912 int8x8x2_t __s1 = __p1; \
25913 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
25914 })
25915 #else
25916 #define vst2_s8(__p0, __p1) __extension__ ({ \
25917 int8x8x2_t __s1 = __p1; \
25918 int8x8x2_t __rev1; \
25919 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
25920 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
25921 __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
25922 })
25923 #endif
25924
25925 #ifdef __LITTLE_ENDIAN__
25926 #define vst2_f32(__p0, __p1) __extension__ ({ \
25927 float32x2x2_t __s1 = __p1; \
25928 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
25929 })
25930 #else
25931 #define vst2_f32(__p0, __p1) __extension__ ({ \
25932 float32x2x2_t __s1 = __p1; \
25933 float32x2x2_t __rev1; \
25934 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
25935 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
25936 __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
25937 })
25938 #endif
25939
25940 #ifdef __LITTLE_ENDIAN__
25941 #define vst2_f16(__p0, __p1) __extension__ ({ \
25942 float16x4x2_t __s1 = __p1; \
25943 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
25944 })
25945 #else
25946 #define vst2_f16(__p0, __p1) __extension__ ({ \
25947 float16x4x2_t __s1 = __p1; \
25948 float16x4x2_t __rev1; \
25949 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25950 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25951 __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
25952 })
25953 #endif
25954
25955 #ifdef __LITTLE_ENDIAN__
25956 #define vst2_s32(__p0, __p1) __extension__ ({ \
25957 int32x2x2_t __s1 = __p1; \
25958 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
25959 })
25960 #else
25961 #define vst2_s32(__p0, __p1) __extension__ ({ \
25962 int32x2x2_t __s1 = __p1; \
25963 int32x2x2_t __rev1; \
25964 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
25965 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
25966 __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
25967 })
25968 #endif
25969
25970 #ifdef __LITTLE_ENDIAN__
25971 #define vst2_s64(__p0, __p1) __extension__ ({ \
25972 int64x1x2_t __s1 = __p1; \
25973 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
25974 })
25975 #else
25976 #define vst2_s64(__p0, __p1) __extension__ ({ \
25977 int64x1x2_t __s1 = __p1; \
25978 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
25979 })
25980 #endif
25981
25982 #ifdef __LITTLE_ENDIAN__
25983 #define vst2_s16(__p0, __p1) __extension__ ({ \
25984 int16x4x2_t __s1 = __p1; \
25985 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
25986 })
25987 #else
25988 #define vst2_s16(__p0, __p1) __extension__ ({ \
25989 int16x4x2_t __s1 = __p1; \
25990 int16x4x2_t __rev1; \
25991 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
25992 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
25993 __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
25994 })
25995 #endif
25996
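/*
 * Illustrative sketch (not part of the generated header): the vst2 macros
 * above take a two-vector struct (e.g. uint8x8x2_t) and store it
 * interleaved, so memory receives val[0][0], val[1][0], val[0][1],
 * val[1][1], ...  On big-endian targets both member vectors are
 * lane-reversed first, as in the single-vector case.  The helper below is
 * hypothetical.
 */
static __inline__ __attribute__((__always_inline__))
void __example_vst2_u8(uint8_t *__buf, uint8x8_t __a, uint8x8_t __b) {
  uint8x8x2_t __pair = { { __a, __b } };   /* val[0] = __a, val[1] = __b */
  vst2_u8(__buf, __pair);                  /* 16 bytes written, interleaved */
}
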
25997 #ifdef __LITTLE_ENDIAN__
25998 #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
25999 poly8x8x2_t __s1 = __p1; \
26000 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
26001 })
26002 #else
26003 #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
26004 poly8x8x2_t __s1 = __p1; \
26005 poly8x8x2_t __rev1; \
26006 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26007 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26008 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
26009 })
26010 #endif
26011
26012 #ifdef __LITTLE_ENDIAN__
26013 #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26014 poly16x4x2_t __s1 = __p1; \
26015 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
26016 })
26017 #else
26018 #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26019 poly16x4x2_t __s1 = __p1; \
26020 poly16x4x2_t __rev1; \
26021 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26022 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26023 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
26024 })
26025 #endif
26026
26027 #ifdef __LITTLE_ENDIAN__
26028 #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26029 poly16x8x2_t __s1 = __p1; \
26030 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
26031 })
26032 #else
26033 #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26034 poly16x8x2_t __s1 = __p1; \
26035 poly16x8x2_t __rev1; \
26036 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26037 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26038 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
26039 })
26040 #endif
26041
26042 #ifdef __LITTLE_ENDIAN__
26043 #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26044 uint32x4x2_t __s1 = __p1; \
26045 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
26046 })
26047 #else
26048 #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26049 uint32x4x2_t __s1 = __p1; \
26050 uint32x4x2_t __rev1; \
26051 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26052 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26053 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
26054 })
26055 #endif
26056
26057 #ifdef __LITTLE_ENDIAN__
26058 #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26059 uint16x8x2_t __s1 = __p1; \
26060 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
26061 })
26062 #else
26063 #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26064 uint16x8x2_t __s1 = __p1; \
26065 uint16x8x2_t __rev1; \
26066 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26067 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26068 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
26069 })
26070 #endif
26071
26072 #ifdef __LITTLE_ENDIAN__
26073 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26074 float32x4x2_t __s1 = __p1; \
26075 __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
26076 })
26077 #else
26078 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26079 float32x4x2_t __s1 = __p1; \
26080 float32x4x2_t __rev1; \
26081 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26082 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26083 __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
26084 })
26085 #endif
26086
26087 #ifdef __LITTLE_ENDIAN__
26088 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26089 float16x8x2_t __s1 = __p1; \
26090 __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
26091 })
26092 #else
26093 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26094 float16x8x2_t __s1 = __p1; \
26095 float16x8x2_t __rev1; \
26096 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26097 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26098 __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
26099 })
26100 #endif
26101
26102 #ifdef __LITTLE_ENDIAN__
26103 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26104 int32x4x2_t __s1 = __p1; \
26105 __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
26106 })
26107 #else
26108 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26109 int32x4x2_t __s1 = __p1; \
26110 int32x4x2_t __rev1; \
26111 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26112 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26113 __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
26114 })
26115 #endif
26116
26117 #ifdef __LITTLE_ENDIAN__
26118 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26119 int16x8x2_t __s1 = __p1; \
26120 __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
26121 })
26122 #else
26123 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26124 int16x8x2_t __s1 = __p1; \
26125 int16x8x2_t __rev1; \
26126 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26127 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26128 __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
26129 })
26130 #endif
26131
26132 #ifdef __LITTLE_ENDIAN__
26133 #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26134 uint8x8x2_t __s1 = __p1; \
26135 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
26136 })
26137 #else
26138 #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26139 uint8x8x2_t __s1 = __p1; \
26140 uint8x8x2_t __rev1; \
26141 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26142 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26143 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
26144 })
26145 #endif
26146
26147 #ifdef __LITTLE_ENDIAN__
26148 #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26149 uint32x2x2_t __s1 = __p1; \
26150 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
26151 })
26152 #else
26153 #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26154 uint32x2x2_t __s1 = __p1; \
26155 uint32x2x2_t __rev1; \
26156 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26157 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26158 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
26159 })
26160 #endif
26161
26162 #ifdef __LITTLE_ENDIAN__
26163 #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26164 uint16x4x2_t __s1 = __p1; \
26165 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
26166 })
26167 #else
26168 #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26169 uint16x4x2_t __s1 = __p1; \
26170 uint16x4x2_t __rev1; \
26171 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26172 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26173 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
26174 })
26175 #endif
26176
26177 #ifdef __LITTLE_ENDIAN__
26178 #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26179 int8x8x2_t __s1 = __p1; \
26180 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
26181 })
26182 #else
26183 #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26184 int8x8x2_t __s1 = __p1; \
26185 int8x8x2_t __rev1; \
26186 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26187 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26188 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
26189 })
26190 #endif
26191
26192 #ifdef __LITTLE_ENDIAN__
26193 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26194 float32x2x2_t __s1 = __p1; \
26195 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
26196 })
26197 #else
26198 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26199 float32x2x2_t __s1 = __p1; \
26200 float32x2x2_t __rev1; \
26201 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26202 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26203 __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
26204 })
26205 #endif
26206
26207 #ifdef __LITTLE_ENDIAN__
26208 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26209 float16x4x2_t __s1 = __p1; \
26210 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
26211 })
26212 #else
26213 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26214 float16x4x2_t __s1 = __p1; \
26215 float16x4x2_t __rev1; \
26216 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26217 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26218 __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
26219 })
26220 #endif
26221
26222 #ifdef __LITTLE_ENDIAN__
26223 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26224 int32x2x2_t __s1 = __p1; \
26225 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
26226 })
26227 #else
26228 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26229 int32x2x2_t __s1 = __p1; \
26230 int32x2x2_t __rev1; \
26231 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26232 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26233 __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
26234 })
26235 #endif
26236
26237 #ifdef __LITTLE_ENDIAN__
26238 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26239 int16x4x2_t __s1 = __p1; \
26240 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
26241 })
26242 #else
26243 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26244 int16x4x2_t __s1 = __p1; \
26245 int16x4x2_t __rev1; \
26246 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26247 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26248 __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
26249 })
26250 #endif
26251
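/*
 * Illustrative sketch (not part of the generated header): the vst2_lane
 * macros above store one element from each of the two member vectors,
 * adjacent in memory (2 * sizeof(element) bytes total).  The lane index
 * must again be a constant expression.  The helper below is hypothetical.
 */
static __inline__ __attribute__((__always_inline__))
void __example_vst2_lane_s32(int32_t *__dst, int32x2x2_t __pair) {
  vst2_lane_s32(__dst, __pair, 1);   /* writes lane 1 of val[0], then lane 1 of val[1] */
}
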
26252 #ifdef __LITTLE_ENDIAN__
26253 #define vst3_p8(__p0, __p1) __extension__ ({ \
26254 poly8x8x3_t __s1 = __p1; \
26255 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
26256 })
26257 #else
26258 #define vst3_p8(__p0, __p1) __extension__ ({ \
26259 poly8x8x3_t __s1 = __p1; \
26260 poly8x8x3_t __rev1; \
26261 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26262 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26263 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26264 __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
26265 })
26266 #endif
26267
26268 #ifdef __LITTLE_ENDIAN__
26269 #define vst3_p16(__p0, __p1) __extension__ ({ \
26270 poly16x4x3_t __s1 = __p1; \
26271 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
26272 })
26273 #else
26274 #define vst3_p16(__p0, __p1) __extension__ ({ \
26275 poly16x4x3_t __s1 = __p1; \
26276 poly16x4x3_t __rev1; \
26277 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26278 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26279 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26280 __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
26281 })
26282 #endif
26283
26284 #ifdef __LITTLE_ENDIAN__
26285 #define vst3q_p8(__p0, __p1) __extension__ ({ \
26286 poly8x16x3_t __s1 = __p1; \
26287 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
26288 })
26289 #else
26290 #define vst3q_p8(__p0, __p1) __extension__ ({ \
26291 poly8x16x3_t __s1 = __p1; \
26292 poly8x16x3_t __rev1; \
26293 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26294 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26295 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26296 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
26297 })
26298 #endif
26299
26300 #ifdef __LITTLE_ENDIAN__
26301 #define vst3q_p16(__p0, __p1) __extension__ ({ \
26302 poly16x8x3_t __s1 = __p1; \
26303 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
26304 })
26305 #else
26306 #define vst3q_p16(__p0, __p1) __extension__ ({ \
26307 poly16x8x3_t __s1 = __p1; \
26308 poly16x8x3_t __rev1; \
26309 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26310 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26311 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26312 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
26313 })
26314 #endif
26315
26316 #ifdef __LITTLE_ENDIAN__
26317 #define vst3q_u8(__p0, __p1) __extension__ ({ \
26318 uint8x16x3_t __s1 = __p1; \
26319 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
26320 })
26321 #else
26322 #define vst3q_u8(__p0, __p1) __extension__ ({ \
26323 uint8x16x3_t __s1 = __p1; \
26324 uint8x16x3_t __rev1; \
26325 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26326 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26327 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26328 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
26329 })
26330 #endif
26331
26332 #ifdef __LITTLE_ENDIAN__
26333 #define vst3q_u32(__p0, __p1) __extension__ ({ \
26334 uint32x4x3_t __s1 = __p1; \
26335 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
26336 })
26337 #else
26338 #define vst3q_u32(__p0, __p1) __extension__ ({ \
26339 uint32x4x3_t __s1 = __p1; \
26340 uint32x4x3_t __rev1; \
26341 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26342 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26343 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26344 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
26345 })
26346 #endif
26347
26348 #ifdef __LITTLE_ENDIAN__
26349 #define vst3q_u16(__p0, __p1) __extension__ ({ \
26350 uint16x8x3_t __s1 = __p1; \
26351 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
26352 })
26353 #else
26354 #define vst3q_u16(__p0, __p1) __extension__ ({ \
26355 uint16x8x3_t __s1 = __p1; \
26356 uint16x8x3_t __rev1; \
26357 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26358 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26359 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26360 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
26361 })
26362 #endif
26363
26364 #ifdef __LITTLE_ENDIAN__
26365 #define vst3q_s8(__p0, __p1) __extension__ ({ \
26366 int8x16x3_t __s1 = __p1; \
26367 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
26368 })
26369 #else
26370 #define vst3q_s8(__p0, __p1) __extension__ ({ \
26371 int8x16x3_t __s1 = __p1; \
26372 int8x16x3_t __rev1; \
26373 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26374 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26375 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26376 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
26377 })
26378 #endif
26379
26380 #ifdef __LITTLE_ENDIAN__
26381 #define vst3q_f32(__p0, __p1) __extension__ ({ \
26382 float32x4x3_t __s1 = __p1; \
26383 __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
26384 })
26385 #else
26386 #define vst3q_f32(__p0, __p1) __extension__ ({ \
26387 float32x4x3_t __s1 = __p1; \
26388 float32x4x3_t __rev1; \
26389 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26390 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26391 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26392 __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
26393 })
26394 #endif
26395
26396 #ifdef __LITTLE_ENDIAN__
26397 #define vst3q_f16(__p0, __p1) __extension__ ({ \
26398 float16x8x3_t __s1 = __p1; \
26399 __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
26400 })
26401 #else
26402 #define vst3q_f16(__p0, __p1) __extension__ ({ \
26403 float16x8x3_t __s1 = __p1; \
26404 float16x8x3_t __rev1; \
26405 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26406 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26407 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26408 __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
26409 })
26410 #endif
26411
26412 #ifdef __LITTLE_ENDIAN__
26413 #define vst3q_s32(__p0, __p1) __extension__ ({ \
26414 int32x4x3_t __s1 = __p1; \
26415 __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
26416 })
26417 #else
26418 #define vst3q_s32(__p0, __p1) __extension__ ({ \
26419 int32x4x3_t __s1 = __p1; \
26420 int32x4x3_t __rev1; \
26421 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26422 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26423 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26424 __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
26425 })
26426 #endif
26427
26428 #ifdef __LITTLE_ENDIAN__
26429 #define vst3q_s16(__p0, __p1) __extension__ ({ \
26430 int16x8x3_t __s1 = __p1; \
26431 __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
26432 })
26433 #else
26434 #define vst3q_s16(__p0, __p1) __extension__ ({ \
26435 int16x8x3_t __s1 = __p1; \
26436 int16x8x3_t __rev1; \
26437 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26438 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26439 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26440 __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
26441 })
26442 #endif
26443
26444 #ifdef __LITTLE_ENDIAN__
26445 #define vst3_u8(__p0, __p1) __extension__ ({ \
26446 uint8x8x3_t __s1 = __p1; \
26447 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
26448 })
26449 #else
26450 #define vst3_u8(__p0, __p1) __extension__ ({ \
26451 uint8x8x3_t __s1 = __p1; \
26452 uint8x8x3_t __rev1; \
26453 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26454 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26455 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26456 __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
26457 })
26458 #endif
26459
26460 #ifdef __LITTLE_ENDIAN__
26461 #define vst3_u32(__p0, __p1) __extension__ ({ \
26462 uint32x2x3_t __s1 = __p1; \
26463 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
26464 })
26465 #else
26466 #define vst3_u32(__p0, __p1) __extension__ ({ \
26467 uint32x2x3_t __s1 = __p1; \
26468 uint32x2x3_t __rev1; \
26469 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26470 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26471 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26472 __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
26473 })
26474 #endif
26475
26476 #ifdef __LITTLE_ENDIAN__
26477 #define vst3_u64(__p0, __p1) __extension__ ({ \
26478 uint64x1x3_t __s1 = __p1; \
26479 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
26480 })
26481 #else
26482 #define vst3_u64(__p0, __p1) __extension__ ({ \
26483 uint64x1x3_t __s1 = __p1; \
26484 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
26485 })
26486 #endif
26487
26488 #ifdef __LITTLE_ENDIAN__
26489 #define vst3_u16(__p0, __p1) __extension__ ({ \
26490 uint16x4x3_t __s1 = __p1; \
26491 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
26492 })
26493 #else
26494 #define vst3_u16(__p0, __p1) __extension__ ({ \
26495 uint16x4x3_t __s1 = __p1; \
26496 uint16x4x3_t __rev1; \
26497 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26498 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26499 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26500 __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
26501 })
26502 #endif
26503
26504 #ifdef __LITTLE_ENDIAN__
26505 #define vst3_s8(__p0, __p1) __extension__ ({ \
26506 int8x8x3_t __s1 = __p1; \
26507 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
26508 })
26509 #else
26510 #define vst3_s8(__p0, __p1) __extension__ ({ \
26511 int8x8x3_t __s1 = __p1; \
26512 int8x8x3_t __rev1; \
26513 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26514 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26515 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26516 __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
26517 })
26518 #endif
26519
26520 #ifdef __LITTLE_ENDIAN__
26521 #define vst3_f32(__p0, __p1) __extension__ ({ \
26522 float32x2x3_t __s1 = __p1; \
26523 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
26524 })
26525 #else
26526 #define vst3_f32(__p0, __p1) __extension__ ({ \
26527 float32x2x3_t __s1 = __p1; \
26528 float32x2x3_t __rev1; \
26529 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26530 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26531 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26532 __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
26533 })
26534 #endif
26535
26536 #ifdef __LITTLE_ENDIAN__
26537 #define vst3_f16(__p0, __p1) __extension__ ({ \
26538 float16x4x3_t __s1 = __p1; \
26539 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
26540 })
26541 #else
26542 #define vst3_f16(__p0, __p1) __extension__ ({ \
26543 float16x4x3_t __s1 = __p1; \
26544 float16x4x3_t __rev1; \
26545 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26546 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26547 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26548 __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
26549 })
26550 #endif
26551
26552 #ifdef __LITTLE_ENDIAN__
26553 #define vst3_s32(__p0, __p1) __extension__ ({ \
26554 int32x2x3_t __s1 = __p1; \
26555 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
26556 })
26557 #else
26558 #define vst3_s32(__p0, __p1) __extension__ ({ \
26559 int32x2x3_t __s1 = __p1; \
26560 int32x2x3_t __rev1; \
26561 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26562 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26563 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26564 __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
26565 })
26566 #endif
26567
26568 #ifdef __LITTLE_ENDIAN__
26569 #define vst3_s64(__p0, __p1) __extension__ ({ \
26570 int64x1x3_t __s1 = __p1; \
26571 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
26572 })
26573 #else
26574 #define vst3_s64(__p0, __p1) __extension__ ({ \
26575 int64x1x3_t __s1 = __p1; \
26576 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
26577 })
26578 #endif
26579
26580 #ifdef __LITTLE_ENDIAN__
26581 #define vst3_s16(__p0, __p1) __extension__ ({ \
26582 int16x4x3_t __s1 = __p1; \
26583 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
26584 })
26585 #else
26586 #define vst3_s16(__p0, __p1) __extension__ ({ \
26587 int16x4x3_t __s1 = __p1; \
26588 int16x4x3_t __rev1; \
26589 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26590 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26591 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26592 __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
26593 })
26594 #endif
26595
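/*
 * Illustrative sketch (not part of the generated header): the vst3 macros
 * above extend the same pattern to three vectors, e.g. de-interleaved
 * R/G/B planes written back as packed RGB triplets.  The helper and its
 * argument names are hypothetical.
 */
static __inline__ __attribute__((__always_inline__))
void __example_vst3_u8(uint8_t *__rgb, uint8x8_t __r, uint8x8_t __g, uint8x8_t __b) {
  uint8x8x3_t __px = { { __r, __g, __b } };
  vst3_u8(__rgb, __px);   /* 24 bytes: r0 g0 b0 r1 g1 b1 ... */
}
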
26596 #ifdef __LITTLE_ENDIAN__
26597 #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
26598 poly8x8x3_t __s1 = __p1; \
26599 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
26600 })
26601 #else
26602 #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
26603 poly8x8x3_t __s1 = __p1; \
26604 poly8x8x3_t __rev1; \
26605 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26606 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26607 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26608 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
26609 })
26610 #endif
26611
26612 #ifdef __LITTLE_ENDIAN__
26613 #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26614 poly16x4x3_t __s1 = __p1; \
26615 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
26616 })
26617 #else
26618 #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26619 poly16x4x3_t __s1 = __p1; \
26620 poly16x4x3_t __rev1; \
26621 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26622 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26623 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26624 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
26625 })
26626 #endif
26627
26628 #ifdef __LITTLE_ENDIAN__
26629 #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26630 poly16x8x3_t __s1 = __p1; \
26631 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
26632 })
26633 #else
26634 #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
26635 poly16x8x3_t __s1 = __p1; \
26636 poly16x8x3_t __rev1; \
26637 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26638 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26639 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26640 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
26641 })
26642 #endif
26643
26644 #ifdef __LITTLE_ENDIAN__
26645 #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26646 uint32x4x3_t __s1 = __p1; \
26647 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
26648 })
26649 #else
26650 #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26651 uint32x4x3_t __s1 = __p1; \
26652 uint32x4x3_t __rev1; \
26653 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26654 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26655 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26656 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
26657 })
26658 #endif
26659
26660 #ifdef __LITTLE_ENDIAN__
26661 #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26662 uint16x8x3_t __s1 = __p1; \
26663 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
26664 })
26665 #else
26666 #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26667 uint16x8x3_t __s1 = __p1; \
26668 uint16x8x3_t __rev1; \
26669 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26670 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26671 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26672 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
26673 })
26674 #endif
26675
26676 #ifdef __LITTLE_ENDIAN__
26677 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26678 float32x4x3_t __s1 = __p1; \
26679 __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
26680 })
26681 #else
26682 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26683 float32x4x3_t __s1 = __p1; \
26684 float32x4x3_t __rev1; \
26685 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26686 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26687 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26688 __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
26689 })
26690 #endif
26691
26692 #ifdef __LITTLE_ENDIAN__
26693 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26694 float16x8x3_t __s1 = __p1; \
26695 __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
26696 })
26697 #else
26698 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26699 float16x8x3_t __s1 = __p1; \
26700 float16x8x3_t __rev1; \
26701 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26702 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26703 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26704 __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
26705 })
26706 #endif
26707
26708 #ifdef __LITTLE_ENDIAN__
26709 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26710 int32x4x3_t __s1 = __p1; \
26711 __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
26712 })
26713 #else
26714 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26715 int32x4x3_t __s1 = __p1; \
26716 int32x4x3_t __rev1; \
26717 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26718 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26719 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26720 __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
26721 })
26722 #endif
26723
26724 #ifdef __LITTLE_ENDIAN__
26725 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26726 int16x8x3_t __s1 = __p1; \
26727 __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
26728 })
26729 #else
26730 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26731 int16x8x3_t __s1 = __p1; \
26732 int16x8x3_t __rev1; \
26733 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26734 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26735 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26736 __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
26737 })
26738 #endif
26739
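/*
 * Editorial note: the vst3[q]_lane_* macros store a single three-element
 * structure: element __p2 of each of the three source vectors is written to
 * three consecutive elements starting at __p0 (an ST3 single-lane store).
 * A minimal usage sketch (hypothetical helper, not part of this header):
 *
 *   // Write the RGB triple held in lane 0 of three planar vectors to dst.
 *   static inline void store_rgb_lane0(uint8_t *dst, uint8x8x3_t planes) {
 *       vst3_lane_u8(dst, planes, 0);   // writes dst[0..2]
 *   }
 */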
26740 #ifdef __LITTLE_ENDIAN__
26741 #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26742 uint8x8x3_t __s1 = __p1; \
26743 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
26744 })
26745 #else
26746 #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
26747 uint8x8x3_t __s1 = __p1; \
26748 uint8x8x3_t __rev1; \
26749 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26750 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26751 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26752 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
26753 })
26754 #endif
26755
26756 #ifdef __LITTLE_ENDIAN__
26757 #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26758 uint32x2x3_t __s1 = __p1; \
26759 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
26760 })
26761 #else
26762 #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
26763 uint32x2x3_t __s1 = __p1; \
26764 uint32x2x3_t __rev1; \
26765 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26766 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26767 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26768 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
26769 })
26770 #endif
26771
26772 #ifdef __LITTLE_ENDIAN__
26773 #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26774 uint16x4x3_t __s1 = __p1; \
26775 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
26776 })
26777 #else
26778 #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
26779 uint16x4x3_t __s1 = __p1; \
26780 uint16x4x3_t __rev1; \
26781 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26782 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26783 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26784 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
26785 })
26786 #endif
26787
26788 #ifdef __LITTLE_ENDIAN__
26789 #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26790 int8x8x3_t __s1 = __p1; \
26791 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
26792 })
26793 #else
26794 #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
26795 int8x8x3_t __s1 = __p1; \
26796 int8x8x3_t __rev1; \
26797 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26798 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26799 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26800 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
26801 })
26802 #endif
26803
26804 #ifdef __LITTLE_ENDIAN__
26805 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26806 float32x2x3_t __s1 = __p1; \
26807 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
26808 })
26809 #else
26810 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
26811 float32x2x3_t __s1 = __p1; \
26812 float32x2x3_t __rev1; \
26813 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26814 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26815 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26816 __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
26817 })
26818 #endif
26819
26820 #ifdef __LITTLE_ENDIAN__
26821 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26822 float16x4x3_t __s1 = __p1; \
26823 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
26824 })
26825 #else
26826 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
26827 float16x4x3_t __s1 = __p1; \
26828 float16x4x3_t __rev1; \
26829 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26830 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26831 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26832 __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
26833 })
26834 #endif
26835
26836 #ifdef __LITTLE_ENDIAN__
26837 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26838 int32x2x3_t __s1 = __p1; \
26839 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
26840 })
26841 #else
26842 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
26843 int32x2x3_t __s1 = __p1; \
26844 int32x2x3_t __rev1; \
26845 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
26846 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
26847 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
26848 __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
26849 })
26850 #endif
26851
26852 #ifdef __LITTLE_ENDIAN__
26853 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26854 int16x4x3_t __s1 = __p1; \
26855 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
26856 })
26857 #else
26858 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
26859 int16x4x3_t __s1 = __p1; \
26860 int16x4x3_t __rev1; \
26861 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26862 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26863 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26864 __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
26865 })
26866 #endif
26867
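/*
 * Editorial note: vst4[q]_* store four full vectors with 4-way interleaving:
 * for each index i, val[0][i], val[1][i], val[2][i], val[3][i] are written as
 * consecutive elements, so vst4_u8 writes 32 bytes and vst4q_u8 writes 64.
 * A minimal usage sketch (hypothetical helper, not part of this header):
 *
 *   // Re-interleave four separate R,G,B,A planes into packed RGBA memory.
 *   static inline void store_rgba16(uint8_t *dst, uint8x16x4_t planes) {
 *       vst4q_u8(dst, planes);   // writes dst[0..63] as RGBARGBA...
 *   }
 */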
26868 #ifdef __LITTLE_ENDIAN__
26869 #define vst4_p8(__p0, __p1) __extension__ ({ \
26870 poly8x8x4_t __s1 = __p1; \
26871 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
26872 })
26873 #else
26874 #define vst4_p8(__p0, __p1) __extension__ ({ \
26875 poly8x8x4_t __s1 = __p1; \
26876 poly8x8x4_t __rev1; \
26877 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26878 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26879 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26880 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
26881 __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
26882 })
26883 #endif
26884
26885 #ifdef __LITTLE_ENDIAN__
26886 #define vst4_p16(__p0, __p1) __extension__ ({ \
26887 poly16x4x4_t __s1 = __p1; \
26888 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
26889 })
26890 #else
26891 #define vst4_p16(__p0, __p1) __extension__ ({ \
26892 poly16x4x4_t __s1 = __p1; \
26893 poly16x4x4_t __rev1; \
26894 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26895 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26896 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26897 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
26898 __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
26899 })
26900 #endif
26901
26902 #ifdef __LITTLE_ENDIAN__
26903 #define vst4q_p8(__p0, __p1) __extension__ ({ \
26904 poly8x16x4_t __s1 = __p1; \
26905 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
26906 })
26907 #else
26908 #define vst4q_p8(__p0, __p1) __extension__ ({ \
26909 poly8x16x4_t __s1 = __p1; \
26910 poly8x16x4_t __rev1; \
26911 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26912 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26913 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26914 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26915 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
26916 })
26917 #endif
26918
26919 #ifdef __LITTLE_ENDIAN__
26920 #define vst4q_p16(__p0, __p1) __extension__ ({ \
26921 poly16x8x4_t __s1 = __p1; \
26922 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
26923 })
26924 #else
26925 #define vst4q_p16(__p0, __p1) __extension__ ({ \
26926 poly16x8x4_t __s1 = __p1; \
26927 poly16x8x4_t __rev1; \
26928 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26929 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26930 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26931 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
26932 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
26933 })
26934 #endif
26935
26936 #ifdef __LITTLE_ENDIAN__
26937 #define vst4q_u8(__p0, __p1) __extension__ ({ \
26938 uint8x16x4_t __s1 = __p1; \
26939 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
26940 })
26941 #else
26942 #define vst4q_u8(__p0, __p1) __extension__ ({ \
26943 uint8x16x4_t __s1 = __p1; \
26944 uint8x16x4_t __rev1; \
26945 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26946 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26947 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26948 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26949 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
26950 })
26951 #endif
26952
26953 #ifdef __LITTLE_ENDIAN__
26954 #define vst4q_u32(__p0, __p1) __extension__ ({ \
26955 uint32x4x4_t __s1 = __p1; \
26956 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
26957 })
26958 #else
26959 #define vst4q_u32(__p0, __p1) __extension__ ({ \
26960 uint32x4x4_t __s1 = __p1; \
26961 uint32x4x4_t __rev1; \
26962 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
26963 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
26964 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
26965 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
26966 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
26967 })
26968 #endif
26969
26970 #ifdef __LITTLE_ENDIAN__
26971 #define vst4q_u16(__p0, __p1) __extension__ ({ \
26972 uint16x8x4_t __s1 = __p1; \
26973 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
26974 })
26975 #else
26976 #define vst4q_u16(__p0, __p1) __extension__ ({ \
26977 uint16x8x4_t __s1 = __p1; \
26978 uint16x8x4_t __rev1; \
26979 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
26980 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
26981 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
26982 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
26983 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
26984 })
26985 #endif
26986
26987 #ifdef __LITTLE_ENDIAN__
26988 #define vst4q_s8(__p0, __p1) __extension__ ({ \
26989 int8x16x4_t __s1 = __p1; \
26990 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
26991 })
26992 #else
26993 #define vst4q_s8(__p0, __p1) __extension__ ({ \
26994 int8x16x4_t __s1 = __p1; \
26995 int8x16x4_t __rev1; \
26996 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26997 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26998 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
26999 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
27000 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
27001 })
27002 #endif
27003
27004 #ifdef __LITTLE_ENDIAN__
27005 #define vst4q_f32(__p0, __p1) __extension__ ({ \
27006 float32x4x4_t __s1 = __p1; \
27007 __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
27008 })
27009 #else
27010 #define vst4q_f32(__p0, __p1) __extension__ ({ \
27011 float32x4x4_t __s1 = __p1; \
27012 float32x4x4_t __rev1; \
27013 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27014 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27015 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27016 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27017 __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
27018 })
27019 #endif
27020
27021 #ifdef __LITTLE_ENDIAN__
27022 #define vst4q_f16(__p0, __p1) __extension__ ({ \
27023 float16x8x4_t __s1 = __p1; \
27024 __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
27025 })
27026 #else
27027 #define vst4q_f16(__p0, __p1) __extension__ ({ \
27028 float16x8x4_t __s1 = __p1; \
27029 float16x8x4_t __rev1; \
27030 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27031 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27032 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27033 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27034 __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
27035 })
27036 #endif
27037
27038 #ifdef __LITTLE_ENDIAN__
27039 #define vst4q_s32(__p0, __p1) __extension__ ({ \
27040 int32x4x4_t __s1 = __p1; \
27041 __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
27042 })
27043 #else
27044 #define vst4q_s32(__p0, __p1) __extension__ ({ \
27045 int32x4x4_t __s1 = __p1; \
27046 int32x4x4_t __rev1; \
27047 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27048 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27049 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27050 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27051 __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
27052 })
27053 #endif
27054
27055 #ifdef __LITTLE_ENDIAN__
27056 #define vst4q_s16(__p0, __p1) __extension__ ({ \
27057 int16x8x4_t __s1 = __p1; \
27058 __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
27059 })
27060 #else
27061 #define vst4q_s16(__p0, __p1) __extension__ ({ \
27062 int16x8x4_t __s1 = __p1; \
27063 int16x8x4_t __rev1; \
27064 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27065 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27066 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27067 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27068 __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
27069 })
27070 #endif
27071
27072 #ifdef __LITTLE_ENDIAN__
27073 #define vst4_u8(__p0, __p1) __extension__ ({ \
27074 uint8x8x4_t __s1 = __p1; \
27075 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
27076 })
27077 #else
27078 #define vst4_u8(__p0, __p1) __extension__ ({ \
27079 uint8x8x4_t __s1 = __p1; \
27080 uint8x8x4_t __rev1; \
27081 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27082 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27083 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27084 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27085 __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
27086 })
27087 #endif
27088
27089 #ifdef __LITTLE_ENDIAN__
27090 #define vst4_u32(__p0, __p1) __extension__ ({ \
27091 uint32x2x4_t __s1 = __p1; \
27092 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
27093 })
27094 #else
27095 #define vst4_u32(__p0, __p1) __extension__ ({ \
27096 uint32x2x4_t __s1 = __p1; \
27097 uint32x2x4_t __rev1; \
27098 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27099 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27100 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27101 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27102 __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
27103 })
27104 #endif
27105
27106 #ifdef __LITTLE_ENDIAN__
27107 #define vst4_u64(__p0, __p1) __extension__ ({ \
27108 uint64x1x4_t __s1 = __p1; \
27109 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
27110 })
27111 #else
27112 #define vst4_u64(__p0, __p1) __extension__ ({ \
27113 uint64x1x4_t __s1 = __p1; \
27114 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
27115 })
27116 #endif
27117
27118 #ifdef __LITTLE_ENDIAN__
27119 #define vst4_u16(__p0, __p1) __extension__ ({ \
27120 uint16x4x4_t __s1 = __p1; \
27121 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
27122 })
27123 #else
27124 #define vst4_u16(__p0, __p1) __extension__ ({ \
27125 uint16x4x4_t __s1 = __p1; \
27126 uint16x4x4_t __rev1; \
27127 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27128 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27129 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27130 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27131 __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
27132 })
27133 #endif
27134
27135 #ifdef __LITTLE_ENDIAN__
27136 #define vst4_s8(__p0, __p1) __extension__ ({ \
27137 int8x8x4_t __s1 = __p1; \
27138 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
27139 })
27140 #else
27141 #define vst4_s8(__p0, __p1) __extension__ ({ \
27142 int8x8x4_t __s1 = __p1; \
27143 int8x8x4_t __rev1; \
27144 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27145 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27146 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27147 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27148 __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
27149 })
27150 #endif
27151
27152 #ifdef __LITTLE_ENDIAN__
27153 #define vst4_f32(__p0, __p1) __extension__ ({ \
27154 float32x2x4_t __s1 = __p1; \
27155 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
27156 })
27157 #else
27158 #define vst4_f32(__p0, __p1) __extension__ ({ \
27159 float32x2x4_t __s1 = __p1; \
27160 float32x2x4_t __rev1; \
27161 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27162 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27163 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27164 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27165 __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
27166 })
27167 #endif
27168
27169 #ifdef __LITTLE_ENDIAN__
27170 #define vst4_f16(__p0, __p1) __extension__ ({ \
27171 float16x4x4_t __s1 = __p1; \
27172 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
27173 })
27174 #else
27175 #define vst4_f16(__p0, __p1) __extension__ ({ \
27176 float16x4x4_t __s1 = __p1; \
27177 float16x4x4_t __rev1; \
27178 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27179 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27180 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27181 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27182 __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
27183 })
27184 #endif
27185
27186 #ifdef __LITTLE_ENDIAN__
27187 #define vst4_s32(__p0, __p1) __extension__ ({ \
27188 int32x2x4_t __s1 = __p1; \
27189 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
27190 })
27191 #else
27192 #define vst4_s32(__p0, __p1) __extension__ ({ \
27193 int32x2x4_t __s1 = __p1; \
27194 int32x2x4_t __rev1; \
27195 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27196 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27197 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27198 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27199 __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
27200 })
27201 #endif
27202
27203 #ifdef __LITTLE_ENDIAN__
27204 #define vst4_s64(__p0, __p1) __extension__ ({ \
27205 int64x1x4_t __s1 = __p1; \
27206 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
27207 })
27208 #else
27209 #define vst4_s64(__p0, __p1) __extension__ ({ \
27210 int64x1x4_t __s1 = __p1; \
27211 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
27212 })
27213 #endif
27214
27215 #ifdef __LITTLE_ENDIAN__
27216 #define vst4_s16(__p0, __p1) __extension__ ({ \
27217 int16x4x4_t __s1 = __p1; \
27218 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
27219 })
27220 #else
27221 #define vst4_s16(__p0, __p1) __extension__ ({ \
27222 int16x4x4_t __s1 = __p1; \
27223 int16x4x4_t __rev1; \
27224 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27225 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27226 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27227 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27228 __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
27229 })
27230 #endif
27231
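/*
 * Editorial note: the vst4[q]_lane_* macros store one four-element structure:
 * element __p2 of each of the four source vectors is written to four
 * consecutive elements starting at __p0 (vst4_lane_u8 writes 4 bytes).
 * A minimal usage sketch (hypothetical helper, not part of this header):
 *
 *   // Store the RGBA value held in lane 3 of four planar vectors.
 *   static inline void store_rgba_lane3(uint8_t *dst, uint8x8x4_t planes) {
 *       vst4_lane_u8(dst, planes, 3);   // writes dst[0..3] from lane 3
 *   }
 */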
27232 #ifdef __LITTLE_ENDIAN__
27233 #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
27234 poly8x8x4_t __s1 = __p1; \
27235 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
27236 })
27237 #else
27238 #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
27239 poly8x8x4_t __s1 = __p1; \
27240 poly8x8x4_t __rev1; \
27241 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27242 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27243 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27244 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27245 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
27246 })
27247 #endif
27248
27249 #ifdef __LITTLE_ENDIAN__
27250 #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27251 poly16x4x4_t __s1 = __p1; \
27252 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
27253 })
27254 #else
27255 #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27256 poly16x4x4_t __s1 = __p1; \
27257 poly16x4x4_t __rev1; \
27258 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27259 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27260 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27261 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27262 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
27263 })
27264 #endif
27265
27266 #ifdef __LITTLE_ENDIAN__
27267 #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27268 poly16x8x4_t __s1 = __p1; \
27269 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
27270 })
27271 #else
27272 #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
27273 poly16x8x4_t __s1 = __p1; \
27274 poly16x8x4_t __rev1; \
27275 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27276 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27277 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27278 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27279 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
27280 })
27281 #endif
27282
27283 #ifdef __LITTLE_ENDIAN__
27284 #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27285 uint32x4x4_t __s1 = __p1; \
27286 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
27287 })
27288 #else
27289 #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27290 uint32x4x4_t __s1 = __p1; \
27291 uint32x4x4_t __rev1; \
27292 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27293 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27294 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27295 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27296 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
27297 })
27298 #endif
27299
27300 #ifdef __LITTLE_ENDIAN__
27301 #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27302 uint16x8x4_t __s1 = __p1; \
27303 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
27304 })
27305 #else
27306 #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27307 uint16x8x4_t __s1 = __p1; \
27308 uint16x8x4_t __rev1; \
27309 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27310 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27311 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27312 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27313 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
27314 })
27315 #endif
27316
27317 #ifdef __LITTLE_ENDIAN__
27318 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27319 float32x4x4_t __s1 = __p1; \
27320 __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
27321 })
27322 #else
27323 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27324 float32x4x4_t __s1 = __p1; \
27325 float32x4x4_t __rev1; \
27326 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27327 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27328 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27329 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27330 __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
27331 })
27332 #endif
27333
27334 #ifdef __LITTLE_ENDIAN__
27335 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27336 float16x8x4_t __s1 = __p1; \
27337 __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
27338 })
27339 #else
27340 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27341 float16x8x4_t __s1 = __p1; \
27342 float16x8x4_t __rev1; \
27343 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27344 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27345 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27346 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27347 __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
27348 })
27349 #endif
27350
27351 #ifdef __LITTLE_ENDIAN__
27352 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27353 int32x4x4_t __s1 = __p1; \
27354 __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
27355 })
27356 #else
27357 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27358 int32x4x4_t __s1 = __p1; \
27359 int32x4x4_t __rev1; \
27360 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27361 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27362 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27363 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27364 __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
27365 })
27366 #endif
27367
27368 #ifdef __LITTLE_ENDIAN__
27369 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27370 int16x8x4_t __s1 = __p1; \
27371 __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
27372 })
27373 #else
27374 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27375 int16x8x4_t __s1 = __p1; \
27376 int16x8x4_t __rev1; \
27377 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27378 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27379 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27380 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27381 __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
27382 })
27383 #endif
27384
27385 #ifdef __LITTLE_ENDIAN__
27386 #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
27387 uint8x8x4_t __s1 = __p1; \
27388 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
27389 })
27390 #else
27391 #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
27392 uint8x8x4_t __s1 = __p1; \
27393 uint8x8x4_t __rev1; \
27394 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27395 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27396 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27397 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27398 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
27399 })
27400 #endif
27401
27402 #ifdef __LITTLE_ENDIAN__
27403 #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27404 uint32x2x4_t __s1 = __p1; \
27405 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
27406 })
27407 #else
27408 #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
27409 uint32x2x4_t __s1 = __p1; \
27410 uint32x2x4_t __rev1; \
27411 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27412 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27413 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27414 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27415 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
27416 })
27417 #endif
27418
27419 #ifdef __LITTLE_ENDIAN__
27420 #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27421 uint16x4x4_t __s1 = __p1; \
27422 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
27423 })
27424 #else
27425 #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
27426 uint16x4x4_t __s1 = __p1; \
27427 uint16x4x4_t __rev1; \
27428 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27429 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27430 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27431 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27432 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
27433 })
27434 #endif
27435
27436 #ifdef __LITTLE_ENDIAN__
27437 #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
27438 int8x8x4_t __s1 = __p1; \
27439 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
27440 })
27441 #else
27442 #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
27443 int8x8x4_t __s1 = __p1; \
27444 int8x8x4_t __rev1; \
27445 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
27446 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
27447 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
27448 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
27449 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
27450 })
27451 #endif
27452
27453 #ifdef __LITTLE_ENDIAN__
27454 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27455 float32x2x4_t __s1 = __p1; \
27456 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
27457 })
27458 #else
27459 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
27460 float32x2x4_t __s1 = __p1; \
27461 float32x2x4_t __rev1; \
27462 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27463 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27464 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27465 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27466 __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
27467 })
27468 #endif
27469
27470 #ifdef __LITTLE_ENDIAN__
27471 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27472 float16x4x4_t __s1 = __p1; \
27473 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
27474 })
27475 #else
27476 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
27477 float16x4x4_t __s1 = __p1; \
27478 float16x4x4_t __rev1; \
27479 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27480 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27481 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27482 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27483 __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
27484 })
27485 #endif
27486
27487 #ifdef __LITTLE_ENDIAN__
27488 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27489 int32x2x4_t __s1 = __p1; \
27490 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
27491 })
27492 #else
27493 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
27494 int32x2x4_t __s1 = __p1; \
27495 int32x2x4_t __rev1; \
27496 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
27497 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
27498 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
27499 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
27500 __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
27501 })
27502 #endif
27503
27504 #ifdef __LITTLE_ENDIAN__
27505 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27506 int16x4x4_t __s1 = __p1; \
27507 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
27508 })
27509 #else
27510 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
27511 int16x4x4_t __s1 = __p1; \
27512 int16x4x4_t __rev1; \
27513 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
27514 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
27515 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
27516 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
27517 __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
27518 })
27519 #endif
27520
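/*
 * Editorial note: vsub[q]_* below are plain element-wise subtraction, lowered
 * to the C `-` operator on the vector types; integer results wrap modulo the
 * element width, exactly as scalar unsigned or two's-complement arithmetic.
 * A minimal usage sketch (hypothetical helper, not part of this header):
 *
 *   // Subtract 16 bytes at a time: out[i] = a[i] - b[i] (mod 256).
 *   static inline uint8x16_t diff16(uint8x16_t a, uint8x16_t b) {
 *       return vsubq_u8(a, b);
 *   }
 */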
27521 #ifdef __LITTLE_ENDIAN__
27522 __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
27523 uint8x16_t __ret;
27524 __ret = __p0 - __p1;
27525 return __ret;
27526 }
27527 #else
27528 __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
27529 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27530 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27531 uint8x16_t __ret;
27532 __ret = __rev0 - __rev1;
27533 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27534 return __ret;
27535 }
27536 #endif
27537
27538 #ifdef __LITTLE_ENDIAN__
27539 __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
27540 uint32x4_t __ret;
27541 __ret = __p0 - __p1;
27542 return __ret;
27543 }
27544 #else
27545 __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
27546 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27547 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27548 uint32x4_t __ret;
27549 __ret = __rev0 - __rev1;
27550 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27551 return __ret;
27552 }
27553 #endif
27554
27555 #ifdef __LITTLE_ENDIAN__
27556 __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
27557 uint64x2_t __ret;
27558 __ret = __p0 - __p1;
27559 return __ret;
27560 }
27561 #else
27562 __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
27563 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27564 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27565 uint64x2_t __ret;
27566 __ret = __rev0 - __rev1;
27567 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27568 return __ret;
27569 }
27570 #endif
27571
27572 #ifdef __LITTLE_ENDIAN__
27573 __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
27574 uint16x8_t __ret;
27575 __ret = __p0 - __p1;
27576 return __ret;
27577 }
27578 #else
27579 __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
27580 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27581 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27582 uint16x8_t __ret;
27583 __ret = __rev0 - __rev1;
27584 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27585 return __ret;
27586 }
27587 #endif
27588
27589 #ifdef __LITTLE_ENDIAN__
27590 __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
27591 int8x16_t __ret;
27592 __ret = __p0 - __p1;
27593 return __ret;
27594 }
27595 #else
27596 __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
27597 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27598 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27599 int8x16_t __ret;
27600 __ret = __rev0 - __rev1;
27601 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
27602 return __ret;
27603 }
27604 #endif
27605
27606 #ifdef __LITTLE_ENDIAN__
27607 __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
27608 float32x4_t __ret;
27609 __ret = __p0 - __p1;
27610 return __ret;
27611 }
27612 #else
27613 __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
27614 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27615 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27616 float32x4_t __ret;
27617 __ret = __rev0 - __rev1;
27618 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27619 return __ret;
27620 }
27621 #endif
27622
27623 #ifdef __LITTLE_ENDIAN__
27624 __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
27625 int32x4_t __ret;
27626 __ret = __p0 - __p1;
27627 return __ret;
27628 }
27629 #else
27630 __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
27631 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27632 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27633 int32x4_t __ret;
27634 __ret = __rev0 - __rev1;
27635 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27636 return __ret;
27637 }
27638 #endif
27639
27640 #ifdef __LITTLE_ENDIAN__
27641 __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
27642 int64x2_t __ret;
27643 __ret = __p0 - __p1;
27644 return __ret;
27645 }
27646 #else
27647 __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
27648 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27649 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27650 int64x2_t __ret;
27651 __ret = __rev0 - __rev1;
27652 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27653 return __ret;
27654 }
27655 #endif
27656
27657 #ifdef __LITTLE_ENDIAN__
27658 __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
27659 int16x8_t __ret;
27660 __ret = __p0 - __p1;
27661 return __ret;
27662 }
27663 #else
27664 __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
27665 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27666 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27667 int16x8_t __ret;
27668 __ret = __rev0 - __rev1;
27669 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27670 return __ret;
27671 }
27672 #endif
27673
27674 #ifdef __LITTLE_ENDIAN__
27675 __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
27676 uint8x8_t __ret;
27677 __ret = __p0 - __p1;
27678 return __ret;
27679 }
27680 #else
27681 __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
27682 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27683 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27684 uint8x8_t __ret;
27685 __ret = __rev0 - __rev1;
27686 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27687 return __ret;
27688 }
27689 #endif
27690
27691 #ifdef __LITTLE_ENDIAN__
27692 __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
27693 uint32x2_t __ret;
27694 __ret = __p0 - __p1;
27695 return __ret;
27696 }
27697 #else
27698 __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
27699 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27700 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27701 uint32x2_t __ret;
27702 __ret = __rev0 - __rev1;
27703 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27704 return __ret;
27705 }
27706 #endif
27707
27708 #ifdef __LITTLE_ENDIAN__
27709 __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
27710 uint64x1_t __ret;
27711 __ret = __p0 - __p1;
27712 return __ret;
27713 }
27714 #else
27715 __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
27716 uint64x1_t __ret;
27717 __ret = __p0 - __p1;
27718 return __ret;
27719 }
27720 #endif
27721
27722 #ifdef __LITTLE_ENDIAN__
27723 __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
27724 uint16x4_t __ret;
27725 __ret = __p0 - __p1;
27726 return __ret;
27727 }
27728 #else
27729 __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
27730 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27731 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27732 uint16x4_t __ret;
27733 __ret = __rev0 - __rev1;
27734 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27735 return __ret;
27736 }
27737 #endif
27738
27739 #ifdef __LITTLE_ENDIAN__
27740 __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
27741 int8x8_t __ret;
27742 __ret = __p0 - __p1;
27743 return __ret;
27744 }
27745 #else
27746 __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
27747 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27748 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27749 int8x8_t __ret;
27750 __ret = __rev0 - __rev1;
27751 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27752 return __ret;
27753 }
27754 #endif
27755
27756 #ifdef __LITTLE_ENDIAN__
27757 __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
27758 float32x2_t __ret;
27759 __ret = __p0 - __p1;
27760 return __ret;
27761 }
27762 #else
27763 __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
27764 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27765 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27766 float32x2_t __ret;
27767 __ret = __rev0 - __rev1;
27768 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27769 return __ret;
27770 }
27771 #endif
27772
27773 #ifdef __LITTLE_ENDIAN__
27774 __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
27775 int32x2_t __ret;
27776 __ret = __p0 - __p1;
27777 return __ret;
27778 }
27779 #else
27780 __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
27781 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27782 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27783 int32x2_t __ret;
27784 __ret = __rev0 - __rev1;
27785 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27786 return __ret;
27787 }
27788 #endif
27789
27790 #ifdef __LITTLE_ENDIAN__
27791 __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
27792 int64x1_t __ret;
27793 __ret = __p0 - __p1;
27794 return __ret;
27795 }
27796 #else
27797 __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
27798 int64x1_t __ret;
27799 __ret = __p0 - __p1;
27800 return __ret;
27801 }
27802 #endif
27803
27804 #ifdef __LITTLE_ENDIAN__
27805 __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
27806 int16x4_t __ret;
27807 __ret = __p0 - __p1;
27808 return __ret;
27809 }
27810 #else
27811 __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
27812 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27813 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27814 int16x4_t __ret;
27815 __ret = __rev0 - __rev1;
27816 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27817 return __ret;
27818 }
27819 #endif
27820
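/*
 * Editorial note: vsubhn_* ("subtract, narrowing, high half") subtract two
 * wide vectors and keep only the most-significant half of each difference;
 * e.g. vsubhn_u32 returns ((__p0 - __p1) >> 16) narrowed to uint16x4_t.  The
 * __noswap_* variants are internal helpers called by other big-endian
 * wrappers whose operands have already been reversed.
 * A minimal usage sketch (hypothetical helper, not part of this header):
 *
 *   // Keep the top 16 bits of each 32-bit difference of two accumulators.
 *   static inline uint16x4_t acc_delta_hi(uint32x4_t a, uint32x4_t b) {
 *       return vsubhn_u32(a, b);
 *   }
 */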
27821 #ifdef __LITTLE_ENDIAN__
27822 __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
27823 uint16x4_t __ret;
27824 __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
27825 return __ret;
27826 }
27827 #else
27828 __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
27829 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27830 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27831 uint16x4_t __ret;
27832 __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
27833 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27834 return __ret;
27835 }
27836 __ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
27837 uint16x4_t __ret;
27838 __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
27839 return __ret;
27840 }
27841 #endif
27842
27843 #ifdef __LITTLE_ENDIAN__
27844 __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
27845 uint32x2_t __ret;
27846 __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
27847 return __ret;
27848 }
27849 #else
27850 __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
27851 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27852 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27853 uint32x2_t __ret;
27854 __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
27855 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27856 return __ret;
27857 }
27858 __ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
27859 uint32x2_t __ret;
27860 __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
27861 return __ret;
27862 }
27863 #endif
27864
27865 #ifdef __LITTLE_ENDIAN__
27866 __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
27867 uint8x8_t __ret;
27868 __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
27869 return __ret;
27870 }
27871 #else
27872 __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
27873 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27874 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27875 uint8x8_t __ret;
27876 __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
27877 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27878 return __ret;
27879 }
27880 __ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
27881 uint8x8_t __ret;
27882 __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
27883 return __ret;
27884 }
27885 #endif
27886
27887 #ifdef __LITTLE_ENDIAN__
27888 __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
27889 int16x4_t __ret;
27890 __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
27891 return __ret;
27892 }
27893 #else
27894 __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
27895 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27896 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27897 int16x4_t __ret;
27898 __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
27899 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
27900 return __ret;
27901 }
27902 __ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
27903 int16x4_t __ret;
27904 __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
27905 return __ret;
27906 }
27907 #endif
27908
27909 #ifdef __LITTLE_ENDIAN__
27910 __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
27911 int32x2_t __ret;
27912 __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
27913 return __ret;
27914 }
27915 #else
27916 __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
27917 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27918 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27919 int32x2_t __ret;
27920 __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
27921 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27922 return __ret;
27923 }
27924 __ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
27925 int32x2_t __ret;
27926 __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
27927 return __ret;
27928 }
27929 #endif
27930
27931 #ifdef __LITTLE_ENDIAN__
27932 __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
27933 int8x8_t __ret;
27934 __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
27935 return __ret;
27936 }
27937 #else
27938 __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
27939 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27940 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27941 int8x8_t __ret;
27942 __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
27943 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27944 return __ret;
27945 }
27946 __ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
27947 int8x8_t __ret;
27948 __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
27949 return __ret;
27950 }
27951 #endif
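/*
 * The vsubhn_* intrinsics above subtract at full lane width and then keep only
 * the most significant half of each difference, narrowing the element type.
 * A minimal usage sketch (illustrative only; variable names are hypothetical,
 * little-endian lane ordering assumed):
 *
 *   uint32x4_t a  = vdupq_n_u32(0x12345678u);
 *   uint32x4_t b  = vdupq_n_u32(0x00005678u);
 *   uint16x4_t hi = vsubhn_u32(a, b);   // each lane: (0x12340000 >> 16) == 0x1234
 */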
27952
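/*
 * The vsubl_* intrinsics below widen both operands to double-width lanes
 * before subtracting, so the exact difference is kept without wraparound.
 * A minimal usage sketch (illustrative only; variable names are hypothetical):
 *
 *   int8x8_t  a = vdup_n_s8(-100);
 *   int8x8_t  b = vdup_n_s8(100);
 *   int16x8_t d = vsubl_s8(a, b);   // each lane: -200, representable in int16_t
 */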
27953 #ifdef __LITTLE_ENDIAN__
27954 __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
27955 uint16x8_t __ret;
27956 __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
27957 return __ret;
27958 }
27959 #else
27960 __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
27961 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
27962 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
27963 uint16x8_t __ret;
27964 __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
27965 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
27966 return __ret;
27967 }
27968 #endif
27969
27970 #ifdef __LITTLE_ENDIAN__
27971 __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
27972 uint64x2_t __ret;
27973 __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
27974 return __ret;
27975 }
27976 #else
27977 __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
27978 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
27979 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
27980 uint64x2_t __ret;
27981 __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
27982 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
27983 return __ret;
27984 }
27985 #endif
27986
27987 #ifdef __LITTLE_ENDIAN__
27988 __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
27989 uint32x4_t __ret;
27990 __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
27991 return __ret;
27992 }
27993 #else
27994 __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
27995 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
27996 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
27997 uint32x4_t __ret;
27998 __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
27999 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28000 return __ret;
28001 }
28002 #endif
28003
28004 #ifdef __LITTLE_ENDIAN__
28005 __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
28006 int16x8_t __ret;
28007 __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
28008 return __ret;
28009 }
28010 #else
28011 __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
28012 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28013 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28014 int16x8_t __ret;
28015 __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
28016 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28017 return __ret;
28018 }
28019 #endif
28020
28021 #ifdef __LITTLE_ENDIAN__
28022 __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
28023 int64x2_t __ret;
28024 __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
28025 return __ret;
28026 }
28027 #else
28028 __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
28029 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28030 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28031 int64x2_t __ret;
28032 __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
28033 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
28034 return __ret;
28035 }
28036 #endif
28037
28038 #ifdef __LITTLE_ENDIAN__
28039 __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
28040 int32x4_t __ret;
28041 __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
28042 return __ret;
28043 }
28044 #else
28045 __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
28046 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28047 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28048 int32x4_t __ret;
28049 __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
28050 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28051 return __ret;
28052 }
28053 #endif
28054
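/*
 * The vsubw_* intrinsics below take an already-wide first operand; the narrow
 * second operand is widened and then subtracted from it, which is convenient
 * for running accumulators. A minimal usage sketch (illustrative only;
 * variable names are hypothetical):
 *
 *   uint16x8_t acc = vdupq_n_u16(1000);
 *   uint8x8_t  x   = vdup_n_u8(255);
 *   uint16x8_t r   = vsubw_u8(acc, x);   // each lane: 1000 - 255 == 745
 */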
28055 #ifdef __LITTLE_ENDIAN__
28056 __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
28057 uint16x8_t __ret;
28058 __ret = __p0 - vmovl_u8(__p1);
28059 return __ret;
28060 }
28061 #else
28062 __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
28063 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28064 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28065 uint16x8_t __ret;
28066 __ret = __rev0 - __noswap_vmovl_u8(__rev1);
28067 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28068 return __ret;
28069 }
28070 #endif
28071
28072 #ifdef __LITTLE_ENDIAN__
28073 __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
28074 uint64x2_t __ret;
28075 __ret = __p0 - vmovl_u32(__p1);
28076 return __ret;
28077 }
28078 #else
28079 __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
28080 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28081 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28082 uint64x2_t __ret;
28083 __ret = __rev0 - __noswap_vmovl_u32(__rev1);
28084 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
28085 return __ret;
28086 }
28087 #endif
28088
28089 #ifdef __LITTLE_ENDIAN__
28090 __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
28091 uint32x4_t __ret;
28092 __ret = __p0 - vmovl_u16(__p1);
28093 return __ret;
28094 }
28095 #else
28096 __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
28097 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28098 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28099 uint32x4_t __ret;
28100 __ret = __rev0 - __noswap_vmovl_u16(__rev1);
28101 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28102 return __ret;
28103 }
28104 #endif
28105
28106 #ifdef __LITTLE_ENDIAN__
28107 __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
28108 int16x8_t __ret;
28109 __ret = __p0 - vmovl_s8(__p1);
28110 return __ret;
28111 }
28112 #else
28113 __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
28114 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28115 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28116 int16x8_t __ret;
28117 __ret = __rev0 - __noswap_vmovl_s8(__rev1);
28118 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28119 return __ret;
28120 }
28121 #endif
28122
28123 #ifdef __LITTLE_ENDIAN__
28124 __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
28125 int64x2_t __ret;
28126 __ret = __p0 - vmovl_s32(__p1);
28127 return __ret;
28128 }
28129 #else
28130 __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
28131 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28132 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28133 int64x2_t __ret;
28134 __ret = __rev0 - __noswap_vmovl_s32(__rev1);
28135 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
28136 return __ret;
28137 }
28138 #endif
28139
28140 #ifdef __LITTLE_ENDIAN__
28141 __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
28142 int32x4_t __ret;
28143 __ret = __p0 - vmovl_s16(__p1);
28144 return __ret;
28145 }
28146 #else
28147 __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
28148 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28149 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28150 int32x4_t __ret;
28151 __ret = __rev0 - __noswap_vmovl_s16(__rev1);
28152 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
28153 return __ret;
28154 }
28155 #endif
28156
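/*
 * The vtbl1_* through vtbl4_* intrinsics below perform a byte-wise table
 * lookup: each lane of the index vector selects one byte from a table of 8,
 * 16, 24 or 32 bytes held in one to four 64-bit vectors, and any index past
 * the end of the table yields 0. A minimal usage sketch (illustrative only;
 * variable names are hypothetical, little-endian lane ordering assumed):
 *
 *   uint8x8_t table = vcreate_u8(0x0706050403020100ULL); // lanes 0..7 hold 0..7
 *   uint8x8_t idx   = vcreate_u8(0xFF00010203040506ULL); // last lane is out of range
 *   uint8x8_t r     = vtbl1_u8(table, idx);              // out-of-range lane becomes 0
 */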
28157 #ifdef __LITTLE_ENDIAN__
28158 __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
28159 poly8x8_t __ret;
28160 __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
28161 return __ret;
28162 }
28163 #else
28164 __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
28165 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28166 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28167 poly8x8_t __ret;
28168 __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
28169 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28170 return __ret;
28171 }
28172 #endif
28173
28174 #ifdef __LITTLE_ENDIAN__
28175 __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
28176 uint8x8_t __ret;
28177 __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
28178 return __ret;
28179 }
28180 #else
28181 __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
28182 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28183 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28184 uint8x8_t __ret;
28185 __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
28186 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28187 return __ret;
28188 }
28189 #endif
28190
28191 #ifdef __LITTLE_ENDIAN__
28192 __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
28193 int8x8_t __ret;
28194 __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
28195 return __ret;
28196 }
28197 #else
28198 __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
28199 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28200 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28201 int8x8_t __ret;
28202 __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
28203 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28204 return __ret;
28205 }
28206 #endif
28207
28208 #ifdef __LITTLE_ENDIAN__
28209 __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
28210 poly8x8_t __ret;
28211 __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
28212 return __ret;
28213 }
28214 #else
28215 __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
28216 poly8x8x2_t __rev0;
28217 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28218 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28219 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28220 poly8x8_t __ret;
28221 __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
28222 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28223 return __ret;
28224 }
28225 #endif
28226
28227 #ifdef __LITTLE_ENDIAN__
28228 __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
28229 uint8x8_t __ret;
28230 __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
28231 return __ret;
28232 }
28233 #else
28234 __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
28235 uint8x8x2_t __rev0;
28236 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28237 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28238 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28239 uint8x8_t __ret;
28240 __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
28241 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28242 return __ret;
28243 }
28244 #endif
28245
28246 #ifdef __LITTLE_ENDIAN__
28247 __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
28248 int8x8_t __ret;
28249 __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
28250 return __ret;
28251 }
28252 #else
28253 __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
28254 int8x8x2_t __rev0;
28255 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28256 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28257 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28258 int8x8_t __ret;
28259 __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
28260 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28261 return __ret;
28262 }
28263 #endif
28264
28265 #ifdef __LITTLE_ENDIAN__
28266 __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
28267 poly8x8_t __ret;
28268 __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
28269 return __ret;
28270 }
28271 #else
28272 __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
28273 poly8x8x3_t __rev0;
28274 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28275 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28276 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28277 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28278 poly8x8_t __ret;
28279 __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
28280 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28281 return __ret;
28282 }
28283 #endif
28284
28285 #ifdef __LITTLE_ENDIAN__
28286 __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
28287 uint8x8_t __ret;
28288 __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
28289 return __ret;
28290 }
28291 #else
28292 __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
28293 uint8x8x3_t __rev0;
28294 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28295 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28296 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28297 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28298 uint8x8_t __ret;
28299 __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
28300 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28301 return __ret;
28302 }
28303 #endif
28304
28305 #ifdef __LITTLE_ENDIAN__
28306 __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
28307 int8x8_t __ret;
28308 __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
28309 return __ret;
28310 }
28311 #else
28312 __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
28313 int8x8x3_t __rev0;
28314 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28315 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28316 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28317 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28318 int8x8_t __ret;
28319 __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
28320 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28321 return __ret;
28322 }
28323 #endif
28324
28325 #ifdef __LITTLE_ENDIAN__
28326 __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
28327 poly8x8_t __ret;
28328 __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
28329 return __ret;
28330 }
28331 #else
28332 __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
28333 poly8x8x4_t __rev0;
28334 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28335 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28336 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28337 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28338 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28339 poly8x8_t __ret;
28340 __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
28341 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28342 return __ret;
28343 }
28344 #endif
28345
28346 #ifdef __LITTLE_ENDIAN__
28347 __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
28348 uint8x8_t __ret;
28349 __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
28350 return __ret;
28351 }
28352 #else
28353 __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
28354 uint8x8x4_t __rev0;
28355 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28356 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28357 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28358 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28359 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28360 uint8x8_t __ret;
28361 __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
28362 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28363 return __ret;
28364 }
28365 #endif
28366
28367 #ifdef __LITTLE_ENDIAN__
28368 __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
28369 int8x8_t __ret;
28370 __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
28371 return __ret;
28372 }
28373 #else
28374 __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
28375 int8x8x4_t __rev0;
28376 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28377 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28378 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28379 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28380 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28381 int8x8_t __ret;
28382 __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
28383 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28384 return __ret;
28385 }
28386 #endif
28387
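/*
 * The vtbx1_* through vtbx4_* intrinsics below are the extension form of the
 * table lookup: in-range indices are replaced by the looked-up byte, while
 * lanes with an out-of-range index keep the value of the corresponding lane
 * of the first argument. A minimal usage sketch (illustrative only; variable
 * names are hypothetical, little-endian lane ordering assumed):
 *
 *   uint8x8_t fallback = vdup_n_u8(0xAA);
 *   uint8x8_t table    = vcreate_u8(0x0706050403020100ULL);
 *   uint8x8_t idx      = vcreate_u8(0xFF00010203040506ULL);
 *   uint8x8_t r        = vtbx1_u8(fallback, table, idx);  // out-of-range lane stays 0xAA
 */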
28388 #ifdef __LITTLE_ENDIAN__
28389 __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
28390 poly8x8_t __ret;
28391 __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
28392 return __ret;
28393 }
28394 #else
28395 __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
28396 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28397 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28398 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28399 poly8x8_t __ret;
28400 __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
28401 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28402 return __ret;
28403 }
28404 #endif
28405
28406 #ifdef __LITTLE_ENDIAN__
28407 __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
28408 uint8x8_t __ret;
28409 __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
28410 return __ret;
28411 }
28412 #else
28413 __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
28414 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28415 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28416 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28417 uint8x8_t __ret;
28418 __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
28419 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28420 return __ret;
28421 }
28422 #endif
28423
28424 #ifdef __LITTLE_ENDIAN__
28425 __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
28426 int8x8_t __ret;
28427 __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
28428 return __ret;
28429 }
28430 #else
28431 __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
28432 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28433 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28434 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28435 int8x8_t __ret;
28436 __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
28437 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28438 return __ret;
28439 }
28440 #endif
28441
28442 #ifdef __LITTLE_ENDIAN__
28443 __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
28444 poly8x8_t __ret;
28445 __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
28446 return __ret;
28447 }
28448 #else
28449 __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
28450 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28451 poly8x8x2_t __rev1;
28452 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28453 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28454 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28455 poly8x8_t __ret;
28456 __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
28457 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28458 return __ret;
28459 }
28460 #endif
28461
28462 #ifdef __LITTLE_ENDIAN__
28463 __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
28464 uint8x8_t __ret;
28465 __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
28466 return __ret;
28467 }
28468 #else
28469 __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
28470 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28471 uint8x8x2_t __rev1;
28472 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28473 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28474 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28475 uint8x8_t __ret;
28476 __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
28477 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28478 return __ret;
28479 }
28480 #endif
28481
28482 #ifdef __LITTLE_ENDIAN__
28483 __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
28484 int8x8_t __ret;
28485 __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
28486 return __ret;
28487 }
28488 #else
28489 __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
28490 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28491 int8x8x2_t __rev1;
28492 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28493 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28494 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28495 int8x8_t __ret;
28496 __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
28497 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28498 return __ret;
28499 }
28500 #endif
28501
28502 #ifdef __LITTLE_ENDIAN__
28503 __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
28504 poly8x8_t __ret;
28505 __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
28506 return __ret;
28507 }
28508 #else
28509 __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
28510 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28511 poly8x8x3_t __rev1;
28512 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28513 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28514 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28515 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28516 poly8x8_t __ret;
28517 __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
28518 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28519 return __ret;
28520 }
28521 #endif
28522
28523 #ifdef __LITTLE_ENDIAN__
28524 __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
28525 uint8x8_t __ret;
28526 __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
28527 return __ret;
28528 }
28529 #else
28530 __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
28531 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28532 uint8x8x3_t __rev1;
28533 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28534 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28535 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28536 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28537 uint8x8_t __ret;
28538 __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
28539 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28540 return __ret;
28541 }
28542 #endif
28543
28544 #ifdef __LITTLE_ENDIAN__
28545 __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
28546 int8x8_t __ret;
28547 __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
28548 return __ret;
28549 }
28550 #else
28551 __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
28552 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28553 int8x8x3_t __rev1;
28554 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28555 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28556 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28557 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28558 int8x8_t __ret;
28559 __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
28560 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28561 return __ret;
28562 }
28563 #endif
28564
28565 #ifdef __LITTLE_ENDIAN__
28566 __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
28567 poly8x8_t __ret;
28568 __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
28569 return __ret;
28570 }
28571 #else
28572 __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
28573 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28574 poly8x8x4_t __rev1;
28575 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28576 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28577 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28578 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28579 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28580 poly8x8_t __ret;
28581 __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
28582 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28583 return __ret;
28584 }
28585 #endif
28586
28587 #ifdef __LITTLE_ENDIAN__
28588 __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
28589 uint8x8_t __ret;
28590 __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
28591 return __ret;
28592 }
28593 #else
28594 __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
28595 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28596 uint8x8x4_t __rev1;
28597 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28598 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28599 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28600 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28601 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28602 uint8x8_t __ret;
28603 __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
28604 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28605 return __ret;
28606 }
28607 #endif
28608
28609 #ifdef __LITTLE_ENDIAN__
28610 __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
28611 int8x8_t __ret;
28612 __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
28613 return __ret;
28614 }
28615 #else
28616 __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
28617 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28618 int8x8x4_t __rev1;
28619 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28620 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28621 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
28622 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
28623 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
28624 int8x8_t __ret;
28625 __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
28626 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28627 return __ret;
28628 }
28629 #endif
28630
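/*
 * The vtrn_* and vtrnq_* intrinsics below transpose lane pairs from two
 * vectors: val[0] collects the even-numbered lanes of both inputs and val[1]
 * the odd-numbered lanes, which is the usual building block for small matrix
 * transposes. A minimal usage sketch (illustrative only; variable names are
 * hypothetical, little-endian lane ordering assumed):
 *
 *   uint32x2_t a = vcreate_u32(0x0000000100000000ULL);  // lanes {0, 1}
 *   uint32x2_t b = vcreate_u32(0x0000000300000002ULL);  // lanes {2, 3}
 *   uint32x2x2_t t = vtrn_u32(a, b);                    // t.val[0] == {0, 2}, t.val[1] == {1, 3}
 */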
28631 #ifdef __LITTLE_ENDIAN__
28632 __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
28633 poly8x8x2_t __ret;
28634 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
28635 return __ret;
28636 }
28637 #else
28638 __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
28639 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28640 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28641 poly8x8x2_t __ret;
28642 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
28643
28644 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28645 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28646 return __ret;
28647 }
28648 #endif
28649
28650 #ifdef __LITTLE_ENDIAN__
28651 __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
28652 poly16x4x2_t __ret;
28653 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
28654 return __ret;
28655 }
28656 #else
28657 __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
28658 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28659 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28660 poly16x4x2_t __ret;
28661 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
28662
28663 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28664 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28665 return __ret;
28666 }
28667 #endif
28668
28669 #ifdef __LITTLE_ENDIAN__
28670 __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
28671 poly8x16x2_t __ret;
28672 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
28673 return __ret;
28674 }
28675 #else
28676 __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
28677 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28678 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28679 poly8x16x2_t __ret;
28680 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
28681
28682 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28683 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28684 return __ret;
28685 }
28686 #endif
28687
28688 #ifdef __LITTLE_ENDIAN__
28689 __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
28690 poly16x8x2_t __ret;
28691 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
28692 return __ret;
28693 }
28694 #else
28695 __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
28696 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28697 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28698 poly16x8x2_t __ret;
28699 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
28700
28701 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28702 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28703 return __ret;
28704 }
28705 #endif
28706
28707 #ifdef __LITTLE_ENDIAN__
28708 __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
28709 uint8x16x2_t __ret;
28710 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
28711 return __ret;
28712 }
28713 #else
28714 __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
28715 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28716 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28717 uint8x16x2_t __ret;
28718 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
28719
28720 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28721 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28722 return __ret;
28723 }
28724 #endif
28725
28726 #ifdef __LITTLE_ENDIAN__
28727 __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
28728 uint32x4x2_t __ret;
28729 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
28730 return __ret;
28731 }
28732 #else
28733 __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
28734 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28735 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28736 uint32x4x2_t __ret;
28737 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
28738
28739 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28740 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28741 return __ret;
28742 }
28743 #endif
28744
28745 #ifdef __LITTLE_ENDIAN__
28746 __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
28747 uint16x8x2_t __ret;
28748 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
28749 return __ret;
28750 }
28751 #else
28752 __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
28753 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28754 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28755 uint16x8x2_t __ret;
28756 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
28757
28758 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28759 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28760 return __ret;
28761 }
28762 #endif
28763
28764 #ifdef __LITTLE_ENDIAN__
28765 __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
28766 int8x16x2_t __ret;
28767 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
28768 return __ret;
28769 }
28770 #else
28771 __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
28772 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28773 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28774 int8x16x2_t __ret;
28775 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
28776
28777 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28778 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
28779 return __ret;
28780 }
28781 #endif
28782
28783 #ifdef __LITTLE_ENDIAN__
28784 __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
28785 float32x4x2_t __ret;
28786 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
28787 return __ret;
28788 }
28789 #else
28790 __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
28791 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28792 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28793 float32x4x2_t __ret;
28794 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
28795
28796 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28797 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28798 return __ret;
28799 }
28800 #endif
28801
28802 #ifdef __LITTLE_ENDIAN__
28803 __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
28804 int32x4x2_t __ret;
28805 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
28806 return __ret;
28807 }
28808 #else
28809 __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
28810 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28811 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28812 int32x4x2_t __ret;
28813 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
28814
28815 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28816 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28817 return __ret;
28818 }
28819 #endif
28820
28821 #ifdef __LITTLE_ENDIAN__
28822 __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
28823 int16x8x2_t __ret;
28824 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
28825 return __ret;
28826 }
28827 #else
28828 __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
28829 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28830 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28831 int16x8x2_t __ret;
28832 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
28833
28834 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28835 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28836 return __ret;
28837 }
28838 #endif
28839
28840 #ifdef __LITTLE_ENDIAN__
28841 __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
28842 uint8x8x2_t __ret;
28843 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
28844 return __ret;
28845 }
28846 #else
28847 __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
28848 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28849 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28850 uint8x8x2_t __ret;
28851 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
28852
28853 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28854 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28855 return __ret;
28856 }
28857 #endif
28858
28859 #ifdef __LITTLE_ENDIAN__
28860 __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
28861 uint32x2x2_t __ret;
28862 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
28863 return __ret;
28864 }
28865 #else
28866 __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
28867 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28868 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28869 uint32x2x2_t __ret;
28870 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
28871
28872 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
28873 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
28874 return __ret;
28875 }
28876 #endif
28877
28878 #ifdef __LITTLE_ENDIAN__
28879 __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
28880 uint16x4x2_t __ret;
28881 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
28882 return __ret;
28883 }
28884 #else
28885 __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
28886 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28887 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28888 uint16x4x2_t __ret;
28889 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
28890
28891 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28892 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28893 return __ret;
28894 }
28895 #endif
28896
28897 #ifdef __LITTLE_ENDIAN__
28898 __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
28899 int8x8x2_t __ret;
28900 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
28901 return __ret;
28902 }
28903 #else
28904 __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
28905 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28906 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28907 int8x8x2_t __ret;
28908 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
28909
28910 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
28911 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
28912 return __ret;
28913 }
28914 #endif
28915
28916 #ifdef __LITTLE_ENDIAN__
28917 __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
28918 float32x2x2_t __ret;
28919 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
28920 return __ret;
28921 }
28922 #else
28923 __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
28924 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28925 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28926 float32x2x2_t __ret;
28927 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
28928
28929 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
28930 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
28931 return __ret;
28932 }
28933 #endif
28934
28935 #ifdef __LITTLE_ENDIAN__
28936 __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
28937 int32x2x2_t __ret;
28938 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
28939 return __ret;
28940 }
28941 #else
28942 __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
28943 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
28944 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
28945 int32x2x2_t __ret;
28946 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
28947
28948 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
28949 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
28950 return __ret;
28951 }
28952 #endif
28953
28954 #ifdef __LITTLE_ENDIAN__
28955 __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
28956 int16x4x2_t __ret;
28957 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
28958 return __ret;
28959 }
28960 #else
28961 __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
28962 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28963 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
28964 int16x4x2_t __ret;
28965 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
28966
28967 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
28968 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
28969 return __ret;
28970 }
28971 #endif
28972
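/*
 * The vtst_* and vtstq_* intrinsics below perform a bitwise test: an output
 * lane is set to all ones when (a & b) is non-zero in that lane and to all
 * zeros otherwise, producing an unsigned mask of the same lane width.
 * A minimal usage sketch (illustrative only; variable names are hypothetical):
 *
 *   uint8x8_t a = vdup_n_u8(0x0F);
 *   uint8x8_t b = vdup_n_u8(0xF0);
 *   uint8x8_t m = vtst_u8(a, b);   // every lane: (0x0F & 0xF0) == 0, so the mask is 0x00
 */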
28973 #ifdef __LITTLE_ENDIAN__
28974 __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
28975 uint8x8_t __ret;
28976 __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
28977 return __ret;
28978 }
28979 #else
28980 __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
28981 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
28982 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
28983 uint8x8_t __ret;
28984 __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
28985 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
28986 return __ret;
28987 }
28988 #endif
28989
28990 #ifdef __LITTLE_ENDIAN__
28991 __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
28992 uint16x4_t __ret;
28993 __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
28994 return __ret;
28995 }
28996 #else
28997 __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
28998 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
28999 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29000 uint16x4_t __ret;
29001 __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29002 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29003 return __ret;
29004 }
29005 #endif
29006
29007 #ifdef __LITTLE_ENDIAN__
29008 __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29009 uint8x16_t __ret;
29010 __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
29011 return __ret;
29012 }
29013 #else
29014 __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29015 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29016 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29017 uint8x16_t __ret;
29018 __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29019 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29020 return __ret;
29021 }
29022 #endif
29023
29024 #ifdef __LITTLE_ENDIAN__
29025 __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29026 uint16x8_t __ret;
29027 __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
29028 return __ret;
29029 }
29030 #else
29031 __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29032 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29033 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29034 uint16x8_t __ret;
29035 __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29036 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29037 return __ret;
29038 }
29039 #endif
29040
29041 #ifdef __LITTLE_ENDIAN__
29042 __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29043 uint8x16_t __ret;
29044 __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
29045 return __ret;
29046 }
29047 #else
29048 __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29049 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29050 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29051 uint8x16_t __ret;
29052 __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29053 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29054 return __ret;
29055 }
29056 #endif
29057
29058 #ifdef __LITTLE_ENDIAN__
29059 __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29060 uint32x4_t __ret;
29061 __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
29062 return __ret;
29063 }
29064 #else
29065 __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29066 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29067 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29068 uint32x4_t __ret;
29069 __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29070 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29071 return __ret;
29072 }
29073 #endif
29074
29075 #ifdef __LITTLE_ENDIAN__
29076 __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29077 uint16x8_t __ret;
29078 __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
29079 return __ret;
29080 }
29081 #else
29082 __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29083 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29084 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29085 uint16x8_t __ret;
29086 __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29087 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29088 return __ret;
29089 }
29090 #endif
29091
29092 #ifdef __LITTLE_ENDIAN__
29093 __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
29094 uint8x16_t __ret;
29095 __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
29096 return __ret;
29097 }
29098 #else
29099 __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
29100 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29101 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29102 uint8x16_t __ret;
29103 __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29104 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29105 return __ret;
29106 }
29107 #endif
29108
29109 #ifdef __LITTLE_ENDIAN__
29110 __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
29111 uint32x4_t __ret;
29112 __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
29113 return __ret;
29114 }
29115 #else
29116 __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
29117 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29118 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29119 uint32x4_t __ret;
29120 __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29121 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29122 return __ret;
29123 }
29124 #endif
29125
29126 #ifdef __LITTLE_ENDIAN__
29127 __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
29128 uint16x8_t __ret;
29129 __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
29130 return __ret;
29131 }
29132 #else
29133 __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
29134 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29135 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29136 uint16x8_t __ret;
29137 __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29138 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29139 return __ret;
29140 }
29141 #endif
29142
29143 #ifdef __LITTLE_ENDIAN__
29144 __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
29145 uint8x8_t __ret;
29146 __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
29147 return __ret;
29148 }
29149 #else
29150 __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
29151 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29152 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29153 uint8x8_t __ret;
29154 __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29155 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29156 return __ret;
29157 }
29158 #endif
29159
29160 #ifdef __LITTLE_ENDIAN__
29161 __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
29162 uint32x2_t __ret;
29163 __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
29164 return __ret;
29165 }
29166 #else
29167 __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
29168 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29169 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29170 uint32x2_t __ret;
29171 __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29172 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
29173 return __ret;
29174 }
29175 #endif
29176
29177 #ifdef __LITTLE_ENDIAN__
29178 __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
29179 uint16x4_t __ret;
29180 __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
29181 return __ret;
29182 }
29183 #else
29184 __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
29185 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29186 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29187 uint16x4_t __ret;
29188 __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29189 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29190 return __ret;
29191 }
29192 #endif
29193
29194 #ifdef __LITTLE_ENDIAN__
29195 __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
29196 uint8x8_t __ret;
29197 __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
29198 return __ret;
29199 }
29200 #else
29201 __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
29202 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29203 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29204 uint8x8_t __ret;
29205 __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29206 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
29207 return __ret;
29208 }
29209 #endif
29210
29211 #ifdef __LITTLE_ENDIAN__
29212 __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
29213 uint32x2_t __ret;
29214 __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
29215 return __ret;
29216 }
29217 #else
29218 __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
29219 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29220 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29221 uint32x2_t __ret;
29222 __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29223 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
29224 return __ret;
29225 }
29226 #endif
29227
29228 #ifdef __LITTLE_ENDIAN__
29229 __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
29230 uint16x4_t __ret;
29231 __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
29232 return __ret;
29233 }
29234 #else
29235 __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
29236 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29237 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29238 uint16x4_t __ret;
29239 __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29240 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
29241 return __ret;
29242 }
29243 #endif
29244
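/*
 * vuzp family: de-interleave (unzip) a pair of vectors. val[0] gathers the
 * even-indexed lanes and val[1] the odd-indexed lanes of the two inputs taken
 * in sequence; e.g. (illustrative) vuzp_u8({a0..a7}, {b0..b7}) yields
 * val[0] = {a0,a2,a4,a6,b0,b2,b4,b6} and val[1] = {a1,a3,a5,a7,b1,b3,b5,b7}.
 */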
29245 #ifdef __LITTLE_ENDIAN__
29246 __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
29247 poly8x8x2_t __ret;
29248 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
29249 return __ret;
29250 }
29251 #else
29252 __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
29253 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29254 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29255 poly8x8x2_t __ret;
29256 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
29257
29258 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29259 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29260 return __ret;
29261 }
29262 #endif
29263
29264 #ifdef __LITTLE_ENDIAN__
29265 __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
29266 poly16x4x2_t __ret;
29267 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
29268 return __ret;
29269 }
29270 #else
29271 __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
29272 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29273 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29274 poly16x4x2_t __ret;
29275 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
29276
29277 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29278 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29279 return __ret;
29280 }
29281 #endif
29282
29283 #ifdef __LITTLE_ENDIAN__
29284 __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29285 poly8x16x2_t __ret;
29286 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
29287 return __ret;
29288 }
29289 #else
29290 __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29291 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29292 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29293 poly8x16x2_t __ret;
29294 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
29295
29296 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29297 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29298 return __ret;
29299 }
29300 #endif
29301
29302 #ifdef __LITTLE_ENDIAN__
29303 __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29304 poly16x8x2_t __ret;
29305 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
29306 return __ret;
29307 }
29308 #else
29309 __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29310 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29311 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29312 poly16x8x2_t __ret;
29313 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
29314
29315 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29316 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29317 return __ret;
29318 }
29319 #endif
29320
29321 #ifdef __LITTLE_ENDIAN__
29322 __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29323 uint8x16x2_t __ret;
29324 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
29325 return __ret;
29326 }
29327 #else
29328 __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29329 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29330 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29331 uint8x16x2_t __ret;
29332 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29333
29334 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29335 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29336 return __ret;
29337 }
29338 #endif
29339
29340 #ifdef __LITTLE_ENDIAN__
29341 __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29342 uint32x4x2_t __ret;
29343 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
29344 return __ret;
29345 }
29346 #else
29347 __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29348 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29349 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29350 uint32x4x2_t __ret;
29351 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29352
29353 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29354 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29355 return __ret;
29356 }
29357 #endif
29358
29359 #ifdef __LITTLE_ENDIAN__
29360 __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29361 uint16x8x2_t __ret;
29362 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
29363 return __ret;
29364 }
29365 #else
29366 __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29367 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29368 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29369 uint16x8x2_t __ret;
29370 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29371
29372 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29373 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29374 return __ret;
29375 }
29376 #endif
29377
29378 #ifdef __LITTLE_ENDIAN__
29379 __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
29380 int8x16x2_t __ret;
29381 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
29382 return __ret;
29383 }
29384 #else
29385 __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
29386 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29387 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29388 int8x16x2_t __ret;
29389 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
29390
29391 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29392 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29393 return __ret;
29394 }
29395 #endif
29396
29397 #ifdef __LITTLE_ENDIAN__
29398 __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
29399 float32x4x2_t __ret;
29400 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
29401 return __ret;
29402 }
29403 #else
29404 __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
29405 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29406 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29407 float32x4x2_t __ret;
29408 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
29409
29410 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29411 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29412 return __ret;
29413 }
29414 #endif
29415
29416 #ifdef __LITTLE_ENDIAN__
29417 __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
29418 int32x4x2_t __ret;
29419 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
29420 return __ret;
29421 }
29422 #else
29423 __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
29424 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29425 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29426 int32x4x2_t __ret;
29427 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
29428
29429 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29430 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29431 return __ret;
29432 }
29433 #endif
29434
29435 #ifdef __LITTLE_ENDIAN__
29436 __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
29437 int16x8x2_t __ret;
29438 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
29439 return __ret;
29440 }
29441 #else
29442 __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
29443 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29444 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29445 int16x8x2_t __ret;
29446 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
29447
29448 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29449 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29450 return __ret;
29451 }
29452 #endif
29453
29454 #ifdef __LITTLE_ENDIAN__
29455 __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
29456 uint8x8x2_t __ret;
29457 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
29458 return __ret;
29459 }
29460 #else
29461 __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
29462 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29463 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29464 uint8x8x2_t __ret;
29465 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29466
29467 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29468 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29469 return __ret;
29470 }
29471 #endif
29472
29473 #ifdef __LITTLE_ENDIAN__
29474 __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
29475 uint32x2x2_t __ret;
29476 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
29477 return __ret;
29478 }
29479 #else
29480 __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
29481 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29482 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29483 uint32x2x2_t __ret;
29484 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29485
29486 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29487 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29488 return __ret;
29489 }
29490 #endif
29491
29492 #ifdef __LITTLE_ENDIAN__
29493 __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
29494 uint16x4x2_t __ret;
29495 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
29496 return __ret;
29497 }
29498 #else
29499 __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
29500 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29501 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29502 uint16x4x2_t __ret;
29503 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29504
29505 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29506 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29507 return __ret;
29508 }
29509 #endif
29510
29511 #ifdef __LITTLE_ENDIAN__
29512 __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
29513 int8x8x2_t __ret;
29514 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
29515 return __ret;
29516 }
29517 #else
29518 __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
29519 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29520 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29521 int8x8x2_t __ret;
29522 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
29523
29524 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29525 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29526 return __ret;
29527 }
29528 #endif
29529
29530 #ifdef __LITTLE_ENDIAN__
29531 __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
29532 float32x2x2_t __ret;
29533 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
29534 return __ret;
29535 }
29536 #else
29537 __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
29538 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29539 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29540 float32x2x2_t __ret;
29541 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
29542
29543 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29544 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29545 return __ret;
29546 }
29547 #endif
29548
29549 #ifdef __LITTLE_ENDIAN__
29550 __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
29551 int32x2x2_t __ret;
29552 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
29553 return __ret;
29554 }
29555 #else
29556 __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
29557 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29558 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29559 int32x2x2_t __ret;
29560 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
29561
29562 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29563 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29564 return __ret;
29565 }
29566 #endif
29567
29568 #ifdef __LITTLE_ENDIAN__
29569 __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
29570 int16x4x2_t __ret;
29571 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
29572 return __ret;
29573 }
29574 #else
29575 __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
29576 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29577 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29578 int16x4x2_t __ret;
29579 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
29580
29581 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29582 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29583 return __ret;
29584 }
29585 #endif
29586
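/*
 * vzip family: interleave (zip) a pair of vectors. val[0] holds the
 * interleaved low halves and val[1] the interleaved high halves; e.g.
 * (illustrative) vzip_u8({a0..a7}, {b0..b7}) yields
 * val[0] = {a0,b0,a1,b1,a2,b2,a3,b3} and val[1] = {a4,b4,a5,b5,a6,b6,a7,b7}.
 */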
29587 #ifdef __LITTLE_ENDIAN__
29588 __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
29589 poly8x8x2_t __ret;
29590 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
29591 return __ret;
29592 }
29593 #else
29594 __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
29595 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29596 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29597 poly8x8x2_t __ret;
29598 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
29599
29600 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29601 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29602 return __ret;
29603 }
29604 #endif
29605
29606 #ifdef __LITTLE_ENDIAN__
29607 __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
29608 poly16x4x2_t __ret;
29609 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
29610 return __ret;
29611 }
29612 #else
29613 __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
29614 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29615 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29616 poly16x4x2_t __ret;
29617 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
29618
29619 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29620 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29621 return __ret;
29622 }
29623 #endif
29624
29625 #ifdef __LITTLE_ENDIAN__
29626 __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29627 poly8x16x2_t __ret;
29628 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
29629 return __ret;
29630 }
29631 #else
29632 __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
29633 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29634 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29635 poly8x16x2_t __ret;
29636 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
29637
29638 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29639 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29640 return __ret;
29641 }
29642 #endif
29643
29644 #ifdef __LITTLE_ENDIAN__
29645 __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29646 poly16x8x2_t __ret;
29647 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
29648 return __ret;
29649 }
29650 #else
29651 __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
29652 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29653 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29654 poly16x8x2_t __ret;
29655 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
29656
29657 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29658 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29659 return __ret;
29660 }
29661 #endif
29662
29663 #ifdef __LITTLE_ENDIAN__
29664 __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29665 uint8x16x2_t __ret;
29666 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
29667 return __ret;
29668 }
29669 #else
29670 __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
29671 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29672 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29673 uint8x16x2_t __ret;
29674 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
29675
29676 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29677 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29678 return __ret;
29679 }
29680 #endif
29681
29682 #ifdef __LITTLE_ENDIAN__
29683 __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29684 uint32x4x2_t __ret;
29685 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
29686 return __ret;
29687 }
29688 #else
29689 __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
29690 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29691 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29692 uint32x4x2_t __ret;
29693 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
29694
29695 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29696 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29697 return __ret;
29698 }
29699 #endif
29700
29701 #ifdef __LITTLE_ENDIAN__
29702 __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29703 uint16x8x2_t __ret;
29704 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
29705 return __ret;
29706 }
29707 #else
29708 __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
29709 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29710 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29711 uint16x8x2_t __ret;
29712 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
29713
29714 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29715 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29716 return __ret;
29717 }
29718 #endif
29719
29720 #ifdef __LITTLE_ENDIAN__
29721 __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
29722 int8x16x2_t __ret;
29723 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
29724 return __ret;
29725 }
29726 #else
29727 __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
29728 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29729 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29730 int8x16x2_t __ret;
29731 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
29732
29733 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29734 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
29735 return __ret;
29736 }
29737 #endif
29738
29739 #ifdef __LITTLE_ENDIAN__
29740 __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
29741 float32x4x2_t __ret;
29742 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
29743 return __ret;
29744 }
29745 #else
29746 __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
29747 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29748 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29749 float32x4x2_t __ret;
29750 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
29751
29752 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29753 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29754 return __ret;
29755 }
29756 #endif
29757
29758 #ifdef __LITTLE_ENDIAN__
29759 __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
29760 int32x4x2_t __ret;
29761 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
29762 return __ret;
29763 }
29764 #else
29765 __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
29766 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29767 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29768 int32x4x2_t __ret;
29769 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
29770
29771 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29772 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29773 return __ret;
29774 }
29775 #endif
29776
29777 #ifdef __LITTLE_ENDIAN__
29778 __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
29779 int16x8x2_t __ret;
29780 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
29781 return __ret;
29782 }
29783 #else
29784 __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
29785 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29786 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29787 int16x8x2_t __ret;
29788 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
29789
29790 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29791 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29792 return __ret;
29793 }
29794 #endif
29795
29796 #ifdef __LITTLE_ENDIAN__
29797 __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
29798 uint8x8x2_t __ret;
29799 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
29800 return __ret;
29801 }
29802 #else
29803 __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
29804 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29805 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29806 uint8x8x2_t __ret;
29807 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
29808
29809 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29810 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29811 return __ret;
29812 }
29813 #endif
29814
29815 #ifdef __LITTLE_ENDIAN__
29816 __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
29817 uint32x2x2_t __ret;
29818 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
29819 return __ret;
29820 }
29821 #else
29822 __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
29823 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29824 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29825 uint32x2x2_t __ret;
29826 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
29827
29828 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29829 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29830 return __ret;
29831 }
29832 #endif
29833
29834 #ifdef __LITTLE_ENDIAN__
29835 __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
29836 uint16x4x2_t __ret;
29837 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
29838 return __ret;
29839 }
29840 #else
29841 __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
29842 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29843 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29844 uint16x4x2_t __ret;
29845 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
29846
29847 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29848 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29849 return __ret;
29850 }
29851 #endif
29852
29853 #ifdef __LITTLE_ENDIAN__
29854 __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
29855 int8x8x2_t __ret;
29856 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
29857 return __ret;
29858 }
29859 #else
29860 __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
29861 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
29862 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
29863 int8x8x2_t __ret;
29864 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
29865
29866 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
29867 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
29868 return __ret;
29869 }
29870 #endif
29871
29872 #ifdef __LITTLE_ENDIAN__
29873 __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
29874 float32x2x2_t __ret;
29875 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
29876 return __ret;
29877 }
29878 #else
29879 __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
29880 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29881 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29882 float32x2x2_t __ret;
29883 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
29884
29885 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29886 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29887 return __ret;
29888 }
29889 #endif
29890
29891 #ifdef __LITTLE_ENDIAN__
29892 __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
29893 int32x2x2_t __ret;
29894 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
29895 return __ret;
29896 }
29897 #else
29898 __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
29899 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
29900 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
29901 int32x2x2_t __ret;
29902 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
29903
29904 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
29905 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
29906 return __ret;
29907 }
29908 #endif
29909
29910 #ifdef __LITTLE_ENDIAN__
29911 __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
29912 int16x4x2_t __ret;
29913 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
29914 return __ret;
29915 }
29916 #else
29917 __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
29918 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
29919 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
29920 int16x4x2_t __ret;
29921 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
29922
29923 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
29924 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
29925 return __ret;
29926 }
29927 #endif
29928
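/*
 * vreinterpret family (AArch32-only block below): bit-pattern preserving
 * casts between equally sized vector types. No lanes are converted or
 * reordered; each function is a plain vector cast, so the little- and
 * big-endian branches are identical.
 */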
29929 #if !defined(__aarch64__)
29930 #ifdef __LITTLE_ENDIAN__
29931 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
29932 poly8x8_t __ret;
29933 __ret = (poly8x8_t)(__p0);
29934 return __ret;
29935 }
29936 #else
29937 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
29938 poly8x8_t __ret;
29939 __ret = (poly8x8_t)(__p0);
29940 return __ret;
29941 }
29942 #endif
29943
29944 #ifdef __LITTLE_ENDIAN__
29945 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
29946 poly8x8_t __ret;
29947 __ret = (poly8x8_t)(__p0);
29948 return __ret;
29949 }
29950 #else
29951 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
29952 poly8x8_t __ret;
29953 __ret = (poly8x8_t)(__p0);
29954 return __ret;
29955 }
29956 #endif
29957
29958 #ifdef __LITTLE_ENDIAN__
29959 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
29960 poly8x8_t __ret;
29961 __ret = (poly8x8_t)(__p0);
29962 return __ret;
29963 }
29964 #else
29965 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
29966 poly8x8_t __ret;
29967 __ret = (poly8x8_t)(__p0);
29968 return __ret;
29969 }
29970 #endif
29971
29972 #ifdef __LITTLE_ENDIAN__
29973 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
29974 poly8x8_t __ret;
29975 __ret = (poly8x8_t)(__p0);
29976 return __ret;
29977 }
29978 #else
29979 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
29980 poly8x8_t __ret;
29981 __ret = (poly8x8_t)(__p0);
29982 return __ret;
29983 }
29984 #endif
29985
29986 #ifdef __LITTLE_ENDIAN__
29987 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
29988 poly8x8_t __ret;
29989 __ret = (poly8x8_t)(__p0);
29990 return __ret;
29991 }
29992 #else
29993 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
29994 poly8x8_t __ret;
29995 __ret = (poly8x8_t)(__p0);
29996 return __ret;
29997 }
29998 #endif
29999
30000 #ifdef __LITTLE_ENDIAN__
30001 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
30002 poly8x8_t __ret;
30003 __ret = (poly8x8_t)(__p0);
30004 return __ret;
30005 }
30006 #else
30007 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
30008 poly8x8_t __ret;
30009 __ret = (poly8x8_t)(__p0);
30010 return __ret;
30011 }
30012 #endif
30013
30014 #ifdef __LITTLE_ENDIAN__
30015 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
30016 poly8x8_t __ret;
30017 __ret = (poly8x8_t)(__p0);
30018 return __ret;
30019 }
30020 #else
30021 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
30022 poly8x8_t __ret;
30023 __ret = (poly8x8_t)(__p0);
30024 return __ret;
30025 }
30026 #endif
30027
30028 #ifdef __LITTLE_ENDIAN__
30029 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
30030 poly8x8_t __ret;
30031 __ret = (poly8x8_t)(__p0);
30032 return __ret;
30033 }
30034 #else
30035 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
30036 poly8x8_t __ret;
30037 __ret = (poly8x8_t)(__p0);
30038 return __ret;
30039 }
30040 #endif
30041
30042 #ifdef __LITTLE_ENDIAN__
30043 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
30044 poly8x8_t __ret;
30045 __ret = (poly8x8_t)(__p0);
30046 return __ret;
30047 }
30048 #else
30049 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
30050 poly8x8_t __ret;
30051 __ret = (poly8x8_t)(__p0);
30052 return __ret;
30053 }
30054 #endif
30055
30056 #ifdef __LITTLE_ENDIAN__
30057 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
30058 poly8x8_t __ret;
30059 __ret = (poly8x8_t)(__p0);
30060 return __ret;
30061 }
30062 #else
30063 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
30064 poly8x8_t __ret;
30065 __ret = (poly8x8_t)(__p0);
30066 return __ret;
30067 }
30068 #endif
30069
30070 #ifdef __LITTLE_ENDIAN__
30071 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
30072 poly8x8_t __ret;
30073 __ret = (poly8x8_t)(__p0);
30074 return __ret;
30075 }
30076 #else
30077 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
30078 poly8x8_t __ret;
30079 __ret = (poly8x8_t)(__p0);
30080 return __ret;
30081 }
30082 #endif
30083
30084 #ifdef __LITTLE_ENDIAN__
30085 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
30086 poly16x4_t __ret;
30087 __ret = (poly16x4_t)(__p0);
30088 return __ret;
30089 }
30090 #else
30091 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
30092 poly16x4_t __ret;
30093 __ret = (poly16x4_t)(__p0);
30094 return __ret;
30095 }
30096 #endif
30097
30098 #ifdef __LITTLE_ENDIAN__
30099 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
30100 poly16x4_t __ret;
30101 __ret = (poly16x4_t)(__p0);
30102 return __ret;
30103 }
30104 #else
30105 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
30106 poly16x4_t __ret;
30107 __ret = (poly16x4_t)(__p0);
30108 return __ret;
30109 }
30110 #endif
30111
30112 #ifdef __LITTLE_ENDIAN__
30113 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
30114 poly16x4_t __ret;
30115 __ret = (poly16x4_t)(__p0);
30116 return __ret;
30117 }
30118 #else
30119 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
30120 poly16x4_t __ret;
30121 __ret = (poly16x4_t)(__p0);
30122 return __ret;
30123 }
30124 #endif
30125
30126 #ifdef __LITTLE_ENDIAN__
30127 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
30128 poly16x4_t __ret;
30129 __ret = (poly16x4_t)(__p0);
30130 return __ret;
30131 }
30132 #else
30133 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
30134 poly16x4_t __ret;
30135 __ret = (poly16x4_t)(__p0);
30136 return __ret;
30137 }
30138 #endif
30139
30140 #ifdef __LITTLE_ENDIAN__
30141 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
30142 poly16x4_t __ret;
30143 __ret = (poly16x4_t)(__p0);
30144 return __ret;
30145 }
30146 #else
30147 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
30148 poly16x4_t __ret;
30149 __ret = (poly16x4_t)(__p0);
30150 return __ret;
30151 }
30152 #endif
30153
30154 #ifdef __LITTLE_ENDIAN__
30155 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
30156 poly16x4_t __ret;
30157 __ret = (poly16x4_t)(__p0);
30158 return __ret;
30159 }
30160 #else
30161 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
30162 poly16x4_t __ret;
30163 __ret = (poly16x4_t)(__p0);
30164 return __ret;
30165 }
30166 #endif
30167
30168 #ifdef __LITTLE_ENDIAN__
30169 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
30170 poly16x4_t __ret;
30171 __ret = (poly16x4_t)(__p0);
30172 return __ret;
30173 }
30174 #else
30175 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
30176 poly16x4_t __ret;
30177 __ret = (poly16x4_t)(__p0);
30178 return __ret;
30179 }
30180 #endif
30181
30182 #ifdef __LITTLE_ENDIAN__
30183 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
30184 poly16x4_t __ret;
30185 __ret = (poly16x4_t)(__p0);
30186 return __ret;
30187 }
30188 #else
30189 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
30190 poly16x4_t __ret;
30191 __ret = (poly16x4_t)(__p0);
30192 return __ret;
30193 }
30194 #endif
30195
30196 #ifdef __LITTLE_ENDIAN__
30197 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
30198 poly16x4_t __ret;
30199 __ret = (poly16x4_t)(__p0);
30200 return __ret;
30201 }
30202 #else
30203 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
30204 poly16x4_t __ret;
30205 __ret = (poly16x4_t)(__p0);
30206 return __ret;
30207 }
30208 #endif
30209
30210 #ifdef __LITTLE_ENDIAN__
30211 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
30212 poly16x4_t __ret;
30213 __ret = (poly16x4_t)(__p0);
30214 return __ret;
30215 }
30216 #else
30217 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
30218 poly16x4_t __ret;
30219 __ret = (poly16x4_t)(__p0);
30220 return __ret;
30221 }
30222 #endif
30223
30224 #ifdef __LITTLE_ENDIAN__
30225 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
30226 poly16x4_t __ret;
30227 __ret = (poly16x4_t)(__p0);
30228 return __ret;
30229 }
30230 #else
30231 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
30232 poly16x4_t __ret;
30233 __ret = (poly16x4_t)(__p0);
30234 return __ret;
30235 }
30236 #endif
30237
30238 #ifdef __LITTLE_ENDIAN__
30239 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
30240 poly8x16_t __ret;
30241 __ret = (poly8x16_t)(__p0);
30242 return __ret;
30243 }
30244 #else
30245 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
30246 poly8x16_t __ret;
30247 __ret = (poly8x16_t)(__p0);
30248 return __ret;
30249 }
30250 #endif
30251
30252 #ifdef __LITTLE_ENDIAN__
30253 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
30254 poly8x16_t __ret;
30255 __ret = (poly8x16_t)(__p0);
30256 return __ret;
30257 }
30258 #else
30259 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
30260 poly8x16_t __ret;
30261 __ret = (poly8x16_t)(__p0);
30262 return __ret;
30263 }
30264 #endif
30265
30266 #ifdef __LITTLE_ENDIAN__
30267 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
30268 poly8x16_t __ret;
30269 __ret = (poly8x16_t)(__p0);
30270 return __ret;
30271 }
30272 #else
30273 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
30274 poly8x16_t __ret;
30275 __ret = (poly8x16_t)(__p0);
30276 return __ret;
30277 }
30278 #endif
30279
30280 #ifdef __LITTLE_ENDIAN__
30281 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
30282 poly8x16_t __ret;
30283 __ret = (poly8x16_t)(__p0);
30284 return __ret;
30285 }
30286 #else
30287 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
30288 poly8x16_t __ret;
30289 __ret = (poly8x16_t)(__p0);
30290 return __ret;
30291 }
30292 #endif
30293
30294 #ifdef __LITTLE_ENDIAN__
30295 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
30296 poly8x16_t __ret;
30297 __ret = (poly8x16_t)(__p0);
30298 return __ret;
30299 }
30300 #else
30301 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
30302 poly8x16_t __ret;
30303 __ret = (poly8x16_t)(__p0);
30304 return __ret;
30305 }
30306 #endif
30307
30308 #ifdef __LITTLE_ENDIAN__
30309 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
30310 poly8x16_t __ret;
30311 __ret = (poly8x16_t)(__p0);
30312 return __ret;
30313 }
30314 #else
30315 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
30316 poly8x16_t __ret;
30317 __ret = (poly8x16_t)(__p0);
30318 return __ret;
30319 }
30320 #endif
30321
30322 #ifdef __LITTLE_ENDIAN__
30323 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
30324 poly8x16_t __ret;
30325 __ret = (poly8x16_t)(__p0);
30326 return __ret;
30327 }
30328 #else
30329 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
30330 poly8x16_t __ret;
30331 __ret = (poly8x16_t)(__p0);
30332 return __ret;
30333 }
30334 #endif
30335
30336 #ifdef __LITTLE_ENDIAN__
30337 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
30338 poly8x16_t __ret;
30339 __ret = (poly8x16_t)(__p0);
30340 return __ret;
30341 }
30342 #else
30343 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
30344 poly8x16_t __ret;
30345 __ret = (poly8x16_t)(__p0);
30346 return __ret;
30347 }
30348 #endif
30349
30350 #ifdef __LITTLE_ENDIAN__
30351 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
30352 poly8x16_t __ret;
30353 __ret = (poly8x16_t)(__p0);
30354 return __ret;
30355 }
30356 #else
30357 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
30358 poly8x16_t __ret;
30359 __ret = (poly8x16_t)(__p0);
30360 return __ret;
30361 }
30362 #endif
30363
30364 #ifdef __LITTLE_ENDIAN__
30365 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
30366 poly8x16_t __ret;
30367 __ret = (poly8x16_t)(__p0);
30368 return __ret;
30369 }
30370 #else
30371 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
30372 poly8x16_t __ret;
30373 __ret = (poly8x16_t)(__p0);
30374 return __ret;
30375 }
30376 #endif
30377
30378 #ifdef __LITTLE_ENDIAN__
30379 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
30380 poly8x16_t __ret;
30381 __ret = (poly8x16_t)(__p0);
30382 return __ret;
30383 }
30384 #else
30385 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
30386 poly8x16_t __ret;
30387 __ret = (poly8x16_t)(__p0);
30388 return __ret;
30389 }
30390 #endif
30391
30392 #ifdef __LITTLE_ENDIAN__
30393 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
30394 poly16x8_t __ret;
30395 __ret = (poly16x8_t)(__p0);
30396 return __ret;
30397 }
30398 #else
30399 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
30400 poly16x8_t __ret;
30401 __ret = (poly16x8_t)(__p0);
30402 return __ret;
30403 }
30404 #endif
30405
30406 #ifdef __LITTLE_ENDIAN__
30407 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
30408 poly16x8_t __ret;
30409 __ret = (poly16x8_t)(__p0);
30410 return __ret;
30411 }
30412 #else
30413 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
30414 poly16x8_t __ret;
30415 __ret = (poly16x8_t)(__p0);
30416 return __ret;
30417 }
30418 #endif
30419
30420 #ifdef __LITTLE_ENDIAN__
30421 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
30422 poly16x8_t __ret;
30423 __ret = (poly16x8_t)(__p0);
30424 return __ret;
30425 }
30426 #else
30427 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
30428 poly16x8_t __ret;
30429 __ret = (poly16x8_t)(__p0);
30430 return __ret;
30431 }
30432 #endif
30433
30434 #ifdef __LITTLE_ENDIAN__
30435 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
30436 poly16x8_t __ret;
30437 __ret = (poly16x8_t)(__p0);
30438 return __ret;
30439 }
30440 #else
30441 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
30442 poly16x8_t __ret;
30443 __ret = (poly16x8_t)(__p0);
30444 return __ret;
30445 }
30446 #endif
30447
30448 #ifdef __LITTLE_ENDIAN__
30449 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
30450 poly16x8_t __ret;
30451 __ret = (poly16x8_t)(__p0);
30452 return __ret;
30453 }
30454 #else
30455 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
30456 poly16x8_t __ret;
30457 __ret = (poly16x8_t)(__p0);
30458 return __ret;
30459 }
30460 #endif
30461
30462 #ifdef __LITTLE_ENDIAN__
30463 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
30464 poly16x8_t __ret;
30465 __ret = (poly16x8_t)(__p0);
30466 return __ret;
30467 }
30468 #else
30469 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
30470 poly16x8_t __ret;
30471 __ret = (poly16x8_t)(__p0);
30472 return __ret;
30473 }
30474 #endif
30475
30476 #ifdef __LITTLE_ENDIAN__
30477 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
30478 poly16x8_t __ret;
30479 __ret = (poly16x8_t)(__p0);
30480 return __ret;
30481 }
30482 #else
30483 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
30484 poly16x8_t __ret;
30485 __ret = (poly16x8_t)(__p0);
30486 return __ret;
30487 }
30488 #endif
30489
30490 #ifdef __LITTLE_ENDIAN__
30491 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
30492 poly16x8_t __ret;
30493 __ret = (poly16x8_t)(__p0);
30494 return __ret;
30495 }
30496 #else
30497 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
30498 poly16x8_t __ret;
30499 __ret = (poly16x8_t)(__p0);
30500 return __ret;
30501 }
30502 #endif
30503
30504 #ifdef __LITTLE_ENDIAN__
30505 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
30506 poly16x8_t __ret;
30507 __ret = (poly16x8_t)(__p0);
30508 return __ret;
30509 }
30510 #else
30511 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
30512 poly16x8_t __ret;
30513 __ret = (poly16x8_t)(__p0);
30514 return __ret;
30515 }
30516 #endif
30517
30518 #ifdef __LITTLE_ENDIAN__
30519 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
30520 poly16x8_t __ret;
30521 __ret = (poly16x8_t)(__p0);
30522 return __ret;
30523 }
30524 #else
30525 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
30526 poly16x8_t __ret;
30527 __ret = (poly16x8_t)(__p0);
30528 return __ret;
30529 }
30530 #endif
30531
30532 #ifdef __LITTLE_ENDIAN__
30533 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
30534 poly16x8_t __ret;
30535 __ret = (poly16x8_t)(__p0);
30536 return __ret;
30537 }
30538 #else
30539 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
30540 poly16x8_t __ret;
30541 __ret = (poly16x8_t)(__p0);
30542 return __ret;
30543 }
30544 #endif
30545
30546 #ifdef __LITTLE_ENDIAN__
30547 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
30548 uint8x16_t __ret;
30549 __ret = (uint8x16_t)(__p0);
30550 return __ret;
30551 }
30552 #else
30553 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
30554 uint8x16_t __ret;
30555 __ret = (uint8x16_t)(__p0);
30556 return __ret;
30557 }
30558 #endif
30559
30560 #ifdef __LITTLE_ENDIAN__
30561 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
30562 uint8x16_t __ret;
30563 __ret = (uint8x16_t)(__p0);
30564 return __ret;
30565 }
30566 #else
30567 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
30568 uint8x16_t __ret;
30569 __ret = (uint8x16_t)(__p0);
30570 return __ret;
30571 }
30572 #endif
30573
30574 #ifdef __LITTLE_ENDIAN__
30575 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
30576 uint8x16_t __ret;
30577 __ret = (uint8x16_t)(__p0);
30578 return __ret;
30579 }
30580 #else
30581 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
30582 uint8x16_t __ret;
30583 __ret = (uint8x16_t)(__p0);
30584 return __ret;
30585 }
30586 #endif
30587
30588 #ifdef __LITTLE_ENDIAN__
30589 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
30590 uint8x16_t __ret;
30591 __ret = (uint8x16_t)(__p0);
30592 return __ret;
30593 }
30594 #else
30595 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
30596 uint8x16_t __ret;
30597 __ret = (uint8x16_t)(__p0);
30598 return __ret;
30599 }
30600 #endif
30601
30602 #ifdef __LITTLE_ENDIAN__
30603 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
30604 uint8x16_t __ret;
30605 __ret = (uint8x16_t)(__p0);
30606 return __ret;
30607 }
30608 #else
30609 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
30610 uint8x16_t __ret;
30611 __ret = (uint8x16_t)(__p0);
30612 return __ret;
30613 }
30614 #endif
30615
30616 #ifdef __LITTLE_ENDIAN__
30617 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
30618 uint8x16_t __ret;
30619 __ret = (uint8x16_t)(__p0);
30620 return __ret;
30621 }
30622 #else
30623 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
30624 uint8x16_t __ret;
30625 __ret = (uint8x16_t)(__p0);
30626 return __ret;
30627 }
30628 #endif
30629
30630 #ifdef __LITTLE_ENDIAN__
30631 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
30632 uint8x16_t __ret;
30633 __ret = (uint8x16_t)(__p0);
30634 return __ret;
30635 }
30636 #else
30637 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
30638 uint8x16_t __ret;
30639 __ret = (uint8x16_t)(__p0);
30640 return __ret;
30641 }
30642 #endif
30643
30644 #ifdef __LITTLE_ENDIAN__
30645 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
30646 uint8x16_t __ret;
30647 __ret = (uint8x16_t)(__p0);
30648 return __ret;
30649 }
30650 #else
30651 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
30652 uint8x16_t __ret;
30653 __ret = (uint8x16_t)(__p0);
30654 return __ret;
30655 }
30656 #endif
30657
30658 #ifdef __LITTLE_ENDIAN__
30659 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
30660 uint8x16_t __ret;
30661 __ret = (uint8x16_t)(__p0);
30662 return __ret;
30663 }
30664 #else
30665 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
30666 uint8x16_t __ret;
30667 __ret = (uint8x16_t)(__p0);
30668 return __ret;
30669 }
30670 #endif
30671
30672 #ifdef __LITTLE_ENDIAN__
30673 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
30674 uint8x16_t __ret;
30675 __ret = (uint8x16_t)(__p0);
30676 return __ret;
30677 }
30678 #else
30679 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
30680 uint8x16_t __ret;
30681 __ret = (uint8x16_t)(__p0);
30682 return __ret;
30683 }
30684 #endif
30685
30686 #ifdef __LITTLE_ENDIAN__
30687 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
30688 uint8x16_t __ret;
30689 __ret = (uint8x16_t)(__p0);
30690 return __ret;
30691 }
30692 #else
30693 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
30694 uint8x16_t __ret;
30695 __ret = (uint8x16_t)(__p0);
30696 return __ret;
30697 }
30698 #endif
30699
30700 #ifdef __LITTLE_ENDIAN__
30701 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
30702 uint32x4_t __ret;
30703 __ret = (uint32x4_t)(__p0);
30704 return __ret;
30705 }
30706 #else
30707 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
30708 uint32x4_t __ret;
30709 __ret = (uint32x4_t)(__p0);
30710 return __ret;
30711 }
30712 #endif
30713
30714 #ifdef __LITTLE_ENDIAN__
30715 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
30716 uint32x4_t __ret;
30717 __ret = (uint32x4_t)(__p0);
30718 return __ret;
30719 }
30720 #else
30721 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
30722 uint32x4_t __ret;
30723 __ret = (uint32x4_t)(__p0);
30724 return __ret;
30725 }
30726 #endif
30727
30728 #ifdef __LITTLE_ENDIAN__
30729 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
30730 uint32x4_t __ret;
30731 __ret = (uint32x4_t)(__p0);
30732 return __ret;
30733 }
30734 #else
30735 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
30736 uint32x4_t __ret;
30737 __ret = (uint32x4_t)(__p0);
30738 return __ret;
30739 }
30740 #endif
30741
30742 #ifdef __LITTLE_ENDIAN__
30743 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
30744 uint32x4_t __ret;
30745 __ret = (uint32x4_t)(__p0);
30746 return __ret;
30747 }
30748 #else
30749 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
30750 uint32x4_t __ret;
30751 __ret = (uint32x4_t)(__p0);
30752 return __ret;
30753 }
30754 #endif
30755
30756 #ifdef __LITTLE_ENDIAN__
30757 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
30758 uint32x4_t __ret;
30759 __ret = (uint32x4_t)(__p0);
30760 return __ret;
30761 }
30762 #else
30763 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
30764 uint32x4_t __ret;
30765 __ret = (uint32x4_t)(__p0);
30766 return __ret;
30767 }
30768 #endif
30769
30770 #ifdef __LITTLE_ENDIAN__
30771 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
30772 uint32x4_t __ret;
30773 __ret = (uint32x4_t)(__p0);
30774 return __ret;
30775 }
30776 #else
30777 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
30778 uint32x4_t __ret;
30779 __ret = (uint32x4_t)(__p0);
30780 return __ret;
30781 }
30782 #endif
30783
30784 #ifdef __LITTLE_ENDIAN__
30785 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
30786 uint32x4_t __ret;
30787 __ret = (uint32x4_t)(__p0);
30788 return __ret;
30789 }
30790 #else
30791 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
30792 uint32x4_t __ret;
30793 __ret = (uint32x4_t)(__p0);
30794 return __ret;
30795 }
30796 #endif
30797
30798 #ifdef __LITTLE_ENDIAN__
30799 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
30800 uint32x4_t __ret;
30801 __ret = (uint32x4_t)(__p0);
30802 return __ret;
30803 }
30804 #else
30805 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
30806 uint32x4_t __ret;
30807 __ret = (uint32x4_t)(__p0);
30808 return __ret;
30809 }
30810 #endif
30811
30812 #ifdef __LITTLE_ENDIAN__
30813 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
30814 uint32x4_t __ret;
30815 __ret = (uint32x4_t)(__p0);
30816 return __ret;
30817 }
30818 #else
30819 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
30820 uint32x4_t __ret;
30821 __ret = (uint32x4_t)(__p0);
30822 return __ret;
30823 }
30824 #endif
30825
30826 #ifdef __LITTLE_ENDIAN__
30827 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
30828 uint32x4_t __ret;
30829 __ret = (uint32x4_t)(__p0);
30830 return __ret;
30831 }
30832 #else
30833 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
30834 uint32x4_t __ret;
30835 __ret = (uint32x4_t)(__p0);
30836 return __ret;
30837 }
30838 #endif
30839
30840 #ifdef __LITTLE_ENDIAN__
30841 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
30842 uint32x4_t __ret;
30843 __ret = (uint32x4_t)(__p0);
30844 return __ret;
30845 }
30846 #else
30847 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
30848 uint32x4_t __ret;
30849 __ret = (uint32x4_t)(__p0);
30850 return __ret;
30851 }
30852 #endif
30853
30854 #ifdef __LITTLE_ENDIAN__
30855 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
30856 uint64x2_t __ret;
30857 __ret = (uint64x2_t)(__p0);
30858 return __ret;
30859 }
30860 #else
30861 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
30862 uint64x2_t __ret;
30863 __ret = (uint64x2_t)(__p0);
30864 return __ret;
30865 }
30866 #endif
30867
30868 #ifdef __LITTLE_ENDIAN__
30869 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
30870 uint64x2_t __ret;
30871 __ret = (uint64x2_t)(__p0);
30872 return __ret;
30873 }
30874 #else
30875 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
30876 uint64x2_t __ret;
30877 __ret = (uint64x2_t)(__p0);
30878 return __ret;
30879 }
30880 #endif
30881
30882 #ifdef __LITTLE_ENDIAN__
30883 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
30884 uint64x2_t __ret;
30885 __ret = (uint64x2_t)(__p0);
30886 return __ret;
30887 }
30888 #else
30889 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
30890 uint64x2_t __ret;
30891 __ret = (uint64x2_t)(__p0);
30892 return __ret;
30893 }
30894 #endif
30895
30896 #ifdef __LITTLE_ENDIAN__
30897 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
30898 uint64x2_t __ret;
30899 __ret = (uint64x2_t)(__p0);
30900 return __ret;
30901 }
30902 #else
30903 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
30904 uint64x2_t __ret;
30905 __ret = (uint64x2_t)(__p0);
30906 return __ret;
30907 }
30908 #endif
30909
30910 #ifdef __LITTLE_ENDIAN__
30911 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
30912 uint64x2_t __ret;
30913 __ret = (uint64x2_t)(__p0);
30914 return __ret;
30915 }
30916 #else
30917 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
30918 uint64x2_t __ret;
30919 __ret = (uint64x2_t)(__p0);
30920 return __ret;
30921 }
30922 #endif
30923
30924 #ifdef __LITTLE_ENDIAN__
30925 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
30926 uint64x2_t __ret;
30927 __ret = (uint64x2_t)(__p0);
30928 return __ret;
30929 }
30930 #else
30931 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
30932 uint64x2_t __ret;
30933 __ret = (uint64x2_t)(__p0);
30934 return __ret;
30935 }
30936 #endif
30937
30938 #ifdef __LITTLE_ENDIAN__
30939 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
30940 uint64x2_t __ret;
30941 __ret = (uint64x2_t)(__p0);
30942 return __ret;
30943 }
30944 #else
30945 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
30946 uint64x2_t __ret;
30947 __ret = (uint64x2_t)(__p0);
30948 return __ret;
30949 }
30950 #endif
30951
30952 #ifdef __LITTLE_ENDIAN__
30953 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
30954 uint64x2_t __ret;
30955 __ret = (uint64x2_t)(__p0);
30956 return __ret;
30957 }
30958 #else
30959 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
30960 uint64x2_t __ret;
30961 __ret = (uint64x2_t)(__p0);
30962 return __ret;
30963 }
30964 #endif
30965
30966 #ifdef __LITTLE_ENDIAN__
30967 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
30968 uint64x2_t __ret;
30969 __ret = (uint64x2_t)(__p0);
30970 return __ret;
30971 }
30972 #else
30973 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
30974 uint64x2_t __ret;
30975 __ret = (uint64x2_t)(__p0);
30976 return __ret;
30977 }
30978 #endif
30979
30980 #ifdef __LITTLE_ENDIAN__
30981 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
30982 uint64x2_t __ret;
30983 __ret = (uint64x2_t)(__p0);
30984 return __ret;
30985 }
30986 #else
30987 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
30988 uint64x2_t __ret;
30989 __ret = (uint64x2_t)(__p0);
30990 return __ret;
30991 }
30992 #endif
30993
30994 #ifdef __LITTLE_ENDIAN__
30995 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
30996 uint64x2_t __ret;
30997 __ret = (uint64x2_t)(__p0);
30998 return __ret;
30999 }
31000 #else
31001 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
31002 uint64x2_t __ret;
31003 __ret = (uint64x2_t)(__p0);
31004 return __ret;
31005 }
31006 #endif
31007
31008 #ifdef __LITTLE_ENDIAN__
31009 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
31010 uint16x8_t __ret;
31011 __ret = (uint16x8_t)(__p0);
31012 return __ret;
31013 }
31014 #else
31015 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
31016 uint16x8_t __ret;
31017 __ret = (uint16x8_t)(__p0);
31018 return __ret;
31019 }
31020 #endif
31021
31022 #ifdef __LITTLE_ENDIAN__
31023 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
31024 uint16x8_t __ret;
31025 __ret = (uint16x8_t)(__p0);
31026 return __ret;
31027 }
31028 #else
31029 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
31030 uint16x8_t __ret;
31031 __ret = (uint16x8_t)(__p0);
31032 return __ret;
31033 }
31034 #endif
31035
31036 #ifdef __LITTLE_ENDIAN__
31037 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
31038 uint16x8_t __ret;
31039 __ret = (uint16x8_t)(__p0);
31040 return __ret;
31041 }
31042 #else
31043 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
31044 uint16x8_t __ret;
31045 __ret = (uint16x8_t)(__p0);
31046 return __ret;
31047 }
31048 #endif
31049
31050 #ifdef __LITTLE_ENDIAN__
31051 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
31052 uint16x8_t __ret;
31053 __ret = (uint16x8_t)(__p0);
31054 return __ret;
31055 }
31056 #else
31057 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
31058 uint16x8_t __ret;
31059 __ret = (uint16x8_t)(__p0);
31060 return __ret;
31061 }
31062 #endif
31063
31064 #ifdef __LITTLE_ENDIAN__
31065 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
31066 uint16x8_t __ret;
31067 __ret = (uint16x8_t)(__p0);
31068 return __ret;
31069 }
31070 #else
31071 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
31072 uint16x8_t __ret;
31073 __ret = (uint16x8_t)(__p0);
31074 return __ret;
31075 }
31076 #endif
31077
31078 #ifdef __LITTLE_ENDIAN__
31079 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
31080 uint16x8_t __ret;
31081 __ret = (uint16x8_t)(__p0);
31082 return __ret;
31083 }
31084 #else
31085 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
31086 uint16x8_t __ret;
31087 __ret = (uint16x8_t)(__p0);
31088 return __ret;
31089 }
31090 #endif
31091
31092 #ifdef __LITTLE_ENDIAN__
31093 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
31094 uint16x8_t __ret;
31095 __ret = (uint16x8_t)(__p0);
31096 return __ret;
31097 }
31098 #else
31099 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
31100 uint16x8_t __ret;
31101 __ret = (uint16x8_t)(__p0);
31102 return __ret;
31103 }
31104 #endif
31105
31106 #ifdef __LITTLE_ENDIAN__
31107 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
31108 uint16x8_t __ret;
31109 __ret = (uint16x8_t)(__p0);
31110 return __ret;
31111 }
31112 #else
31113 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
31114 uint16x8_t __ret;
31115 __ret = (uint16x8_t)(__p0);
31116 return __ret;
31117 }
31118 #endif
31119
31120 #ifdef __LITTLE_ENDIAN__
31121 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
31122 uint16x8_t __ret;
31123 __ret = (uint16x8_t)(__p0);
31124 return __ret;
31125 }
31126 #else
31127 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
31128 uint16x8_t __ret;
31129 __ret = (uint16x8_t)(__p0);
31130 return __ret;
31131 }
31132 #endif
31133
31134 #ifdef __LITTLE_ENDIAN__
31135 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
31136 uint16x8_t __ret;
31137 __ret = (uint16x8_t)(__p0);
31138 return __ret;
31139 }
31140 #else
31141 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
31142 uint16x8_t __ret;
31143 __ret = (uint16x8_t)(__p0);
31144 return __ret;
31145 }
31146 #endif
31147
31148 #ifdef __LITTLE_ENDIAN__
31149 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
31150 uint16x8_t __ret;
31151 __ret = (uint16x8_t)(__p0);
31152 return __ret;
31153 }
31154 #else
31155 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
31156 uint16x8_t __ret;
31157 __ret = (uint16x8_t)(__p0);
31158 return __ret;
31159 }
31160 #endif
31161
31162 #ifdef __LITTLE_ENDIAN__
31163 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
31164 int8x16_t __ret;
31165 __ret = (int8x16_t)(__p0);
31166 return __ret;
31167 }
31168 #else
31169 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
31170 int8x16_t __ret;
31171 __ret = (int8x16_t)(__p0);
31172 return __ret;
31173 }
31174 #endif
31175
31176 #ifdef __LITTLE_ENDIAN__
31177 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
31178 int8x16_t __ret;
31179 __ret = (int8x16_t)(__p0);
31180 return __ret;
31181 }
31182 #else
31183 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
31184 int8x16_t __ret;
31185 __ret = (int8x16_t)(__p0);
31186 return __ret;
31187 }
31188 #endif
31189
31190 #ifdef __LITTLE_ENDIAN__
31191 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
31192 int8x16_t __ret;
31193 __ret = (int8x16_t)(__p0);
31194 return __ret;
31195 }
31196 #else
31197 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
31198 int8x16_t __ret;
31199 __ret = (int8x16_t)(__p0);
31200 return __ret;
31201 }
31202 #endif
31203
31204 #ifdef __LITTLE_ENDIAN__
31205 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
31206 int8x16_t __ret;
31207 __ret = (int8x16_t)(__p0);
31208 return __ret;
31209 }
31210 #else
31211 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
31212 int8x16_t __ret;
31213 __ret = (int8x16_t)(__p0);
31214 return __ret;
31215 }
31216 #endif
31217
31218 #ifdef __LITTLE_ENDIAN__
31219 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
31220 int8x16_t __ret;
31221 __ret = (int8x16_t)(__p0);
31222 return __ret;
31223 }
31224 #else
31225 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
31226 int8x16_t __ret;
31227 __ret = (int8x16_t)(__p0);
31228 return __ret;
31229 }
31230 #endif
31231
31232 #ifdef __LITTLE_ENDIAN__
31233 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
31234 int8x16_t __ret;
31235 __ret = (int8x16_t)(__p0);
31236 return __ret;
31237 }
31238 #else
31239 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
31240 int8x16_t __ret;
31241 __ret = (int8x16_t)(__p0);
31242 return __ret;
31243 }
31244 #endif
31245
31246 #ifdef __LITTLE_ENDIAN__
31247 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
31248 int8x16_t __ret;
31249 __ret = (int8x16_t)(__p0);
31250 return __ret;
31251 }
31252 #else
31253 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
31254 int8x16_t __ret;
31255 __ret = (int8x16_t)(__p0);
31256 return __ret;
31257 }
31258 #endif
31259
31260 #ifdef __LITTLE_ENDIAN__
31261 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
31262 int8x16_t __ret;
31263 __ret = (int8x16_t)(__p0);
31264 return __ret;
31265 }
31266 #else
31267 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
31268 int8x16_t __ret;
31269 __ret = (int8x16_t)(__p0);
31270 return __ret;
31271 }
31272 #endif
31273
31274 #ifdef __LITTLE_ENDIAN__
31275 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
31276 int8x16_t __ret;
31277 __ret = (int8x16_t)(__p0);
31278 return __ret;
31279 }
31280 #else
31281 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
31282 int8x16_t __ret;
31283 __ret = (int8x16_t)(__p0);
31284 return __ret;
31285 }
31286 #endif
31287
31288 #ifdef __LITTLE_ENDIAN__
31289 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
31290 int8x16_t __ret;
31291 __ret = (int8x16_t)(__p0);
31292 return __ret;
31293 }
31294 #else
31295 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
31296 int8x16_t __ret;
31297 __ret = (int8x16_t)(__p0);
31298 return __ret;
31299 }
31300 #endif
31301
31302 #ifdef __LITTLE_ENDIAN__
31303 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
31304 int8x16_t __ret;
31305 __ret = (int8x16_t)(__p0);
31306 return __ret;
31307 }
31308 #else
31309 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
31310 int8x16_t __ret;
31311 __ret = (int8x16_t)(__p0);
31312 return __ret;
31313 }
31314 #endif
31315
31316 #ifdef __LITTLE_ENDIAN__
31317 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
31318 float32x4_t __ret;
31319 __ret = (float32x4_t)(__p0);
31320 return __ret;
31321 }
31322 #else
31323 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
31324 float32x4_t __ret;
31325 __ret = (float32x4_t)(__p0);
31326 return __ret;
31327 }
31328 #endif
31329
31330 #ifdef __LITTLE_ENDIAN__
31331 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
31332 float32x4_t __ret;
31333 __ret = (float32x4_t)(__p0);
31334 return __ret;
31335 }
31336 #else
31337 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
31338 float32x4_t __ret;
31339 __ret = (float32x4_t)(__p0);
31340 return __ret;
31341 }
31342 #endif
31343
31344 #ifdef __LITTLE_ENDIAN__
31345 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
31346 float32x4_t __ret;
31347 __ret = (float32x4_t)(__p0);
31348 return __ret;
31349 }
31350 #else
31351 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
31352 float32x4_t __ret;
31353 __ret = (float32x4_t)(__p0);
31354 return __ret;
31355 }
31356 #endif
31357
31358 #ifdef __LITTLE_ENDIAN__
31359 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
31360 float32x4_t __ret;
31361 __ret = (float32x4_t)(__p0);
31362 return __ret;
31363 }
31364 #else
31365 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
31366 float32x4_t __ret;
31367 __ret = (float32x4_t)(__p0);
31368 return __ret;
31369 }
31370 #endif
31371
31372 #ifdef __LITTLE_ENDIAN__
31373 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
31374 float32x4_t __ret;
31375 __ret = (float32x4_t)(__p0);
31376 return __ret;
31377 }
31378 #else
31379 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
31380 float32x4_t __ret;
31381 __ret = (float32x4_t)(__p0);
31382 return __ret;
31383 }
31384 #endif
31385
31386 #ifdef __LITTLE_ENDIAN__
31387 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
31388 float32x4_t __ret;
31389 __ret = (float32x4_t)(__p0);
31390 return __ret;
31391 }
31392 #else
31393 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
31394 float32x4_t __ret;
31395 __ret = (float32x4_t)(__p0);
31396 return __ret;
31397 }
31398 #endif
31399
31400 #ifdef __LITTLE_ENDIAN__
31401 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
31402 float32x4_t __ret;
31403 __ret = (float32x4_t)(__p0);
31404 return __ret;
31405 }
31406 #else
31407 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
31408 float32x4_t __ret;
31409 __ret = (float32x4_t)(__p0);
31410 return __ret;
31411 }
31412 #endif
31413
31414 #ifdef __LITTLE_ENDIAN__
31415 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
31416 float32x4_t __ret;
31417 __ret = (float32x4_t)(__p0);
31418 return __ret;
31419 }
31420 #else
31421 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
31422 float32x4_t __ret;
31423 __ret = (float32x4_t)(__p0);
31424 return __ret;
31425 }
31426 #endif
31427
31428 #ifdef __LITTLE_ENDIAN__
31429 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
31430 float32x4_t __ret;
31431 __ret = (float32x4_t)(__p0);
31432 return __ret;
31433 }
31434 #else
31435 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
31436 float32x4_t __ret;
31437 __ret = (float32x4_t)(__p0);
31438 return __ret;
31439 }
31440 #endif
31441
31442 #ifdef __LITTLE_ENDIAN__
31443 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
31444 float32x4_t __ret;
31445 __ret = (float32x4_t)(__p0);
31446 return __ret;
31447 }
31448 #else
31449 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
31450 float32x4_t __ret;
31451 __ret = (float32x4_t)(__p0);
31452 return __ret;
31453 }
31454 #endif
31455
31456 #ifdef __LITTLE_ENDIAN__
31457 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
31458 float32x4_t __ret;
31459 __ret = (float32x4_t)(__p0);
31460 return __ret;
31461 }
31462 #else
31463 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
31464 float32x4_t __ret;
31465 __ret = (float32x4_t)(__p0);
31466 return __ret;
31467 }
31468 #endif
31469
31470 #ifdef __LITTLE_ENDIAN__
31471 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
31472 float16x8_t __ret;
31473 __ret = (float16x8_t)(__p0);
31474 return __ret;
31475 }
31476 #else
31477 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
31478 float16x8_t __ret;
31479 __ret = (float16x8_t)(__p0);
31480 return __ret;
31481 }
31482 #endif
31483
31484 #ifdef __LITTLE_ENDIAN__
31485 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
31486 float16x8_t __ret;
31487 __ret = (float16x8_t)(__p0);
31488 return __ret;
31489 }
31490 #else
31491 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
31492 float16x8_t __ret;
31493 __ret = (float16x8_t)(__p0);
31494 return __ret;
31495 }
31496 #endif
31497
31498 #ifdef __LITTLE_ENDIAN__
31499 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
31500 float16x8_t __ret;
31501 __ret = (float16x8_t)(__p0);
31502 return __ret;
31503 }
31504 #else
31505 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
31506 float16x8_t __ret;
31507 __ret = (float16x8_t)(__p0);
31508 return __ret;
31509 }
31510 #endif
31511
31512 #ifdef __LITTLE_ENDIAN__
31513 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
31514 float16x8_t __ret;
31515 __ret = (float16x8_t)(__p0);
31516 return __ret;
31517 }
31518 #else
31519 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
31520 float16x8_t __ret;
31521 __ret = (float16x8_t)(__p0);
31522 return __ret;
31523 }
31524 #endif
31525
31526 #ifdef __LITTLE_ENDIAN__
31527 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
31528 float16x8_t __ret;
31529 __ret = (float16x8_t)(__p0);
31530 return __ret;
31531 }
31532 #else
31533 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
31534 float16x8_t __ret;
31535 __ret = (float16x8_t)(__p0);
31536 return __ret;
31537 }
31538 #endif
31539
31540 #ifdef __LITTLE_ENDIAN__
31541 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
31542 float16x8_t __ret;
31543 __ret = (float16x8_t)(__p0);
31544 return __ret;
31545 }
31546 #else
31547 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
31548 float16x8_t __ret;
31549 __ret = (float16x8_t)(__p0);
31550 return __ret;
31551 }
31552 #endif
31553
31554 #ifdef __LITTLE_ENDIAN__
31555 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
31556 float16x8_t __ret;
31557 __ret = (float16x8_t)(__p0);
31558 return __ret;
31559 }
31560 #else
31561 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
31562 float16x8_t __ret;
31563 __ret = (float16x8_t)(__p0);
31564 return __ret;
31565 }
31566 #endif
31567
31568 #ifdef __LITTLE_ENDIAN__
31569 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
31570 float16x8_t __ret;
31571 __ret = (float16x8_t)(__p0);
31572 return __ret;
31573 }
31574 #else
31575 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
31576 float16x8_t __ret;
31577 __ret = (float16x8_t)(__p0);
31578 return __ret;
31579 }
31580 #endif
31581
31582 #ifdef __LITTLE_ENDIAN__
31583 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
31584 float16x8_t __ret;
31585 __ret = (float16x8_t)(__p0);
31586 return __ret;
31587 }
31588 #else
31589 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
31590 float16x8_t __ret;
31591 __ret = (float16x8_t)(__p0);
31592 return __ret;
31593 }
31594 #endif
31595
31596 #ifdef __LITTLE_ENDIAN__
31597 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
31598 float16x8_t __ret;
31599 __ret = (float16x8_t)(__p0);
31600 return __ret;
31601 }
31602 #else
31603 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
31604 float16x8_t __ret;
31605 __ret = (float16x8_t)(__p0);
31606 return __ret;
31607 }
31608 #endif
31609
31610 #ifdef __LITTLE_ENDIAN__
31611 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
31612 float16x8_t __ret;
31613 __ret = (float16x8_t)(__p0);
31614 return __ret;
31615 }
31616 #else
31617 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
31618 float16x8_t __ret;
31619 __ret = (float16x8_t)(__p0);
31620 return __ret;
31621 }
31622 #endif
31623
31624 #ifdef __LITTLE_ENDIAN__
31625 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
31626 int32x4_t __ret;
31627 __ret = (int32x4_t)(__p0);
31628 return __ret;
31629 }
31630 #else
31631 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
31632 int32x4_t __ret;
31633 __ret = (int32x4_t)(__p0);
31634 return __ret;
31635 }
31636 #endif
31637
31638 #ifdef __LITTLE_ENDIAN__
31639 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
31640 int32x4_t __ret;
31641 __ret = (int32x4_t)(__p0);
31642 return __ret;
31643 }
31644 #else
31645 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
31646 int32x4_t __ret;
31647 __ret = (int32x4_t)(__p0);
31648 return __ret;
31649 }
31650 #endif
31651
31652 #ifdef __LITTLE_ENDIAN__
31653 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
31654 int32x4_t __ret;
31655 __ret = (int32x4_t)(__p0);
31656 return __ret;
31657 }
31658 #else
31659 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
31660 int32x4_t __ret;
31661 __ret = (int32x4_t)(__p0);
31662 return __ret;
31663 }
31664 #endif
31665
31666 #ifdef __LITTLE_ENDIAN__
31667 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
31668 int32x4_t __ret;
31669 __ret = (int32x4_t)(__p0);
31670 return __ret;
31671 }
31672 #else
31673 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
31674 int32x4_t __ret;
31675 __ret = (int32x4_t)(__p0);
31676 return __ret;
31677 }
31678 #endif
31679
31680 #ifdef __LITTLE_ENDIAN__
31681 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
31682 int32x4_t __ret;
31683 __ret = (int32x4_t)(__p0);
31684 return __ret;
31685 }
31686 #else
31687 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
31688 int32x4_t __ret;
31689 __ret = (int32x4_t)(__p0);
31690 return __ret;
31691 }
31692 #endif
31693
31694 #ifdef __LITTLE_ENDIAN__
31695 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
31696 int32x4_t __ret;
31697 __ret = (int32x4_t)(__p0);
31698 return __ret;
31699 }
31700 #else
31701 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
31702 int32x4_t __ret;
31703 __ret = (int32x4_t)(__p0);
31704 return __ret;
31705 }
31706 #endif
31707
31708 #ifdef __LITTLE_ENDIAN__
31709 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
31710 int32x4_t __ret;
31711 __ret = (int32x4_t)(__p0);
31712 return __ret;
31713 }
31714 #else
31715 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
31716 int32x4_t __ret;
31717 __ret = (int32x4_t)(__p0);
31718 return __ret;
31719 }
31720 #endif
31721
31722 #ifdef __LITTLE_ENDIAN__
31723 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
31724 int32x4_t __ret;
31725 __ret = (int32x4_t)(__p0);
31726 return __ret;
31727 }
31728 #else
31729 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
31730 int32x4_t __ret;
31731 __ret = (int32x4_t)(__p0);
31732 return __ret;
31733 }
31734 #endif
31735
31736 #ifdef __LITTLE_ENDIAN__
31737 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
31738 int32x4_t __ret;
31739 __ret = (int32x4_t)(__p0);
31740 return __ret;
31741 }
31742 #else
31743 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
31744 int32x4_t __ret;
31745 __ret = (int32x4_t)(__p0);
31746 return __ret;
31747 }
31748 #endif
31749
31750 #ifdef __LITTLE_ENDIAN__
31751 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
31752 int32x4_t __ret;
31753 __ret = (int32x4_t)(__p0);
31754 return __ret;
31755 }
31756 #else
31757 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
31758 int32x4_t __ret;
31759 __ret = (int32x4_t)(__p0);
31760 return __ret;
31761 }
31762 #endif
31763
31764 #ifdef __LITTLE_ENDIAN__
31765 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
31766 int32x4_t __ret;
31767 __ret = (int32x4_t)(__p0);
31768 return __ret;
31769 }
31770 #else
31771 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
31772 int32x4_t __ret;
31773 __ret = (int32x4_t)(__p0);
31774 return __ret;
31775 }
31776 #endif
31777
31778 #ifdef __LITTLE_ENDIAN__
31779 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
31780 int64x2_t __ret;
31781 __ret = (int64x2_t)(__p0);
31782 return __ret;
31783 }
31784 #else
31785 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
31786 int64x2_t __ret;
31787 __ret = (int64x2_t)(__p0);
31788 return __ret;
31789 }
31790 #endif
31791
31792 #ifdef __LITTLE_ENDIAN__
31793 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
31794 int64x2_t __ret;
31795 __ret = (int64x2_t)(__p0);
31796 return __ret;
31797 }
31798 #else
31799 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
31800 int64x2_t __ret;
31801 __ret = (int64x2_t)(__p0);
31802 return __ret;
31803 }
31804 #endif
31805
31806 #ifdef __LITTLE_ENDIAN__
31807 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
31808 int64x2_t __ret;
31809 __ret = (int64x2_t)(__p0);
31810 return __ret;
31811 }
31812 #else
31813 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
31814 int64x2_t __ret;
31815 __ret = (int64x2_t)(__p0);
31816 return __ret;
31817 }
31818 #endif
31819
31820 #ifdef __LITTLE_ENDIAN__
31821 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
31822 int64x2_t __ret;
31823 __ret = (int64x2_t)(__p0);
31824 return __ret;
31825 }
31826 #else
31827 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
31828 int64x2_t __ret;
31829 __ret = (int64x2_t)(__p0);
31830 return __ret;
31831 }
31832 #endif
31833
31834 #ifdef __LITTLE_ENDIAN__
31835 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
31836 int64x2_t __ret;
31837 __ret = (int64x2_t)(__p0);
31838 return __ret;
31839 }
31840 #else
31841 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
31842 int64x2_t __ret;
31843 __ret = (int64x2_t)(__p0);
31844 return __ret;
31845 }
31846 #endif
31847
31848 #ifdef __LITTLE_ENDIAN__
31849 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
31850 int64x2_t __ret;
31851 __ret = (int64x2_t)(__p0);
31852 return __ret;
31853 }
31854 #else
31855 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
31856 int64x2_t __ret;
31857 __ret = (int64x2_t)(__p0);
31858 return __ret;
31859 }
31860 #endif
31861
31862 #ifdef __LITTLE_ENDIAN__
31863 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
31864 int64x2_t __ret;
31865 __ret = (int64x2_t)(__p0);
31866 return __ret;
31867 }
31868 #else
31869 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
31870 int64x2_t __ret;
31871 __ret = (int64x2_t)(__p0);
31872 return __ret;
31873 }
31874 #endif
31875
31876 #ifdef __LITTLE_ENDIAN__
31877 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
31878 int64x2_t __ret;
31879 __ret = (int64x2_t)(__p0);
31880 return __ret;
31881 }
31882 #else
31883 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
31884 int64x2_t __ret;
31885 __ret = (int64x2_t)(__p0);
31886 return __ret;
31887 }
31888 #endif
31889
31890 #ifdef __LITTLE_ENDIAN__
31891 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
31892 int64x2_t __ret;
31893 __ret = (int64x2_t)(__p0);
31894 return __ret;
31895 }
31896 #else
31897 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
31898 int64x2_t __ret;
31899 __ret = (int64x2_t)(__p0);
31900 return __ret;
31901 }
31902 #endif
31903
31904 #ifdef __LITTLE_ENDIAN__
31905 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
31906 int64x2_t __ret;
31907 __ret = (int64x2_t)(__p0);
31908 return __ret;
31909 }
31910 #else
31911 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
31912 int64x2_t __ret;
31913 __ret = (int64x2_t)(__p0);
31914 return __ret;
31915 }
31916 #endif
31917
31918 #ifdef __LITTLE_ENDIAN__
31919 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
31920 int64x2_t __ret;
31921 __ret = (int64x2_t)(__p0);
31922 return __ret;
31923 }
31924 #else
31925 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
31926 int64x2_t __ret;
31927 __ret = (int64x2_t)(__p0);
31928 return __ret;
31929 }
31930 #endif
31931
31932 #ifdef __LITTLE_ENDIAN__
31933 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
31934 int16x8_t __ret;
31935 __ret = (int16x8_t)(__p0);
31936 return __ret;
31937 }
31938 #else
31939 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
31940 int16x8_t __ret;
31941 __ret = (int16x8_t)(__p0);
31942 return __ret;
31943 }
31944 #endif
31945
31946 #ifdef __LITTLE_ENDIAN__
31947 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
31948 int16x8_t __ret;
31949 __ret = (int16x8_t)(__p0);
31950 return __ret;
31951 }
31952 #else
31953 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
31954 int16x8_t __ret;
31955 __ret = (int16x8_t)(__p0);
31956 return __ret;
31957 }
31958 #endif
31959
31960 #ifdef __LITTLE_ENDIAN__
31961 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
31962 int16x8_t __ret;
31963 __ret = (int16x8_t)(__p0);
31964 return __ret;
31965 }
31966 #else
31967 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
31968 int16x8_t __ret;
31969 __ret = (int16x8_t)(__p0);
31970 return __ret;
31971 }
31972 #endif
31973
31974 #ifdef __LITTLE_ENDIAN__
31975 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
31976 int16x8_t __ret;
31977 __ret = (int16x8_t)(__p0);
31978 return __ret;
31979 }
31980 #else
31981 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
31982 int16x8_t __ret;
31983 __ret = (int16x8_t)(__p0);
31984 return __ret;
31985 }
31986 #endif
31987
31988 #ifdef __LITTLE_ENDIAN__
31989 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
31990 int16x8_t __ret;
31991 __ret = (int16x8_t)(__p0);
31992 return __ret;
31993 }
31994 #else
31995 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
31996 int16x8_t __ret;
31997 __ret = (int16x8_t)(__p0);
31998 return __ret;
31999 }
32000 #endif
32001
32002 #ifdef __LITTLE_ENDIAN__
32003 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
32004 int16x8_t __ret;
32005 __ret = (int16x8_t)(__p0);
32006 return __ret;
32007 }
32008 #else
32009 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
32010 int16x8_t __ret;
32011 __ret = (int16x8_t)(__p0);
32012 return __ret;
32013 }
32014 #endif
32015
32016 #ifdef __LITTLE_ENDIAN__
32017 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
32018 int16x8_t __ret;
32019 __ret = (int16x8_t)(__p0);
32020 return __ret;
32021 }
32022 #else
32023 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
32024 int16x8_t __ret;
32025 __ret = (int16x8_t)(__p0);
32026 return __ret;
32027 }
32028 #endif
32029
32030 #ifdef __LITTLE_ENDIAN__
32031 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
32032 int16x8_t __ret;
32033 __ret = (int16x8_t)(__p0);
32034 return __ret;
32035 }
32036 #else
32037 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
32038 int16x8_t __ret;
32039 __ret = (int16x8_t)(__p0);
32040 return __ret;
32041 }
32042 #endif
32043
32044 #ifdef __LITTLE_ENDIAN__
32045 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
32046 int16x8_t __ret;
32047 __ret = (int16x8_t)(__p0);
32048 return __ret;
32049 }
32050 #else
32051 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
32052 int16x8_t __ret;
32053 __ret = (int16x8_t)(__p0);
32054 return __ret;
32055 }
32056 #endif
32057
32058 #ifdef __LITTLE_ENDIAN__
32059 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
32060 int16x8_t __ret;
32061 __ret = (int16x8_t)(__p0);
32062 return __ret;
32063 }
32064 #else
32065 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
32066 int16x8_t __ret;
32067 __ret = (int16x8_t)(__p0);
32068 return __ret;
32069 }
32070 #endif
32071
32072 #ifdef __LITTLE_ENDIAN__
32073 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
32074 int16x8_t __ret;
32075 __ret = (int16x8_t)(__p0);
32076 return __ret;
32077 }
32078 #else
32079 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
32080 int16x8_t __ret;
32081 __ret = (int16x8_t)(__p0);
32082 return __ret;
32083 }
32084 #endif
32085
32086 #ifdef __LITTLE_ENDIAN__
32087 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
32088 uint8x8_t __ret;
32089 __ret = (uint8x8_t)(__p0);
32090 return __ret;
32091 }
32092 #else
32093 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
32094 uint8x8_t __ret;
32095 __ret = (uint8x8_t)(__p0);
32096 return __ret;
32097 }
32098 #endif
32099
32100 #ifdef __LITTLE_ENDIAN__
32101 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
32102 uint8x8_t __ret;
32103 __ret = (uint8x8_t)(__p0);
32104 return __ret;
32105 }
32106 #else
32107 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
32108 uint8x8_t __ret;
32109 __ret = (uint8x8_t)(__p0);
32110 return __ret;
32111 }
32112 #endif
32113
32114 #ifdef __LITTLE_ENDIAN__
32115 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
32116 uint8x8_t __ret;
32117 __ret = (uint8x8_t)(__p0);
32118 return __ret;
32119 }
32120 #else
32121 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
32122 uint8x8_t __ret;
32123 __ret = (uint8x8_t)(__p0);
32124 return __ret;
32125 }
32126 #endif
32127
32128 #ifdef __LITTLE_ENDIAN__
32129 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
32130 uint8x8_t __ret;
32131 __ret = (uint8x8_t)(__p0);
32132 return __ret;
32133 }
32134 #else
32135 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
32136 uint8x8_t __ret;
32137 __ret = (uint8x8_t)(__p0);
32138 return __ret;
32139 }
32140 #endif
32141
32142 #ifdef __LITTLE_ENDIAN__
32143 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
32144 uint8x8_t __ret;
32145 __ret = (uint8x8_t)(__p0);
32146 return __ret;
32147 }
32148 #else
32149 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
32150 uint8x8_t __ret;
32151 __ret = (uint8x8_t)(__p0);
32152 return __ret;
32153 }
32154 #endif
32155
32156 #ifdef __LITTLE_ENDIAN__
32157 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
32158 uint8x8_t __ret;
32159 __ret = (uint8x8_t)(__p0);
32160 return __ret;
32161 }
32162 #else
32163 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
32164 uint8x8_t __ret;
32165 __ret = (uint8x8_t)(__p0);
32166 return __ret;
32167 }
32168 #endif
32169
32170 #ifdef __LITTLE_ENDIAN__
32171 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
32172 uint8x8_t __ret;
32173 __ret = (uint8x8_t)(__p0);
32174 return __ret;
32175 }
32176 #else
32177 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
32178 uint8x8_t __ret;
32179 __ret = (uint8x8_t)(__p0);
32180 return __ret;
32181 }
32182 #endif
32183
32184 #ifdef __LITTLE_ENDIAN__
32185 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
32186 uint8x8_t __ret;
32187 __ret = (uint8x8_t)(__p0);
32188 return __ret;
32189 }
32190 #else
32191 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
32192 uint8x8_t __ret;
32193 __ret = (uint8x8_t)(__p0);
32194 return __ret;
32195 }
32196 #endif
32197
32198 #ifdef __LITTLE_ENDIAN__
32199 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
32200 uint8x8_t __ret;
32201 __ret = (uint8x8_t)(__p0);
32202 return __ret;
32203 }
32204 #else
32205 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
32206 uint8x8_t __ret;
32207 __ret = (uint8x8_t)(__p0);
32208 return __ret;
32209 }
32210 #endif
32211
32212 #ifdef __LITTLE_ENDIAN__
32213 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
32214 uint8x8_t __ret;
32215 __ret = (uint8x8_t)(__p0);
32216 return __ret;
32217 }
32218 #else
32219 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
32220 uint8x8_t __ret;
32221 __ret = (uint8x8_t)(__p0);
32222 return __ret;
32223 }
32224 #endif
32225
32226 #ifdef __LITTLE_ENDIAN__
32227 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
32228 uint8x8_t __ret;
32229 __ret = (uint8x8_t)(__p0);
32230 return __ret;
32231 }
32232 #else
32233 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
32234 uint8x8_t __ret;
32235 __ret = (uint8x8_t)(__p0);
32236 return __ret;
32237 }
32238 #endif
32239
32240 #ifdef __LITTLE_ENDIAN__
32241 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
32242 uint32x2_t __ret;
32243 __ret = (uint32x2_t)(__p0);
32244 return __ret;
32245 }
32246 #else
32247 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
32248 uint32x2_t __ret;
32249 __ret = (uint32x2_t)(__p0);
32250 return __ret;
32251 }
32252 #endif
32253
32254 #ifdef __LITTLE_ENDIAN__
32255 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
32256 uint32x2_t __ret;
32257 __ret = (uint32x2_t)(__p0);
32258 return __ret;
32259 }
32260 #else
32261 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
32262 uint32x2_t __ret;
32263 __ret = (uint32x2_t)(__p0);
32264 return __ret;
32265 }
32266 #endif
32267
32268 #ifdef __LITTLE_ENDIAN__
32269 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
32270 uint32x2_t __ret;
32271 __ret = (uint32x2_t)(__p0);
32272 return __ret;
32273 }
32274 #else
32275 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
32276 uint32x2_t __ret;
32277 __ret = (uint32x2_t)(__p0);
32278 return __ret;
32279 }
32280 #endif
32281
32282 #ifdef __LITTLE_ENDIAN__
32283 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
32284 uint32x2_t __ret;
32285 __ret = (uint32x2_t)(__p0);
32286 return __ret;
32287 }
32288 #else
32289 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
32290 uint32x2_t __ret;
32291 __ret = (uint32x2_t)(__p0);
32292 return __ret;
32293 }
32294 #endif
32295
32296 #ifdef __LITTLE_ENDIAN__
32297 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
32298 uint32x2_t __ret;
32299 __ret = (uint32x2_t)(__p0);
32300 return __ret;
32301 }
32302 #else
32303 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
32304 uint32x2_t __ret;
32305 __ret = (uint32x2_t)(__p0);
32306 return __ret;
32307 }
32308 #endif
32309
32310 #ifdef __LITTLE_ENDIAN__
32311 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
32312 uint32x2_t __ret;
32313 __ret = (uint32x2_t)(__p0);
32314 return __ret;
32315 }
32316 #else
32317 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
32318 uint32x2_t __ret;
32319 __ret = (uint32x2_t)(__p0);
32320 return __ret;
32321 }
32322 #endif
32323
32324 #ifdef __LITTLE_ENDIAN__
32325 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
32326 uint32x2_t __ret;
32327 __ret = (uint32x2_t)(__p0);
32328 return __ret;
32329 }
32330 #else
32331 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
32332 uint32x2_t __ret;
32333 __ret = (uint32x2_t)(__p0);
32334 return __ret;
32335 }
32336 #endif
32337
32338 #ifdef __LITTLE_ENDIAN__
32339 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
32340 uint32x2_t __ret;
32341 __ret = (uint32x2_t)(__p0);
32342 return __ret;
32343 }
32344 #else
32345 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
32346 uint32x2_t __ret;
32347 __ret = (uint32x2_t)(__p0);
32348 return __ret;
32349 }
32350 #endif
32351
32352 #ifdef __LITTLE_ENDIAN__
32353 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
32354 uint32x2_t __ret;
32355 __ret = (uint32x2_t)(__p0);
32356 return __ret;
32357 }
32358 #else
32359 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
32360 uint32x2_t __ret;
32361 __ret = (uint32x2_t)(__p0);
32362 return __ret;
32363 }
32364 #endif
32365
32366 #ifdef __LITTLE_ENDIAN__
32367 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
32368 uint32x2_t __ret;
32369 __ret = (uint32x2_t)(__p0);
32370 return __ret;
32371 }
32372 #else
32373 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
32374 uint32x2_t __ret;
32375 __ret = (uint32x2_t)(__p0);
32376 return __ret;
32377 }
32378 #endif
32379
32380 #ifdef __LITTLE_ENDIAN__
32381 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
32382 uint32x2_t __ret;
32383 __ret = (uint32x2_t)(__p0);
32384 return __ret;
32385 }
32386 #else
32387 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
32388 uint32x2_t __ret;
32389 __ret = (uint32x2_t)(__p0);
32390 return __ret;
32391 }
32392 #endif
32393
32394 #ifdef __LITTLE_ENDIAN__
32395 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
32396 uint64x1_t __ret;
32397 __ret = (uint64x1_t)(__p0);
32398 return __ret;
32399 }
32400 #else
32401 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
32402 uint64x1_t __ret;
32403 __ret = (uint64x1_t)(__p0);
32404 return __ret;
32405 }
32406 #endif
32407
32408 #ifdef __LITTLE_ENDIAN__
32409 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
32410 uint64x1_t __ret;
32411 __ret = (uint64x1_t)(__p0);
32412 return __ret;
32413 }
32414 #else
32415 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
32416 uint64x1_t __ret;
32417 __ret = (uint64x1_t)(__p0);
32418 return __ret;
32419 }
32420 #endif
32421
32422 #ifdef __LITTLE_ENDIAN__
32423 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
32424 uint64x1_t __ret;
32425 __ret = (uint64x1_t)(__p0);
32426 return __ret;
32427 }
32428 #else
32429 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
32430 uint64x1_t __ret;
32431 __ret = (uint64x1_t)(__p0);
32432 return __ret;
32433 }
32434 #endif
32435
32436 #ifdef __LITTLE_ENDIAN__
32437 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
32438 uint64x1_t __ret;
32439 __ret = (uint64x1_t)(__p0);
32440 return __ret;
32441 }
32442 #else
32443 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
32444 uint64x1_t __ret;
32445 __ret = (uint64x1_t)(__p0);
32446 return __ret;
32447 }
32448 #endif
32449
32450 #ifdef __LITTLE_ENDIAN__
32451 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
32452 uint64x1_t __ret;
32453 __ret = (uint64x1_t)(__p0);
32454 return __ret;
32455 }
32456 #else
32457 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
32458 uint64x1_t __ret;
32459 __ret = (uint64x1_t)(__p0);
32460 return __ret;
32461 }
32462 #endif
32463
32464 #ifdef __LITTLE_ENDIAN__
32465 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
32466 uint64x1_t __ret;
32467 __ret = (uint64x1_t)(__p0);
32468 return __ret;
32469 }
32470 #else
32471 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
32472 uint64x1_t __ret;
32473 __ret = (uint64x1_t)(__p0);
32474 return __ret;
32475 }
32476 #endif
32477
32478 #ifdef __LITTLE_ENDIAN__
32479 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
32480 uint64x1_t __ret;
32481 __ret = (uint64x1_t)(__p0);
32482 return __ret;
32483 }
32484 #else
32485 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
32486 uint64x1_t __ret;
32487 __ret = (uint64x1_t)(__p0);
32488 return __ret;
32489 }
32490 #endif
32491
32492 #ifdef __LITTLE_ENDIAN__
32493 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
32494 uint64x1_t __ret;
32495 __ret = (uint64x1_t)(__p0);
32496 return __ret;
32497 }
32498 #else
32499 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
32500 uint64x1_t __ret;
32501 __ret = (uint64x1_t)(__p0);
32502 return __ret;
32503 }
32504 #endif
32505
32506 #ifdef __LITTLE_ENDIAN__
32507 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
32508 uint64x1_t __ret;
32509 __ret = (uint64x1_t)(__p0);
32510 return __ret;
32511 }
32512 #else
32513 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
32514 uint64x1_t __ret;
32515 __ret = (uint64x1_t)(__p0);
32516 return __ret;
32517 }
32518 #endif
32519
32520 #ifdef __LITTLE_ENDIAN__
32521 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
32522 uint64x1_t __ret;
32523 __ret = (uint64x1_t)(__p0);
32524 return __ret;
32525 }
32526 #else
32527 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
32528 uint64x1_t __ret;
32529 __ret = (uint64x1_t)(__p0);
32530 return __ret;
32531 }
32532 #endif
32533
32534 #ifdef __LITTLE_ENDIAN__
32535 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
32536 uint64x1_t __ret;
32537 __ret = (uint64x1_t)(__p0);
32538 return __ret;
32539 }
32540 #else
32541 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
32542 uint64x1_t __ret;
32543 __ret = (uint64x1_t)(__p0);
32544 return __ret;
32545 }
32546 #endif
32547
32548 #ifdef __LITTLE_ENDIAN__
32549 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
32550 uint16x4_t __ret;
32551 __ret = (uint16x4_t)(__p0);
32552 return __ret;
32553 }
32554 #else
32555 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
32556 uint16x4_t __ret;
32557 __ret = (uint16x4_t)(__p0);
32558 return __ret;
32559 }
32560 #endif
32561
32562 #ifdef __LITTLE_ENDIAN__
32563 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
32564 uint16x4_t __ret;
32565 __ret = (uint16x4_t)(__p0);
32566 return __ret;
32567 }
32568 #else
32569 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
32570 uint16x4_t __ret;
32571 __ret = (uint16x4_t)(__p0);
32572 return __ret;
32573 }
32574 #endif
32575
32576 #ifdef __LITTLE_ENDIAN__
32577 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
32578 uint16x4_t __ret;
32579 __ret = (uint16x4_t)(__p0);
32580 return __ret;
32581 }
32582 #else
32583 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
32584 uint16x4_t __ret;
32585 __ret = (uint16x4_t)(__p0);
32586 return __ret;
32587 }
32588 #endif
32589
32590 #ifdef __LITTLE_ENDIAN__
32591 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
32592 uint16x4_t __ret;
32593 __ret = (uint16x4_t)(__p0);
32594 return __ret;
32595 }
32596 #else
32597 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
32598 uint16x4_t __ret;
32599 __ret = (uint16x4_t)(__p0);
32600 return __ret;
32601 }
32602 #endif
32603
32604 #ifdef __LITTLE_ENDIAN__
32605 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
32606 uint16x4_t __ret;
32607 __ret = (uint16x4_t)(__p0);
32608 return __ret;
32609 }
32610 #else
32611 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
32612 uint16x4_t __ret;
32613 __ret = (uint16x4_t)(__p0);
32614 return __ret;
32615 }
32616 #endif
32617
32618 #ifdef __LITTLE_ENDIAN__
32619 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
32620 uint16x4_t __ret;
32621 __ret = (uint16x4_t)(__p0);
32622 return __ret;
32623 }
32624 #else
32625 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
32626 uint16x4_t __ret;
32627 __ret = (uint16x4_t)(__p0);
32628 return __ret;
32629 }
32630 #endif
32631
32632 #ifdef __LITTLE_ENDIAN__
32633 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
32634 uint16x4_t __ret;
32635 __ret = (uint16x4_t)(__p0);
32636 return __ret;
32637 }
32638 #else
32639 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
32640 uint16x4_t __ret;
32641 __ret = (uint16x4_t)(__p0);
32642 return __ret;
32643 }
32644 #endif
32645
32646 #ifdef __LITTLE_ENDIAN__
32647 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
32648 uint16x4_t __ret;
32649 __ret = (uint16x4_t)(__p0);
32650 return __ret;
32651 }
32652 #else
32653 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
32654 uint16x4_t __ret;
32655 __ret = (uint16x4_t)(__p0);
32656 return __ret;
32657 }
32658 #endif
32659
32660 #ifdef __LITTLE_ENDIAN__
32661 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
32662 uint16x4_t __ret;
32663 __ret = (uint16x4_t)(__p0);
32664 return __ret;
32665 }
32666 #else
32667 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
32668 uint16x4_t __ret;
32669 __ret = (uint16x4_t)(__p0);
32670 return __ret;
32671 }
32672 #endif
32673
32674 #ifdef __LITTLE_ENDIAN__
32675 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
32676 uint16x4_t __ret;
32677 __ret = (uint16x4_t)(__p0);
32678 return __ret;
32679 }
32680 #else
32681 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
32682 uint16x4_t __ret;
32683 __ret = (uint16x4_t)(__p0);
32684 return __ret;
32685 }
32686 #endif
32687
32688 #ifdef __LITTLE_ENDIAN__
32689 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
32690 uint16x4_t __ret;
32691 __ret = (uint16x4_t)(__p0);
32692 return __ret;
32693 }
32694 #else
32695 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
32696 uint16x4_t __ret;
32697 __ret = (uint16x4_t)(__p0);
32698 return __ret;
32699 }
32700 #endif
32701
32702 #ifdef __LITTLE_ENDIAN__
32703 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
32704 int8x8_t __ret;
32705 __ret = (int8x8_t)(__p0);
32706 return __ret;
32707 }
32708 #else
32709 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
32710 int8x8_t __ret;
32711 __ret = (int8x8_t)(__p0);
32712 return __ret;
32713 }
32714 #endif
32715
32716 #ifdef __LITTLE_ENDIAN__
32717 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
32718 int8x8_t __ret;
32719 __ret = (int8x8_t)(__p0);
32720 return __ret;
32721 }
32722 #else
32723 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
32724 int8x8_t __ret;
32725 __ret = (int8x8_t)(__p0);
32726 return __ret;
32727 }
32728 #endif
32729
32730 #ifdef __LITTLE_ENDIAN__
32731 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
32732 int8x8_t __ret;
32733 __ret = (int8x8_t)(__p0);
32734 return __ret;
32735 }
32736 #else
32737 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
32738 int8x8_t __ret;
32739 __ret = (int8x8_t)(__p0);
32740 return __ret;
32741 }
32742 #endif
32743
32744 #ifdef __LITTLE_ENDIAN__
32745 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
32746 int8x8_t __ret;
32747 __ret = (int8x8_t)(__p0);
32748 return __ret;
32749 }
32750 #else
32751 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
32752 int8x8_t __ret;
32753 __ret = (int8x8_t)(__p0);
32754 return __ret;
32755 }
32756 #endif
32757
32758 #ifdef __LITTLE_ENDIAN__
32759 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
32760 int8x8_t __ret;
32761 __ret = (int8x8_t)(__p0);
32762 return __ret;
32763 }
32764 #else
32765 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
32766 int8x8_t __ret;
32767 __ret = (int8x8_t)(__p0);
32768 return __ret;
32769 }
32770 #endif
32771
32772 #ifdef __LITTLE_ENDIAN__
32773 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
32774 int8x8_t __ret;
32775 __ret = (int8x8_t)(__p0);
32776 return __ret;
32777 }
32778 #else
32779 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
32780 int8x8_t __ret;
32781 __ret = (int8x8_t)(__p0);
32782 return __ret;
32783 }
32784 #endif
32785
32786 #ifdef __LITTLE_ENDIAN__
32787 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
32788 int8x8_t __ret;
32789 __ret = (int8x8_t)(__p0);
32790 return __ret;
32791 }
32792 #else
32793 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
32794 int8x8_t __ret;
32795 __ret = (int8x8_t)(__p0);
32796 return __ret;
32797 }
32798 #endif
32799
32800 #ifdef __LITTLE_ENDIAN__
32801 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
32802 int8x8_t __ret;
32803 __ret = (int8x8_t)(__p0);
32804 return __ret;
32805 }
32806 #else
32807 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
32808 int8x8_t __ret;
32809 __ret = (int8x8_t)(__p0);
32810 return __ret;
32811 }
32812 #endif
32813
32814 #ifdef __LITTLE_ENDIAN__
32815 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
32816 int8x8_t __ret;
32817 __ret = (int8x8_t)(__p0);
32818 return __ret;
32819 }
32820 #else
32821 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
32822 int8x8_t __ret;
32823 __ret = (int8x8_t)(__p0);
32824 return __ret;
32825 }
32826 #endif
32827
32828 #ifdef __LITTLE_ENDIAN__
32829 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
32830 int8x8_t __ret;
32831 __ret = (int8x8_t)(__p0);
32832 return __ret;
32833 }
32834 #else
32835 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
32836 int8x8_t __ret;
32837 __ret = (int8x8_t)(__p0);
32838 return __ret;
32839 }
32840 #endif
32841
32842 #ifdef __LITTLE_ENDIAN__
32843 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
32844 int8x8_t __ret;
32845 __ret = (int8x8_t)(__p0);
32846 return __ret;
32847 }
32848 #else
32849 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
32850 int8x8_t __ret;
32851 __ret = (int8x8_t)(__p0);
32852 return __ret;
32853 }
32854 #endif
32855
32856 #ifdef __LITTLE_ENDIAN__
32857 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
32858 float32x2_t __ret;
32859 __ret = (float32x2_t)(__p0);
32860 return __ret;
32861 }
32862 #else
32863 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
32864 float32x2_t __ret;
32865 __ret = (float32x2_t)(__p0);
32866 return __ret;
32867 }
32868 #endif
32869
32870 #ifdef __LITTLE_ENDIAN__
32871 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
32872 float32x2_t __ret;
32873 __ret = (float32x2_t)(__p0);
32874 return __ret;
32875 }
32876 #else
32877 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
32878 float32x2_t __ret;
32879 __ret = (float32x2_t)(__p0);
32880 return __ret;
32881 }
32882 #endif
32883
32884 #ifdef __LITTLE_ENDIAN__
32885 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
32886 float32x2_t __ret;
32887 __ret = (float32x2_t)(__p0);
32888 return __ret;
32889 }
32890 #else
32891 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
32892 float32x2_t __ret;
32893 __ret = (float32x2_t)(__p0);
32894 return __ret;
32895 }
32896 #endif
32897
32898 #ifdef __LITTLE_ENDIAN__
32899 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
32900 float32x2_t __ret;
32901 __ret = (float32x2_t)(__p0);
32902 return __ret;
32903 }
32904 #else
32905 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
32906 float32x2_t __ret;
32907 __ret = (float32x2_t)(__p0);
32908 return __ret;
32909 }
32910 #endif
32911
32912 #ifdef __LITTLE_ENDIAN__
32913 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
32914 float32x2_t __ret;
32915 __ret = (float32x2_t)(__p0);
32916 return __ret;
32917 }
32918 #else
32919 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
32920 float32x2_t __ret;
32921 __ret = (float32x2_t)(__p0);
32922 return __ret;
32923 }
32924 #endif
32925
32926 #ifdef __LITTLE_ENDIAN__
32927 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
32928 float32x2_t __ret;
32929 __ret = (float32x2_t)(__p0);
32930 return __ret;
32931 }
32932 #else
32933 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
32934 float32x2_t __ret;
32935 __ret = (float32x2_t)(__p0);
32936 return __ret;
32937 }
32938 #endif
32939
32940 #ifdef __LITTLE_ENDIAN__
32941 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
32942 float32x2_t __ret;
32943 __ret = (float32x2_t)(__p0);
32944 return __ret;
32945 }
32946 #else
32947 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
32948 float32x2_t __ret;
32949 __ret = (float32x2_t)(__p0);
32950 return __ret;
32951 }
32952 #endif
32953
32954 #ifdef __LITTLE_ENDIAN__
32955 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
32956 float32x2_t __ret;
32957 __ret = (float32x2_t)(__p0);
32958 return __ret;
32959 }
32960 #else
32961 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
32962 float32x2_t __ret;
32963 __ret = (float32x2_t)(__p0);
32964 return __ret;
32965 }
32966 #endif
32967
32968 #ifdef __LITTLE_ENDIAN__
32969 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
32970 float32x2_t __ret;
32971 __ret = (float32x2_t)(__p0);
32972 return __ret;
32973 }
32974 #else
32975 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
32976 float32x2_t __ret;
32977 __ret = (float32x2_t)(__p0);
32978 return __ret;
32979 }
32980 #endif
32981
32982 #ifdef __LITTLE_ENDIAN__
32983 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
32984 float32x2_t __ret;
32985 __ret = (float32x2_t)(__p0);
32986 return __ret;
32987 }
32988 #else
32989 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
32990 float32x2_t __ret;
32991 __ret = (float32x2_t)(__p0);
32992 return __ret;
32993 }
32994 #endif
32995
32996 #ifdef __LITTLE_ENDIAN__
32997 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
32998 float32x2_t __ret;
32999 __ret = (float32x2_t)(__p0);
33000 return __ret;
33001 }
33002 #else
33003 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
33004 float32x2_t __ret;
33005 __ret = (float32x2_t)(__p0);
33006 return __ret;
33007 }
33008 #endif
33009
33010 #ifdef __LITTLE_ENDIAN__
33011 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
33012 float16x4_t __ret;
33013 __ret = (float16x4_t)(__p0);
33014 return __ret;
33015 }
33016 #else
33017 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
33018 float16x4_t __ret;
33019 __ret = (float16x4_t)(__p0);
33020 return __ret;
33021 }
33022 #endif
33023
33024 #ifdef __LITTLE_ENDIAN__
33025 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
33026 float16x4_t __ret;
33027 __ret = (float16x4_t)(__p0);
33028 return __ret;
33029 }
33030 #else
33031 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
33032 float16x4_t __ret;
33033 __ret = (float16x4_t)(__p0);
33034 return __ret;
33035 }
33036 #endif
33037
33038 #ifdef __LITTLE_ENDIAN__
33039 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
33040 float16x4_t __ret;
33041 __ret = (float16x4_t)(__p0);
33042 return __ret;
33043 }
33044 #else
33045 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
33046 float16x4_t __ret;
33047 __ret = (float16x4_t)(__p0);
33048 return __ret;
33049 }
33050 #endif
33051
33052 #ifdef __LITTLE_ENDIAN__
33053 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
33054 float16x4_t __ret;
33055 __ret = (float16x4_t)(__p0);
33056 return __ret;
33057 }
33058 #else
33059 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
33060 float16x4_t __ret;
33061 __ret = (float16x4_t)(__p0);
33062 return __ret;
33063 }
33064 #endif
33065
33066 #ifdef __LITTLE_ENDIAN__
33067 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
33068 float16x4_t __ret;
33069 __ret = (float16x4_t)(__p0);
33070 return __ret;
33071 }
33072 #else
33073 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
33074 float16x4_t __ret;
33075 __ret = (float16x4_t)(__p0);
33076 return __ret;
33077 }
33078 #endif
33079
33080 #ifdef __LITTLE_ENDIAN__
33081 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
33082 float16x4_t __ret;
33083 __ret = (float16x4_t)(__p0);
33084 return __ret;
33085 }
33086 #else
33087 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
33088 float16x4_t __ret;
33089 __ret = (float16x4_t)(__p0);
33090 return __ret;
33091 }
33092 #endif
33093
33094 #ifdef __LITTLE_ENDIAN__
33095 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
33096 float16x4_t __ret;
33097 __ret = (float16x4_t)(__p0);
33098 return __ret;
33099 }
33100 #else
33101 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
33102 float16x4_t __ret;
33103 __ret = (float16x4_t)(__p0);
33104 return __ret;
33105 }
33106 #endif
33107
33108 #ifdef __LITTLE_ENDIAN__
33109 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
33110 float16x4_t __ret;
33111 __ret = (float16x4_t)(__p0);
33112 return __ret;
33113 }
33114 #else
33115 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
33116 float16x4_t __ret;
33117 __ret = (float16x4_t)(__p0);
33118 return __ret;
33119 }
33120 #endif
33121
33122 #ifdef __LITTLE_ENDIAN__
33123 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
33124 float16x4_t __ret;
33125 __ret = (float16x4_t)(__p0);
33126 return __ret;
33127 }
33128 #else
33129 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
33130 float16x4_t __ret;
33131 __ret = (float16x4_t)(__p0);
33132 return __ret;
33133 }
33134 #endif
33135
33136 #ifdef __LITTLE_ENDIAN__
33137 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
33138 float16x4_t __ret;
33139 __ret = (float16x4_t)(__p0);
33140 return __ret;
33141 }
33142 #else
33143 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
33144 float16x4_t __ret;
33145 __ret = (float16x4_t)(__p0);
33146 return __ret;
33147 }
33148 #endif
33149
33150 #ifdef __LITTLE_ENDIAN__
33151 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
33152 float16x4_t __ret;
33153 __ret = (float16x4_t)(__p0);
33154 return __ret;
33155 }
33156 #else
33157 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
33158 float16x4_t __ret;
33159 __ret = (float16x4_t)(__p0);
33160 return __ret;
33161 }
33162 #endif
33163
33164 #ifdef __LITTLE_ENDIAN__
33165 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
33166 int32x2_t __ret;
33167 __ret = (int32x2_t)(__p0);
33168 return __ret;
33169 }
33170 #else
33171 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
33172 int32x2_t __ret;
33173 __ret = (int32x2_t)(__p0);
33174 return __ret;
33175 }
33176 #endif
33177
33178 #ifdef __LITTLE_ENDIAN__
33179 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
33180 int32x2_t __ret;
33181 __ret = (int32x2_t)(__p0);
33182 return __ret;
33183 }
33184 #else
33185 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
33186 int32x2_t __ret;
33187 __ret = (int32x2_t)(__p0);
33188 return __ret;
33189 }
33190 #endif
33191
33192 #ifdef __LITTLE_ENDIAN__
33193 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
33194 int32x2_t __ret;
33195 __ret = (int32x2_t)(__p0);
33196 return __ret;
33197 }
33198 #else
33199 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
33200 int32x2_t __ret;
33201 __ret = (int32x2_t)(__p0);
33202 return __ret;
33203 }
33204 #endif
33205
33206 #ifdef __LITTLE_ENDIAN__
33207 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
33208 int32x2_t __ret;
33209 __ret = (int32x2_t)(__p0);
33210 return __ret;
33211 }
33212 #else
33213 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
33214 int32x2_t __ret;
33215 __ret = (int32x2_t)(__p0);
33216 return __ret;
33217 }
33218 #endif
33219
33220 #ifdef __LITTLE_ENDIAN__
33221 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
33222 int32x2_t __ret;
33223 __ret = (int32x2_t)(__p0);
33224 return __ret;
33225 }
33226 #else
33227 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
33228 int32x2_t __ret;
33229 __ret = (int32x2_t)(__p0);
33230 return __ret;
33231 }
33232 #endif
33233
33234 #ifdef __LITTLE_ENDIAN__
33235 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
33236 int32x2_t __ret;
33237 __ret = (int32x2_t)(__p0);
33238 return __ret;
33239 }
33240 #else
33241 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
33242 int32x2_t __ret;
33243 __ret = (int32x2_t)(__p0);
33244 return __ret;
33245 }
33246 #endif
33247
33248 #ifdef __LITTLE_ENDIAN__
33249 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
33250 int32x2_t __ret;
33251 __ret = (int32x2_t)(__p0);
33252 return __ret;
33253 }
33254 #else
33255 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
33256 int32x2_t __ret;
33257 __ret = (int32x2_t)(__p0);
33258 return __ret;
33259 }
33260 #endif
33261
33262 #ifdef __LITTLE_ENDIAN__
33263 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
33264 int32x2_t __ret;
33265 __ret = (int32x2_t)(__p0);
33266 return __ret;
33267 }
33268 #else
33269 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
33270 int32x2_t __ret;
33271 __ret = (int32x2_t)(__p0);
33272 return __ret;
33273 }
33274 #endif
33275
33276 #ifdef __LITTLE_ENDIAN__
33277 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
33278 int32x2_t __ret;
33279 __ret = (int32x2_t)(__p0);
33280 return __ret;
33281 }
33282 #else
33283 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
33284 int32x2_t __ret;
33285 __ret = (int32x2_t)(__p0);
33286 return __ret;
33287 }
33288 #endif
33289
33290 #ifdef __LITTLE_ENDIAN__
33291 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
33292 int32x2_t __ret;
33293 __ret = (int32x2_t)(__p0);
33294 return __ret;
33295 }
33296 #else
33297 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
33298 int32x2_t __ret;
33299 __ret = (int32x2_t)(__p0);
33300 return __ret;
33301 }
33302 #endif
33303
33304 #ifdef __LITTLE_ENDIAN__
33305 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
33306 int32x2_t __ret;
33307 __ret = (int32x2_t)(__p0);
33308 return __ret;
33309 }
33310 #else
33311 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
33312 int32x2_t __ret;
33313 __ret = (int32x2_t)(__p0);
33314 return __ret;
33315 }
33316 #endif
33317
33318 #ifdef __LITTLE_ENDIAN__
33319 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
33320 int64x1_t __ret;
33321 __ret = (int64x1_t)(__p0);
33322 return __ret;
33323 }
33324 #else
33325 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
33326 int64x1_t __ret;
33327 __ret = (int64x1_t)(__p0);
33328 return __ret;
33329 }
33330 #endif
33331
33332 #ifdef __LITTLE_ENDIAN__
33333 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
33334 int64x1_t __ret;
33335 __ret = (int64x1_t)(__p0);
33336 return __ret;
33337 }
33338 #else
33339 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
33340 int64x1_t __ret;
33341 __ret = (int64x1_t)(__p0);
33342 return __ret;
33343 }
33344 #endif
33345
33346 #ifdef __LITTLE_ENDIAN__
33347 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
33348 int64x1_t __ret;
33349 __ret = (int64x1_t)(__p0);
33350 return __ret;
33351 }
33352 #else
33353 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
33354 int64x1_t __ret;
33355 __ret = (int64x1_t)(__p0);
33356 return __ret;
33357 }
33358 #endif
33359
33360 #ifdef __LITTLE_ENDIAN__
33361 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
33362 int64x1_t __ret;
33363 __ret = (int64x1_t)(__p0);
33364 return __ret;
33365 }
33366 #else
33367 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
33368 int64x1_t __ret;
33369 __ret = (int64x1_t)(__p0);
33370 return __ret;
33371 }
33372 #endif
33373
33374 #ifdef __LITTLE_ENDIAN__
33375 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
33376 int64x1_t __ret;
33377 __ret = (int64x1_t)(__p0);
33378 return __ret;
33379 }
33380 #else
33381 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
33382 int64x1_t __ret;
33383 __ret = (int64x1_t)(__p0);
33384 return __ret;
33385 }
33386 #endif
33387
33388 #ifdef __LITTLE_ENDIAN__
33389 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
33390 int64x1_t __ret;
33391 __ret = (int64x1_t)(__p0);
33392 return __ret;
33393 }
33394 #else
33395 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
33396 int64x1_t __ret;
33397 __ret = (int64x1_t)(__p0);
33398 return __ret;
33399 }
33400 #endif
33401
33402 #ifdef __LITTLE_ENDIAN__
33403 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
33404 int64x1_t __ret;
33405 __ret = (int64x1_t)(__p0);
33406 return __ret;
33407 }
33408 #else
33409 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
33410 int64x1_t __ret;
33411 __ret = (int64x1_t)(__p0);
33412 return __ret;
33413 }
33414 #endif
33415
33416 #ifdef __LITTLE_ENDIAN__
33417 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
33418 int64x1_t __ret;
33419 __ret = (int64x1_t)(__p0);
33420 return __ret;
33421 }
33422 #else
33423 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
33424 int64x1_t __ret;
33425 __ret = (int64x1_t)(__p0);
33426 return __ret;
33427 }
33428 #endif
33429
33430 #ifdef __LITTLE_ENDIAN__
33431 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
33432 int64x1_t __ret;
33433 __ret = (int64x1_t)(__p0);
33434 return __ret;
33435 }
33436 #else
33437 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
33438 int64x1_t __ret;
33439 __ret = (int64x1_t)(__p0);
33440 return __ret;
33441 }
33442 #endif
33443
33444 #ifdef __LITTLE_ENDIAN__
33445 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
33446 int64x1_t __ret;
33447 __ret = (int64x1_t)(__p0);
33448 return __ret;
33449 }
33450 #else
33451 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
33452 int64x1_t __ret;
33453 __ret = (int64x1_t)(__p0);
33454 return __ret;
33455 }
33456 #endif
33457
33458 #ifdef __LITTLE_ENDIAN__
33459 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
33460 int64x1_t __ret;
33461 __ret = (int64x1_t)(__p0);
33462 return __ret;
33463 }
33464 #else
33465 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
33466 int64x1_t __ret;
33467 __ret = (int64x1_t)(__p0);
33468 return __ret;
33469 }
33470 #endif
33471
33472 #ifdef __LITTLE_ENDIAN__
33473 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
33474 int16x4_t __ret;
33475 __ret = (int16x4_t)(__p0);
33476 return __ret;
33477 }
33478 #else
33479 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
33480 int16x4_t __ret;
33481 __ret = (int16x4_t)(__p0);
33482 return __ret;
33483 }
33484 #endif
33485
33486 #ifdef __LITTLE_ENDIAN__
33487 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
33488 int16x4_t __ret;
33489 __ret = (int16x4_t)(__p0);
33490 return __ret;
33491 }
33492 #else
33493 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
33494 int16x4_t __ret;
33495 __ret = (int16x4_t)(__p0);
33496 return __ret;
33497 }
33498 #endif
33499
33500 #ifdef __LITTLE_ENDIAN__
33501 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
33502 int16x4_t __ret;
33503 __ret = (int16x4_t)(__p0);
33504 return __ret;
33505 }
33506 #else
33507 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
33508 int16x4_t __ret;
33509 __ret = (int16x4_t)(__p0);
33510 return __ret;
33511 }
33512 #endif
33513
33514 #ifdef __LITTLE_ENDIAN__
33515 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
33516 int16x4_t __ret;
33517 __ret = (int16x4_t)(__p0);
33518 return __ret;
33519 }
33520 #else
33521 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
33522 int16x4_t __ret;
33523 __ret = (int16x4_t)(__p0);
33524 return __ret;
33525 }
33526 #endif
33527
33528 #ifdef __LITTLE_ENDIAN__
33529 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
33530 int16x4_t __ret;
33531 __ret = (int16x4_t)(__p0);
33532 return __ret;
33533 }
33534 #else
33535 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
33536 int16x4_t __ret;
33537 __ret = (int16x4_t)(__p0);
33538 return __ret;
33539 }
33540 #endif
33541
33542 #ifdef __LITTLE_ENDIAN__
33543 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
33544 int16x4_t __ret;
33545 __ret = (int16x4_t)(__p0);
33546 return __ret;
33547 }
33548 #else
33549 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
33550 int16x4_t __ret;
33551 __ret = (int16x4_t)(__p0);
33552 return __ret;
33553 }
33554 #endif
33555
33556 #ifdef __LITTLE_ENDIAN__
33557 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
33558 int16x4_t __ret;
33559 __ret = (int16x4_t)(__p0);
33560 return __ret;
33561 }
33562 #else
33563 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
33564 int16x4_t __ret;
33565 __ret = (int16x4_t)(__p0);
33566 return __ret;
33567 }
33568 #endif
33569
33570 #ifdef __LITTLE_ENDIAN__
33571 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
33572 int16x4_t __ret;
33573 __ret = (int16x4_t)(__p0);
33574 return __ret;
33575 }
33576 #else
33577 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
33578 int16x4_t __ret;
33579 __ret = (int16x4_t)(__p0);
33580 return __ret;
33581 }
33582 #endif
33583
33584 #ifdef __LITTLE_ENDIAN__
33585 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
33586 int16x4_t __ret;
33587 __ret = (int16x4_t)(__p0);
33588 return __ret;
33589 }
33590 #else
33591 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
33592 int16x4_t __ret;
33593 __ret = (int16x4_t)(__p0);
33594 return __ret;
33595 }
33596 #endif
33597
33598 #ifdef __LITTLE_ENDIAN__
33599 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
33600 int16x4_t __ret;
33601 __ret = (int16x4_t)(__p0);
33602 return __ret;
33603 }
33604 #else
33605 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
33606 int16x4_t __ret;
33607 __ret = (int16x4_t)(__p0);
33608 return __ret;
33609 }
33610 #endif
33611
33612 #ifdef __LITTLE_ENDIAN__
33613 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
33614 int16x4_t __ret;
33615 __ret = (int16x4_t)(__p0);
33616 return __ret;
33617 }
33618 #else
33619 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
33620 int16x4_t __ret;
33621 __ret = (int16x4_t)(__p0);
33622 return __ret;
33623 }
33624 #endif
33625
33626 #endif
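/* Half-precision conversions: the intrinsics below are only compiled in when the
 * target supports the 16-bit floating-point storage format (__ARM_FP bit 1).
 * vcvt_f16_f32 narrows four float32 lanes to float16; vcvt_f32_f16 widens them
 * back. Illustrative use only, not part of the generated header:
 *
 *   float32x4_t v = vdupq_n_f32(1.5f);
 *   float16x4_t h = vcvt_f16_f32(v);   // narrow to half precision
 *   float32x4_t w = vcvt_f32_f16(h);   // widen back to single precision
 */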
33627 #if (__ARM_FP & 2)
33628 #ifdef __LITTLE_ENDIAN__
33629 __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
33630 float16x4_t __ret;
33631 __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
33632 return __ret;
33633 }
33634 #else
33635 __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
33636 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33637 float16x4_t __ret;
33638 __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8);
33639 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33640 return __ret;
33641 }
33642 __ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
33643 float16x4_t __ret;
33644 __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
33645 return __ret;
33646 }
33647 #endif
33648
33649 #ifdef __LITTLE_ENDIAN__
33650 __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
33651 float32x4_t __ret;
33652 __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
33653 return __ret;
33654 }
33655 #else
33656 __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
33657 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33658 float32x4_t __ret;
33659 __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41);
33660 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33661 return __ret;
33662 }
33663 __ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
33664 float32x4_t __ret;
33665 __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
33666 return __ret;
33667 }
33668 #endif
33669
33670 #endif
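/* ARMv8 adds float-to-integer conversions with an explicit rounding mode:
 * vcvta* rounds to nearest with ties away from zero, vcvtm* rounds toward
 * minus infinity, vcvtn* rounds to nearest with ties to even, and vcvtp*
 * rounds toward plus infinity, each in signed/unsigned and 64-/128-bit forms.
 */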
33671 #if __ARM_ARCH >= 8
33672 #ifdef __LITTLE_ENDIAN__
33673 __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
33674 int32x4_t __ret;
33675 __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
33676 return __ret;
33677 }
33678 #else
33679 __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
33680 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33681 int32x4_t __ret;
33682 __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
33683 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33684 return __ret;
33685 }
33686 #endif
33687
33688 #ifdef __LITTLE_ENDIAN__
33689 __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
33690 int32x2_t __ret;
33691 __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
33692 return __ret;
33693 }
33694 #else
33695 __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
33696 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33697 int32x2_t __ret;
33698 __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
33699 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33700 return __ret;
33701 }
33702 #endif
33703
33704 #ifdef __LITTLE_ENDIAN__
33705 __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
33706 uint32x4_t __ret;
33707 __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
33708 return __ret;
33709 }
33710 #else
33711 __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
33712 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33713 uint32x4_t __ret;
33714 __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
33715 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33716 return __ret;
33717 }
33718 #endif
33719
33720 #ifdef __LITTLE_ENDIAN__
33721 __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
33722 uint32x2_t __ret;
33723 __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
33724 return __ret;
33725 }
33726 #else
33727 __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
33728 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33729 uint32x2_t __ret;
33730 __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
33731 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33732 return __ret;
33733 }
33734 #endif
33735
33736 #ifdef __LITTLE_ENDIAN__
33737 __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
33738 int32x4_t __ret;
33739 __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
33740 return __ret;
33741 }
33742 #else
33743 __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
33744 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33745 int32x4_t __ret;
33746 __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
33747 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33748 return __ret;
33749 }
33750 #endif
33751
33752 #ifdef __LITTLE_ENDIAN__
33753 __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
33754 int32x2_t __ret;
33755 __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
33756 return __ret;
33757 }
33758 #else
33759 __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
33760 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33761 int32x2_t __ret;
33762 __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
33763 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33764 return __ret;
33765 }
33766 #endif
33767
33768 #ifdef __LITTLE_ENDIAN__
33769 __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
33770 uint32x4_t __ret;
33771 __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
33772 return __ret;
33773 }
33774 #else
33775 __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
33776 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33777 uint32x4_t __ret;
33778 __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
33779 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33780 return __ret;
33781 }
33782 #endif
33783
33784 #ifdef __LITTLE_ENDIAN__
33785 __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
33786 uint32x2_t __ret;
33787 __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
33788 return __ret;
33789 }
33790 #else
33791 __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
33792 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33793 uint32x2_t __ret;
33794 __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
33795 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33796 return __ret;
33797 }
33798 #endif
33799
33800 #ifdef __LITTLE_ENDIAN__
33801 __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
33802 int32x4_t __ret;
33803 __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
33804 return __ret;
33805 }
33806 #else
33807 __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
33808 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33809 int32x4_t __ret;
33810 __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
33811 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33812 return __ret;
33813 }
33814 #endif
33815
33816 #ifdef __LITTLE_ENDIAN__
33817 __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
33818 int32x2_t __ret;
33819 __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
33820 return __ret;
33821 }
33822 #else
33823 __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
33824 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33825 int32x2_t __ret;
33826 __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
33827 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33828 return __ret;
33829 }
33830 #endif
33831
33832 #ifdef __LITTLE_ENDIAN__
33833 __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
33834 uint32x4_t __ret;
33835 __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
33836 return __ret;
33837 }
33838 #else
33839 __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
33840 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33841 uint32x4_t __ret;
33842 __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
33843 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33844 return __ret;
33845 }
33846 #endif
33847
33848 #ifdef __LITTLE_ENDIAN__
33849 __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
33850 uint32x2_t __ret;
33851 __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
33852 return __ret;
33853 }
33854 #else
33855 __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
33856 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33857 uint32x2_t __ret;
33858 __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
33859 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33860 return __ret;
33861 }
33862 #endif
33863
33864 #ifdef __LITTLE_ENDIAN__
33865 __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
33866 int32x4_t __ret;
33867 __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
33868 return __ret;
33869 }
33870 #else
33871 __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
33872 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33873 int32x4_t __ret;
33874 __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
33875 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33876 return __ret;
33877 }
33878 #endif
33879
33880 #ifdef __LITTLE_ENDIAN__
33881 __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
33882 int32x2_t __ret;
33883 __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
33884 return __ret;
33885 }
33886 #else
33887 __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
33888 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33889 int32x2_t __ret;
33890 __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
33891 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33892 return __ret;
33893 }
33894 #endif
33895
33896 #ifdef __LITTLE_ENDIAN__
33897 __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
33898 uint32x4_t __ret;
33899 __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
33900 return __ret;
33901 }
33902 #else
33903 __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
33904 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33905 uint32x4_t __ret;
33906 __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
33907 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33908 return __ret;
33909 }
33910 #endif
33911
33912 #ifdef __LITTLE_ENDIAN__
33913 __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
33914 uint32x2_t __ret;
33915 __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
33916 return __ret;
33917 }
33918 #else
33919 __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
33920 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33921 uint32x2_t __ret;
33922 __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
33923 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33924 return __ret;
33925 }
33926 #endif
33927
33928 #endif
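/* Directed-rounding (round-to-integral) intrinsics, available when the target
 * advertises __ARM_FEATURE_DIRECTED_ROUNDING: vrnd rounds toward zero, vrnda
 * to nearest with ties away from zero, vrndm toward minus infinity, vrndn to
 * nearest with ties to even, vrndp toward plus infinity, and vrndx rounds
 * using the current rounding mode.
 */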
33929 #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
33930 #ifdef __LITTLE_ENDIAN__
33931 __ai float32x4_t vrndq_f32(float32x4_t __p0) {
33932 float32x4_t __ret;
33933 __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
33934 return __ret;
33935 }
33936 #else
33937 __ai float32x4_t vrndq_f32(float32x4_t __p0) {
33938 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33939 float32x4_t __ret;
33940 __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
33941 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33942 return __ret;
33943 }
33944 #endif
33945
33946 #ifdef __LITTLE_ENDIAN__
33947 __ai float32x2_t vrnd_f32(float32x2_t __p0) {
33948 float32x2_t __ret;
33949 __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
33950 return __ret;
33951 }
33952 #else
33953 __ai float32x2_t vrnd_f32(float32x2_t __p0) {
33954 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33955 float32x2_t __ret;
33956 __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
33957 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33958 return __ret;
33959 }
33960 #endif
33961
33962 #ifdef __LITTLE_ENDIAN__
33963 __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
33964 float32x4_t __ret;
33965 __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
33966 return __ret;
33967 }
33968 #else
33969 __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
33970 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
33971 float32x4_t __ret;
33972 __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
33973 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
33974 return __ret;
33975 }
33976 #endif
33977
33978 #ifdef __LITTLE_ENDIAN__
33979 __ai float32x2_t vrnda_f32(float32x2_t __p0) {
33980 float32x2_t __ret;
33981 __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
33982 return __ret;
33983 }
33984 #else
33985 __ai float32x2_t vrnda_f32(float32x2_t __p0) {
33986 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
33987 float32x2_t __ret;
33988 __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
33989 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
33990 return __ret;
33991 }
33992 #endif
33993
33994 #ifdef __LITTLE_ENDIAN__
33995 __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
33996 float32x4_t __ret;
33997 __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
33998 return __ret;
33999 }
34000 #else
34001 __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
34002 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
34003 float32x4_t __ret;
34004 __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
34005 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
34006 return __ret;
34007 }
34008 #endif
34009
34010 #ifdef __LITTLE_ENDIAN__
34011 __ai float32x2_t vrndm_f32(float32x2_t __p0) {
34012 float32x2_t __ret;
34013 __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
34014 return __ret;
34015 }
34016 #else
34017 __ai float32x2_t vrndm_f32(float32x2_t __p0) {
34018 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34019 float32x2_t __ret;
34020 __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
34021 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34022 return __ret;
34023 }
34024 #endif
34025
34026 #ifdef __LITTLE_ENDIAN__
34027 __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
34028 float32x4_t __ret;
34029 __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
34030 return __ret;
34031 }
34032 #else
34033 __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
34034 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
34035 float32x4_t __ret;
34036 __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
34037 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
34038 return __ret;
34039 }
34040 #endif
34041
34042 #ifdef __LITTLE_ENDIAN__
34043 __ai float32x2_t vrndn_f32(float32x2_t __p0) {
34044 float32x2_t __ret;
34045 __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
34046 return __ret;
34047 }
34048 #else
34049 __ai float32x2_t vrndn_f32(float32x2_t __p0) {
34050 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34051 float32x2_t __ret;
34052 __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
34053 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34054 return __ret;
34055 }
34056 #endif
34057
34058 #ifdef __LITTLE_ENDIAN__
34059 __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
34060 float32x4_t __ret;
34061 __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
34062 return __ret;
34063 }
34064 #else
34065 __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
34066 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
34067 float32x4_t __ret;
34068 __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
34069 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
34070 return __ret;
34071 }
34072 #endif
34073
34074 #ifdef __LITTLE_ENDIAN__
34075 __ai float32x2_t vrndp_f32(float32x2_t __p0) {
34076 float32x2_t __ret;
34077 __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
34078 return __ret;
34079 }
34080 #else
34081 __ai float32x2_t vrndp_f32(float32x2_t __p0) {
34082 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34083 float32x2_t __ret;
34084 __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
34085 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34086 return __ret;
34087 }
34088 #endif
34089
34090 #ifdef __LITTLE_ENDIAN__
34091 __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
34092 float32x4_t __ret;
34093 __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
34094 return __ret;
34095 }
34096 #else
34097 __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
34098 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
34099 float32x4_t __ret;
34100 __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
34101 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
34102 return __ret;
34103 }
34104 #endif
34105
34106 #ifdef __LITTLE_ENDIAN__
34107 __ai float32x2_t vrndx_f32(float32x2_t __p0) {
34108 float32x2_t __ret;
34109 __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
34110 return __ret;
34111 }
34112 #else
34113 __ai float32x2_t vrndx_f32(float32x2_t __p0) {
34114 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34115 float32x2_t __ret;
34116 __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
34117 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34118 return __ret;
34119 }
34120 #endif
34121
34122 #endif
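/* vmaxnm/vminnm implement the IEEE 754-2008 maxNum/minNum operations: when one
 * operand is a quiet NaN and the other is a number, the number is returned.
 */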
34123 #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
34124 #ifdef __LITTLE_ENDIAN__
34125 __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
34126 float32x4_t __ret;
34127 __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
34128 return __ret;
34129 }
34130 #else
34131 __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
34132 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
34133 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
34134 float32x4_t __ret;
34135 __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
34136 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
34137 return __ret;
34138 }
34139 #endif
34140
34141 #ifdef __LITTLE_ENDIAN__
34142 __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
34143 float32x2_t __ret;
34144 __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
34145 return __ret;
34146 }
34147 #else
34148 __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
34149 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34150 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
34151 float32x2_t __ret;
34152 __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
34153 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34154 return __ret;
34155 }
34156 #endif
34157
34158 #ifdef __LITTLE_ENDIAN__
34159 __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
34160 float32x4_t __ret;
34161 __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
34162 return __ret;
34163 }
34164 #else
34165 __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
34166 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
34167 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
34168 float32x4_t __ret;
34169 __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
34170 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
34171 return __ret;
34172 }
34173 #endif
34174
34175 #ifdef __LITTLE_ENDIAN__
34176 __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
34177 float32x2_t __ret;
34178 __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
34179 return __ret;
34180 }
34181 #else
34182 __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
34183 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34184 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
34185 float32x2_t __ret;
34186 __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
34187 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34188 return __ret;
34189 }
34190 #endif
34191
34192 #endif
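/* AArch64-only intrinsics follow: rounding-mode conversions between float64
 * vectors and 64-bit integers, plus reinterpret casts involving the poly64,
 * poly128 and float64 vector types.
 */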
34193 #if __ARM_ARCH >= 8 && defined(__aarch64__)
34194 #ifdef __LITTLE_ENDIAN__
34195 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
34196 int64x2_t __ret;
34197 __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
34198 return __ret;
34199 }
34200 #else
34201 __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
34202 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34203 int64x2_t __ret;
34204 __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
34205 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34206 return __ret;
34207 }
34208 #endif
34209
34210 #ifdef __LITTLE_ENDIAN__
34211 __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
34212 int64x1_t __ret;
34213 __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
34214 return __ret;
34215 }
34216 #else
34217 __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
34218 int64x1_t __ret;
34219 __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
34220 return __ret;
34221 }
34222 #endif
34223
34224 #ifdef __LITTLE_ENDIAN__
34225 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
34226 uint64x2_t __ret;
34227 __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
34228 return __ret;
34229 }
34230 #else
34231 __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
34232 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34233 uint64x2_t __ret;
34234 __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
34235 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34236 return __ret;
34237 }
34238 #endif
34239
34240 #ifdef __LITTLE_ENDIAN__
34241 __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
34242 uint64x1_t __ret;
34243 __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
34244 return __ret;
34245 }
34246 #else
34247 __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
34248 uint64x1_t __ret;
34249 __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
34250 return __ret;
34251 }
34252 #endif
34253
34254 #ifdef __LITTLE_ENDIAN__
34255 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
34256 int64x2_t __ret;
34257 __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
34258 return __ret;
34259 }
34260 #else
34261 __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
34262 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34263 int64x2_t __ret;
34264 __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
34265 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34266 return __ret;
34267 }
34268 #endif
34269
34270 #ifdef __LITTLE_ENDIAN__
34271 __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
34272 int64x1_t __ret;
34273 __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
34274 return __ret;
34275 }
34276 #else
34277 __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
34278 int64x1_t __ret;
34279 __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
34280 return __ret;
34281 }
34282 #endif
34283
34284 #ifdef __LITTLE_ENDIAN__
34285 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
34286 uint64x2_t __ret;
34287 __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
34288 return __ret;
34289 }
34290 #else
34291 __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
34292 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34293 uint64x2_t __ret;
34294 __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
34295 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34296 return __ret;
34297 }
34298 #endif
34299
34300 #ifdef __LITTLE_ENDIAN__
34301 __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
34302 uint64x1_t __ret;
34303 __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
34304 return __ret;
34305 }
34306 #else
34307 __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
34308 uint64x1_t __ret;
34309 __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
34310 return __ret;
34311 }
34312 #endif
34313
34314 #ifdef __LITTLE_ENDIAN__
34315 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
34316 int64x2_t __ret;
34317 __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
34318 return __ret;
34319 }
34320 #else
34321 __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
34322 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34323 int64x2_t __ret;
34324 __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
34325 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34326 return __ret;
34327 }
34328 #endif
34329
34330 #ifdef __LITTLE_ENDIAN__
34331 __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
34332 int64x1_t __ret;
34333 __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
34334 return __ret;
34335 }
34336 #else
34337 __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
34338 int64x1_t __ret;
34339 __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
34340 return __ret;
34341 }
34342 #endif
34343
34344 #ifdef __LITTLE_ENDIAN__
34345 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
34346 uint64x2_t __ret;
34347 __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
34348 return __ret;
34349 }
34350 #else
34351 __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
34352 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34353 uint64x2_t __ret;
34354 __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
34355 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34356 return __ret;
34357 }
34358 #endif
34359
34360 #ifdef __LITTLE_ENDIAN__
34361 __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
34362 uint64x1_t __ret;
34363 __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
34364 return __ret;
34365 }
34366 #else
34367 __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
34368 uint64x1_t __ret;
34369 __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
34370 return __ret;
34371 }
34372 #endif
34373
34374 #ifdef __LITTLE_ENDIAN__
34375 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
34376 int64x2_t __ret;
34377 __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
34378 return __ret;
34379 }
34380 #else
34381 __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
34382 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34383 int64x2_t __ret;
34384 __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
34385 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34386 return __ret;
34387 }
34388 #endif
34389
34390 #ifdef __LITTLE_ENDIAN__
34391 __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
34392 int64x1_t __ret;
34393 __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
34394 return __ret;
34395 }
34396 #else
34397 __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
34398 int64x1_t __ret;
34399 __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
34400 return __ret;
34401 }
34402 #endif
34403
34404 #ifdef __LITTLE_ENDIAN__
34405 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
34406 uint64x2_t __ret;
34407 __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
34408 return __ret;
34409 }
34410 #else
34411 __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
34412 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
34413 uint64x2_t __ret;
34414 __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
34415 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
34416 return __ret;
34417 }
34418 #endif
34419
34420 #ifdef __LITTLE_ENDIAN__
34421 __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
34422 uint64x1_t __ret;
34423 __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
34424 return __ret;
34425 }
34426 #else
34427 __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
34428 uint64x1_t __ret;
34429 __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
34430 return __ret;
34431 }
34432 #endif
34433
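/* The vreinterpret/vreinterpretq definitions below are bit-pattern casts; they
 * change the element type of a vector without modifying its contents.
 */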
34434 #ifdef __LITTLE_ENDIAN__
34435 __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
34436 poly8x8_t __ret;
34437 __ret = (poly8x8_t)(__p0);
34438 return __ret;
34439 }
34440 #else
34441 __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
34442 poly8x8_t __ret;
34443 __ret = (poly8x8_t)(__p0);
34444 return __ret;
34445 }
34446 #endif
34447
34448 #ifdef __LITTLE_ENDIAN__
34449 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
34450 poly8x8_t __ret;
34451 __ret = (poly8x8_t)(__p0);
34452 return __ret;
34453 }
34454 #else
34455 __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
34456 poly8x8_t __ret;
34457 __ret = (poly8x8_t)(__p0);
34458 return __ret;
34459 }
34460 #endif
34461
34462 #ifdef __LITTLE_ENDIAN__
34463 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
34464 poly8x8_t __ret;
34465 __ret = (poly8x8_t)(__p0);
34466 return __ret;
34467 }
34468 #else
34469 __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
34470 poly8x8_t __ret;
34471 __ret = (poly8x8_t)(__p0);
34472 return __ret;
34473 }
34474 #endif
34475
34476 #ifdef __LITTLE_ENDIAN__
34477 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
34478 poly8x8_t __ret;
34479 __ret = (poly8x8_t)(__p0);
34480 return __ret;
34481 }
34482 #else
34483 __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
34484 poly8x8_t __ret;
34485 __ret = (poly8x8_t)(__p0);
34486 return __ret;
34487 }
34488 #endif
34489
34490 #ifdef __LITTLE_ENDIAN__
34491 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
34492 poly8x8_t __ret;
34493 __ret = (poly8x8_t)(__p0);
34494 return __ret;
34495 }
34496 #else
34497 __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
34498 poly8x8_t __ret;
34499 __ret = (poly8x8_t)(__p0);
34500 return __ret;
34501 }
34502 #endif
34503
34504 #ifdef __LITTLE_ENDIAN__
34505 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
34506 poly8x8_t __ret;
34507 __ret = (poly8x8_t)(__p0);
34508 return __ret;
34509 }
34510 #else
34511 __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
34512 poly8x8_t __ret;
34513 __ret = (poly8x8_t)(__p0);
34514 return __ret;
34515 }
34516 #endif
34517
34518 #ifdef __LITTLE_ENDIAN__
34519 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
34520 poly8x8_t __ret;
34521 __ret = (poly8x8_t)(__p0);
34522 return __ret;
34523 }
34524 #else
34525 __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
34526 poly8x8_t __ret;
34527 __ret = (poly8x8_t)(__p0);
34528 return __ret;
34529 }
34530 #endif
34531
34532 #ifdef __LITTLE_ENDIAN__
34533 __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
34534 poly8x8_t __ret;
34535 __ret = (poly8x8_t)(__p0);
34536 return __ret;
34537 }
34538 #else
34539 __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
34540 poly8x8_t __ret;
34541 __ret = (poly8x8_t)(__p0);
34542 return __ret;
34543 }
34544 #endif
34545
34546 #ifdef __LITTLE_ENDIAN__
34547 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
34548 poly8x8_t __ret;
34549 __ret = (poly8x8_t)(__p0);
34550 return __ret;
34551 }
34552 #else
34553 __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
34554 poly8x8_t __ret;
34555 __ret = (poly8x8_t)(__p0);
34556 return __ret;
34557 }
34558 #endif
34559
34560 #ifdef __LITTLE_ENDIAN__
34561 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
34562 poly8x8_t __ret;
34563 __ret = (poly8x8_t)(__p0);
34564 return __ret;
34565 }
34566 #else
34567 __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
34568 poly8x8_t __ret;
34569 __ret = (poly8x8_t)(__p0);
34570 return __ret;
34571 }
34572 #endif
34573
34574 #ifdef __LITTLE_ENDIAN__
34575 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
34576 poly8x8_t __ret;
34577 __ret = (poly8x8_t)(__p0);
34578 return __ret;
34579 }
34580 #else
34581 __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
34582 poly8x8_t __ret;
34583 __ret = (poly8x8_t)(__p0);
34584 return __ret;
34585 }
34586 #endif
34587
34588 #ifdef __LITTLE_ENDIAN__
34589 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
34590 poly8x8_t __ret;
34591 __ret = (poly8x8_t)(__p0);
34592 return __ret;
34593 }
34594 #else
34595 __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
34596 poly8x8_t __ret;
34597 __ret = (poly8x8_t)(__p0);
34598 return __ret;
34599 }
34600 #endif
34601
34602 #ifdef __LITTLE_ENDIAN__
34603 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
34604 poly8x8_t __ret;
34605 __ret = (poly8x8_t)(__p0);
34606 return __ret;
34607 }
34608 #else
34609 __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
34610 poly8x8_t __ret;
34611 __ret = (poly8x8_t)(__p0);
34612 return __ret;
34613 }
34614 #endif
34615
34616 #ifdef __LITTLE_ENDIAN__
34617 __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
34618 poly64x1_t __ret;
34619 __ret = (poly64x1_t)(__p0);
34620 return __ret;
34621 }
34622 #else
34623 __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
34624 poly64x1_t __ret;
34625 __ret = (poly64x1_t)(__p0);
34626 return __ret;
34627 }
34628 #endif
34629
34630 #ifdef __LITTLE_ENDIAN__
34631 __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
34632 poly64x1_t __ret;
34633 __ret = (poly64x1_t)(__p0);
34634 return __ret;
34635 }
34636 #else
34637 __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
34638 poly64x1_t __ret;
34639 __ret = (poly64x1_t)(__p0);
34640 return __ret;
34641 }
34642 #endif
34643
34644 #ifdef __LITTLE_ENDIAN__
34645 __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
34646 poly64x1_t __ret;
34647 __ret = (poly64x1_t)(__p0);
34648 return __ret;
34649 }
34650 #else
34651 __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
34652 poly64x1_t __ret;
34653 __ret = (poly64x1_t)(__p0);
34654 return __ret;
34655 }
34656 #endif
34657
34658 #ifdef __LITTLE_ENDIAN__
34659 __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
34660 poly64x1_t __ret;
34661 __ret = (poly64x1_t)(__p0);
34662 return __ret;
34663 }
34664 #else
34665 __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
34666 poly64x1_t __ret;
34667 __ret = (poly64x1_t)(__p0);
34668 return __ret;
34669 }
34670 #endif
34671
34672 #ifdef __LITTLE_ENDIAN__
34673 __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
34674 poly64x1_t __ret;
34675 __ret = (poly64x1_t)(__p0);
34676 return __ret;
34677 }
34678 #else
34679 __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
34680 poly64x1_t __ret;
34681 __ret = (poly64x1_t)(__p0);
34682 return __ret;
34683 }
34684 #endif
34685
34686 #ifdef __LITTLE_ENDIAN__
34687 __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
34688 poly64x1_t __ret;
34689 __ret = (poly64x1_t)(__p0);
34690 return __ret;
34691 }
34692 #else
34693 __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
34694 poly64x1_t __ret;
34695 __ret = (poly64x1_t)(__p0);
34696 return __ret;
34697 }
34698 #endif
34699
34700 #ifdef __LITTLE_ENDIAN__
34701 __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
34702 poly64x1_t __ret;
34703 __ret = (poly64x1_t)(__p0);
34704 return __ret;
34705 }
34706 #else
34707 __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
34708 poly64x1_t __ret;
34709 __ret = (poly64x1_t)(__p0);
34710 return __ret;
34711 }
34712 #endif
34713
34714 #ifdef __LITTLE_ENDIAN__
34715 __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
34716 poly64x1_t __ret;
34717 __ret = (poly64x1_t)(__p0);
34718 return __ret;
34719 }
34720 #else
34721 __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
34722 poly64x1_t __ret;
34723 __ret = (poly64x1_t)(__p0);
34724 return __ret;
34725 }
34726 #endif
34727
34728 #ifdef __LITTLE_ENDIAN__
34729 __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
34730 poly64x1_t __ret;
34731 __ret = (poly64x1_t)(__p0);
34732 return __ret;
34733 }
34734 #else
34735 __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
34736 poly64x1_t __ret;
34737 __ret = (poly64x1_t)(__p0);
34738 return __ret;
34739 }
34740 #endif
34741
34742 #ifdef __LITTLE_ENDIAN__
34743 __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
34744 poly64x1_t __ret;
34745 __ret = (poly64x1_t)(__p0);
34746 return __ret;
34747 }
34748 #else
34749 __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
34750 poly64x1_t __ret;
34751 __ret = (poly64x1_t)(__p0);
34752 return __ret;
34753 }
34754 #endif
34755
34756 #ifdef __LITTLE_ENDIAN__
34757 __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
34758 poly64x1_t __ret;
34759 __ret = (poly64x1_t)(__p0);
34760 return __ret;
34761 }
34762 #else
34763 __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
34764 poly64x1_t __ret;
34765 __ret = (poly64x1_t)(__p0);
34766 return __ret;
34767 }
34768 #endif
34769
34770 #ifdef __LITTLE_ENDIAN__
34771 __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
34772 poly64x1_t __ret;
34773 __ret = (poly64x1_t)(__p0);
34774 return __ret;
34775 }
34776 #else
34777 __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
34778 poly64x1_t __ret;
34779 __ret = (poly64x1_t)(__p0);
34780 return __ret;
34781 }
34782 #endif
34783
34784 #ifdef __LITTLE_ENDIAN__
34785 __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
34786 poly64x1_t __ret;
34787 __ret = (poly64x1_t)(__p0);
34788 return __ret;
34789 }
34790 #else
34791 __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
34792 poly64x1_t __ret;
34793 __ret = (poly64x1_t)(__p0);
34794 return __ret;
34795 }
34796 #endif
34797
34798 #ifdef __LITTLE_ENDIAN__
34799 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
34800 poly16x4_t __ret;
34801 __ret = (poly16x4_t)(__p0);
34802 return __ret;
34803 }
34804 #else
34805 __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
34806 poly16x4_t __ret;
34807 __ret = (poly16x4_t)(__p0);
34808 return __ret;
34809 }
34810 #endif
34811
34812 #ifdef __LITTLE_ENDIAN__
34813 __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
34814 poly16x4_t __ret;
34815 __ret = (poly16x4_t)(__p0);
34816 return __ret;
34817 }
34818 #else
34819 __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
34820 poly16x4_t __ret;
34821 __ret = (poly16x4_t)(__p0);
34822 return __ret;
34823 }
34824 #endif
34825
34826 #ifdef __LITTLE_ENDIAN__
34827 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
34828 poly16x4_t __ret;
34829 __ret = (poly16x4_t)(__p0);
34830 return __ret;
34831 }
34832 #else
34833 __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
34834 poly16x4_t __ret;
34835 __ret = (poly16x4_t)(__p0);
34836 return __ret;
34837 }
34838 #endif
34839
34840 #ifdef __LITTLE_ENDIAN__
34841 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
34842 poly16x4_t __ret;
34843 __ret = (poly16x4_t)(__p0);
34844 return __ret;
34845 }
34846 #else
34847 __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
34848 poly16x4_t __ret;
34849 __ret = (poly16x4_t)(__p0);
34850 return __ret;
34851 }
34852 #endif
34853
34854 #ifdef __LITTLE_ENDIAN__
34855 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
34856 poly16x4_t __ret;
34857 __ret = (poly16x4_t)(__p0);
34858 return __ret;
34859 }
34860 #else
34861 __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
34862 poly16x4_t __ret;
34863 __ret = (poly16x4_t)(__p0);
34864 return __ret;
34865 }
34866 #endif
34867
34868 #ifdef __LITTLE_ENDIAN__
34869 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
34870 poly16x4_t __ret;
34871 __ret = (poly16x4_t)(__p0);
34872 return __ret;
34873 }
34874 #else
34875 __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
34876 poly16x4_t __ret;
34877 __ret = (poly16x4_t)(__p0);
34878 return __ret;
34879 }
34880 #endif
34881
34882 #ifdef __LITTLE_ENDIAN__
34883 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
34884 poly16x4_t __ret;
34885 __ret = (poly16x4_t)(__p0);
34886 return __ret;
34887 }
34888 #else
34889 __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
34890 poly16x4_t __ret;
34891 __ret = (poly16x4_t)(__p0);
34892 return __ret;
34893 }
34894 #endif
34895
34896 #ifdef __LITTLE_ENDIAN__
34897 __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
34898 poly16x4_t __ret;
34899 __ret = (poly16x4_t)(__p0);
34900 return __ret;
34901 }
34902 #else
34903 __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
34904 poly16x4_t __ret;
34905 __ret = (poly16x4_t)(__p0);
34906 return __ret;
34907 }
34908 #endif
34909
34910 #ifdef __LITTLE_ENDIAN__
34911 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
34912 poly16x4_t __ret;
34913 __ret = (poly16x4_t)(__p0);
34914 return __ret;
34915 }
34916 #else
34917 __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
34918 poly16x4_t __ret;
34919 __ret = (poly16x4_t)(__p0);
34920 return __ret;
34921 }
34922 #endif
34923
34924 #ifdef __LITTLE_ENDIAN__
34925 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
34926 poly16x4_t __ret;
34927 __ret = (poly16x4_t)(__p0);
34928 return __ret;
34929 }
34930 #else
34931 __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
34932 poly16x4_t __ret;
34933 __ret = (poly16x4_t)(__p0);
34934 return __ret;
34935 }
34936 #endif
34937
34938 #ifdef __LITTLE_ENDIAN__
34939 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
34940 poly16x4_t __ret;
34941 __ret = (poly16x4_t)(__p0);
34942 return __ret;
34943 }
34944 #else
34945 __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
34946 poly16x4_t __ret;
34947 __ret = (poly16x4_t)(__p0);
34948 return __ret;
34949 }
34950 #endif
34951
34952 #ifdef __LITTLE_ENDIAN__
34953 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
34954 poly16x4_t __ret;
34955 __ret = (poly16x4_t)(__p0);
34956 return __ret;
34957 }
34958 #else
34959 __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
34960 poly16x4_t __ret;
34961 __ret = (poly16x4_t)(__p0);
34962 return __ret;
34963 }
34964 #endif
34965
34966 #ifdef __LITTLE_ENDIAN__
34967 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
34968 poly16x4_t __ret;
34969 __ret = (poly16x4_t)(__p0);
34970 return __ret;
34971 }
34972 #else
34973 __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
34974 poly16x4_t __ret;
34975 __ret = (poly16x4_t)(__p0);
34976 return __ret;
34977 }
34978 #endif
34979
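/*
 * Note (editorial, not part of the machine-generated header): the
 * vreinterpret_* intrinsics above operate on 64-bit D-register vectors,
 * while the vreinterpretq_* intrinsics that follow operate on 128-bit
 * Q-register vectors. In every case the cast re-types the register bits
 * unchanged; no value conversion is performed.
 */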
34980 #ifdef __LITTLE_ENDIAN__
34981 __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
34982 poly8x16_t __ret;
34983 __ret = (poly8x16_t)(__p0);
34984 return __ret;
34985 }
34986 #else
34987 __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
34988 poly8x16_t __ret;
34989 __ret = (poly8x16_t)(__p0);
34990 return __ret;
34991 }
34992 #endif
34993
34994 #ifdef __LITTLE_ENDIAN__
34995 __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
34996 poly8x16_t __ret;
34997 __ret = (poly8x16_t)(__p0);
34998 return __ret;
34999 }
35000 #else
35001 __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
35002 poly8x16_t __ret;
35003 __ret = (poly8x16_t)(__p0);
35004 return __ret;
35005 }
35006 #endif
35007
35008 #ifdef __LITTLE_ENDIAN__
35009 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
35010 poly8x16_t __ret;
35011 __ret = (poly8x16_t)(__p0);
35012 return __ret;
35013 }
35014 #else
35015 __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
35016 poly8x16_t __ret;
35017 __ret = (poly8x16_t)(__p0);
35018 return __ret;
35019 }
35020 #endif
35021
35022 #ifdef __LITTLE_ENDIAN__
35023 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
35024 poly8x16_t __ret;
35025 __ret = (poly8x16_t)(__p0);
35026 return __ret;
35027 }
35028 #else
35029 __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
35030 poly8x16_t __ret;
35031 __ret = (poly8x16_t)(__p0);
35032 return __ret;
35033 }
35034 #endif
35035
35036 #ifdef __LITTLE_ENDIAN__
35037 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
35038 poly8x16_t __ret;
35039 __ret = (poly8x16_t)(__p0);
35040 return __ret;
35041 }
35042 #else
35043 __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
35044 poly8x16_t __ret;
35045 __ret = (poly8x16_t)(__p0);
35046 return __ret;
35047 }
35048 #endif
35049
35050 #ifdef __LITTLE_ENDIAN__
35051 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
35052 poly8x16_t __ret;
35053 __ret = (poly8x16_t)(__p0);
35054 return __ret;
35055 }
35056 #else
35057 __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
35058 poly8x16_t __ret;
35059 __ret = (poly8x16_t)(__p0);
35060 return __ret;
35061 }
35062 #endif
35063
35064 #ifdef __LITTLE_ENDIAN__
35065 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
35066 poly8x16_t __ret;
35067 __ret = (poly8x16_t)(__p0);
35068 return __ret;
35069 }
35070 #else
35071 __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
35072 poly8x16_t __ret;
35073 __ret = (poly8x16_t)(__p0);
35074 return __ret;
35075 }
35076 #endif
35077
35078 #ifdef __LITTLE_ENDIAN__
35079 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
35080 poly8x16_t __ret;
35081 __ret = (poly8x16_t)(__p0);
35082 return __ret;
35083 }
35084 #else
35085 __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
35086 poly8x16_t __ret;
35087 __ret = (poly8x16_t)(__p0);
35088 return __ret;
35089 }
35090 #endif
35091
35092 #ifdef __LITTLE_ENDIAN__
35093 __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
35094 poly8x16_t __ret;
35095 __ret = (poly8x16_t)(__p0);
35096 return __ret;
35097 }
35098 #else
35099 __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
35100 poly8x16_t __ret;
35101 __ret = (poly8x16_t)(__p0);
35102 return __ret;
35103 }
35104 #endif
35105
35106 #ifdef __LITTLE_ENDIAN__
35107 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
35108 poly8x16_t __ret;
35109 __ret = (poly8x16_t)(__p0);
35110 return __ret;
35111 }
35112 #else
35113 __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
35114 poly8x16_t __ret;
35115 __ret = (poly8x16_t)(__p0);
35116 return __ret;
35117 }
35118 #endif
35119
35120 #ifdef __LITTLE_ENDIAN__
35121 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
35122 poly8x16_t __ret;
35123 __ret = (poly8x16_t)(__p0);
35124 return __ret;
35125 }
35126 #else
35127 __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
35128 poly8x16_t __ret;
35129 __ret = (poly8x16_t)(__p0);
35130 return __ret;
35131 }
35132 #endif
35133
35134 #ifdef __LITTLE_ENDIAN__
35135 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
35136 poly8x16_t __ret;
35137 __ret = (poly8x16_t)(__p0);
35138 return __ret;
35139 }
35140 #else
35141 __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
35142 poly8x16_t __ret;
35143 __ret = (poly8x16_t)(__p0);
35144 return __ret;
35145 }
35146 #endif
35147
35148 #ifdef __LITTLE_ENDIAN__
35149 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
35150 poly8x16_t __ret;
35151 __ret = (poly8x16_t)(__p0);
35152 return __ret;
35153 }
35154 #else
35155 __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
35156 poly8x16_t __ret;
35157 __ret = (poly8x16_t)(__p0);
35158 return __ret;
35159 }
35160 #endif
35161
35162 #ifdef __LITTLE_ENDIAN__
35163 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
35164 poly8x16_t __ret;
35165 __ret = (poly8x16_t)(__p0);
35166 return __ret;
35167 }
35168 #else
35169 __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
35170 poly8x16_t __ret;
35171 __ret = (poly8x16_t)(__p0);
35172 return __ret;
35173 }
35174 #endif
35175
35176 #ifdef __LITTLE_ENDIAN__
35177 __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
35178 poly128_t __ret;
35179 __ret = (poly128_t)(__p0);
35180 return __ret;
35181 }
35182 #else
35183 __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
35184 poly128_t __ret;
35185 __ret = (poly128_t)(__p0);
35186 return __ret;
35187 }
35188 #endif
35189
35190 #ifdef __LITTLE_ENDIAN__
35191 __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
35192 poly128_t __ret;
35193 __ret = (poly128_t)(__p0);
35194 return __ret;
35195 }
35196 #else
35197 __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
35198 poly128_t __ret;
35199 __ret = (poly128_t)(__p0);
35200 return __ret;
35201 }
35202 #endif
35203
35204 #ifdef __LITTLE_ENDIAN__
35205 __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
35206 poly128_t __ret;
35207 __ret = (poly128_t)(__p0);
35208 return __ret;
35209 }
35210 #else
35211 __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
35212 poly128_t __ret;
35213 __ret = (poly128_t)(__p0);
35214 return __ret;
35215 }
35216 #endif
35217
35218 #ifdef __LITTLE_ENDIAN__
35219 __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
35220 poly128_t __ret;
35221 __ret = (poly128_t)(__p0);
35222 return __ret;
35223 }
35224 #else
35225 __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
35226 poly128_t __ret;
35227 __ret = (poly128_t)(__p0);
35228 return __ret;
35229 }
35230 #endif
35231
35232 #ifdef __LITTLE_ENDIAN__
35233 __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
35234 poly128_t __ret;
35235 __ret = (poly128_t)(__p0);
35236 return __ret;
35237 }
35238 #else
35239 __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
35240 poly128_t __ret;
35241 __ret = (poly128_t)(__p0);
35242 return __ret;
35243 }
35244 #endif
35245
35246 #ifdef __LITTLE_ENDIAN__
35247 __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
35248 poly128_t __ret;
35249 __ret = (poly128_t)(__p0);
35250 return __ret;
35251 }
35252 #else
35253 __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
35254 poly128_t __ret;
35255 __ret = (poly128_t)(__p0);
35256 return __ret;
35257 }
35258 #endif
35259
35260 #ifdef __LITTLE_ENDIAN__
35261 __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
35262 poly128_t __ret;
35263 __ret = (poly128_t)(__p0);
35264 return __ret;
35265 }
35266 #else
35267 __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
35268 poly128_t __ret;
35269 __ret = (poly128_t)(__p0);
35270 return __ret;
35271 }
35272 #endif
35273
35274 #ifdef __LITTLE_ENDIAN__
35275 __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
35276 poly128_t __ret;
35277 __ret = (poly128_t)(__p0);
35278 return __ret;
35279 }
35280 #else
35281 __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
35282 poly128_t __ret;
35283 __ret = (poly128_t)(__p0);
35284 return __ret;
35285 }
35286 #endif
35287
35288 #ifdef __LITTLE_ENDIAN__
35289 __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
35290 poly128_t __ret;
35291 __ret = (poly128_t)(__p0);
35292 return __ret;
35293 }
35294 #else
35295 __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
35296 poly128_t __ret;
35297 __ret = (poly128_t)(__p0);
35298 return __ret;
35299 }
35300 #endif
35301
35302 #ifdef __LITTLE_ENDIAN__
35303 __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
35304 poly128_t __ret;
35305 __ret = (poly128_t)(__p0);
35306 return __ret;
35307 }
35308 #else
35309 __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
35310 poly128_t __ret;
35311 __ret = (poly128_t)(__p0);
35312 return __ret;
35313 }
35314 #endif
35315
35316 #ifdef __LITTLE_ENDIAN__
35317 __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
35318 poly128_t __ret;
35319 __ret = (poly128_t)(__p0);
35320 return __ret;
35321 }
35322 #else
35323 __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
35324 poly128_t __ret;
35325 __ret = (poly128_t)(__p0);
35326 return __ret;
35327 }
35328 #endif
35329
35330 #ifdef __LITTLE_ENDIAN__
35331 __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
35332 poly128_t __ret;
35333 __ret = (poly128_t)(__p0);
35334 return __ret;
35335 }
35336 #else
35337 __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
35338 poly128_t __ret;
35339 __ret = (poly128_t)(__p0);
35340 return __ret;
35341 }
35342 #endif
35343
35344 #ifdef __LITTLE_ENDIAN__
35345 __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
35346 poly128_t __ret;
35347 __ret = (poly128_t)(__p0);
35348 return __ret;
35349 }
35350 #else
35351 __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
35352 poly128_t __ret;
35353 __ret = (poly128_t)(__p0);
35354 return __ret;
35355 }
35356 #endif
35357
35358 #ifdef __LITTLE_ENDIAN__
35359 __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
35360 poly128_t __ret;
35361 __ret = (poly128_t)(__p0);
35362 return __ret;
35363 }
35364 #else
35365 __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
35366 poly128_t __ret;
35367 __ret = (poly128_t)(__p0);
35368 return __ret;
35369 }
35370 #endif
35371
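/*
 * Note (editorial, not part of the machine-generated header): the
 * poly128_t reinterprets above view the whole 128-bit Q register as a
 * single polynomial value rather than a lane vector. poly128_t typically
 * shows up as the result of vmull_p64 / vmull_high_p64 (PMULL), e.g. in
 * carry-less multiplication for GHASH- or CRC-style code.
 */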
35372 #ifdef __LITTLE_ENDIAN__
35373 __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
35374 poly64x2_t __ret;
35375 __ret = (poly64x2_t)(__p0);
35376 return __ret;
35377 }
35378 #else
35379 __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
35380 poly64x2_t __ret;
35381 __ret = (poly64x2_t)(__p0);
35382 return __ret;
35383 }
35384 #endif
35385
35386 #ifdef __LITTLE_ENDIAN__
35387 __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
35388 poly64x2_t __ret;
35389 __ret = (poly64x2_t)(__p0);
35390 return __ret;
35391 }
35392 #else
35393 __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
35394 poly64x2_t __ret;
35395 __ret = (poly64x2_t)(__p0);
35396 return __ret;
35397 }
35398 #endif
35399
35400 #ifdef __LITTLE_ENDIAN__
35401 __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
35402 poly64x2_t __ret;
35403 __ret = (poly64x2_t)(__p0);
35404 return __ret;
35405 }
35406 #else
35407 __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
35408 poly64x2_t __ret;
35409 __ret = (poly64x2_t)(__p0);
35410 return __ret;
35411 }
35412 #endif
35413
35414 #ifdef __LITTLE_ENDIAN__
35415 __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
35416 poly64x2_t __ret;
35417 __ret = (poly64x2_t)(__p0);
35418 return __ret;
35419 }
35420 #else
35421 __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
35422 poly64x2_t __ret;
35423 __ret = (poly64x2_t)(__p0);
35424 return __ret;
35425 }
35426 #endif
35427
35428 #ifdef __LITTLE_ENDIAN__
35429 __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
35430 poly64x2_t __ret;
35431 __ret = (poly64x2_t)(__p0);
35432 return __ret;
35433 }
35434 #else
35435 __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
35436 poly64x2_t __ret;
35437 __ret = (poly64x2_t)(__p0);
35438 return __ret;
35439 }
35440 #endif
35441
35442 #ifdef __LITTLE_ENDIAN__
35443 __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
35444 poly64x2_t __ret;
35445 __ret = (poly64x2_t)(__p0);
35446 return __ret;
35447 }
35448 #else
35449 __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
35450 poly64x2_t __ret;
35451 __ret = (poly64x2_t)(__p0);
35452 return __ret;
35453 }
35454 #endif
35455
35456 #ifdef __LITTLE_ENDIAN__
35457 __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
35458 poly64x2_t __ret;
35459 __ret = (poly64x2_t)(__p0);
35460 return __ret;
35461 }
35462 #else
35463 __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
35464 poly64x2_t __ret;
35465 __ret = (poly64x2_t)(__p0);
35466 return __ret;
35467 }
35468 #endif
35469
35470 #ifdef __LITTLE_ENDIAN__
35471 __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
35472 poly64x2_t __ret;
35473 __ret = (poly64x2_t)(__p0);
35474 return __ret;
35475 }
35476 #else
35477 __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
35478 poly64x2_t __ret;
35479 __ret = (poly64x2_t)(__p0);
35480 return __ret;
35481 }
35482 #endif
35483
35484 #ifdef __LITTLE_ENDIAN__
35485 __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
35486 poly64x2_t __ret;
35487 __ret = (poly64x2_t)(__p0);
35488 return __ret;
35489 }
35490 #else
35491 __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
35492 poly64x2_t __ret;
35493 __ret = (poly64x2_t)(__p0);
35494 return __ret;
35495 }
35496 #endif
35497
35498 #ifdef __LITTLE_ENDIAN__
35499 __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
35500 poly64x2_t __ret;
35501 __ret = (poly64x2_t)(__p0);
35502 return __ret;
35503 }
35504 #else
35505 __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
35506 poly64x2_t __ret;
35507 __ret = (poly64x2_t)(__p0);
35508 return __ret;
35509 }
35510 #endif
35511
35512 #ifdef __LITTLE_ENDIAN__
35513 __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
35514 poly64x2_t __ret;
35515 __ret = (poly64x2_t)(__p0);
35516 return __ret;
35517 }
35518 #else
35519 __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
35520 poly64x2_t __ret;
35521 __ret = (poly64x2_t)(__p0);
35522 return __ret;
35523 }
35524 #endif
35525
35526 #ifdef __LITTLE_ENDIAN__
35527 __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
35528 poly64x2_t __ret;
35529 __ret = (poly64x2_t)(__p0);
35530 return __ret;
35531 }
35532 #else
35533 __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
35534 poly64x2_t __ret;
35535 __ret = (poly64x2_t)(__p0);
35536 return __ret;
35537 }
35538 #endif
35539
35540 #ifdef __LITTLE_ENDIAN__
35541 __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
35542 poly64x2_t __ret;
35543 __ret = (poly64x2_t)(__p0);
35544 return __ret;
35545 }
35546 #else
35547 __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
35548 poly64x2_t __ret;
35549 __ret = (poly64x2_t)(__p0);
35550 return __ret;
35551 }
35552 #endif
35553
35554 #ifdef __LITTLE_ENDIAN__
35555 __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
35556 poly64x2_t __ret;
35557 __ret = (poly64x2_t)(__p0);
35558 return __ret;
35559 }
35560 #else
35561 __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
35562 poly64x2_t __ret;
35563 __ret = (poly64x2_t)(__p0);
35564 return __ret;
35565 }
35566 #endif
35567
35568 #ifdef __LITTLE_ENDIAN__
35569 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
35570 poly16x8_t __ret;
35571 __ret = (poly16x8_t)(__p0);
35572 return __ret;
35573 }
35574 #else
35575 __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
35576 poly16x8_t __ret;
35577 __ret = (poly16x8_t)(__p0);
35578 return __ret;
35579 }
35580 #endif
35581
35582 #ifdef __LITTLE_ENDIAN__
35583 __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
35584 poly16x8_t __ret;
35585 __ret = (poly16x8_t)(__p0);
35586 return __ret;
35587 }
35588 #else
35589 __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
35590 poly16x8_t __ret;
35591 __ret = (poly16x8_t)(__p0);
35592 return __ret;
35593 }
35594 #endif
35595
35596 #ifdef __LITTLE_ENDIAN__
35597 __ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
35598 poly16x8_t __ret;
35599 __ret = (poly16x8_t)(__p0);
35600 return __ret;
35601 }
35602 #else
35603 __ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
35604 poly16x8_t __ret;
35605 __ret = (poly16x8_t)(__p0);
35606 return __ret;
35607 }
35608 #endif
35609
35610 #ifdef __LITTLE_ENDIAN__
35611 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
35612 poly16x8_t __ret;
35613 __ret = (poly16x8_t)(__p0);
35614 return __ret;
35615 }
35616 #else
35617 __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
35618 poly16x8_t __ret;
35619 __ret = (poly16x8_t)(__p0);
35620 return __ret;
35621 }
35622 #endif
35623
35624 #ifdef __LITTLE_ENDIAN__
35625 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
35626 poly16x8_t __ret;
35627 __ret = (poly16x8_t)(__p0);
35628 return __ret;
35629 }
35630 #else
35631 __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
35632 poly16x8_t __ret;
35633 __ret = (poly16x8_t)(__p0);
35634 return __ret;
35635 }
35636 #endif
35637
35638 #ifdef __LITTLE_ENDIAN__
35639 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
35640 poly16x8_t __ret;
35641 __ret = (poly16x8_t)(__p0);
35642 return __ret;
35643 }
35644 #else
35645 __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
35646 poly16x8_t __ret;
35647 __ret = (poly16x8_t)(__p0);
35648 return __ret;
35649 }
35650 #endif
35651
35652 #ifdef __LITTLE_ENDIAN__
35653 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
35654 poly16x8_t __ret;
35655 __ret = (poly16x8_t)(__p0);
35656 return __ret;
35657 }
35658 #else
35659 __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
35660 poly16x8_t __ret;
35661 __ret = (poly16x8_t)(__p0);
35662 return __ret;
35663 }
35664 #endif
35665
35666 #ifdef __LITTLE_ENDIAN__
35667 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
35668 poly16x8_t __ret;
35669 __ret = (poly16x8_t)(__p0);
35670 return __ret;
35671 }
35672 #else
35673 __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
35674 poly16x8_t __ret;
35675 __ret = (poly16x8_t)(__p0);
35676 return __ret;
35677 }
35678 #endif
35679
35680 #ifdef __LITTLE_ENDIAN__
35681 __ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
35682 poly16x8_t __ret;
35683 __ret = (poly16x8_t)(__p0);
35684 return __ret;
35685 }
35686 #else
35687 __ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
35688 poly16x8_t __ret;
35689 __ret = (poly16x8_t)(__p0);
35690 return __ret;
35691 }
35692 #endif
35693
35694 #ifdef __LITTLE_ENDIAN__
35695 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
35696 poly16x8_t __ret;
35697 __ret = (poly16x8_t)(__p0);
35698 return __ret;
35699 }
35700 #else
35701 __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
35702 poly16x8_t __ret;
35703 __ret = (poly16x8_t)(__p0);
35704 return __ret;
35705 }
35706 #endif
35707
35708 #ifdef __LITTLE_ENDIAN__
35709 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
35710 poly16x8_t __ret;
35711 __ret = (poly16x8_t)(__p0);
35712 return __ret;
35713 }
35714 #else
35715 __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
35716 poly16x8_t __ret;
35717 __ret = (poly16x8_t)(__p0);
35718 return __ret;
35719 }
35720 #endif
35721
35722 #ifdef __LITTLE_ENDIAN__
35723 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
35724 poly16x8_t __ret;
35725 __ret = (poly16x8_t)(__p0);
35726 return __ret;
35727 }
35728 #else
35729 __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
35730 poly16x8_t __ret;
35731 __ret = (poly16x8_t)(__p0);
35732 return __ret;
35733 }
35734 #endif
35735
35736 #ifdef __LITTLE_ENDIAN__
35737 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
35738 poly16x8_t __ret;
35739 __ret = (poly16x8_t)(__p0);
35740 return __ret;
35741 }
35742 #else
35743 __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
35744 poly16x8_t __ret;
35745 __ret = (poly16x8_t)(__p0);
35746 return __ret;
35747 }
35748 #endif
35749
35750 #ifdef __LITTLE_ENDIAN__
35751 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
35752 poly16x8_t __ret;
35753 __ret = (poly16x8_t)(__p0);
35754 return __ret;
35755 }
35756 #else
35757 __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
35758 poly16x8_t __ret;
35759 __ret = (poly16x8_t)(__p0);
35760 return __ret;
35761 }
35762 #endif
35763
35764 #ifdef __LITTLE_ENDIAN__
35765 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
35766 uint8x16_t __ret;
35767 __ret = (uint8x16_t)(__p0);
35768 return __ret;
35769 }
35770 #else
35771 __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
35772 uint8x16_t __ret;
35773 __ret = (uint8x16_t)(__p0);
35774 return __ret;
35775 }
35776 #endif
35777
35778 #ifdef __LITTLE_ENDIAN__
35779 __ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
35780 uint8x16_t __ret;
35781 __ret = (uint8x16_t)(__p0);
35782 return __ret;
35783 }
35784 #else
35785 __ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
35786 uint8x16_t __ret;
35787 __ret = (uint8x16_t)(__p0);
35788 return __ret;
35789 }
35790 #endif
35791
35792 #ifdef __LITTLE_ENDIAN__
35793 __ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
35794 uint8x16_t __ret;
35795 __ret = (uint8x16_t)(__p0);
35796 return __ret;
35797 }
35798 #else
35799 __ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
35800 uint8x16_t __ret;
35801 __ret = (uint8x16_t)(__p0);
35802 return __ret;
35803 }
35804 #endif
35805
35806 #ifdef __LITTLE_ENDIAN__
35807 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
35808 uint8x16_t __ret;
35809 __ret = (uint8x16_t)(__p0);
35810 return __ret;
35811 }
35812 #else
35813 __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
35814 uint8x16_t __ret;
35815 __ret = (uint8x16_t)(__p0);
35816 return __ret;
35817 }
35818 #endif
35819
35820 #ifdef __LITTLE_ENDIAN__
35821 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
35822 uint8x16_t __ret;
35823 __ret = (uint8x16_t)(__p0);
35824 return __ret;
35825 }
35826 #else
35827 __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
35828 uint8x16_t __ret;
35829 __ret = (uint8x16_t)(__p0);
35830 return __ret;
35831 }
35832 #endif
35833
35834 #ifdef __LITTLE_ENDIAN__
35835 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
35836 uint8x16_t __ret;
35837 __ret = (uint8x16_t)(__p0);
35838 return __ret;
35839 }
35840 #else
35841 __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
35842 uint8x16_t __ret;
35843 __ret = (uint8x16_t)(__p0);
35844 return __ret;
35845 }
35846 #endif
35847
35848 #ifdef __LITTLE_ENDIAN__
35849 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
35850 uint8x16_t __ret;
35851 __ret = (uint8x16_t)(__p0);
35852 return __ret;
35853 }
35854 #else
35855 __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
35856 uint8x16_t __ret;
35857 __ret = (uint8x16_t)(__p0);
35858 return __ret;
35859 }
35860 #endif
35861
35862 #ifdef __LITTLE_ENDIAN__
35863 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
35864 uint8x16_t __ret;
35865 __ret = (uint8x16_t)(__p0);
35866 return __ret;
35867 }
35868 #else
35869 __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
35870 uint8x16_t __ret;
35871 __ret = (uint8x16_t)(__p0);
35872 return __ret;
35873 }
35874 #endif
35875
35876 #ifdef __LITTLE_ENDIAN__
35877 __ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
35878 uint8x16_t __ret;
35879 __ret = (uint8x16_t)(__p0);
35880 return __ret;
35881 }
35882 #else
35883 __ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
35884 uint8x16_t __ret;
35885 __ret = (uint8x16_t)(__p0);
35886 return __ret;
35887 }
35888 #endif
35889
35890 #ifdef __LITTLE_ENDIAN__
35891 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
35892 uint8x16_t __ret;
35893 __ret = (uint8x16_t)(__p0);
35894 return __ret;
35895 }
35896 #else
35897 __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
35898 uint8x16_t __ret;
35899 __ret = (uint8x16_t)(__p0);
35900 return __ret;
35901 }
35902 #endif
35903
35904 #ifdef __LITTLE_ENDIAN__
35905 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
35906 uint8x16_t __ret;
35907 __ret = (uint8x16_t)(__p0);
35908 return __ret;
35909 }
35910 #else
35911 __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
35912 uint8x16_t __ret;
35913 __ret = (uint8x16_t)(__p0);
35914 return __ret;
35915 }
35916 #endif
35917
35918 #ifdef __LITTLE_ENDIAN__
35919 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
35920 uint8x16_t __ret;
35921 __ret = (uint8x16_t)(__p0);
35922 return __ret;
35923 }
35924 #else
35925 __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
35926 uint8x16_t __ret;
35927 __ret = (uint8x16_t)(__p0);
35928 return __ret;
35929 }
35930 #endif
35931
35932 #ifdef __LITTLE_ENDIAN__
35933 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
35934 uint8x16_t __ret;
35935 __ret = (uint8x16_t)(__p0);
35936 return __ret;
35937 }
35938 #else
35939 __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
35940 uint8x16_t __ret;
35941 __ret = (uint8x16_t)(__p0);
35942 return __ret;
35943 }
35944 #endif
35945
35946 #ifdef __LITTLE_ENDIAN__
35947 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
35948 uint8x16_t __ret;
35949 __ret = (uint8x16_t)(__p0);
35950 return __ret;
35951 }
35952 #else
35953 __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
35954 uint8x16_t __ret;
35955 __ret = (uint8x16_t)(__p0);
35956 return __ret;
35957 }
35958 #endif
35959
35960 #ifdef __LITTLE_ENDIAN__
35961 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
35962 uint32x4_t __ret;
35963 __ret = (uint32x4_t)(__p0);
35964 return __ret;
35965 }
35966 #else
35967 __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
35968 uint32x4_t __ret;
35969 __ret = (uint32x4_t)(__p0);
35970 return __ret;
35971 }
35972 #endif
35973
35974 #ifdef __LITTLE_ENDIAN__
35975 __ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
35976 uint32x4_t __ret;
35977 __ret = (uint32x4_t)(__p0);
35978 return __ret;
35979 }
35980 #else
35981 __ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
35982 uint32x4_t __ret;
35983 __ret = (uint32x4_t)(__p0);
35984 return __ret;
35985 }
35986 #endif
35987
35988 #ifdef __LITTLE_ENDIAN__
35989 __ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
35990 uint32x4_t __ret;
35991 __ret = (uint32x4_t)(__p0);
35992 return __ret;
35993 }
35994 #else
35995 __ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
35996 uint32x4_t __ret;
35997 __ret = (uint32x4_t)(__p0);
35998 return __ret;
35999 }
36000 #endif
36001
36002 #ifdef __LITTLE_ENDIAN__
36003 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
36004 uint32x4_t __ret;
36005 __ret = (uint32x4_t)(__p0);
36006 return __ret;
36007 }
36008 #else
36009 __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
36010 uint32x4_t __ret;
36011 __ret = (uint32x4_t)(__p0);
36012 return __ret;
36013 }
36014 #endif
36015
36016 #ifdef __LITTLE_ENDIAN__
36017 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
36018 uint32x4_t __ret;
36019 __ret = (uint32x4_t)(__p0);
36020 return __ret;
36021 }
36022 #else
36023 __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
36024 uint32x4_t __ret;
36025 __ret = (uint32x4_t)(__p0);
36026 return __ret;
36027 }
36028 #endif
36029
36030 #ifdef __LITTLE_ENDIAN__
36031 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
36032 uint32x4_t __ret;
36033 __ret = (uint32x4_t)(__p0);
36034 return __ret;
36035 }
36036 #else
36037 __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
36038 uint32x4_t __ret;
36039 __ret = (uint32x4_t)(__p0);
36040 return __ret;
36041 }
36042 #endif
36043
36044 #ifdef __LITTLE_ENDIAN__
36045 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
36046 uint32x4_t __ret;
36047 __ret = (uint32x4_t)(__p0);
36048 return __ret;
36049 }
36050 #else
36051 __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
36052 uint32x4_t __ret;
36053 __ret = (uint32x4_t)(__p0);
36054 return __ret;
36055 }
36056 #endif
36057
36058 #ifdef __LITTLE_ENDIAN__
36059 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
36060 uint32x4_t __ret;
36061 __ret = (uint32x4_t)(__p0);
36062 return __ret;
36063 }
36064 #else
36065 __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
36066 uint32x4_t __ret;
36067 __ret = (uint32x4_t)(__p0);
36068 return __ret;
36069 }
36070 #endif
36071
36072 #ifdef __LITTLE_ENDIAN__
36073 __ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
36074 uint32x4_t __ret;
36075 __ret = (uint32x4_t)(__p0);
36076 return __ret;
36077 }
36078 #else
36079 __ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
36080 uint32x4_t __ret;
36081 __ret = (uint32x4_t)(__p0);
36082 return __ret;
36083 }
36084 #endif
36085
36086 #ifdef __LITTLE_ENDIAN__
36087 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
36088 uint32x4_t __ret;
36089 __ret = (uint32x4_t)(__p0);
36090 return __ret;
36091 }
36092 #else
36093 __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
36094 uint32x4_t __ret;
36095 __ret = (uint32x4_t)(__p0);
36096 return __ret;
36097 }
36098 #endif
36099
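/*
 * Illustrative usage (editorial addition, not part of the generated
 * header): because vreinterpretq_u32_f32 above only re-types the bits,
 * it can be used to inspect IEEE-754 encodings directly, e.g.:
 *
 *   float32x4_t ones = vdupq_n_f32(1.0f);
 *   uint32x4_t bits = vreinterpretq_u32_f32(ones);
 *   // every lane of 'bits' now holds 0x3F800000
 */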
36100 #ifdef __LITTLE_ENDIAN__
36101 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
36102 uint32x4_t __ret;
36103 __ret = (uint32x4_t)(__p0);
36104 return __ret;
36105 }
36106 #else
36107 __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
36108 uint32x4_t __ret;
36109 __ret = (uint32x4_t)(__p0);
36110 return __ret;
36111 }
36112 #endif
36113
36114 #ifdef __LITTLE_ENDIAN__
36115 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
36116 uint32x4_t __ret;
36117 __ret = (uint32x4_t)(__p0);
36118 return __ret;
36119 }
36120 #else
36121 __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
36122 uint32x4_t __ret;
36123 __ret = (uint32x4_t)(__p0);
36124 return __ret;
36125 }
36126 #endif
36127
36128 #ifdef __LITTLE_ENDIAN__
36129 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
36130 uint32x4_t __ret;
36131 __ret = (uint32x4_t)(__p0);
36132 return __ret;
36133 }
36134 #else
36135 __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
36136 uint32x4_t __ret;
36137 __ret = (uint32x4_t)(__p0);
36138 return __ret;
36139 }
36140 #endif
36141
36142 #ifdef __LITTLE_ENDIAN__
36143 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
36144 uint32x4_t __ret;
36145 __ret = (uint32x4_t)(__p0);
36146 return __ret;
36147 }
36148 #else
36149 __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
36150 uint32x4_t __ret;
36151 __ret = (uint32x4_t)(__p0);
36152 return __ret;
36153 }
36154 #endif
36155
36156 #ifdef __LITTLE_ENDIAN__
36157 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
36158 uint64x2_t __ret;
36159 __ret = (uint64x2_t)(__p0);
36160 return __ret;
36161 }
36162 #else
36163 __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
36164 uint64x2_t __ret;
36165 __ret = (uint64x2_t)(__p0);
36166 return __ret;
36167 }
36168 #endif
36169
36170 #ifdef __LITTLE_ENDIAN__
36171 __ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
36172 uint64x2_t __ret;
36173 __ret = (uint64x2_t)(__p0);
36174 return __ret;
36175 }
36176 #else
36177 __ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
36178 uint64x2_t __ret;
36179 __ret = (uint64x2_t)(__p0);
36180 return __ret;
36181 }
36182 #endif
36183
36184 #ifdef __LITTLE_ENDIAN__
36185 __ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
36186 uint64x2_t __ret;
36187 __ret = (uint64x2_t)(__p0);
36188 return __ret;
36189 }
36190 #else
36191 __ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
36192 uint64x2_t __ret;
36193 __ret = (uint64x2_t)(__p0);
36194 return __ret;
36195 }
36196 #endif
36197
36198 #ifdef __LITTLE_ENDIAN__
36199 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
36200 uint64x2_t __ret;
36201 __ret = (uint64x2_t)(__p0);
36202 return __ret;
36203 }
36204 #else
36205 __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
36206 uint64x2_t __ret;
36207 __ret = (uint64x2_t)(__p0);
36208 return __ret;
36209 }
36210 #endif
36211
36212 #ifdef __LITTLE_ENDIAN__
36213 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
36214 uint64x2_t __ret;
36215 __ret = (uint64x2_t)(__p0);
36216 return __ret;
36217 }
36218 #else
36219 __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
36220 uint64x2_t __ret;
36221 __ret = (uint64x2_t)(__p0);
36222 return __ret;
36223 }
36224 #endif
36225
36226 #ifdef __LITTLE_ENDIAN__
36227 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
36228 uint64x2_t __ret;
36229 __ret = (uint64x2_t)(__p0);
36230 return __ret;
36231 }
36232 #else
36233 __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
36234 uint64x2_t __ret;
36235 __ret = (uint64x2_t)(__p0);
36236 return __ret;
36237 }
36238 #endif
36239
36240 #ifdef __LITTLE_ENDIAN__
36241 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
36242 uint64x2_t __ret;
36243 __ret = (uint64x2_t)(__p0);
36244 return __ret;
36245 }
36246 #else
36247 __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
36248 uint64x2_t __ret;
36249 __ret = (uint64x2_t)(__p0);
36250 return __ret;
36251 }
36252 #endif
36253
36254 #ifdef __LITTLE_ENDIAN__
36255 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
36256 uint64x2_t __ret;
36257 __ret = (uint64x2_t)(__p0);
36258 return __ret;
36259 }
36260 #else
36261 __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
36262 uint64x2_t __ret;
36263 __ret = (uint64x2_t)(__p0);
36264 return __ret;
36265 }
36266 #endif
36267
36268 #ifdef __LITTLE_ENDIAN__
36269 __ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
36270 uint64x2_t __ret;
36271 __ret = (uint64x2_t)(__p0);
36272 return __ret;
36273 }
36274 #else
36275 __ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
36276 uint64x2_t __ret;
36277 __ret = (uint64x2_t)(__p0);
36278 return __ret;
36279 }
36280 #endif
36281
36282 #ifdef __LITTLE_ENDIAN__
36283 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
36284 uint64x2_t __ret;
36285 __ret = (uint64x2_t)(__p0);
36286 return __ret;
36287 }
36288 #else
36289 __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
36290 uint64x2_t __ret;
36291 __ret = (uint64x2_t)(__p0);
36292 return __ret;
36293 }
36294 #endif
36295
36296 #ifdef __LITTLE_ENDIAN__
36297 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
36298 uint64x2_t __ret;
36299 __ret = (uint64x2_t)(__p0);
36300 return __ret;
36301 }
36302 #else
36303 __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
36304 uint64x2_t __ret;
36305 __ret = (uint64x2_t)(__p0);
36306 return __ret;
36307 }
36308 #endif
36309
36310 #ifdef __LITTLE_ENDIAN__
36311 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
36312 uint64x2_t __ret;
36313 __ret = (uint64x2_t)(__p0);
36314 return __ret;
36315 }
36316 #else
36317 __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
36318 uint64x2_t __ret;
36319 __ret = (uint64x2_t)(__p0);
36320 return __ret;
36321 }
36322 #endif
36323
36324 #ifdef __LITTLE_ENDIAN__
36325 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
36326 uint64x2_t __ret;
36327 __ret = (uint64x2_t)(__p0);
36328 return __ret;
36329 }
36330 #else
36331 __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
36332 uint64x2_t __ret;
36333 __ret = (uint64x2_t)(__p0);
36334 return __ret;
36335 }
36336 #endif
36337
36338 #ifdef __LITTLE_ENDIAN__
36339 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
36340 uint64x2_t __ret;
36341 __ret = (uint64x2_t)(__p0);
36342 return __ret;
36343 }
36344 #else
36345 __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
36346 uint64x2_t __ret;
36347 __ret = (uint64x2_t)(__p0);
36348 return __ret;
36349 }
36350 #endif
36351
36352 #ifdef __LITTLE_ENDIAN__
36353 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
36354 uint16x8_t __ret;
36355 __ret = (uint16x8_t)(__p0);
36356 return __ret;
36357 }
36358 #else
36359 __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
36360 uint16x8_t __ret;
36361 __ret = (uint16x8_t)(__p0);
36362 return __ret;
36363 }
36364 #endif
36365
36366 #ifdef __LITTLE_ENDIAN__
36367 __ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
36368 uint16x8_t __ret;
36369 __ret = (uint16x8_t)(__p0);
36370 return __ret;
36371 }
36372 #else
36373 __ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
36374 uint16x8_t __ret;
36375 __ret = (uint16x8_t)(__p0);
36376 return __ret;
36377 }
36378 #endif
36379
36380 #ifdef __LITTLE_ENDIAN__
36381 __ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
36382 uint16x8_t __ret;
36383 __ret = (uint16x8_t)(__p0);
36384 return __ret;
36385 }
36386 #else
36387 __ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
36388 uint16x8_t __ret;
36389 __ret = (uint16x8_t)(__p0);
36390 return __ret;
36391 }
36392 #endif
36393
36394 #ifdef __LITTLE_ENDIAN__
36395 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
36396 uint16x8_t __ret;
36397 __ret = (uint16x8_t)(__p0);
36398 return __ret;
36399 }
36400 #else
36401 __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
36402 uint16x8_t __ret;
36403 __ret = (uint16x8_t)(__p0);
36404 return __ret;
36405 }
36406 #endif
36407
36408 #ifdef __LITTLE_ENDIAN__
36409 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
36410 uint16x8_t __ret;
36411 __ret = (uint16x8_t)(__p0);
36412 return __ret;
36413 }
36414 #else
36415 __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
36416 uint16x8_t __ret;
36417 __ret = (uint16x8_t)(__p0);
36418 return __ret;
36419 }
36420 #endif
36421
36422 #ifdef __LITTLE_ENDIAN__
36423 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
36424 uint16x8_t __ret;
36425 __ret = (uint16x8_t)(__p0);
36426 return __ret;
36427 }
36428 #else
36429 __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
36430 uint16x8_t __ret;
36431 __ret = (uint16x8_t)(__p0);
36432 return __ret;
36433 }
36434 #endif
36435
36436 #ifdef __LITTLE_ENDIAN__
36437 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
36438 uint16x8_t __ret;
36439 __ret = (uint16x8_t)(__p0);
36440 return __ret;
36441 }
36442 #else
36443 __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
36444 uint16x8_t __ret;
36445 __ret = (uint16x8_t)(__p0);
36446 return __ret;
36447 }
36448 #endif
36449
36450 #ifdef __LITTLE_ENDIAN__
36451 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
36452 uint16x8_t __ret;
36453 __ret = (uint16x8_t)(__p0);
36454 return __ret;
36455 }
36456 #else
36457 __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
36458 uint16x8_t __ret;
36459 __ret = (uint16x8_t)(__p0);
36460 return __ret;
36461 }
36462 #endif
36463
36464 #ifdef __LITTLE_ENDIAN__
36465 __ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
36466 uint16x8_t __ret;
36467 __ret = (uint16x8_t)(__p0);
36468 return __ret;
36469 }
36470 #else
36471 __ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
36472 uint16x8_t __ret;
36473 __ret = (uint16x8_t)(__p0);
36474 return __ret;
36475 }
36476 #endif
36477
36478 #ifdef __LITTLE_ENDIAN__
36479 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
36480 uint16x8_t __ret;
36481 __ret = (uint16x8_t)(__p0);
36482 return __ret;
36483 }
36484 #else
36485 __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
36486 uint16x8_t __ret;
36487 __ret = (uint16x8_t)(__p0);
36488 return __ret;
36489 }
36490 #endif
36491
36492 #ifdef __LITTLE_ENDIAN__
36493 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
36494 uint16x8_t __ret;
36495 __ret = (uint16x8_t)(__p0);
36496 return __ret;
36497 }
36498 #else
36499 __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
36500 uint16x8_t __ret;
36501 __ret = (uint16x8_t)(__p0);
36502 return __ret;
36503 }
36504 #endif
36505
36506 #ifdef __LITTLE_ENDIAN__
36507 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
36508 uint16x8_t __ret;
36509 __ret = (uint16x8_t)(__p0);
36510 return __ret;
36511 }
36512 #else
36513 __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
36514 uint16x8_t __ret;
36515 __ret = (uint16x8_t)(__p0);
36516 return __ret;
36517 }
36518 #endif
36519
36520 #ifdef __LITTLE_ENDIAN__
36521 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
36522 uint16x8_t __ret;
36523 __ret = (uint16x8_t)(__p0);
36524 return __ret;
36525 }
36526 #else
36527 __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
36528 uint16x8_t __ret;
36529 __ret = (uint16x8_t)(__p0);
36530 return __ret;
36531 }
36532 #endif
36533
36534 #ifdef __LITTLE_ENDIAN__
36535 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
36536 uint16x8_t __ret;
36537 __ret = (uint16x8_t)(__p0);
36538 return __ret;
36539 }
36540 #else
36541 __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
36542 uint16x8_t __ret;
36543 __ret = (uint16x8_t)(__p0);
36544 return __ret;
36545 }
36546 #endif
36547
36548 #ifdef __LITTLE_ENDIAN__
36549 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
36550 int8x16_t __ret;
36551 __ret = (int8x16_t)(__p0);
36552 return __ret;
36553 }
36554 #else
36555 __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
36556 int8x16_t __ret;
36557 __ret = (int8x16_t)(__p0);
36558 return __ret;
36559 }
36560 #endif
36561
36562 #ifdef __LITTLE_ENDIAN__
36563 __ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
36564 int8x16_t __ret;
36565 __ret = (int8x16_t)(__p0);
36566 return __ret;
36567 }
36568 #else
36569 __ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
36570 int8x16_t __ret;
36571 __ret = (int8x16_t)(__p0);
36572 return __ret;
36573 }
36574 #endif
36575
36576 #ifdef __LITTLE_ENDIAN__
36577 __ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
36578 int8x16_t __ret;
36579 __ret = (int8x16_t)(__p0);
36580 return __ret;
36581 }
36582 #else
36583 __ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
36584 int8x16_t __ret;
36585 __ret = (int8x16_t)(__p0);
36586 return __ret;
36587 }
36588 #endif
36589
36590 #ifdef __LITTLE_ENDIAN__
36591 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
36592 int8x16_t __ret;
36593 __ret = (int8x16_t)(__p0);
36594 return __ret;
36595 }
36596 #else
36597 __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
36598 int8x16_t __ret;
36599 __ret = (int8x16_t)(__p0);
36600 return __ret;
36601 }
36602 #endif
36603
36604 #ifdef __LITTLE_ENDIAN__
36605 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
36606 int8x16_t __ret;
36607 __ret = (int8x16_t)(__p0);
36608 return __ret;
36609 }
36610 #else
36611 __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
36612 int8x16_t __ret;
36613 __ret = (int8x16_t)(__p0);
36614 return __ret;
36615 }
36616 #endif
36617
36618 #ifdef __LITTLE_ENDIAN__
36619 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
36620 int8x16_t __ret;
36621 __ret = (int8x16_t)(__p0);
36622 return __ret;
36623 }
36624 #else
36625 __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
36626 int8x16_t __ret;
36627 __ret = (int8x16_t)(__p0);
36628 return __ret;
36629 }
36630 #endif
36631
36632 #ifdef __LITTLE_ENDIAN__
36633 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
36634 int8x16_t __ret;
36635 __ret = (int8x16_t)(__p0);
36636 return __ret;
36637 }
36638 #else
36639 __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
36640 int8x16_t __ret;
36641 __ret = (int8x16_t)(__p0);
36642 return __ret;
36643 }
36644 #endif
36645
36646 #ifdef __LITTLE_ENDIAN__
36647 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
36648 int8x16_t __ret;
36649 __ret = (int8x16_t)(__p0);
36650 return __ret;
36651 }
36652 #else
36653 __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
36654 int8x16_t __ret;
36655 __ret = (int8x16_t)(__p0);
36656 return __ret;
36657 }
36658 #endif
36659
36660 #ifdef __LITTLE_ENDIAN__
36661 __ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
36662 int8x16_t __ret;
36663 __ret = (int8x16_t)(__p0);
36664 return __ret;
36665 }
36666 #else
36667 __ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
36668 int8x16_t __ret;
36669 __ret = (int8x16_t)(__p0);
36670 return __ret;
36671 }
36672 #endif
36673
36674 #ifdef __LITTLE_ENDIAN__
36675 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
36676 int8x16_t __ret;
36677 __ret = (int8x16_t)(__p0);
36678 return __ret;
36679 }
36680 #else
36681 __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
36682 int8x16_t __ret;
36683 __ret = (int8x16_t)(__p0);
36684 return __ret;
36685 }
36686 #endif
36687
36688 #ifdef __LITTLE_ENDIAN__
36689 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
36690 int8x16_t __ret;
36691 __ret = (int8x16_t)(__p0);
36692 return __ret;
36693 }
36694 #else
36695 __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
36696 int8x16_t __ret;
36697 __ret = (int8x16_t)(__p0);
36698 return __ret;
36699 }
36700 #endif
36701
36702 #ifdef __LITTLE_ENDIAN__
36703 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
36704 int8x16_t __ret;
36705 __ret = (int8x16_t)(__p0);
36706 return __ret;
36707 }
36708 #else
36709 __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
36710 int8x16_t __ret;
36711 __ret = (int8x16_t)(__p0);
36712 return __ret;
36713 }
36714 #endif
36715
36716 #ifdef __LITTLE_ENDIAN__
36717 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
36718 int8x16_t __ret;
36719 __ret = (int8x16_t)(__p0);
36720 return __ret;
36721 }
36722 #else
36723 __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
36724 int8x16_t __ret;
36725 __ret = (int8x16_t)(__p0);
36726 return __ret;
36727 }
36728 #endif
36729
36730 #ifdef __LITTLE_ENDIAN__
36731 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
36732 int8x16_t __ret;
36733 __ret = (int8x16_t)(__p0);
36734 return __ret;
36735 }
36736 #else
36737 __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
36738 int8x16_t __ret;
36739 __ret = (int8x16_t)(__p0);
36740 return __ret;
36741 }
36742 #endif
36743
36744 #ifdef __LITTLE_ENDIAN__
36745 __ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
36746 float64x2_t __ret;
36747 __ret = (float64x2_t)(__p0);
36748 return __ret;
36749 }
36750 #else
36751 __ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
36752 float64x2_t __ret;
36753 __ret = (float64x2_t)(__p0);
36754 return __ret;
36755 }
36756 #endif
36757
36758 #ifdef __LITTLE_ENDIAN__
36759 __ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
36760 float64x2_t __ret;
36761 __ret = (float64x2_t)(__p0);
36762 return __ret;
36763 }
36764 #else
36765 __ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
36766 float64x2_t __ret;
36767 __ret = (float64x2_t)(__p0);
36768 return __ret;
36769 }
36770 #endif
36771
36772 #ifdef __LITTLE_ENDIAN__
36773 __ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
36774 float64x2_t __ret;
36775 __ret = (float64x2_t)(__p0);
36776 return __ret;
36777 }
36778 #else
36779 __ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
36780 float64x2_t __ret;
36781 __ret = (float64x2_t)(__p0);
36782 return __ret;
36783 }
36784 #endif
36785
36786 #ifdef __LITTLE_ENDIAN__
36787 __ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
36788 float64x2_t __ret;
36789 __ret = (float64x2_t)(__p0);
36790 return __ret;
36791 }
36792 #else
36793 __ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
36794 float64x2_t __ret;
36795 __ret = (float64x2_t)(__p0);
36796 return __ret;
36797 }
36798 #endif
36799
36800 #ifdef __LITTLE_ENDIAN__
36801 __ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
36802 float64x2_t __ret;
36803 __ret = (float64x2_t)(__p0);
36804 return __ret;
36805 }
36806 #else
36807 __ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
36808 float64x2_t __ret;
36809 __ret = (float64x2_t)(__p0);
36810 return __ret;
36811 }
36812 #endif
36813
36814 #ifdef __LITTLE_ENDIAN__
36815 __ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
36816 float64x2_t __ret;
36817 __ret = (float64x2_t)(__p0);
36818 return __ret;
36819 }
36820 #else
36821 __ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
36822 float64x2_t __ret;
36823 __ret = (float64x2_t)(__p0);
36824 return __ret;
36825 }
36826 #endif
36827
36828 #ifdef __LITTLE_ENDIAN__
36829 __ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
36830 float64x2_t __ret;
36831 __ret = (float64x2_t)(__p0);
36832 return __ret;
36833 }
36834 #else
36835 __ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
36836 float64x2_t __ret;
36837 __ret = (float64x2_t)(__p0);
36838 return __ret;
36839 }
36840 #endif
36841
36842 #ifdef __LITTLE_ENDIAN__
36843 __ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
36844 float64x2_t __ret;
36845 __ret = (float64x2_t)(__p0);
36846 return __ret;
36847 }
36848 #else
36849 __ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
36850 float64x2_t __ret;
36851 __ret = (float64x2_t)(__p0);
36852 return __ret;
36853 }
36854 #endif
36855
36856 #ifdef __LITTLE_ENDIAN__
36857 __ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
36858 float64x2_t __ret;
36859 __ret = (float64x2_t)(__p0);
36860 return __ret;
36861 }
36862 #else
36863 __ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
36864 float64x2_t __ret;
36865 __ret = (float64x2_t)(__p0);
36866 return __ret;
36867 }
36868 #endif
36869
36870 #ifdef __LITTLE_ENDIAN__
36871 __ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
36872 float64x2_t __ret;
36873 __ret = (float64x2_t)(__p0);
36874 return __ret;
36875 }
36876 #else
36877 __ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
36878 float64x2_t __ret;
36879 __ret = (float64x2_t)(__p0);
36880 return __ret;
36881 }
36882 #endif
36883
36884 #ifdef __LITTLE_ENDIAN__
36885 __ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
36886 float64x2_t __ret;
36887 __ret = (float64x2_t)(__p0);
36888 return __ret;
36889 }
36890 #else
36891 __ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
36892 float64x2_t __ret;
36893 __ret = (float64x2_t)(__p0);
36894 return __ret;
36895 }
36896 #endif
36897
36898 #ifdef __LITTLE_ENDIAN__
36899 __ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
36900 float64x2_t __ret;
36901 __ret = (float64x2_t)(__p0);
36902 return __ret;
36903 }
36904 #else
36905 __ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
36906 float64x2_t __ret;
36907 __ret = (float64x2_t)(__p0);
36908 return __ret;
36909 }
36910 #endif
36911
36912 #ifdef __LITTLE_ENDIAN__
36913 __ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
36914 float64x2_t __ret;
36915 __ret = (float64x2_t)(__p0);
36916 return __ret;
36917 }
36918 #else
36919 __ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
36920 float64x2_t __ret;
36921 __ret = (float64x2_t)(__p0);
36922 return __ret;
36923 }
36924 #endif
36925
36926 #ifdef __LITTLE_ENDIAN__
36927 __ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
36928 float64x2_t __ret;
36929 __ret = (float64x2_t)(__p0);
36930 return __ret;
36931 }
36932 #else
36933 __ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
36934 float64x2_t __ret;
36935 __ret = (float64x2_t)(__p0);
36936 return __ret;
36937 }
36938 #endif
36939
36940 #ifdef __LITTLE_ENDIAN__
36941 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
36942 float32x4_t __ret;
36943 __ret = (float32x4_t)(__p0);
36944 return __ret;
36945 }
36946 #else
36947 __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
36948 float32x4_t __ret;
36949 __ret = (float32x4_t)(__p0);
36950 return __ret;
36951 }
36952 #endif
36953
36954 #ifdef __LITTLE_ENDIAN__
36955 __ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
36956 float32x4_t __ret;
36957 __ret = (float32x4_t)(__p0);
36958 return __ret;
36959 }
36960 #else
36961 __ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
36962 float32x4_t __ret;
36963 __ret = (float32x4_t)(__p0);
36964 return __ret;
36965 }
36966 #endif
36967
36968 #ifdef __LITTLE_ENDIAN__
36969 __ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
36970 float32x4_t __ret;
36971 __ret = (float32x4_t)(__p0);
36972 return __ret;
36973 }
36974 #else
36975 __ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
36976 float32x4_t __ret;
36977 __ret = (float32x4_t)(__p0);
36978 return __ret;
36979 }
36980 #endif
36981
36982 #ifdef __LITTLE_ENDIAN__
36983 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
36984 float32x4_t __ret;
36985 __ret = (float32x4_t)(__p0);
36986 return __ret;
36987 }
36988 #else
36989 __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
36990 float32x4_t __ret;
36991 __ret = (float32x4_t)(__p0);
36992 return __ret;
36993 }
36994 #endif
36995
36996 #ifdef __LITTLE_ENDIAN__
36997 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
36998 float32x4_t __ret;
36999 __ret = (float32x4_t)(__p0);
37000 return __ret;
37001 }
37002 #else
37003 __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
37004 float32x4_t __ret;
37005 __ret = (float32x4_t)(__p0);
37006 return __ret;
37007 }
37008 #endif
37009
37010 #ifdef __LITTLE_ENDIAN__
37011 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
37012 float32x4_t __ret;
37013 __ret = (float32x4_t)(__p0);
37014 return __ret;
37015 }
37016 #else
37017 __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
37018 float32x4_t __ret;
37019 __ret = (float32x4_t)(__p0);
37020 return __ret;
37021 }
37022 #endif
37023
37024 #ifdef __LITTLE_ENDIAN__
37025 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
37026 float32x4_t __ret;
37027 __ret = (float32x4_t)(__p0);
37028 return __ret;
37029 }
37030 #else
37031 __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
37032 float32x4_t __ret;
37033 __ret = (float32x4_t)(__p0);
37034 return __ret;
37035 }
37036 #endif
37037
37038 #ifdef __LITTLE_ENDIAN__
37039 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
37040 float32x4_t __ret;
37041 __ret = (float32x4_t)(__p0);
37042 return __ret;
37043 }
37044 #else
37045 __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
37046 float32x4_t __ret;
37047 __ret = (float32x4_t)(__p0);
37048 return __ret;
37049 }
37050 #endif
37051
37052 #ifdef __LITTLE_ENDIAN__
37053 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
37054 float32x4_t __ret;
37055 __ret = (float32x4_t)(__p0);
37056 return __ret;
37057 }
37058 #else
37059 __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
37060 float32x4_t __ret;
37061 __ret = (float32x4_t)(__p0);
37062 return __ret;
37063 }
37064 #endif
37065
37066 #ifdef __LITTLE_ENDIAN__
37067 __ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
37068 float32x4_t __ret;
37069 __ret = (float32x4_t)(__p0);
37070 return __ret;
37071 }
37072 #else
37073 __ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
37074 float32x4_t __ret;
37075 __ret = (float32x4_t)(__p0);
37076 return __ret;
37077 }
37078 #endif
37079
37080 #ifdef __LITTLE_ENDIAN__
37081 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
37082 float32x4_t __ret;
37083 __ret = (float32x4_t)(__p0);
37084 return __ret;
37085 }
37086 #else
37087 __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
37088 float32x4_t __ret;
37089 __ret = (float32x4_t)(__p0);
37090 return __ret;
37091 }
37092 #endif
37093
37094 #ifdef __LITTLE_ENDIAN__
37095 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
37096 float32x4_t __ret;
37097 __ret = (float32x4_t)(__p0);
37098 return __ret;
37099 }
37100 #else
37101 __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
37102 float32x4_t __ret;
37103 __ret = (float32x4_t)(__p0);
37104 return __ret;
37105 }
37106 #endif
37107
37108 #ifdef __LITTLE_ENDIAN__
37109 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
37110 float32x4_t __ret;
37111 __ret = (float32x4_t)(__p0);
37112 return __ret;
37113 }
37114 #else
37115 __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
37116 float32x4_t __ret;
37117 __ret = (float32x4_t)(__p0);
37118 return __ret;
37119 }
37120 #endif
37121
37122 #ifdef __LITTLE_ENDIAN__
37123 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
37124 float32x4_t __ret;
37125 __ret = (float32x4_t)(__p0);
37126 return __ret;
37127 }
37128 #else
37129 __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
37130 float32x4_t __ret;
37131 __ret = (float32x4_t)(__p0);
37132 return __ret;
37133 }
37134 #endif
37135
37136 #ifdef __LITTLE_ENDIAN__
37137 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
37138 float16x8_t __ret;
37139 __ret = (float16x8_t)(__p0);
37140 return __ret;
37141 }
37142 #else
37143 __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
37144 float16x8_t __ret;
37145 __ret = (float16x8_t)(__p0);
37146 return __ret;
37147 }
37148 #endif
37149
37150 #ifdef __LITTLE_ENDIAN__
37151 __ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
37152 float16x8_t __ret;
37153 __ret = (float16x8_t)(__p0);
37154 return __ret;
37155 }
37156 #else
37157 __ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
37158 float16x8_t __ret;
37159 __ret = (float16x8_t)(__p0);
37160 return __ret;
37161 }
37162 #endif
37163
37164 #ifdef __LITTLE_ENDIAN__
37165 __ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
37166 float16x8_t __ret;
37167 __ret = (float16x8_t)(__p0);
37168 return __ret;
37169 }
37170 #else
37171 __ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
37172 float16x8_t __ret;
37173 __ret = (float16x8_t)(__p0);
37174 return __ret;
37175 }
37176 #endif
37177
37178 #ifdef __LITTLE_ENDIAN__
37179 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
37180 float16x8_t __ret;
37181 __ret = (float16x8_t)(__p0);
37182 return __ret;
37183 }
37184 #else
37185 __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
37186 float16x8_t __ret;
37187 __ret = (float16x8_t)(__p0);
37188 return __ret;
37189 }
37190 #endif
37191
37192 #ifdef __LITTLE_ENDIAN__
37193 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
37194 float16x8_t __ret;
37195 __ret = (float16x8_t)(__p0);
37196 return __ret;
37197 }
37198 #else
37199 __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
37200 float16x8_t __ret;
37201 __ret = (float16x8_t)(__p0);
37202 return __ret;
37203 }
37204 #endif
37205
37206 #ifdef __LITTLE_ENDIAN__
37207 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
37208 float16x8_t __ret;
37209 __ret = (float16x8_t)(__p0);
37210 return __ret;
37211 }
37212 #else
37213 __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
37214 float16x8_t __ret;
37215 __ret = (float16x8_t)(__p0);
37216 return __ret;
37217 }
37218 #endif
37219
37220 #ifdef __LITTLE_ENDIAN__
37221 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
37222 float16x8_t __ret;
37223 __ret = (float16x8_t)(__p0);
37224 return __ret;
37225 }
37226 #else
37227 __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
37228 float16x8_t __ret;
37229 __ret = (float16x8_t)(__p0);
37230 return __ret;
37231 }
37232 #endif
37233
37234 #ifdef __LITTLE_ENDIAN__
37235 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
37236 float16x8_t __ret;
37237 __ret = (float16x8_t)(__p0);
37238 return __ret;
37239 }
37240 #else
37241 __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
37242 float16x8_t __ret;
37243 __ret = (float16x8_t)(__p0);
37244 return __ret;
37245 }
37246 #endif
37247
37248 #ifdef __LITTLE_ENDIAN__
37249 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
37250 float16x8_t __ret;
37251 __ret = (float16x8_t)(__p0);
37252 return __ret;
37253 }
37254 #else
37255 __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
37256 float16x8_t __ret;
37257 __ret = (float16x8_t)(__p0);
37258 return __ret;
37259 }
37260 #endif
37261
37262 #ifdef __LITTLE_ENDIAN__
37263 __ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
37264 float16x8_t __ret;
37265 __ret = (float16x8_t)(__p0);
37266 return __ret;
37267 }
37268 #else
37269 __ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
37270 float16x8_t __ret;
37271 __ret = (float16x8_t)(__p0);
37272 return __ret;
37273 }
37274 #endif
37275
37276 #ifdef __LITTLE_ENDIAN__
37277 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
37278 float16x8_t __ret;
37279 __ret = (float16x8_t)(__p0);
37280 return __ret;
37281 }
37282 #else
37283 __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
37284 float16x8_t __ret;
37285 __ret = (float16x8_t)(__p0);
37286 return __ret;
37287 }
37288 #endif
37289
37290 #ifdef __LITTLE_ENDIAN__
37291 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
37292 float16x8_t __ret;
37293 __ret = (float16x8_t)(__p0);
37294 return __ret;
37295 }
37296 #else
37297 __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
37298 float16x8_t __ret;
37299 __ret = (float16x8_t)(__p0);
37300 return __ret;
37301 }
37302 #endif
37303
37304 #ifdef __LITTLE_ENDIAN__
37305 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
37306 float16x8_t __ret;
37307 __ret = (float16x8_t)(__p0);
37308 return __ret;
37309 }
37310 #else
37311 __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
37312 float16x8_t __ret;
37313 __ret = (float16x8_t)(__p0);
37314 return __ret;
37315 }
37316 #endif
37317
37318 #ifdef __LITTLE_ENDIAN__
37319 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
37320 float16x8_t __ret;
37321 __ret = (float16x8_t)(__p0);
37322 return __ret;
37323 }
37324 #else
37325 __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
37326 float16x8_t __ret;
37327 __ret = (float16x8_t)(__p0);
37328 return __ret;
37329 }
37330 #endif
37331
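/* vreinterpretq_s32_<type>: view the 128-bit source as four signed 32-bit lanes. */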
37332 #ifdef __LITTLE_ENDIAN__
37333 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
37334 int32x4_t __ret;
37335 __ret = (int32x4_t)(__p0);
37336 return __ret;
37337 }
37338 #else
37339 __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
37340 int32x4_t __ret;
37341 __ret = (int32x4_t)(__p0);
37342 return __ret;
37343 }
37344 #endif
37345
37346 #ifdef __LITTLE_ENDIAN__
37347 __ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
37348 int32x4_t __ret;
37349 __ret = (int32x4_t)(__p0);
37350 return __ret;
37351 }
37352 #else
37353 __ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
37354 int32x4_t __ret;
37355 __ret = (int32x4_t)(__p0);
37356 return __ret;
37357 }
37358 #endif
37359
37360 #ifdef __LITTLE_ENDIAN__
37361 __ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
37362 int32x4_t __ret;
37363 __ret = (int32x4_t)(__p0);
37364 return __ret;
37365 }
37366 #else
37367 __ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
37368 int32x4_t __ret;
37369 __ret = (int32x4_t)(__p0);
37370 return __ret;
37371 }
37372 #endif
37373
37374 #ifdef __LITTLE_ENDIAN__
37375 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
37376 int32x4_t __ret;
37377 __ret = (int32x4_t)(__p0);
37378 return __ret;
37379 }
37380 #else
37381 __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
37382 int32x4_t __ret;
37383 __ret = (int32x4_t)(__p0);
37384 return __ret;
37385 }
37386 #endif
37387
37388 #ifdef __LITTLE_ENDIAN__
37389 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
37390 int32x4_t __ret;
37391 __ret = (int32x4_t)(__p0);
37392 return __ret;
37393 }
37394 #else
37395 __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
37396 int32x4_t __ret;
37397 __ret = (int32x4_t)(__p0);
37398 return __ret;
37399 }
37400 #endif
37401
37402 #ifdef __LITTLE_ENDIAN__
37403 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
37404 int32x4_t __ret;
37405 __ret = (int32x4_t)(__p0);
37406 return __ret;
37407 }
37408 #else
37409 __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
37410 int32x4_t __ret;
37411 __ret = (int32x4_t)(__p0);
37412 return __ret;
37413 }
37414 #endif
37415
37416 #ifdef __LITTLE_ENDIAN__
37417 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
37418 int32x4_t __ret;
37419 __ret = (int32x4_t)(__p0);
37420 return __ret;
37421 }
37422 #else
37423 __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
37424 int32x4_t __ret;
37425 __ret = (int32x4_t)(__p0);
37426 return __ret;
37427 }
37428 #endif
37429
37430 #ifdef __LITTLE_ENDIAN__
37431 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
37432 int32x4_t __ret;
37433 __ret = (int32x4_t)(__p0);
37434 return __ret;
37435 }
37436 #else
37437 __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
37438 int32x4_t __ret;
37439 __ret = (int32x4_t)(__p0);
37440 return __ret;
37441 }
37442 #endif
37443
37444 #ifdef __LITTLE_ENDIAN__
37445 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
37446 int32x4_t __ret;
37447 __ret = (int32x4_t)(__p0);
37448 return __ret;
37449 }
37450 #else
37451 __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
37452 int32x4_t __ret;
37453 __ret = (int32x4_t)(__p0);
37454 return __ret;
37455 }
37456 #endif
37457
37458 #ifdef __LITTLE_ENDIAN__
37459 __ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
37460 int32x4_t __ret;
37461 __ret = (int32x4_t)(__p0);
37462 return __ret;
37463 }
37464 #else
37465 __ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
37466 int32x4_t __ret;
37467 __ret = (int32x4_t)(__p0);
37468 return __ret;
37469 }
37470 #endif
37471
37472 #ifdef __LITTLE_ENDIAN__
37473 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
37474 int32x4_t __ret;
37475 __ret = (int32x4_t)(__p0);
37476 return __ret;
37477 }
37478 #else
37479 __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
37480 int32x4_t __ret;
37481 __ret = (int32x4_t)(__p0);
37482 return __ret;
37483 }
37484 #endif
37485
37486 #ifdef __LITTLE_ENDIAN__
37487 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
37488 int32x4_t __ret;
37489 __ret = (int32x4_t)(__p0);
37490 return __ret;
37491 }
37492 #else
37493 __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
37494 int32x4_t __ret;
37495 __ret = (int32x4_t)(__p0);
37496 return __ret;
37497 }
37498 #endif
37499
37500 #ifdef __LITTLE_ENDIAN__
37501 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
37502 int32x4_t __ret;
37503 __ret = (int32x4_t)(__p0);
37504 return __ret;
37505 }
37506 #else
37507 __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
37508 int32x4_t __ret;
37509 __ret = (int32x4_t)(__p0);
37510 return __ret;
37511 }
37512 #endif
37513
37514 #ifdef __LITTLE_ENDIAN__
37515 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
37516 int32x4_t __ret;
37517 __ret = (int32x4_t)(__p0);
37518 return __ret;
37519 }
37520 #else
37521 __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
37522 int32x4_t __ret;
37523 __ret = (int32x4_t)(__p0);
37524 return __ret;
37525 }
37526 #endif
37527
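/* vreinterpretq_s64_<type>: view the 128-bit source as two signed 64-bit lanes. */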
37528 #ifdef __LITTLE_ENDIAN__
37529 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
37530 int64x2_t __ret;
37531 __ret = (int64x2_t)(__p0);
37532 return __ret;
37533 }
37534 #else
37535 __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
37536 int64x2_t __ret;
37537 __ret = (int64x2_t)(__p0);
37538 return __ret;
37539 }
37540 #endif
37541
37542 #ifdef __LITTLE_ENDIAN__
37543 __ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
37544 int64x2_t __ret;
37545 __ret = (int64x2_t)(__p0);
37546 return __ret;
37547 }
37548 #else
37549 __ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
37550 int64x2_t __ret;
37551 __ret = (int64x2_t)(__p0);
37552 return __ret;
37553 }
37554 #endif
37555
37556 #ifdef __LITTLE_ENDIAN__
37557 __ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
37558 int64x2_t __ret;
37559 __ret = (int64x2_t)(__p0);
37560 return __ret;
37561 }
37562 #else
37563 __ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
37564 int64x2_t __ret;
37565 __ret = (int64x2_t)(__p0);
37566 return __ret;
37567 }
37568 #endif
37569
37570 #ifdef __LITTLE_ENDIAN__
37571 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
37572 int64x2_t __ret;
37573 __ret = (int64x2_t)(__p0);
37574 return __ret;
37575 }
37576 #else
37577 __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
37578 int64x2_t __ret;
37579 __ret = (int64x2_t)(__p0);
37580 return __ret;
37581 }
37582 #endif
37583
37584 #ifdef __LITTLE_ENDIAN__
37585 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
37586 int64x2_t __ret;
37587 __ret = (int64x2_t)(__p0);
37588 return __ret;
37589 }
37590 #else
37591 __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
37592 int64x2_t __ret;
37593 __ret = (int64x2_t)(__p0);
37594 return __ret;
37595 }
37596 #endif
37597
37598 #ifdef __LITTLE_ENDIAN__
37599 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
37600 int64x2_t __ret;
37601 __ret = (int64x2_t)(__p0);
37602 return __ret;
37603 }
37604 #else
37605 __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
37606 int64x2_t __ret;
37607 __ret = (int64x2_t)(__p0);
37608 return __ret;
37609 }
37610 #endif
37611
37612 #ifdef __LITTLE_ENDIAN__
37613 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
37614 int64x2_t __ret;
37615 __ret = (int64x2_t)(__p0);
37616 return __ret;
37617 }
37618 #else
37619 __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
37620 int64x2_t __ret;
37621 __ret = (int64x2_t)(__p0);
37622 return __ret;
37623 }
37624 #endif
37625
37626 #ifdef __LITTLE_ENDIAN__
37627 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
37628 int64x2_t __ret;
37629 __ret = (int64x2_t)(__p0);
37630 return __ret;
37631 }
37632 #else
37633 __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
37634 int64x2_t __ret;
37635 __ret = (int64x2_t)(__p0);
37636 return __ret;
37637 }
37638 #endif
37639
37640 #ifdef __LITTLE_ENDIAN__
37641 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
37642 int64x2_t __ret;
37643 __ret = (int64x2_t)(__p0);
37644 return __ret;
37645 }
37646 #else
37647 __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
37648 int64x2_t __ret;
37649 __ret = (int64x2_t)(__p0);
37650 return __ret;
37651 }
37652 #endif
37653
37654 #ifdef __LITTLE_ENDIAN__
37655 __ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
37656 int64x2_t __ret;
37657 __ret = (int64x2_t)(__p0);
37658 return __ret;
37659 }
37660 #else
37661 __ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
37662 int64x2_t __ret;
37663 __ret = (int64x2_t)(__p0);
37664 return __ret;
37665 }
37666 #endif
37667
37668 #ifdef __LITTLE_ENDIAN__
37669 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
37670 int64x2_t __ret;
37671 __ret = (int64x2_t)(__p0);
37672 return __ret;
37673 }
37674 #else
37675 __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
37676 int64x2_t __ret;
37677 __ret = (int64x2_t)(__p0);
37678 return __ret;
37679 }
37680 #endif
37681
37682 #ifdef __LITTLE_ENDIAN__
37683 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
37684 int64x2_t __ret;
37685 __ret = (int64x2_t)(__p0);
37686 return __ret;
37687 }
37688 #else
37689 __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
37690 int64x2_t __ret;
37691 __ret = (int64x2_t)(__p0);
37692 return __ret;
37693 }
37694 #endif
37695
37696 #ifdef __LITTLE_ENDIAN__
37697 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
37698 int64x2_t __ret;
37699 __ret = (int64x2_t)(__p0);
37700 return __ret;
37701 }
37702 #else
37703 __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
37704 int64x2_t __ret;
37705 __ret = (int64x2_t)(__p0);
37706 return __ret;
37707 }
37708 #endif
37709
37710 #ifdef __LITTLE_ENDIAN__
37711 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
37712 int64x2_t __ret;
37713 __ret = (int64x2_t)(__p0);
37714 return __ret;
37715 }
37716 #else
37717 __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
37718 int64x2_t __ret;
37719 __ret = (int64x2_t)(__p0);
37720 return __ret;
37721 }
37722 #endif
37723
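/* vreinterpretq_s16_<type>: view the 128-bit source as eight signed 16-bit lanes. */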
37724 #ifdef __LITTLE_ENDIAN__
37725 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
37726 int16x8_t __ret;
37727 __ret = (int16x8_t)(__p0);
37728 return __ret;
37729 }
37730 #else
37731 __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
37732 int16x8_t __ret;
37733 __ret = (int16x8_t)(__p0);
37734 return __ret;
37735 }
37736 #endif
37737
37738 #ifdef __LITTLE_ENDIAN__
37739 __ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
37740 int16x8_t __ret;
37741 __ret = (int16x8_t)(__p0);
37742 return __ret;
37743 }
37744 #else
37745 __ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
37746 int16x8_t __ret;
37747 __ret = (int16x8_t)(__p0);
37748 return __ret;
37749 }
37750 #endif
37751
37752 #ifdef __LITTLE_ENDIAN__
37753 __ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
37754 int16x8_t __ret;
37755 __ret = (int16x8_t)(__p0);
37756 return __ret;
37757 }
37758 #else
37759 __ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
37760 int16x8_t __ret;
37761 __ret = (int16x8_t)(__p0);
37762 return __ret;
37763 }
37764 #endif
37765
37766 #ifdef __LITTLE_ENDIAN__
37767 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
37768 int16x8_t __ret;
37769 __ret = (int16x8_t)(__p0);
37770 return __ret;
37771 }
37772 #else
37773 __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
37774 int16x8_t __ret;
37775 __ret = (int16x8_t)(__p0);
37776 return __ret;
37777 }
37778 #endif
37779
37780 #ifdef __LITTLE_ENDIAN__
37781 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
37782 int16x8_t __ret;
37783 __ret = (int16x8_t)(__p0);
37784 return __ret;
37785 }
37786 #else
37787 __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
37788 int16x8_t __ret;
37789 __ret = (int16x8_t)(__p0);
37790 return __ret;
37791 }
37792 #endif
37793
37794 #ifdef __LITTLE_ENDIAN__
37795 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
37796 int16x8_t __ret;
37797 __ret = (int16x8_t)(__p0);
37798 return __ret;
37799 }
37800 #else
37801 __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
37802 int16x8_t __ret;
37803 __ret = (int16x8_t)(__p0);
37804 return __ret;
37805 }
37806 #endif
37807
37808 #ifdef __LITTLE_ENDIAN__
37809 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
37810 int16x8_t __ret;
37811 __ret = (int16x8_t)(__p0);
37812 return __ret;
37813 }
37814 #else
37815 __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
37816 int16x8_t __ret;
37817 __ret = (int16x8_t)(__p0);
37818 return __ret;
37819 }
37820 #endif
37821
37822 #ifdef __LITTLE_ENDIAN__
37823 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
37824 int16x8_t __ret;
37825 __ret = (int16x8_t)(__p0);
37826 return __ret;
37827 }
37828 #else
37829 __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
37830 int16x8_t __ret;
37831 __ret = (int16x8_t)(__p0);
37832 return __ret;
37833 }
37834 #endif
37835
37836 #ifdef __LITTLE_ENDIAN__
37837 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
37838 int16x8_t __ret;
37839 __ret = (int16x8_t)(__p0);
37840 return __ret;
37841 }
37842 #else
37843 __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
37844 int16x8_t __ret;
37845 __ret = (int16x8_t)(__p0);
37846 return __ret;
37847 }
37848 #endif
37849
37850 #ifdef __LITTLE_ENDIAN__
37851 __ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
37852 int16x8_t __ret;
37853 __ret = (int16x8_t)(__p0);
37854 return __ret;
37855 }
37856 #else
37857 __ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
37858 int16x8_t __ret;
37859 __ret = (int16x8_t)(__p0);
37860 return __ret;
37861 }
37862 #endif
37863
37864 #ifdef __LITTLE_ENDIAN__
37865 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
37866 int16x8_t __ret;
37867 __ret = (int16x8_t)(__p0);
37868 return __ret;
37869 }
37870 #else
37871 __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
37872 int16x8_t __ret;
37873 __ret = (int16x8_t)(__p0);
37874 return __ret;
37875 }
37876 #endif
37877
37878 #ifdef __LITTLE_ENDIAN__
37879 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
37880 int16x8_t __ret;
37881 __ret = (int16x8_t)(__p0);
37882 return __ret;
37883 }
37884 #else
37885 __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
37886 int16x8_t __ret;
37887 __ret = (int16x8_t)(__p0);
37888 return __ret;
37889 }
37890 #endif
37891
37892 #ifdef __LITTLE_ENDIAN__
37893 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
37894 int16x8_t __ret;
37895 __ret = (int16x8_t)(__p0);
37896 return __ret;
37897 }
37898 #else
37899 __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
37900 int16x8_t __ret;
37901 __ret = (int16x8_t)(__p0);
37902 return __ret;
37903 }
37904 #endif
37905
37906 #ifdef __LITTLE_ENDIAN__
37907 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
37908 int16x8_t __ret;
37909 __ret = (int16x8_t)(__p0);
37910 return __ret;
37911 }
37912 #else
37913 __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
37914 int16x8_t __ret;
37915 __ret = (int16x8_t)(__p0);
37916 return __ret;
37917 }
37918 #endif
37919
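/* The vreinterpret_* forms without the "q" suffix cover the 64-bit (D-register)
 * vector types. An illustrative sketch of how they are used (not part of the
 * generated header; vdup_n_u8 comes from the standard NEON intrinsic set):
 *
 *   uint8x8_t bytes = vdup_n_u8(0x3c);
 *   float16x4_t halves = vreinterpret_f16_u8(bytes);  // same 64 bits, new lane type
 */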
37920 #ifdef __LITTLE_ENDIAN__
37921 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
37922 uint8x8_t __ret;
37923 __ret = (uint8x8_t)(__p0);
37924 return __ret;
37925 }
37926 #else
37927 __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
37928 uint8x8_t __ret;
37929 __ret = (uint8x8_t)(__p0);
37930 return __ret;
37931 }
37932 #endif
37933
37934 #ifdef __LITTLE_ENDIAN__
37935 __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
37936 uint8x8_t __ret;
37937 __ret = (uint8x8_t)(__p0);
37938 return __ret;
37939 }
37940 #else
37941 __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
37942 uint8x8_t __ret;
37943 __ret = (uint8x8_t)(__p0);
37944 return __ret;
37945 }
37946 #endif
37947
37948 #ifdef __LITTLE_ENDIAN__
37949 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
37950 uint8x8_t __ret;
37951 __ret = (uint8x8_t)(__p0);
37952 return __ret;
37953 }
37954 #else
37955 __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
37956 uint8x8_t __ret;
37957 __ret = (uint8x8_t)(__p0);
37958 return __ret;
37959 }
37960 #endif
37961
37962 #ifdef __LITTLE_ENDIAN__
37963 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
37964 uint8x8_t __ret;
37965 __ret = (uint8x8_t)(__p0);
37966 return __ret;
37967 }
37968 #else
37969 __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
37970 uint8x8_t __ret;
37971 __ret = (uint8x8_t)(__p0);
37972 return __ret;
37973 }
37974 #endif
37975
37976 #ifdef __LITTLE_ENDIAN__
37977 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
37978 uint8x8_t __ret;
37979 __ret = (uint8x8_t)(__p0);
37980 return __ret;
37981 }
37982 #else
37983 __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
37984 uint8x8_t __ret;
37985 __ret = (uint8x8_t)(__p0);
37986 return __ret;
37987 }
37988 #endif
37989
37990 #ifdef __LITTLE_ENDIAN__
37991 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
37992 uint8x8_t __ret;
37993 __ret = (uint8x8_t)(__p0);
37994 return __ret;
37995 }
37996 #else
37997 __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
37998 uint8x8_t __ret;
37999 __ret = (uint8x8_t)(__p0);
38000 return __ret;
38001 }
38002 #endif
38003
38004 #ifdef __LITTLE_ENDIAN__
38005 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
38006 uint8x8_t __ret;
38007 __ret = (uint8x8_t)(__p0);
38008 return __ret;
38009 }
38010 #else
38011 __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
38012 uint8x8_t __ret;
38013 __ret = (uint8x8_t)(__p0);
38014 return __ret;
38015 }
38016 #endif
38017
38018 #ifdef __LITTLE_ENDIAN__
38019 __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
38020 uint8x8_t __ret;
38021 __ret = (uint8x8_t)(__p0);
38022 return __ret;
38023 }
38024 #else
38025 __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
38026 uint8x8_t __ret;
38027 __ret = (uint8x8_t)(__p0);
38028 return __ret;
38029 }
38030 #endif
38031
38032 #ifdef __LITTLE_ENDIAN__
38033 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
38034 uint8x8_t __ret;
38035 __ret = (uint8x8_t)(__p0);
38036 return __ret;
38037 }
38038 #else
38039 __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
38040 uint8x8_t __ret;
38041 __ret = (uint8x8_t)(__p0);
38042 return __ret;
38043 }
38044 #endif
38045
38046 #ifdef __LITTLE_ENDIAN__
38047 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
38048 uint8x8_t __ret;
38049 __ret = (uint8x8_t)(__p0);
38050 return __ret;
38051 }
38052 #else
38053 __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
38054 uint8x8_t __ret;
38055 __ret = (uint8x8_t)(__p0);
38056 return __ret;
38057 }
38058 #endif
38059
38060 #ifdef __LITTLE_ENDIAN__
38061 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
38062 uint8x8_t __ret;
38063 __ret = (uint8x8_t)(__p0);
38064 return __ret;
38065 }
38066 #else
38067 __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
38068 uint8x8_t __ret;
38069 __ret = (uint8x8_t)(__p0);
38070 return __ret;
38071 }
38072 #endif
38073
38074 #ifdef __LITTLE_ENDIAN__
38075 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
38076 uint8x8_t __ret;
38077 __ret = (uint8x8_t)(__p0);
38078 return __ret;
38079 }
38080 #else
38081 __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
38082 uint8x8_t __ret;
38083 __ret = (uint8x8_t)(__p0);
38084 return __ret;
38085 }
38086 #endif
38087
38088 #ifdef __LITTLE_ENDIAN__
38089 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
38090 uint8x8_t __ret;
38091 __ret = (uint8x8_t)(__p0);
38092 return __ret;
38093 }
38094 #else
38095 __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
38096 uint8x8_t __ret;
38097 __ret = (uint8x8_t)(__p0);
38098 return __ret;
38099 }
38100 #endif
38101
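/* vreinterpret_u32_<type>: view the 64-bit source as two unsigned 32-bit lanes. */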
38102 #ifdef __LITTLE_ENDIAN__
38103 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
38104 uint32x2_t __ret;
38105 __ret = (uint32x2_t)(__p0);
38106 return __ret;
38107 }
38108 #else
38109 __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
38110 uint32x2_t __ret;
38111 __ret = (uint32x2_t)(__p0);
38112 return __ret;
38113 }
38114 #endif
38115
38116 #ifdef __LITTLE_ENDIAN__
38117 __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
38118 uint32x2_t __ret;
38119 __ret = (uint32x2_t)(__p0);
38120 return __ret;
38121 }
38122 #else
38123 __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
38124 uint32x2_t __ret;
38125 __ret = (uint32x2_t)(__p0);
38126 return __ret;
38127 }
38128 #endif
38129
38130 #ifdef __LITTLE_ENDIAN__
38131 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
38132 uint32x2_t __ret;
38133 __ret = (uint32x2_t)(__p0);
38134 return __ret;
38135 }
38136 #else
38137 __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
38138 uint32x2_t __ret;
38139 __ret = (uint32x2_t)(__p0);
38140 return __ret;
38141 }
38142 #endif
38143
38144 #ifdef __LITTLE_ENDIAN__
38145 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
38146 uint32x2_t __ret;
38147 __ret = (uint32x2_t)(__p0);
38148 return __ret;
38149 }
38150 #else
38151 __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
38152 uint32x2_t __ret;
38153 __ret = (uint32x2_t)(__p0);
38154 return __ret;
38155 }
38156 #endif
38157
38158 #ifdef __LITTLE_ENDIAN__
38159 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
38160 uint32x2_t __ret;
38161 __ret = (uint32x2_t)(__p0);
38162 return __ret;
38163 }
38164 #else
38165 __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
38166 uint32x2_t __ret;
38167 __ret = (uint32x2_t)(__p0);
38168 return __ret;
38169 }
38170 #endif
38171
38172 #ifdef __LITTLE_ENDIAN__
38173 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
38174 uint32x2_t __ret;
38175 __ret = (uint32x2_t)(__p0);
38176 return __ret;
38177 }
38178 #else
38179 __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
38180 uint32x2_t __ret;
38181 __ret = (uint32x2_t)(__p0);
38182 return __ret;
38183 }
38184 #endif
38185
38186 #ifdef __LITTLE_ENDIAN__
38187 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
38188 uint32x2_t __ret;
38189 __ret = (uint32x2_t)(__p0);
38190 return __ret;
38191 }
38192 #else
38193 __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
38194 uint32x2_t __ret;
38195 __ret = (uint32x2_t)(__p0);
38196 return __ret;
38197 }
38198 #endif
38199
38200 #ifdef __LITTLE_ENDIAN__
38201 __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
38202 uint32x2_t __ret;
38203 __ret = (uint32x2_t)(__p0);
38204 return __ret;
38205 }
38206 #else
38207 __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
38208 uint32x2_t __ret;
38209 __ret = (uint32x2_t)(__p0);
38210 return __ret;
38211 }
38212 #endif
38213
38214 #ifdef __LITTLE_ENDIAN__
38215 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
38216 uint32x2_t __ret;
38217 __ret = (uint32x2_t)(__p0);
38218 return __ret;
38219 }
38220 #else
38221 __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
38222 uint32x2_t __ret;
38223 __ret = (uint32x2_t)(__p0);
38224 return __ret;
38225 }
38226 #endif
38227
38228 #ifdef __LITTLE_ENDIAN__
38229 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
38230 uint32x2_t __ret;
38231 __ret = (uint32x2_t)(__p0);
38232 return __ret;
38233 }
38234 #else
38235 __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
38236 uint32x2_t __ret;
38237 __ret = (uint32x2_t)(__p0);
38238 return __ret;
38239 }
38240 #endif
38241
38242 #ifdef __LITTLE_ENDIAN__
38243 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
38244 uint32x2_t __ret;
38245 __ret = (uint32x2_t)(__p0);
38246 return __ret;
38247 }
38248 #else
38249 __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
38250 uint32x2_t __ret;
38251 __ret = (uint32x2_t)(__p0);
38252 return __ret;
38253 }
38254 #endif
38255
38256 #ifdef __LITTLE_ENDIAN__
38257 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
38258 uint32x2_t __ret;
38259 __ret = (uint32x2_t)(__p0);
38260 return __ret;
38261 }
38262 #else
38263 __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
38264 uint32x2_t __ret;
38265 __ret = (uint32x2_t)(__p0);
38266 return __ret;
38267 }
38268 #endif
38269
38270 #ifdef __LITTLE_ENDIAN__
38271 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
38272 uint32x2_t __ret;
38273 __ret = (uint32x2_t)(__p0);
38274 return __ret;
38275 }
38276 #else
38277 __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
38278 uint32x2_t __ret;
38279 __ret = (uint32x2_t)(__p0);
38280 return __ret;
38281 }
38282 #endif
38283
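/* vreinterpret_u64_<type>: view the 64-bit source as a single unsigned 64-bit lane. */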
38284 #ifdef __LITTLE_ENDIAN__
38285 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
38286 uint64x1_t __ret;
38287 __ret = (uint64x1_t)(__p0);
38288 return __ret;
38289 }
38290 #else
38291 __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
38292 uint64x1_t __ret;
38293 __ret = (uint64x1_t)(__p0);
38294 return __ret;
38295 }
38296 #endif
38297
38298 #ifdef __LITTLE_ENDIAN__
38299 __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
38300 uint64x1_t __ret;
38301 __ret = (uint64x1_t)(__p0);
38302 return __ret;
38303 }
38304 #else
38305 __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
38306 uint64x1_t __ret;
38307 __ret = (uint64x1_t)(__p0);
38308 return __ret;
38309 }
38310 #endif
38311
38312 #ifdef __LITTLE_ENDIAN__
38313 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
38314 uint64x1_t __ret;
38315 __ret = (uint64x1_t)(__p0);
38316 return __ret;
38317 }
38318 #else
38319 __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
38320 uint64x1_t __ret;
38321 __ret = (uint64x1_t)(__p0);
38322 return __ret;
38323 }
38324 #endif
38325
38326 #ifdef __LITTLE_ENDIAN__
38327 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
38328 uint64x1_t __ret;
38329 __ret = (uint64x1_t)(__p0);
38330 return __ret;
38331 }
38332 #else
38333 __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
38334 uint64x1_t __ret;
38335 __ret = (uint64x1_t)(__p0);
38336 return __ret;
38337 }
38338 #endif
38339
38340 #ifdef __LITTLE_ENDIAN__
38341 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
38342 uint64x1_t __ret;
38343 __ret = (uint64x1_t)(__p0);
38344 return __ret;
38345 }
38346 #else
38347 __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
38348 uint64x1_t __ret;
38349 __ret = (uint64x1_t)(__p0);
38350 return __ret;
38351 }
38352 #endif
38353
38354 #ifdef __LITTLE_ENDIAN__
38355 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
38356 uint64x1_t __ret;
38357 __ret = (uint64x1_t)(__p0);
38358 return __ret;
38359 }
38360 #else
38361 __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
38362 uint64x1_t __ret;
38363 __ret = (uint64x1_t)(__p0);
38364 return __ret;
38365 }
38366 #endif
38367
38368 #ifdef __LITTLE_ENDIAN__
38369 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
38370 uint64x1_t __ret;
38371 __ret = (uint64x1_t)(__p0);
38372 return __ret;
38373 }
38374 #else
38375 __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
38376 uint64x1_t __ret;
38377 __ret = (uint64x1_t)(__p0);
38378 return __ret;
38379 }
38380 #endif
38381
38382 #ifdef __LITTLE_ENDIAN__
38383 __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
38384 uint64x1_t __ret;
38385 __ret = (uint64x1_t)(__p0);
38386 return __ret;
38387 }
38388 #else
38389 __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
38390 uint64x1_t __ret;
38391 __ret = (uint64x1_t)(__p0);
38392 return __ret;
38393 }
38394 #endif
38395
38396 #ifdef __LITTLE_ENDIAN__
38397 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
38398 uint64x1_t __ret;
38399 __ret = (uint64x1_t)(__p0);
38400 return __ret;
38401 }
38402 #else
38403 __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
38404 uint64x1_t __ret;
38405 __ret = (uint64x1_t)(__p0);
38406 return __ret;
38407 }
38408 #endif
38409
38410 #ifdef __LITTLE_ENDIAN__
38411 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
38412 uint64x1_t __ret;
38413 __ret = (uint64x1_t)(__p0);
38414 return __ret;
38415 }
38416 #else
38417 __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
38418 uint64x1_t __ret;
38419 __ret = (uint64x1_t)(__p0);
38420 return __ret;
38421 }
38422 #endif
38423
38424 #ifdef __LITTLE_ENDIAN__
38425 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
38426 uint64x1_t __ret;
38427 __ret = (uint64x1_t)(__p0);
38428 return __ret;
38429 }
38430 #else
38431 __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
38432 uint64x1_t __ret;
38433 __ret = (uint64x1_t)(__p0);
38434 return __ret;
38435 }
38436 #endif
38437
38438 #ifdef __LITTLE_ENDIAN__
38439 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
38440 uint64x1_t __ret;
38441 __ret = (uint64x1_t)(__p0);
38442 return __ret;
38443 }
38444 #else
38445 __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
38446 uint64x1_t __ret;
38447 __ret = (uint64x1_t)(__p0);
38448 return __ret;
38449 }
38450 #endif
38451
38452 #ifdef __LITTLE_ENDIAN__
38453 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
38454 uint64x1_t __ret;
38455 __ret = (uint64x1_t)(__p0);
38456 return __ret;
38457 }
38458 #else
38459 __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
38460 uint64x1_t __ret;
38461 __ret = (uint64x1_t)(__p0);
38462 return __ret;
38463 }
38464 #endif
38465
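/* vreinterpret_u16_<type>: view the 64-bit source as four unsigned 16-bit lanes. */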
38466 #ifdef __LITTLE_ENDIAN__
38467 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
38468 uint16x4_t __ret;
38469 __ret = (uint16x4_t)(__p0);
38470 return __ret;
38471 }
38472 #else
38473 __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
38474 uint16x4_t __ret;
38475 __ret = (uint16x4_t)(__p0);
38476 return __ret;
38477 }
38478 #endif
38479
38480 #ifdef __LITTLE_ENDIAN__
38481 __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
38482 uint16x4_t __ret;
38483 __ret = (uint16x4_t)(__p0);
38484 return __ret;
38485 }
38486 #else
38487 __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
38488 uint16x4_t __ret;
38489 __ret = (uint16x4_t)(__p0);
38490 return __ret;
38491 }
38492 #endif
38493
38494 #ifdef __LITTLE_ENDIAN__
38495 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
38496 uint16x4_t __ret;
38497 __ret = (uint16x4_t)(__p0);
38498 return __ret;
38499 }
38500 #else
38501 __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
38502 uint16x4_t __ret;
38503 __ret = (uint16x4_t)(__p0);
38504 return __ret;
38505 }
38506 #endif
38507
38508 #ifdef __LITTLE_ENDIAN__
38509 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
38510 uint16x4_t __ret;
38511 __ret = (uint16x4_t)(__p0);
38512 return __ret;
38513 }
38514 #else
38515 __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
38516 uint16x4_t __ret;
38517 __ret = (uint16x4_t)(__p0);
38518 return __ret;
38519 }
38520 #endif
38521
38522 #ifdef __LITTLE_ENDIAN__
38523 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
38524 uint16x4_t __ret;
38525 __ret = (uint16x4_t)(__p0);
38526 return __ret;
38527 }
38528 #else
38529 __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
38530 uint16x4_t __ret;
38531 __ret = (uint16x4_t)(__p0);
38532 return __ret;
38533 }
38534 #endif
38535
38536 #ifdef __LITTLE_ENDIAN__
38537 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
38538 uint16x4_t __ret;
38539 __ret = (uint16x4_t)(__p0);
38540 return __ret;
38541 }
38542 #else
38543 __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
38544 uint16x4_t __ret;
38545 __ret = (uint16x4_t)(__p0);
38546 return __ret;
38547 }
38548 #endif
38549
38550 #ifdef __LITTLE_ENDIAN__
38551 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
38552 uint16x4_t __ret;
38553 __ret = (uint16x4_t)(__p0);
38554 return __ret;
38555 }
38556 #else
38557 __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
38558 uint16x4_t __ret;
38559 __ret = (uint16x4_t)(__p0);
38560 return __ret;
38561 }
38562 #endif
38563
38564 #ifdef __LITTLE_ENDIAN__
38565 __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
38566 uint16x4_t __ret;
38567 __ret = (uint16x4_t)(__p0);
38568 return __ret;
38569 }
38570 #else
38571 __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
38572 uint16x4_t __ret;
38573 __ret = (uint16x4_t)(__p0);
38574 return __ret;
38575 }
38576 #endif
38577
38578 #ifdef __LITTLE_ENDIAN__
38579 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
38580 uint16x4_t __ret;
38581 __ret = (uint16x4_t)(__p0);
38582 return __ret;
38583 }
38584 #else
38585 __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
38586 uint16x4_t __ret;
38587 __ret = (uint16x4_t)(__p0);
38588 return __ret;
38589 }
38590 #endif
38591
38592 #ifdef __LITTLE_ENDIAN__
38593 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
38594 uint16x4_t __ret;
38595 __ret = (uint16x4_t)(__p0);
38596 return __ret;
38597 }
38598 #else
38599 __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
38600 uint16x4_t __ret;
38601 __ret = (uint16x4_t)(__p0);
38602 return __ret;
38603 }
38604 #endif
38605
38606 #ifdef __LITTLE_ENDIAN__
38607 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
38608 uint16x4_t __ret;
38609 __ret = (uint16x4_t)(__p0);
38610 return __ret;
38611 }
38612 #else
38613 __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
38614 uint16x4_t __ret;
38615 __ret = (uint16x4_t)(__p0);
38616 return __ret;
38617 }
38618 #endif
38619
38620 #ifdef __LITTLE_ENDIAN__
38621 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
38622 uint16x4_t __ret;
38623 __ret = (uint16x4_t)(__p0);
38624 return __ret;
38625 }
38626 #else
38627 __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
38628 uint16x4_t __ret;
38629 __ret = (uint16x4_t)(__p0);
38630 return __ret;
38631 }
38632 #endif
38633
38634 #ifdef __LITTLE_ENDIAN__
38635 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
38636 uint16x4_t __ret;
38637 __ret = (uint16x4_t)(__p0);
38638 return __ret;
38639 }
38640 #else
38641 __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
38642 uint16x4_t __ret;
38643 __ret = (uint16x4_t)(__p0);
38644 return __ret;
38645 }
38646 #endif
38647
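/* vreinterpret_s8_<type>: view the 64-bit source as eight signed 8-bit lanes. */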
38648 #ifdef __LITTLE_ENDIAN__
38649 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
38650 int8x8_t __ret;
38651 __ret = (int8x8_t)(__p0);
38652 return __ret;
38653 }
38654 #else
38655 __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
38656 int8x8_t __ret;
38657 __ret = (int8x8_t)(__p0);
38658 return __ret;
38659 }
38660 #endif
38661
38662 #ifdef __LITTLE_ENDIAN__
38663 __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
38664 int8x8_t __ret;
38665 __ret = (int8x8_t)(__p0);
38666 return __ret;
38667 }
38668 #else
38669 __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
38670 int8x8_t __ret;
38671 __ret = (int8x8_t)(__p0);
38672 return __ret;
38673 }
38674 #endif
38675
38676 #ifdef __LITTLE_ENDIAN__
38677 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
38678 int8x8_t __ret;
38679 __ret = (int8x8_t)(__p0);
38680 return __ret;
38681 }
38682 #else
38683 __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
38684 int8x8_t __ret;
38685 __ret = (int8x8_t)(__p0);
38686 return __ret;
38687 }
38688 #endif
38689
38690 #ifdef __LITTLE_ENDIAN__
38691 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
38692 int8x8_t __ret;
38693 __ret = (int8x8_t)(__p0);
38694 return __ret;
38695 }
38696 #else
38697 __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
38698 int8x8_t __ret;
38699 __ret = (int8x8_t)(__p0);
38700 return __ret;
38701 }
38702 #endif
38703
38704 #ifdef __LITTLE_ENDIAN__
38705 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
38706 int8x8_t __ret;
38707 __ret = (int8x8_t)(__p0);
38708 return __ret;
38709 }
38710 #else
38711 __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
38712 int8x8_t __ret;
38713 __ret = (int8x8_t)(__p0);
38714 return __ret;
38715 }
38716 #endif
38717
38718 #ifdef __LITTLE_ENDIAN__
38719 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
38720 int8x8_t __ret;
38721 __ret = (int8x8_t)(__p0);
38722 return __ret;
38723 }
38724 #else
38725 __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
38726 int8x8_t __ret;
38727 __ret = (int8x8_t)(__p0);
38728 return __ret;
38729 }
38730 #endif
38731
38732 #ifdef __LITTLE_ENDIAN__
38733 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
38734 int8x8_t __ret;
38735 __ret = (int8x8_t)(__p0);
38736 return __ret;
38737 }
38738 #else
38739 __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
38740 int8x8_t __ret;
38741 __ret = (int8x8_t)(__p0);
38742 return __ret;
38743 }
38744 #endif
38745
38746 #ifdef __LITTLE_ENDIAN__
38747 __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
38748 int8x8_t __ret;
38749 __ret = (int8x8_t)(__p0);
38750 return __ret;
38751 }
38752 #else
38753 __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
38754 int8x8_t __ret;
38755 __ret = (int8x8_t)(__p0);
38756 return __ret;
38757 }
38758 #endif
38759
38760 #ifdef __LITTLE_ENDIAN__
38761 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
38762 int8x8_t __ret;
38763 __ret = (int8x8_t)(__p0);
38764 return __ret;
38765 }
38766 #else
38767 __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
38768 int8x8_t __ret;
38769 __ret = (int8x8_t)(__p0);
38770 return __ret;
38771 }
38772 #endif
38773
38774 #ifdef __LITTLE_ENDIAN__
38775 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
38776 int8x8_t __ret;
38777 __ret = (int8x8_t)(__p0);
38778 return __ret;
38779 }
38780 #else
38781 __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
38782 int8x8_t __ret;
38783 __ret = (int8x8_t)(__p0);
38784 return __ret;
38785 }
38786 #endif
38787
38788 #ifdef __LITTLE_ENDIAN__
38789 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
38790 int8x8_t __ret;
38791 __ret = (int8x8_t)(__p0);
38792 return __ret;
38793 }
38794 #else
38795 __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
38796 int8x8_t __ret;
38797 __ret = (int8x8_t)(__p0);
38798 return __ret;
38799 }
38800 #endif
38801
38802 #ifdef __LITTLE_ENDIAN__
38803 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
38804 int8x8_t __ret;
38805 __ret = (int8x8_t)(__p0);
38806 return __ret;
38807 }
38808 #else
38809 __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
38810 int8x8_t __ret;
38811 __ret = (int8x8_t)(__p0);
38812 return __ret;
38813 }
38814 #endif
38815
38816 #ifdef __LITTLE_ENDIAN__
38817 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
38818 int8x8_t __ret;
38819 __ret = (int8x8_t)(__p0);
38820 return __ret;
38821 }
38822 #else
38823 __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
38824 int8x8_t __ret;
38825 __ret = (int8x8_t)(__p0);
38826 return __ret;
38827 }
38828 #endif
38829
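/* vreinterpret_f64_<type>: view the 64-bit source as a single float64 lane.
 * float64x1_t is only defined for AArch64, so these forms are AArch64-only;
 * the reinterpretation itself is still just a bit-pattern cast. */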
38830 #ifdef __LITTLE_ENDIAN__
38831 __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
38832 float64x1_t __ret;
38833 __ret = (float64x1_t)(__p0);
38834 return __ret;
38835 }
38836 #else
38837 __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
38838 float64x1_t __ret;
38839 __ret = (float64x1_t)(__p0);
38840 return __ret;
38841 }
38842 #endif
38843
38844 #ifdef __LITTLE_ENDIAN__
38845 __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
38846 float64x1_t __ret;
38847 __ret = (float64x1_t)(__p0);
38848 return __ret;
38849 }
38850 #else
38851 __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
38852 float64x1_t __ret;
38853 __ret = (float64x1_t)(__p0);
38854 return __ret;
38855 }
38856 #endif
38857
38858 #ifdef __LITTLE_ENDIAN__
38859 __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
38860 float64x1_t __ret;
38861 __ret = (float64x1_t)(__p0);
38862 return __ret;
38863 }
38864 #else
38865 __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
38866 float64x1_t __ret;
38867 __ret = (float64x1_t)(__p0);
38868 return __ret;
38869 }
38870 #endif
38871
38872 #ifdef __LITTLE_ENDIAN__
38873 __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
38874 float64x1_t __ret;
38875 __ret = (float64x1_t)(__p0);
38876 return __ret;
38877 }
38878 #else
38879 __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
38880 float64x1_t __ret;
38881 __ret = (float64x1_t)(__p0);
38882 return __ret;
38883 }
38884 #endif
38885
38886 #ifdef __LITTLE_ENDIAN__
38887 __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
38888 float64x1_t __ret;
38889 __ret = (float64x1_t)(__p0);
38890 return __ret;
38891 }
38892 #else
38893 __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
38894 float64x1_t __ret;
38895 __ret = (float64x1_t)(__p0);
38896 return __ret;
38897 }
38898 #endif
38899
38900 #ifdef __LITTLE_ENDIAN__
38901 __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
38902 float64x1_t __ret;
38903 __ret = (float64x1_t)(__p0);
38904 return __ret;
38905 }
38906 #else
38907 __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
38908 float64x1_t __ret;
38909 __ret = (float64x1_t)(__p0);
38910 return __ret;
38911 }
38912 #endif
38913
38914 #ifdef __LITTLE_ENDIAN__
38915 __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
38916 float64x1_t __ret;
38917 __ret = (float64x1_t)(__p0);
38918 return __ret;
38919 }
38920 #else
38921 __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
38922 float64x1_t __ret;
38923 __ret = (float64x1_t)(__p0);
38924 return __ret;
38925 }
38926 #endif
38927
38928 #ifdef __LITTLE_ENDIAN__
38929 __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
38930 float64x1_t __ret;
38931 __ret = (float64x1_t)(__p0);
38932 return __ret;
38933 }
38934 #else
38935 __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
38936 float64x1_t __ret;
38937 __ret = (float64x1_t)(__p0);
38938 return __ret;
38939 }
38940 #endif
38941
38942 #ifdef __LITTLE_ENDIAN__
38943 __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
38944 float64x1_t __ret;
38945 __ret = (float64x1_t)(__p0);
38946 return __ret;
38947 }
38948 #else
38949 __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
38950 float64x1_t __ret;
38951 __ret = (float64x1_t)(__p0);
38952 return __ret;
38953 }
38954 #endif
38955
38956 #ifdef __LITTLE_ENDIAN__
38957 __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
38958 float64x1_t __ret;
38959 __ret = (float64x1_t)(__p0);
38960 return __ret;
38961 }
38962 #else
38963 __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
38964 float64x1_t __ret;
38965 __ret = (float64x1_t)(__p0);
38966 return __ret;
38967 }
38968 #endif
38969
38970 #ifdef __LITTLE_ENDIAN__
38971 __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
38972 float64x1_t __ret;
38973 __ret = (float64x1_t)(__p0);
38974 return __ret;
38975 }
38976 #else
38977 __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
38978 float64x1_t __ret;
38979 __ret = (float64x1_t)(__p0);
38980 return __ret;
38981 }
38982 #endif
38983
38984 #ifdef __LITTLE_ENDIAN__
38985 __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
38986 float64x1_t __ret;
38987 __ret = (float64x1_t)(__p0);
38988 return __ret;
38989 }
38990 #else
38991 __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
38992 float64x1_t __ret;
38993 __ret = (float64x1_t)(__p0);
38994 return __ret;
38995 }
38996 #endif
38997
38998 #ifdef __LITTLE_ENDIAN__
38999 __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
39000 float64x1_t __ret;
39001 __ret = (float64x1_t)(__p0);
39002 return __ret;
39003 }
39004 #else
39005 __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
39006 float64x1_t __ret;
39007 __ret = (float64x1_t)(__p0);
39008 return __ret;
39009 }
39010 #endif
39011
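/* vreinterpret_f32_<type>: view the 64-bit source as two float32 lanes. The
 * reverse direction is handy for bit-level manipulation of floats, for example
 * (illustrative only, using the standard vshr_n_u32 intrinsic):
 *
 *   uint32x2_t sign = vshr_n_u32(vreinterpret_u32_f32(v), 31);  // sign bits of v
 */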
39012 #ifdef __LITTLE_ENDIAN__
39013 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
39014 float32x2_t __ret;
39015 __ret = (float32x2_t)(__p0);
39016 return __ret;
39017 }
39018 #else
39019 __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
39020 float32x2_t __ret;
39021 __ret = (float32x2_t)(__p0);
39022 return __ret;
39023 }
39024 #endif
39025
39026 #ifdef __LITTLE_ENDIAN__
39027 __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
39028 float32x2_t __ret;
39029 __ret = (float32x2_t)(__p0);
39030 return __ret;
39031 }
39032 #else
39033 __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
39034 float32x2_t __ret;
39035 __ret = (float32x2_t)(__p0);
39036 return __ret;
39037 }
39038 #endif
39039
39040 #ifdef __LITTLE_ENDIAN__
39041 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
39042 float32x2_t __ret;
39043 __ret = (float32x2_t)(__p0);
39044 return __ret;
39045 }
39046 #else
39047 __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
39048 float32x2_t __ret;
39049 __ret = (float32x2_t)(__p0);
39050 return __ret;
39051 }
39052 #endif
39053
39054 #ifdef __LITTLE_ENDIAN__
39055 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
39056 float32x2_t __ret;
39057 __ret = (float32x2_t)(__p0);
39058 return __ret;
39059 }
39060 #else
39061 __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
39062 float32x2_t __ret;
39063 __ret = (float32x2_t)(__p0);
39064 return __ret;
39065 }
39066 #endif
39067
39068 #ifdef __LITTLE_ENDIAN__
39069 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
39070 float32x2_t __ret;
39071 __ret = (float32x2_t)(__p0);
39072 return __ret;
39073 }
39074 #else
39075 __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
39076 float32x2_t __ret;
39077 __ret = (float32x2_t)(__p0);
39078 return __ret;
39079 }
39080 #endif
39081
39082 #ifdef __LITTLE_ENDIAN__
39083 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
39084 float32x2_t __ret;
39085 __ret = (float32x2_t)(__p0);
39086 return __ret;
39087 }
39088 #else
39089 __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
39090 float32x2_t __ret;
39091 __ret = (float32x2_t)(__p0);
39092 return __ret;
39093 }
39094 #endif
39095
39096 #ifdef __LITTLE_ENDIAN__
39097 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
39098 float32x2_t __ret;
39099 __ret = (float32x2_t)(__p0);
39100 return __ret;
39101 }
39102 #else
39103 __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
39104 float32x2_t __ret;
39105 __ret = (float32x2_t)(__p0);
39106 return __ret;
39107 }
39108 #endif
39109
39110 #ifdef __LITTLE_ENDIAN__
39111 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
39112 float32x2_t __ret;
39113 __ret = (float32x2_t)(__p0);
39114 return __ret;
39115 }
39116 #else
39117 __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
39118 float32x2_t __ret;
39119 __ret = (float32x2_t)(__p0);
39120 return __ret;
39121 }
39122 #endif
39123
39124 #ifdef __LITTLE_ENDIAN__
39125 __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
39126 float32x2_t __ret;
39127 __ret = (float32x2_t)(__p0);
39128 return __ret;
39129 }
39130 #else
39131 __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
39132 float32x2_t __ret;
39133 __ret = (float32x2_t)(__p0);
39134 return __ret;
39135 }
39136 #endif
39137
39138 #ifdef __LITTLE_ENDIAN__
39139 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
39140 float32x2_t __ret;
39141 __ret = (float32x2_t)(__p0);
39142 return __ret;
39143 }
39144 #else
39145 __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
39146 float32x2_t __ret;
39147 __ret = (float32x2_t)(__p0);
39148 return __ret;
39149 }
39150 #endif
39151
39152 #ifdef __LITTLE_ENDIAN__
39153 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
39154 float32x2_t __ret;
39155 __ret = (float32x2_t)(__p0);
39156 return __ret;
39157 }
39158 #else
39159 __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
39160 float32x2_t __ret;
39161 __ret = (float32x2_t)(__p0);
39162 return __ret;
39163 }
39164 #endif
39165
39166 #ifdef __LITTLE_ENDIAN__
39167 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
39168 float32x2_t __ret;
39169 __ret = (float32x2_t)(__p0);
39170 return __ret;
39171 }
39172 #else
39173 __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
39174 float32x2_t __ret;
39175 __ret = (float32x2_t)(__p0);
39176 return __ret;
39177 }
39178 #endif
39179
39180 #ifdef __LITTLE_ENDIAN__
39181 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
39182 float32x2_t __ret;
39183 __ret = (float32x2_t)(__p0);
39184 return __ret;
39185 }
39186 #else
39187 __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
39188 float32x2_t __ret;
39189 __ret = (float32x2_t)(__p0);
39190 return __ret;
39191 }
39192 #endif
39193
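/* vreinterpret_f16_<type>: view the 64-bit source as four float16 lanes. */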
39194 #ifdef __LITTLE_ENDIAN__
39195 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
39196 float16x4_t __ret;
39197 __ret = (float16x4_t)(__p0);
39198 return __ret;
39199 }
39200 #else
39201 __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
39202 float16x4_t __ret;
39203 __ret = (float16x4_t)(__p0);
39204 return __ret;
39205 }
39206 #endif
39207
39208 #ifdef __LITTLE_ENDIAN__
39209 __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
39210 float16x4_t __ret;
39211 __ret = (float16x4_t)(__p0);
39212 return __ret;
39213 }
39214 #else
39215 __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
39216 float16x4_t __ret;
39217 __ret = (float16x4_t)(__p0);
39218 return __ret;
39219 }
39220 #endif
39221
39222 #ifdef __LITTLE_ENDIAN__
39223 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
39224 float16x4_t __ret;
39225 __ret = (float16x4_t)(__p0);
39226 return __ret;
39227 }
39228 #else
39229 __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
39230 float16x4_t __ret;
39231 __ret = (float16x4_t)(__p0);
39232 return __ret;
39233 }
39234 #endif
39235
39236 #ifdef __LITTLE_ENDIAN__
39237 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
39238 float16x4_t __ret;
39239 __ret = (float16x4_t)(__p0);
39240 return __ret;
39241 }
39242 #else
39243 __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
39244 float16x4_t __ret;
39245 __ret = (float16x4_t)(__p0);
39246 return __ret;
39247 }
39248 #endif
39249
39250 #ifdef __LITTLE_ENDIAN__
39251 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
39252 float16x4_t __ret;
39253 __ret = (float16x4_t)(__p0);
39254 return __ret;
39255 }
39256 #else
39257 __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
39258 float16x4_t __ret;
39259 __ret = (float16x4_t)(__p0);
39260 return __ret;
39261 }
39262 #endif
39263
39264 #ifdef __LITTLE_ENDIAN__
39265 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
39266 float16x4_t __ret;
39267 __ret = (float16x4_t)(__p0);
39268 return __ret;
39269 }
39270 #else
39271 __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
39272 float16x4_t __ret;
39273 __ret = (float16x4_t)(__p0);
39274 return __ret;
39275 }
39276 #endif
39277
39278 #ifdef __LITTLE_ENDIAN__
39279 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
39280 float16x4_t __ret;
39281 __ret = (float16x4_t)(__p0);
39282 return __ret;
39283 }
39284 #else
39285 __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
39286 float16x4_t __ret;
39287 __ret = (float16x4_t)(__p0);
39288 return __ret;
39289 }
39290 #endif
39291
39292 #ifdef __LITTLE_ENDIAN__
39293 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
39294 float16x4_t __ret;
39295 __ret = (float16x4_t)(__p0);
39296 return __ret;
39297 }
39298 #else
39299 __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
39300 float16x4_t __ret;
39301 __ret = (float16x4_t)(__p0);
39302 return __ret;
39303 }
39304 #endif
39305
39306 #ifdef __LITTLE_ENDIAN__
39307 __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
39308 float16x4_t __ret;
39309 __ret = (float16x4_t)(__p0);
39310 return __ret;
39311 }
39312 #else
39313 __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
39314 float16x4_t __ret;
39315 __ret = (float16x4_t)(__p0);
39316 return __ret;
39317 }
39318 #endif
39319
39320 #ifdef __LITTLE_ENDIAN__
39321 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
39322 float16x4_t __ret;
39323 __ret = (float16x4_t)(__p0);
39324 return __ret;
39325 }
39326 #else
39327 __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
39328 float16x4_t __ret;
39329 __ret = (float16x4_t)(__p0);
39330 return __ret;
39331 }
39332 #endif
39333
39334 #ifdef __LITTLE_ENDIAN__
39335 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
39336 float16x4_t __ret;
39337 __ret = (float16x4_t)(__p0);
39338 return __ret;
39339 }
39340 #else
39341 __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
39342 float16x4_t __ret;
39343 __ret = (float16x4_t)(__p0);
39344 return __ret;
39345 }
39346 #endif
39347
39348 #ifdef __LITTLE_ENDIAN__
39349 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
39350 float16x4_t __ret;
39351 __ret = (float16x4_t)(__p0);
39352 return __ret;
39353 }
39354 #else
39355 __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
39356 float16x4_t __ret;
39357 __ret = (float16x4_t)(__p0);
39358 return __ret;
39359 }
39360 #endif
39361
39362 #ifdef __LITTLE_ENDIAN__
39363 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
39364 float16x4_t __ret;
39365 __ret = (float16x4_t)(__p0);
39366 return __ret;
39367 }
39368 #else
39369 __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
39370 float16x4_t __ret;
39371 __ret = (float16x4_t)(__p0);
39372 return __ret;
39373 }
39374 #endif
39375
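/* vreinterpret_s32_<type>: view the 64-bit source as two signed 32-bit lanes. */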
39376 #ifdef __LITTLE_ENDIAN__
39377 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
39378 int32x2_t __ret;
39379 __ret = (int32x2_t)(__p0);
39380 return __ret;
39381 }
39382 #else
39383 __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
39384 int32x2_t __ret;
39385 __ret = (int32x2_t)(__p0);
39386 return __ret;
39387 }
39388 #endif
39389
39390 #ifdef __LITTLE_ENDIAN__
39391 __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
39392 int32x2_t __ret;
39393 __ret = (int32x2_t)(__p0);
39394 return __ret;
39395 }
39396 #else
39397 __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
39398 int32x2_t __ret;
39399 __ret = (int32x2_t)(__p0);
39400 return __ret;
39401 }
39402 #endif
39403
39404 #ifdef __LITTLE_ENDIAN__
39405 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
39406 int32x2_t __ret;
39407 __ret = (int32x2_t)(__p0);
39408 return __ret;
39409 }
39410 #else
39411 __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
39412 int32x2_t __ret;
39413 __ret = (int32x2_t)(__p0);
39414 return __ret;
39415 }
39416 #endif
39417
39418 #ifdef __LITTLE_ENDIAN__
39419 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
39420 int32x2_t __ret;
39421 __ret = (int32x2_t)(__p0);
39422 return __ret;
39423 }
39424 #else
39425 __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
39426 int32x2_t __ret;
39427 __ret = (int32x2_t)(__p0);
39428 return __ret;
39429 }
39430 #endif
39431
39432 #ifdef __LITTLE_ENDIAN__
39433 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
39434 int32x2_t __ret;
39435 __ret = (int32x2_t)(__p0);
39436 return __ret;
39437 }
39438 #else
39439 __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
39440 int32x2_t __ret;
39441 __ret = (int32x2_t)(__p0);
39442 return __ret;
39443 }
39444 #endif
39445
39446 #ifdef __LITTLE_ENDIAN__
39447 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
39448 int32x2_t __ret;
39449 __ret = (int32x2_t)(__p0);
39450 return __ret;
39451 }
39452 #else
39453 __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
39454 int32x2_t __ret;
39455 __ret = (int32x2_t)(__p0);
39456 return __ret;
39457 }
39458 #endif
39459
39460 #ifdef __LITTLE_ENDIAN__
39461 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
39462 int32x2_t __ret;
39463 __ret = (int32x2_t)(__p0);
39464 return __ret;
39465 }
39466 #else
39467 __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
39468 int32x2_t __ret;
39469 __ret = (int32x2_t)(__p0);
39470 return __ret;
39471 }
39472 #endif
39473
39474 #ifdef __LITTLE_ENDIAN__
39475 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
39476 int32x2_t __ret;
39477 __ret = (int32x2_t)(__p0);
39478 return __ret;
39479 }
39480 #else
39481 __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
39482 int32x2_t __ret;
39483 __ret = (int32x2_t)(__p0);
39484 return __ret;
39485 }
39486 #endif
39487
39488 #ifdef __LITTLE_ENDIAN__
39489 __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
39490 int32x2_t __ret;
39491 __ret = (int32x2_t)(__p0);
39492 return __ret;
39493 }
39494 #else
39495 __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
39496 int32x2_t __ret;
39497 __ret = (int32x2_t)(__p0);
39498 return __ret;
39499 }
39500 #endif
39501
39502 #ifdef __LITTLE_ENDIAN__
39503 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
39504 int32x2_t __ret;
39505 __ret = (int32x2_t)(__p0);
39506 return __ret;
39507 }
39508 #else
39509 __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
39510 int32x2_t __ret;
39511 __ret = (int32x2_t)(__p0);
39512 return __ret;
39513 }
39514 #endif
39515
39516 #ifdef __LITTLE_ENDIAN__
39517 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
39518 int32x2_t __ret;
39519 __ret = (int32x2_t)(__p0);
39520 return __ret;
39521 }
39522 #else
39523 __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
39524 int32x2_t __ret;
39525 __ret = (int32x2_t)(__p0);
39526 return __ret;
39527 }
39528 #endif
39529
39530 #ifdef __LITTLE_ENDIAN__
39531 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
39532 int32x2_t __ret;
39533 __ret = (int32x2_t)(__p0);
39534 return __ret;
39535 }
39536 #else
39537 __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
39538 int32x2_t __ret;
39539 __ret = (int32x2_t)(__p0);
39540 return __ret;
39541 }
39542 #endif
39543
39544 #ifdef __LITTLE_ENDIAN__
39545 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
39546 int32x2_t __ret;
39547 __ret = (int32x2_t)(__p0);
39548 return __ret;
39549 }
39550 #else
39551 __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
39552 int32x2_t __ret;
39553 __ret = (int32x2_t)(__p0);
39554 return __ret;
39555 }
39556 #endif
39557
39558 #ifdef __LITTLE_ENDIAN__
39559 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
39560 int64x1_t __ret;
39561 __ret = (int64x1_t)(__p0);
39562 return __ret;
39563 }
39564 #else
39565 __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
39566 int64x1_t __ret;
39567 __ret = (int64x1_t)(__p0);
39568 return __ret;
39569 }
39570 #endif
39571
39572 #ifdef __LITTLE_ENDIAN__
39573 __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
39574 int64x1_t __ret;
39575 __ret = (int64x1_t)(__p0);
39576 return __ret;
39577 }
39578 #else
39579 __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
39580 int64x1_t __ret;
39581 __ret = (int64x1_t)(__p0);
39582 return __ret;
39583 }
39584 #endif
39585
39586 #ifdef __LITTLE_ENDIAN__
39587 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
39588 int64x1_t __ret;
39589 __ret = (int64x1_t)(__p0);
39590 return __ret;
39591 }
39592 #else
39593 __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
39594 int64x1_t __ret;
39595 __ret = (int64x1_t)(__p0);
39596 return __ret;
39597 }
39598 #endif
39599
39600 #ifdef __LITTLE_ENDIAN__
39601 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
39602 int64x1_t __ret;
39603 __ret = (int64x1_t)(__p0);
39604 return __ret;
39605 }
39606 #else
39607 __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
39608 int64x1_t __ret;
39609 __ret = (int64x1_t)(__p0);
39610 return __ret;
39611 }
39612 #endif
39613
39614 #ifdef __LITTLE_ENDIAN__
39615 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
39616 int64x1_t __ret;
39617 __ret = (int64x1_t)(__p0);
39618 return __ret;
39619 }
39620 #else
39621 __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
39622 int64x1_t __ret;
39623 __ret = (int64x1_t)(__p0);
39624 return __ret;
39625 }
39626 #endif
39627
39628 #ifdef __LITTLE_ENDIAN__
39629 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
39630 int64x1_t __ret;
39631 __ret = (int64x1_t)(__p0);
39632 return __ret;
39633 }
39634 #else
39635 __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
39636 int64x1_t __ret;
39637 __ret = (int64x1_t)(__p0);
39638 return __ret;
39639 }
39640 #endif
39641
39642 #ifdef __LITTLE_ENDIAN__
39643 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
39644 int64x1_t __ret;
39645 __ret = (int64x1_t)(__p0);
39646 return __ret;
39647 }
39648 #else
39649 __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
39650 int64x1_t __ret;
39651 __ret = (int64x1_t)(__p0);
39652 return __ret;
39653 }
39654 #endif
39655
39656 #ifdef __LITTLE_ENDIAN__
39657 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
39658 int64x1_t __ret;
39659 __ret = (int64x1_t)(__p0);
39660 return __ret;
39661 }
39662 #else
39663 __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
39664 int64x1_t __ret;
39665 __ret = (int64x1_t)(__p0);
39666 return __ret;
39667 }
39668 #endif
39669
39670 #ifdef __LITTLE_ENDIAN__
39671 __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
39672 int64x1_t __ret;
39673 __ret = (int64x1_t)(__p0);
39674 return __ret;
39675 }
39676 #else
39677 __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
39678 int64x1_t __ret;
39679 __ret = (int64x1_t)(__p0);
39680 return __ret;
39681 }
39682 #endif
39683
39684 #ifdef __LITTLE_ENDIAN__
39685 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
39686 int64x1_t __ret;
39687 __ret = (int64x1_t)(__p0);
39688 return __ret;
39689 }
39690 #else
39691 __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
39692 int64x1_t __ret;
39693 __ret = (int64x1_t)(__p0);
39694 return __ret;
39695 }
39696 #endif
39697
39698 #ifdef __LITTLE_ENDIAN__
39699 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
39700 int64x1_t __ret;
39701 __ret = (int64x1_t)(__p0);
39702 return __ret;
39703 }
39704 #else
39705 __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
39706 int64x1_t __ret;
39707 __ret = (int64x1_t)(__p0);
39708 return __ret;
39709 }
39710 #endif
39711
39712 #ifdef __LITTLE_ENDIAN__
39713 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
39714 int64x1_t __ret;
39715 __ret = (int64x1_t)(__p0);
39716 return __ret;
39717 }
39718 #else
39719 __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
39720 int64x1_t __ret;
39721 __ret = (int64x1_t)(__p0);
39722 return __ret;
39723 }
39724 #endif
39725
39726 #ifdef __LITTLE_ENDIAN__
39727 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
39728 int64x1_t __ret;
39729 __ret = (int64x1_t)(__p0);
39730 return __ret;
39731 }
39732 #else
39733 __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
39734 int64x1_t __ret;
39735 __ret = (int64x1_t)(__p0);
39736 return __ret;
39737 }
39738 #endif
39739
39740 #ifdef __LITTLE_ENDIAN__
39741 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
39742 int16x4_t __ret;
39743 __ret = (int16x4_t)(__p0);
39744 return __ret;
39745 }
39746 #else
39747 __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
39748 int16x4_t __ret;
39749 __ret = (int16x4_t)(__p0);
39750 return __ret;
39751 }
39752 #endif
39753
39754 #ifdef __LITTLE_ENDIAN__
39755 __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
39756 int16x4_t __ret;
39757 __ret = (int16x4_t)(__p0);
39758 return __ret;
39759 }
39760 #else
39761 __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
39762 int16x4_t __ret;
39763 __ret = (int16x4_t)(__p0);
39764 return __ret;
39765 }
39766 #endif
39767
39768 #ifdef __LITTLE_ENDIAN__
39769 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
39770 int16x4_t __ret;
39771 __ret = (int16x4_t)(__p0);
39772 return __ret;
39773 }
39774 #else
39775 __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
39776 int16x4_t __ret;
39777 __ret = (int16x4_t)(__p0);
39778 return __ret;
39779 }
39780 #endif
39781
39782 #ifdef __LITTLE_ENDIAN__
39783 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
39784 int16x4_t __ret;
39785 __ret = (int16x4_t)(__p0);
39786 return __ret;
39787 }
39788 #else
39789 __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
39790 int16x4_t __ret;
39791 __ret = (int16x4_t)(__p0);
39792 return __ret;
39793 }
39794 #endif
39795
39796 #ifdef __LITTLE_ENDIAN__
39797 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
39798 int16x4_t __ret;
39799 __ret = (int16x4_t)(__p0);
39800 return __ret;
39801 }
39802 #else
39803 __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
39804 int16x4_t __ret;
39805 __ret = (int16x4_t)(__p0);
39806 return __ret;
39807 }
39808 #endif
39809
39810 #ifdef __LITTLE_ENDIAN__
39811 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
39812 int16x4_t __ret;
39813 __ret = (int16x4_t)(__p0);
39814 return __ret;
39815 }
39816 #else
39817 __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
39818 int16x4_t __ret;
39819 __ret = (int16x4_t)(__p0);
39820 return __ret;
39821 }
39822 #endif
39823
39824 #ifdef __LITTLE_ENDIAN__
39825 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
39826 int16x4_t __ret;
39827 __ret = (int16x4_t)(__p0);
39828 return __ret;
39829 }
39830 #else
39831 __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
39832 int16x4_t __ret;
39833 __ret = (int16x4_t)(__p0);
39834 return __ret;
39835 }
39836 #endif
39837
39838 #ifdef __LITTLE_ENDIAN__
39839 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
39840 int16x4_t __ret;
39841 __ret = (int16x4_t)(__p0);
39842 return __ret;
39843 }
39844 #else
39845 __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
39846 int16x4_t __ret;
39847 __ret = (int16x4_t)(__p0);
39848 return __ret;
39849 }
39850 #endif
39851
39852 #ifdef __LITTLE_ENDIAN__
39853 __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
39854 int16x4_t __ret;
39855 __ret = (int16x4_t)(__p0);
39856 return __ret;
39857 }
39858 #else
39859 __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
39860 int16x4_t __ret;
39861 __ret = (int16x4_t)(__p0);
39862 return __ret;
39863 }
39864 #endif
39865
39866 #ifdef __LITTLE_ENDIAN__
39867 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
39868 int16x4_t __ret;
39869 __ret = (int16x4_t)(__p0);
39870 return __ret;
39871 }
39872 #else
39873 __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
39874 int16x4_t __ret;
39875 __ret = (int16x4_t)(__p0);
39876 return __ret;
39877 }
39878 #endif
39879
39880 #ifdef __LITTLE_ENDIAN__
39881 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
39882 int16x4_t __ret;
39883 __ret = (int16x4_t)(__p0);
39884 return __ret;
39885 }
39886 #else
39887 __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
39888 int16x4_t __ret;
39889 __ret = (int16x4_t)(__p0);
39890 return __ret;
39891 }
39892 #endif
39893
39894 #ifdef __LITTLE_ENDIAN__
39895 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
39896 int16x4_t __ret;
39897 __ret = (int16x4_t)(__p0);
39898 return __ret;
39899 }
39900 #else
39901 __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
39902 int16x4_t __ret;
39903 __ret = (int16x4_t)(__p0);
39904 return __ret;
39905 }
39906 #endif
39907
39908 #ifdef __LITTLE_ENDIAN__
39909 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
39910 int16x4_t __ret;
39911 __ret = (int16x4_t)(__p0);
39912 return __ret;
39913 }
39914 #else
39915 __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
39916 int16x4_t __ret;
39917 __ret = (int16x4_t)(__p0);
39918 return __ret;
39919 }
39920 #endif
39921
39922 #endif
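/* ARMv8 directed-rounding intrinsics (FRINT*). vrnd rounds toward zero, vrnda to
 * nearest with ties away from zero, vrndi and vrndx round using the current FPCR
 * rounding mode (vrndx additionally raises the Inexact exception), vrndm rounds
 * toward minus infinity, vrndn to nearest with ties to even, and vrndp toward plus
 * infinity. The big-endian variants reverse the lane order around the builtin call
 * so lane numbering matches the architectural register layout. */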
39923 #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
39924 #ifdef __LITTLE_ENDIAN__
39925 __ai float64x2_t vrndq_f64(float64x2_t __p0) {
39926 float64x2_t __ret;
39927 __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
39928 return __ret;
39929 }
39930 #else
39931 __ai float64x2_t vrndq_f64(float64x2_t __p0) {
39932 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
39933 float64x2_t __ret;
39934 __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
39935 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
39936 return __ret;
39937 }
39938 #endif
39939
39940 #ifdef __LITTLE_ENDIAN__
39941 __ai float64x1_t vrnd_f64(float64x1_t __p0) {
39942 float64x1_t __ret;
39943 __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
39944 return __ret;
39945 }
39946 #else
39947 __ai float64x1_t vrnd_f64(float64x1_t __p0) {
39948 float64x1_t __ret;
39949 __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
39950 return __ret;
39951 }
39952 #endif
39953
39954 #ifdef __LITTLE_ENDIAN__
39955 __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
39956 float64x2_t __ret;
39957 __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
39958 return __ret;
39959 }
39960 #else
39961 __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
39962 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
39963 float64x2_t __ret;
39964 __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
39965 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
39966 return __ret;
39967 }
39968 #endif
39969
39970 #ifdef __LITTLE_ENDIAN__
39971 __ai float64x1_t vrnda_f64(float64x1_t __p0) {
39972 float64x1_t __ret;
39973 __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
39974 return __ret;
39975 }
39976 #else
39977 __ai float64x1_t vrnda_f64(float64x1_t __p0) {
39978 float64x1_t __ret;
39979 __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
39980 return __ret;
39981 }
39982 #endif
39983
39984 #ifdef __LITTLE_ENDIAN__
39985 __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
39986 float64x2_t __ret;
39987 __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
39988 return __ret;
39989 }
39990 #else
39991 __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
39992 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
39993 float64x2_t __ret;
39994 __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
39995 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
39996 return __ret;
39997 }
39998 #endif
39999
40000 #ifdef __LITTLE_ENDIAN__
40001 __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
40002 float32x4_t __ret;
40003 __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
40004 return __ret;
40005 }
40006 #else
40007 __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
40008 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40009 float32x4_t __ret;
40010 __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
40011 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40012 return __ret;
40013 }
40014 #endif
40015
40016 #ifdef __LITTLE_ENDIAN__
40017 __ai float64x1_t vrndi_f64(float64x1_t __p0) {
40018 float64x1_t __ret;
40019 __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
40020 return __ret;
40021 }
40022 #else
40023 __ai float64x1_t vrndi_f64(float64x1_t __p0) {
40024 float64x1_t __ret;
40025 __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
40026 return __ret;
40027 }
40028 #endif
40029
40030 #ifdef __LITTLE_ENDIAN__
40031 __ai float32x2_t vrndi_f32(float32x2_t __p0) {
40032 float32x2_t __ret;
40033 __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
40034 return __ret;
40035 }
40036 #else
40037 __ai float32x2_t vrndi_f32(float32x2_t __p0) {
40038 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40039 float32x2_t __ret;
40040 __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
40041 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40042 return __ret;
40043 }
40044 #endif
40045
40046 #ifdef __LITTLE_ENDIAN__
40047 __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
40048 float64x2_t __ret;
40049 __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
40050 return __ret;
40051 }
40052 #else
40053 __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
40054 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40055 float64x2_t __ret;
40056 __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
40057 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40058 return __ret;
40059 }
40060 #endif
40061
40062 #ifdef __LITTLE_ENDIAN__
40063 __ai float64x1_t vrndm_f64(float64x1_t __p0) {
40064 float64x1_t __ret;
40065 __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
40066 return __ret;
40067 }
40068 #else
40069 __ai float64x1_t vrndm_f64(float64x1_t __p0) {
40070 float64x1_t __ret;
40071 __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
40072 return __ret;
40073 }
40074 #endif
40075
40076 #ifdef __LITTLE_ENDIAN__
40077 __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
40078 float64x2_t __ret;
40079 __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
40080 return __ret;
40081 }
40082 #else
40083 __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
40084 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40085 float64x2_t __ret;
40086 __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
40087 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40088 return __ret;
40089 }
40090 #endif
40091
40092 #ifdef __LITTLE_ENDIAN__
40093 __ai float64x1_t vrndn_f64(float64x1_t __p0) {
40094 float64x1_t __ret;
40095 __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
40096 return __ret;
40097 }
40098 #else
40099 __ai float64x1_t vrndn_f64(float64x1_t __p0) {
40100 float64x1_t __ret;
40101 __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
40102 return __ret;
40103 }
40104 #endif
40105
40106 #ifdef __LITTLE_ENDIAN__
40107 __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
40108 float64x2_t __ret;
40109 __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
40110 return __ret;
40111 }
40112 #else
40113 __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
40114 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40115 float64x2_t __ret;
40116 __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
40117 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40118 return __ret;
40119 }
40120 #endif
40121
40122 #ifdef __LITTLE_ENDIAN__
40123 __ai float64x1_t vrndp_f64(float64x1_t __p0) {
40124 float64x1_t __ret;
40125 __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
40126 return __ret;
40127 }
40128 #else
40129 __ai float64x1_t vrndp_f64(float64x1_t __p0) {
40130 float64x1_t __ret;
40131 __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
40132 return __ret;
40133 }
40134 #endif
40135
40136 #ifdef __LITTLE_ENDIAN__
40137 __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
40138 float64x2_t __ret;
40139 __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
40140 return __ret;
40141 }
40142 #else
40143 __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
40144 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40145 float64x2_t __ret;
40146 __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
40147 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40148 return __ret;
40149 }
40150 #endif
40151
40152 #ifdef __LITTLE_ENDIAN__
40153 __ai float64x1_t vrndx_f64(float64x1_t __p0) {
40154 float64x1_t __ret;
40155 __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
40156 return __ret;
40157 }
40158 #else
40159 __ai float64x1_t vrndx_f64(float64x1_t __p0) {
40160 float64x1_t __ret;
40161 __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
40162 return __ret;
40163 }
40164 #endif
40165
40166 #endif
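/* vmaxnm/vminnm follow the IEEE 754-2008 maxNum/minNum rules (FMAXNM/FMINNM):
 * when exactly one operand is a quiet NaN, the numeric operand is returned
 * instead of the NaN. */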
40167 #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
40168 #ifdef __LITTLE_ENDIAN__
40169 __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
40170 float64x2_t __ret;
40171 __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
40172 return __ret;
40173 }
40174 #else
40175 __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
40176 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40177 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40178 float64x2_t __ret;
40179 __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
40180 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40181 return __ret;
40182 }
40183 #endif
40184
40185 #ifdef __LITTLE_ENDIAN__
40186 __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
40187 float64x1_t __ret;
40188 __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
40189 return __ret;
40190 }
40191 #else
40192 __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
40193 float64x1_t __ret;
40194 __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
40195 return __ret;
40196 }
40197 #endif
40198
40199 #ifdef __LITTLE_ENDIAN__
40200 __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
40201 float64x2_t __ret;
40202 __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
40203 return __ret;
40204 }
40205 #else
40206 __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
40207 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40208 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40209 float64x2_t __ret;
40210 __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
40211 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40212 return __ret;
40213 }
40214 #endif
40215
40216 #ifdef __LITTLE_ENDIAN__
40217 __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
40218 float64x1_t __ret;
40219 __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
40220 return __ret;
40221 }
40222 #else
40223 __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
40224 float64x1_t __ret;
40225 __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
40226 return __ret;
40227 }
40228 #endif
40229
40230 #endif
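/* ARMv8 Cryptographic Extension intrinsics. vaeseq/vaesdq perform a single AES
 * encryption/decryption round step and vaesmcq/vaesimcq the (inverse) MixColumns
 * transform; the vsha1* and vsha256* functions map to the SHA-1 and SHA-256
 * hash-update and schedule-update instructions (SHA1C/P/M/H/SU0/SU1,
 * SHA256H/H2/SU0/SU1). */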
40231 #if __ARM_FEATURE_CRYPTO
40232 #ifdef __LITTLE_ENDIAN__
40233 __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
40234 uint8x16_t __ret;
40235 __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
40236 return __ret;
40237 }
40238 #else
40239 __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
40240 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40241 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40242 uint8x16_t __ret;
40243 __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
40244 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40245 return __ret;
40246 }
40247 #endif
40248
40249 #ifdef __LITTLE_ENDIAN__
40250 __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
40251 uint8x16_t __ret;
40252 __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
40253 return __ret;
40254 }
40255 #else
40256 __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
40257 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40258 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40259 uint8x16_t __ret;
40260 __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
40261 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40262 return __ret;
40263 }
40264 #endif
40265
40266 #ifdef __LITTLE_ENDIAN__
40267 __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
40268 uint8x16_t __ret;
40269 __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
40270 return __ret;
40271 }
40272 #else
40273 __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
40274 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40275 uint8x16_t __ret;
40276 __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
40277 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40278 return __ret;
40279 }
40280 #endif
40281
40282 #ifdef __LITTLE_ENDIAN__
40283 __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
40284 uint8x16_t __ret;
40285 __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
40286 return __ret;
40287 }
40288 #else
40289 __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
40290 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40291 uint8x16_t __ret;
40292 __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
40293 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
40294 return __ret;
40295 }
40296 #endif
40297
40298 #ifdef __LITTLE_ENDIAN__
40299 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
40300 uint32x4_t __ret;
40301 __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
40302 return __ret;
40303 }
40304 #else
40305 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
40306 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40307 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40308 uint32x4_t __ret;
40309 __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
40310 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40311 return __ret;
40312 }
40313 #endif
40314
40315 #ifdef __LITTLE_ENDIAN__
40316 __ai uint32_t vsha1h_u32(uint32_t __p0) {
40317 uint32_t __ret;
40318 __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
40319 return __ret;
40320 }
40321 #else
40322 __ai uint32_t vsha1h_u32(uint32_t __p0) {
40323 uint32_t __ret;
40324 __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
40325 return __ret;
40326 }
40327 #endif
40328
40329 #ifdef __LITTLE_ENDIAN__
40330 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
40331 uint32x4_t __ret;
40332 __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
40333 return __ret;
40334 }
40335 #else
40336 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
40337 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40338 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40339 uint32x4_t __ret;
40340 __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
40341 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40342 return __ret;
40343 }
40344 #endif
40345
40346 #ifdef __LITTLE_ENDIAN__
40347 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
40348 uint32x4_t __ret;
40349 __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
40350 return __ret;
40351 }
40352 #else
40353 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
40354 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40355 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40356 uint32x4_t __ret;
40357 __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
40358 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40359 return __ret;
40360 }
40361 #endif
40362
40363 #ifdef __LITTLE_ENDIAN__
40364 __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40365 uint32x4_t __ret;
40366 __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
40367 return __ret;
40368 }
40369 #else
40370 __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40371 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40372 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40373 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40374 uint32x4_t __ret;
40375 __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
40376 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40377 return __ret;
40378 }
40379 #endif
40380
40381 #ifdef __LITTLE_ENDIAN__
40382 __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
40383 uint32x4_t __ret;
40384 __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
40385 return __ret;
40386 }
40387 #else
40388 __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
40389 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40390 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40391 uint32x4_t __ret;
40392 __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
40393 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40394 return __ret;
40395 }
40396 #endif
40397
40398 #ifdef __LITTLE_ENDIAN__
40399 __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40400 uint32x4_t __ret;
40401 __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
40402 return __ret;
40403 }
40404 #else
40405 __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40406 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40407 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40408 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40409 uint32x4_t __ret;
40410 __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
40411 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40412 return __ret;
40413 }
40414 #endif
40415
40416 #ifdef __LITTLE_ENDIAN__
40417 __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40418 uint32x4_t __ret;
40419 __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
40420 return __ret;
40421 }
40422 #else
40423 __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40424 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40425 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40426 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40427 uint32x4_t __ret;
40428 __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
40429 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40430 return __ret;
40431 }
40432 #endif
40433
40434 #ifdef __LITTLE_ENDIAN__
40435 __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
40436 uint32x4_t __ret;
40437 __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
40438 return __ret;
40439 }
40440 #else
40441 __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
40442 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40443 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40444 uint32x4_t __ret;
40445 __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
40446 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40447 return __ret;
40448 }
40449 #endif
40450
40451 #ifdef __LITTLE_ENDIAN__
40452 __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40453 uint32x4_t __ret;
40454 __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
40455 return __ret;
40456 }
40457 #else
40458 __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
40459 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40460 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40461 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40462 uint32x4_t __ret;
40463 __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
40464 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40465 return __ret;
40466 }
40467 #endif
40468
40469 #endif
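/* Fused multiply-add: vfmaq_f32(a, b, c) computes a + b * c with a single
 * rounding. vfmsq/vfms negate the second operand, giving a - b * c. The
 * __noswap_ helpers are internal variants used by the big-endian code paths so
 * that lanes are not reversed twice. */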
40470 #if defined(__ARM_FEATURE_FMA)
40471 #ifdef __LITTLE_ENDIAN__
40472 __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
40473 float32x4_t __ret;
40474 __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
40475 return __ret;
40476 }
40477 #else
40478 __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
40479 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40480 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40481 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40482 float32x4_t __ret;
40483 __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
40484 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40485 return __ret;
40486 }
40487 __ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
40488 float32x4_t __ret;
40489 __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
40490 return __ret;
40491 }
40492 #endif
40493
40494 #ifdef __LITTLE_ENDIAN__
40495 __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
40496 float32x2_t __ret;
40497 __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
40498 return __ret;
40499 }
40500 #else
40501 __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
40502 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40503 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40504 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
40505 float32x2_t __ret;
40506 __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
40507 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40508 return __ret;
40509 }
40510 __ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
40511 float32x2_t __ret;
40512 __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
40513 return __ret;
40514 }
40515 #endif
40516
40517 #ifdef __LITTLE_ENDIAN__
40518 __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
40519 float32x4_t __ret;
40520 __ret = vfmaq_f32(__p0, -__p1, __p2);
40521 return __ret;
40522 }
40523 #else
40524 __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
40525 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
40526 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
40527 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
40528 float32x4_t __ret;
40529 __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
40530 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
40531 return __ret;
40532 }
40533 #endif
40534
40535 #ifdef __LITTLE_ENDIAN__
40536 __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
40537 float32x2_t __ret;
40538 __ret = vfma_f32(__p0, -__p1, __p2);
40539 return __ret;
40540 }
40541 #else
40542 __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
40543 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
40544 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
40545 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
40546 float32x2_t __ret;
40547 __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
40548 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
40549 return __ret;
40550 }
40551 #endif
40552
40553 #endif
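/* Scalar half-precision (ARMv8.2-A FP16) arithmetic, comparison and conversion
 * helpers. Comparisons return a nonzero mask when the condition holds and zero
 * otherwise. The plain vcvth_* float-to-integer conversions round toward zero,
 * while vcvtah/vcvtmh/vcvtnh/vcvtph round to nearest with ties away from zero,
 * toward minus infinity, to nearest with ties to even, and toward plus infinity,
 * respectively. */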
40554 #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC)
40555 #ifdef __LITTLE_ENDIAN__
40556 __ai float16_t vabdh_f16(float16_t __p0, float16_t __p1) {
40557 float16_t __ret;
40558 __ret = (float16_t) __builtin_neon_vabdh_f16(__p0, __p1);
40559 return __ret;
40560 }
40561 #else
40562 __ai float16_t vabdh_f16(float16_t __p0, float16_t __p1) {
40563 float16_t __ret;
40564 __ret = (float16_t) __builtin_neon_vabdh_f16(__p0, __p1);
40565 return __ret;
40566 }
40567 #endif
40568
40569 #ifdef __LITTLE_ENDIAN__
40570 __ai float16_t vabsh_f16(float16_t __p0) {
40571 float16_t __ret;
40572 __ret = (float16_t) __builtin_neon_vabsh_f16(__p0);
40573 return __ret;
40574 }
40575 #else
40576 __ai float16_t vabsh_f16(float16_t __p0) {
40577 float16_t __ret;
40578 __ret = (float16_t) __builtin_neon_vabsh_f16(__p0);
40579 return __ret;
40580 }
40581 #endif
40582
40583 #ifdef __LITTLE_ENDIAN__
40584 __ai float16_t vaddh_f16(float16_t __p0, float16_t __p1) {
40585 float16_t __ret;
40586 __ret = (float16_t) __builtin_neon_vaddh_f16(__p0, __p1);
40587 return __ret;
40588 }
40589 #else
40590 __ai float16_t vaddh_f16(float16_t __p0, float16_t __p1) {
40591 float16_t __ret;
40592 __ret = (float16_t) __builtin_neon_vaddh_f16(__p0, __p1);
40593 return __ret;
40594 }
40595 #endif
40596
40597 #ifdef __LITTLE_ENDIAN__
40598 __ai uint32_t vcageh_f16(float16_t __p0, float16_t __p1) {
40599 uint32_t __ret;
40600 __ret = (uint32_t) __builtin_neon_vcageh_f16(__p0, __p1);
40601 return __ret;
40602 }
40603 #else
40604 __ai uint32_t vcageh_f16(float16_t __p0, float16_t __p1) {
40605 uint32_t __ret;
40606 __ret = (uint32_t) __builtin_neon_vcageh_f16(__p0, __p1);
40607 return __ret;
40608 }
40609 #endif
40610
40611 #ifdef __LITTLE_ENDIAN__
40612 __ai uint32_t vcagth_f16(float16_t __p0, float16_t __p1) {
40613 uint32_t __ret;
40614 __ret = (uint32_t) __builtin_neon_vcagth_f16(__p0, __p1);
40615 return __ret;
40616 }
40617 #else
40618 __ai uint32_t vcagth_f16(float16_t __p0, float16_t __p1) {
40619 uint32_t __ret;
40620 __ret = (uint32_t) __builtin_neon_vcagth_f16(__p0, __p1);
40621 return __ret;
40622 }
40623 #endif
40624
40625 #ifdef __LITTLE_ENDIAN__
40626 __ai uint32_t vcaleh_f16(float16_t __p0, float16_t __p1) {
40627 uint32_t __ret;
40628 __ret = (uint32_t) __builtin_neon_vcaleh_f16(__p0, __p1);
40629 return __ret;
40630 }
40631 #else
40632 __ai uint32_t vcaleh_f16(float16_t __p0, float16_t __p1) {
40633 uint32_t __ret;
40634 __ret = (uint32_t) __builtin_neon_vcaleh_f16(__p0, __p1);
40635 return __ret;
40636 }
40637 #endif
40638
40639 #ifdef __LITTLE_ENDIAN__
40640 __ai uint32_t vcalth_f16(float16_t __p0, float16_t __p1) {
40641 uint32_t __ret;
40642 __ret = (uint32_t) __builtin_neon_vcalth_f16(__p0, __p1);
40643 return __ret;
40644 }
40645 #else
40646 __ai uint32_t vcalth_f16(float16_t __p0, float16_t __p1) {
40647 uint32_t __ret;
40648 __ret = (uint32_t) __builtin_neon_vcalth_f16(__p0, __p1);
40649 return __ret;
40650 }
40651 #endif
40652
40653 #ifdef __LITTLE_ENDIAN__
40654 __ai uint32_t vceqh_f16(float16_t __p0, float16_t __p1) {
40655 uint32_t __ret;
40656 __ret = (uint32_t) __builtin_neon_vceqh_f16(__p0, __p1);
40657 return __ret;
40658 }
40659 #else
40660 __ai uint32_t vceqh_f16(float16_t __p0, float16_t __p1) {
40661 uint32_t __ret;
40662 __ret = (uint32_t) __builtin_neon_vceqh_f16(__p0, __p1);
40663 return __ret;
40664 }
40665 #endif
40666
40667 #ifdef __LITTLE_ENDIAN__
40668 __ai uint32_t vceqzh_f16(float16_t __p0) {
40669 uint32_t __ret;
40670 __ret = (uint32_t) __builtin_neon_vceqzh_f16(__p0);
40671 return __ret;
40672 }
40673 #else
40674 __ai uint32_t vceqzh_f16(float16_t __p0) {
40675 uint32_t __ret;
40676 __ret = (uint32_t) __builtin_neon_vceqzh_f16(__p0);
40677 return __ret;
40678 }
40679 #endif
40680
40681 #ifdef __LITTLE_ENDIAN__
40682 __ai uint32_t vcgeh_f16(float16_t __p0, float16_t __p1) {
40683 uint32_t __ret;
40684 __ret = (uint32_t) __builtin_neon_vcgeh_f16(__p0, __p1);
40685 return __ret;
40686 }
40687 #else
40688 __ai uint32_t vcgeh_f16(float16_t __p0, float16_t __p1) {
40689 uint32_t __ret;
40690 __ret = (uint32_t) __builtin_neon_vcgeh_f16(__p0, __p1);
40691 return __ret;
40692 }
40693 #endif
40694
40695 #ifdef __LITTLE_ENDIAN__
40696 __ai uint32_t vcgezh_f16(float16_t __p0) {
40697 uint32_t __ret;
40698 __ret = (uint32_t) __builtin_neon_vcgezh_f16(__p0);
40699 return __ret;
40700 }
40701 #else
40702 __ai uint32_t vcgezh_f16(float16_t __p0) {
40703 uint32_t __ret;
40704 __ret = (uint32_t) __builtin_neon_vcgezh_f16(__p0);
40705 return __ret;
40706 }
40707 #endif
40708
40709 #ifdef __LITTLE_ENDIAN__
40710 __ai uint32_t vcgth_f16(float16_t __p0, float16_t __p1) {
40711 uint32_t __ret;
40712 __ret = (uint32_t) __builtin_neon_vcgth_f16(__p0, __p1);
40713 return __ret;
40714 }
40715 #else
40716 __ai uint32_t vcgth_f16(float16_t __p0, float16_t __p1) {
40717 uint32_t __ret;
40718 __ret = (uint32_t) __builtin_neon_vcgth_f16(__p0, __p1);
40719 return __ret;
40720 }
40721 #endif
40722
40723 #ifdef __LITTLE_ENDIAN__
40724 __ai uint32_t vcgtzh_f16(float16_t __p0) {
40725 uint32_t __ret;
40726 __ret = (uint32_t) __builtin_neon_vcgtzh_f16(__p0);
40727 return __ret;
40728 }
40729 #else
40730 __ai uint32_t vcgtzh_f16(float16_t __p0) {
40731 uint32_t __ret;
40732 __ret = (uint32_t) __builtin_neon_vcgtzh_f16(__p0);
40733 return __ret;
40734 }
40735 #endif
40736
40737 #ifdef __LITTLE_ENDIAN__
40738 __ai uint32_t vcleh_f16(float16_t __p0, float16_t __p1) {
40739 uint32_t __ret;
40740 __ret = (uint32_t) __builtin_neon_vcleh_f16(__p0, __p1);
40741 return __ret;
40742 }
40743 #else
40744 __ai uint32_t vcleh_f16(float16_t __p0, float16_t __p1) {
40745 uint32_t __ret;
40746 __ret = (uint32_t) __builtin_neon_vcleh_f16(__p0, __p1);
40747 return __ret;
40748 }
40749 #endif
40750
40751 #ifdef __LITTLE_ENDIAN__
40752 __ai uint32_t vclezh_f16(float16_t __p0) {
40753 uint32_t __ret;
40754 __ret = (uint32_t) __builtin_neon_vclezh_f16(__p0);
40755 return __ret;
40756 }
40757 #else
40758 __ai uint32_t vclezh_f16(float16_t __p0) {
40759 uint32_t __ret;
40760 __ret = (uint32_t) __builtin_neon_vclezh_f16(__p0);
40761 return __ret;
40762 }
40763 #endif
40764
40765 #ifdef __LITTLE_ENDIAN__
40766 __ai uint32_t vclth_f16(float16_t __p0, float16_t __p1) {
40767 uint32_t __ret;
40768 __ret = (uint32_t) __builtin_neon_vclth_f16(__p0, __p1);
40769 return __ret;
40770 }
40771 #else
40772 __ai uint32_t vclth_f16(float16_t __p0, float16_t __p1) {
40773 uint32_t __ret;
40774 __ret = (uint32_t) __builtin_neon_vclth_f16(__p0, __p1);
40775 return __ret;
40776 }
40777 #endif
40778
40779 #ifdef __LITTLE_ENDIAN__
40780 __ai uint32_t vcltzh_f16(float16_t __p0) {
40781 uint32_t __ret;
40782 __ret = (uint32_t) __builtin_neon_vcltzh_f16(__p0);
40783 return __ret;
40784 }
40785 #else
40786 __ai uint32_t vcltzh_f16(float16_t __p0) {
40787 uint32_t __ret;
40788 __ret = (uint32_t) __builtin_neon_vcltzh_f16(__p0);
40789 return __ret;
40790 }
40791 #endif
40792
40793 #ifdef __LITTLE_ENDIAN__
40794 __ai int32_t vcvth_s16_f16(float16_t __p0) {
40795 int32_t __ret;
40796 __ret = (int32_t) __builtin_neon_vcvth_s16_f16(__p0);
40797 return __ret;
40798 }
40799 #else
40800 __ai int32_t vcvth_s16_f16(float16_t __p0) {
40801 int32_t __ret;
40802 __ret = (int32_t) __builtin_neon_vcvth_s16_f16(__p0);
40803 return __ret;
40804 }
40805 #endif
40806
40807 #ifdef __LITTLE_ENDIAN__
40808 __ai int32_t vcvth_s32_f16(float16_t __p0) {
40809 int32_t __ret;
40810 __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__p0);
40811 return __ret;
40812 }
40813 #else
40814 __ai int32_t vcvth_s32_f16(float16_t __p0) {
40815 int32_t __ret;
40816 __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__p0);
40817 return __ret;
40818 }
40819 #endif
40820
40821 #ifdef __LITTLE_ENDIAN__
40822 __ai int32_t vcvth_s64_f16(float16_t __p0) {
40823 int32_t __ret;
40824 __ret = (int32_t) __builtin_neon_vcvth_s64_f16(__p0);
40825 return __ret;
40826 }
40827 #else
40828 __ai int32_t vcvth_s64_f16(float16_t __p0) {
40829 int32_t __ret;
40830 __ret = (int32_t) __builtin_neon_vcvth_s64_f16(__p0);
40831 return __ret;
40832 }
40833 #endif
40834
40835 #ifdef __LITTLE_ENDIAN__
40836 __ai uint32_t vcvth_u16_f16(float16_t __p0) {
40837 uint32_t __ret;
40838 __ret = (uint32_t) __builtin_neon_vcvth_u16_f16(__p0);
40839 return __ret;
40840 }
40841 #else
40842 __ai uint32_t vcvth_u16_f16(float16_t __p0) {
40843 uint32_t __ret;
40844 __ret = (uint32_t) __builtin_neon_vcvth_u16_f16(__p0);
40845 return __ret;
40846 }
40847 #endif
40848
40849 #ifdef __LITTLE_ENDIAN__
40850 __ai uint32_t vcvth_u32_f16(float16_t __p0) {
40851 uint32_t __ret;
40852 __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__p0);
40853 return __ret;
40854 }
40855 #else
40856 __ai uint32_t vcvth_u32_f16(float16_t __p0) {
40857 uint32_t __ret;
40858 __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__p0);
40859 return __ret;
40860 }
40861 #endif
40862
40863 #ifdef __LITTLE_ENDIAN__
40864 __ai uint32_t vcvth_u64_f16(float16_t __p0) {
40865 uint32_t __ret;
40866 __ret = (uint32_t) __builtin_neon_vcvth_u64_f16(__p0);
40867 return __ret;
40868 }
40869 #else
40870 __ai uint32_t vcvth_u64_f16(float16_t __p0) {
40871 uint32_t __ret;
40872 __ret = (uint32_t) __builtin_neon_vcvth_u64_f16(__p0);
40873 return __ret;
40874 }
40875 #endif
40876
40877 #ifdef __LITTLE_ENDIAN__
40878 __ai int32_t vcvtah_s16_f16(float16_t __p0) {
40879 int32_t __ret;
40880 __ret = (int32_t) __builtin_neon_vcvtah_s16_f16(__p0);
40881 return __ret;
40882 }
40883 #else
40884 __ai int32_t vcvtah_s16_f16(float16_t __p0) {
40885 int32_t __ret;
40886 __ret = (int32_t) __builtin_neon_vcvtah_s16_f16(__p0);
40887 return __ret;
40888 }
40889 #endif
40890
40891 #ifdef __LITTLE_ENDIAN__
40892 __ai int32_t vcvtah_s32_f16(float16_t __p0) {
40893 int32_t __ret;
40894 __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__p0);
40895 return __ret;
40896 }
40897 #else
40898 __ai int32_t vcvtah_s32_f16(float16_t __p0) {
40899 int32_t __ret;
40900 __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__p0);
40901 return __ret;
40902 }
40903 #endif
40904
40905 #ifdef __LITTLE_ENDIAN__
40906 __ai int32_t vcvtah_s64_f16(float16_t __p0) {
40907 int32_t __ret;
40908 __ret = (int32_t) __builtin_neon_vcvtah_s64_f16(__p0);
40909 return __ret;
40910 }
40911 #else
40912 __ai int32_t vcvtah_s64_f16(float16_t __p0) {
40913 int32_t __ret;
40914 __ret = (int32_t) __builtin_neon_vcvtah_s64_f16(__p0);
40915 return __ret;
40916 }
40917 #endif
40918
40919 #ifdef __LITTLE_ENDIAN__
40920 __ai uint32_t vcvtah_u16_f16(float16_t __p0) {
40921 uint32_t __ret;
40922 __ret = (uint32_t) __builtin_neon_vcvtah_u16_f16(__p0);
40923 return __ret;
40924 }
40925 #else
40926 __ai uint32_t vcvtah_u16_f16(float16_t __p0) {
40927 uint32_t __ret;
40928 __ret = (uint32_t) __builtin_neon_vcvtah_u16_f16(__p0);
40929 return __ret;
40930 }
40931 #endif
40932
40933 #ifdef __LITTLE_ENDIAN__
40934 __ai uint32_t vcvtah_u32_f16(float16_t __p0) {
40935 uint32_t __ret;
40936 __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__p0);
40937 return __ret;
40938 }
40939 #else
40940 __ai uint32_t vcvtah_u32_f16(float16_t __p0) {
40941 uint32_t __ret;
40942 __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__p0);
40943 return __ret;
40944 }
40945 #endif
40946
40947 #ifdef __LITTLE_ENDIAN__
40948 __ai uint32_t vcvtah_u64_f16(float16_t __p0) {
40949 uint32_t __ret;
40950 __ret = (uint32_t) __builtin_neon_vcvtah_u64_f16(__p0);
40951 return __ret;
40952 }
40953 #else
40954 __ai uint32_t vcvtah_u64_f16(float16_t __p0) {
40955 uint32_t __ret;
40956 __ret = (uint32_t) __builtin_neon_vcvtah_u64_f16(__p0);
40957 return __ret;
40958 }
40959 #endif
40960
40961 #ifdef __LITTLE_ENDIAN__
40962 __ai float16_t vcvth_f16_s32(int32_t __p0) {
40963 float16_t __ret;
40964 __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__p0);
40965 return __ret;
40966 }
40967 #else
40968 __ai float16_t vcvth_f16_s32(int32_t __p0) {
40969 float16_t __ret;
40970 __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__p0);
40971 return __ret;
40972 }
40973 #endif
40974
40975 #ifdef __LITTLE_ENDIAN__
40976 __ai float16_t vcvth_f16_s64(int64_t __p0) {
40977 float16_t __ret;
40978 __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__p0);
40979 return __ret;
40980 }
40981 #else
40982 __ai float16_t vcvth_f16_s64(int64_t __p0) {
40983 float16_t __ret;
40984 __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__p0);
40985 return __ret;
40986 }
40987 #endif
40988
40989 #ifdef __LITTLE_ENDIAN__
40990 __ai float16_t vcvth_f16_s16(int16_t __p0) {
40991 float16_t __ret;
40992 __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0);
40993 return __ret;
40994 }
40995 #else
40996 __ai float16_t vcvth_f16_s16(int16_t __p0) {
40997 float16_t __ret;
40998 __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0);
40999 return __ret;
41000 }
41001 #endif
41002
41003 #ifdef __LITTLE_ENDIAN__
41004 __ai float16_t vcvth_f16_u32(uint32_t __p0) {
41005 float16_t __ret;
41006 __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0);
41007 return __ret;
41008 }
41009 #else
41010 __ai float16_t vcvth_f16_u32(uint32_t __p0) {
41011 float16_t __ret;
41012 __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0);
41013 return __ret;
41014 }
41015 #endif
41016
41017 #ifdef __LITTLE_ENDIAN__
41018 __ai float16_t vcvth_f16_u64(uint64_t __p0) {
41019 float16_t __ret;
41020 __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0);
41021 return __ret;
41022 }
41023 #else
41024 __ai float16_t vcvth_f16_u64(uint64_t __p0) {
41025 float16_t __ret;
41026 __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0);
41027 return __ret;
41028 }
41029 #endif
41030
41031 #ifdef __LITTLE_ENDIAN__
41032 __ai float16_t vcvth_f16_u16(uint16_t __p0) {
41033 float16_t __ret;
41034 __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__p0);
41035 return __ret;
41036 }
41037 #else
41038 __ai float16_t vcvth_f16_u16(uint16_t __p0) {
41039 float16_t __ret;
41040 __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__p0);
41041 return __ret;
41042 }
41043 #endif
41044
41045 #ifdef __LITTLE_ENDIAN__
41046 __ai int32_t vcvtmh_s16_f16(float16_t __p0) {
41047 int32_t __ret;
41048 __ret = (int32_t) __builtin_neon_vcvtmh_s16_f16(__p0);
41049 return __ret;
41050 }
41051 #else
41052 __ai int32_t vcvtmh_s16_f16(float16_t __p0) {
41053 int32_t __ret;
41054 __ret = (int32_t) __builtin_neon_vcvtmh_s16_f16(__p0);
41055 return __ret;
41056 }
41057 #endif
41058
41059 #ifdef __LITTLE_ENDIAN__
41060 __ai int32_t vcvtmh_s32_f16(float16_t __p0) {
41061 int32_t __ret;
41062 __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__p0);
41063 return __ret;
41064 }
41065 #else
41066 __ai int32_t vcvtmh_s32_f16(float16_t __p0) {
41067 int32_t __ret;
41068 __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__p0);
41069 return __ret;
41070 }
41071 #endif
41072
41073 #ifdef __LITTLE_ENDIAN__
41074 __ai int32_t vcvtmh_s64_f16(float16_t __p0) {
41075 int32_t __ret;
41076 __ret = (int32_t) __builtin_neon_vcvtmh_s64_f16(__p0);
41077 return __ret;
41078 }
41079 #else
41080 __ai int32_t vcvtmh_s64_f16(float16_t __p0) {
41081 int32_t __ret;
41082 __ret = (int32_t) __builtin_neon_vcvtmh_s64_f16(__p0);
41083 return __ret;
41084 }
41085 #endif
41086
41087 #ifdef __LITTLE_ENDIAN__
41088 __ai uint32_t vcvtmh_u16_f16(float16_t __p0) {
41089 uint32_t __ret;
41090 __ret = (uint32_t) __builtin_neon_vcvtmh_u16_f16(__p0);
41091 return __ret;
41092 }
41093 #else
41094 __ai uint32_t vcvtmh_u16_f16(float16_t __p0) {
41095 uint32_t __ret;
41096 __ret = (uint32_t) __builtin_neon_vcvtmh_u16_f16(__p0);
41097 return __ret;
41098 }
41099 #endif
41100
41101 #ifdef __LITTLE_ENDIAN__
41102 __ai uint32_t vcvtmh_u32_f16(float16_t __p0) {
41103 uint32_t __ret;
41104 __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__p0);
41105 return __ret;
41106 }
41107 #else
41108 __ai uint32_t vcvtmh_u32_f16(float16_t __p0) {
41109 uint32_t __ret;
41110 __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__p0);
41111 return __ret;
41112 }
41113 #endif
41114
41115 #ifdef __LITTLE_ENDIAN__
41116 __ai uint32_t vcvtmh_u64_f16(float16_t __p0) {
41117 uint32_t __ret;
41118 __ret = (uint32_t) __builtin_neon_vcvtmh_u64_f16(__p0);
41119 return __ret;
41120 }
41121 #else
41122 __ai uint32_t vcvtmh_u64_f16(float16_t __p0) {
41123 uint32_t __ret;
41124 __ret = (uint32_t) __builtin_neon_vcvtmh_u64_f16(__p0);
41125 return __ret;
41126 }
41127 #endif
41128
41129 #ifdef __LITTLE_ENDIAN__
41130 __ai int32_t vcvtnh_s16_f16(float16_t __p0) {
41131 int32_t __ret;
41132 __ret = (int32_t) __builtin_neon_vcvtnh_s16_f16(__p0);
41133 return __ret;
41134 }
41135 #else
41136 __ai int32_t vcvtnh_s16_f16(float16_t __p0) {
41137 int32_t __ret;
41138 __ret = (int32_t) __builtin_neon_vcvtnh_s16_f16(__p0);
41139 return __ret;
41140 }
41141 #endif
41142
41143 #ifdef __LITTLE_ENDIAN__
41144 __ai int32_t vcvtnh_s32_f16(float16_t __p0) {
41145 int32_t __ret;
41146 __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__p0);
41147 return __ret;
41148 }
41149 #else
41150 __ai int32_t vcvtnh_s32_f16(float16_t __p0) {
41151 int32_t __ret;
41152 __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__p0);
41153 return __ret;
41154 }
41155 #endif
41156
41157 #ifdef __LITTLE_ENDIAN__
41158 __ai int32_t vcvtnh_s64_f16(float16_t __p0) {
41159 int32_t __ret;
41160 __ret = (int32_t) __builtin_neon_vcvtnh_s64_f16(__p0);
41161 return __ret;
41162 }
41163 #else
41164 __ai int32_t vcvtnh_s64_f16(float16_t __p0) {
41165 int32_t __ret;
41166 __ret = (int32_t) __builtin_neon_vcvtnh_s64_f16(__p0);
41167 return __ret;
41168 }
41169 #endif
41170
41171 #ifdef __LITTLE_ENDIAN__
41172 __ai uint32_t vcvtnh_u16_f16(float16_t __p0) {
41173 uint32_t __ret;
41174 __ret = (uint32_t) __builtin_neon_vcvtnh_u16_f16(__p0);
41175 return __ret;
41176 }
41177 #else
41178 __ai uint32_t vcvtnh_u16_f16(float16_t __p0) {
41179 uint32_t __ret;
41180 __ret = (uint32_t) __builtin_neon_vcvtnh_u16_f16(__p0);
41181 return __ret;
41182 }
41183 #endif
41184
41185 #ifdef __LITTLE_ENDIAN__
41186 __ai uint32_t vcvtnh_u32_f16(float16_t __p0) {
41187 uint32_t __ret;
41188 __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__p0);
41189 return __ret;
41190 }
41191 #else
41192 __ai uint32_t vcvtnh_u32_f16(float16_t __p0) {
41193 uint32_t __ret;
41194 __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__p0);
41195 return __ret;
41196 }
41197 #endif
41198
41199 #ifdef __LITTLE_ENDIAN__
41200 __ai uint32_t vcvtnh_u64_f16(float16_t __p0) {
41201 uint32_t __ret;
41202 __ret = (uint32_t) __builtin_neon_vcvtnh_u64_f16(__p0);
41203 return __ret;
41204 }
41205 #else
41206 __ai uint32_t vcvtnh_u64_f16(float16_t __p0) {
41207 uint32_t __ret;
41208 __ret = (uint32_t) __builtin_neon_vcvtnh_u64_f16(__p0);
41209 return __ret;
41210 }
41211 #endif
41212
41213 #ifdef __LITTLE_ENDIAN__
41214 __ai int32_t vcvtph_s16_f16(float16_t __p0) {
41215 int32_t __ret;
41216 __ret = (int32_t) __builtin_neon_vcvtph_s16_f16(__p0);
41217 return __ret;
41218 }
41219 #else
41220 __ai int32_t vcvtph_s16_f16(float16_t __p0) {
41221 int32_t __ret;
41222 __ret = (int32_t) __builtin_neon_vcvtph_s16_f16(__p0);
41223 return __ret;
41224 }
41225 #endif
41226
41227 #ifdef __LITTLE_ENDIAN__
41228 __ai int32_t vcvtph_s32_f16(float16_t __p0) {
41229 int32_t __ret;
41230 __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__p0);
41231 return __ret;
41232 }
41233 #else
41234 __ai int32_t vcvtph_s32_f16(float16_t __p0) {
41235 int32_t __ret;
41236 __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__p0);
41237 return __ret;
41238 }
41239 #endif
41240
41241 #ifdef __LITTLE_ENDIAN__
41242 __ai int32_t vcvtph_s64_f16(float16_t __p0) {
41243 int32_t __ret;
41244 __ret = (int32_t) __builtin_neon_vcvtph_s64_f16(__p0);
41245 return __ret;
41246 }
41247 #else
41248 __ai int32_t vcvtph_s64_f16(float16_t __p0) {
41249 int32_t __ret;
41250 __ret = (int32_t) __builtin_neon_vcvtph_s64_f16(__p0);
41251 return __ret;
41252 }
41253 #endif
41254
41255 #ifdef __LITTLE_ENDIAN__
41256 __ai uint32_t vcvtph_u16_f16(float16_t __p0) {
41257 uint32_t __ret;
41258 __ret = (uint32_t) __builtin_neon_vcvtph_u16_f16(__p0);
41259 return __ret;
41260 }
41261 #else
41262 __ai uint32_t vcvtph_u16_f16(float16_t __p0) {
41263 uint32_t __ret;
41264 __ret = (uint32_t) __builtin_neon_vcvtph_u16_f16(__p0);
41265 return __ret;
41266 }
41267 #endif
41268
41269 #ifdef __LITTLE_ENDIAN__
41270 __ai uint32_t vcvtph_u32_f16(float16_t __p0) {
41271 uint32_t __ret;
41272 __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__p0);
41273 return __ret;
41274 }
41275 #else
41276 __ai uint32_t vcvtph_u32_f16(float16_t __p0) {
41277 uint32_t __ret;
41278 __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__p0);
41279 return __ret;
41280 }
41281 #endif
41282
41283 #ifdef __LITTLE_ENDIAN__
41284 __ai uint32_t vcvtph_u64_f16(float16_t __p0) {
41285 uint32_t __ret;
41286 __ret = (uint32_t) __builtin_neon_vcvtph_u64_f16(__p0);
41287 return __ret;
41288 }
41289 #else
41290 __ai uint32_t vcvtph_u64_f16(float16_t __p0) {
41291 uint32_t __ret;
41292 __ret = (uint32_t) __builtin_neon_vcvtph_u64_f16(__p0);
41293 return __ret;
41294 }
41295 #endif
41296
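/* The vcvt{m,n,p}h_* scalar conversions above differ only in rounding mode:
 * 'm' rounds toward minus infinity, 'n' to nearest with ties to even, and 'p'
 * toward plus infinity.  Illustrative use (example values only):
 *
 *   float16_t h = 2.5f;
 *   int32_t down = vcvtmh_s32_f16(h);   // 2
 *   int32_t near = vcvtnh_s32_f16(h);   // 2 (ties go to even)
 *   int32_t up   = vcvtph_s32_f16(h);   // 3
 */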
41297 #ifdef __LITTLE_ENDIAN__
41298 __ai float16_t vfmah_f16(float16_t __p0, float16_t __p1, float16_t __p2) {
41299 float16_t __ret;
41300 __ret = (float16_t) __builtin_neon_vfmah_f16(__p0, __p1, __p2);
41301 return __ret;
41302 }
41303 #else
41304 __ai float16_t vfmah_f16(float16_t __p0, float16_t __p1, float16_t __p2) {
41305 float16_t __ret;
41306 __ret = (float16_t) __builtin_neon_vfmah_f16(__p0, __p1, __p2);
41307 return __ret;
41308 }
41309 #endif
41310
41311 #ifdef __LITTLE_ENDIAN__
41312 #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
41313 float16_t __s0 = __p0; \
41314 float16_t __s1 = __p1; \
41315 float16x4_t __s2 = __p2; \
41316 float16_t __ret; \
41317 __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
41318 __ret; \
41319 })
41320 #else
41321 #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
41322 float16_t __s0 = __p0; \
41323 float16_t __s1 = __p1; \
41324 float16x4_t __s2 = __p2; \
41325 float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
41326 float16_t __ret; \
41327 __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__rev2, __p3); \
41328 __ret; \
41329 })
41330 #endif
41331
41332 #ifdef __LITTLE_ENDIAN__
41333 #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
41334 float16_t __s0 = __p0; \
41335 float16_t __s1 = __p1; \
41336 float16x8_t __s2 = __p2; \
41337 float16_t __ret; \
41338 __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
41339 __ret; \
41340 })
41341 #else
41342 #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
41343 float16_t __s0 = __p0; \
41344 float16_t __s1 = __p1; \
41345 float16x8_t __s2 = __p2; \
41346 float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
41347 float16_t __ret; \
41348 __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__rev2, __p3); \
41349 __ret; \
41350 })
41351 #endif
41352
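/* vfmah_lane_f16/vfmah_laneq_f16 above compute a fused __p0 + __p1 * lane of
 * __p2.  The big-endian variants reverse the vector argument first so the lane
 * index selects the same element as on little-endian targets.  Illustrative
 * use (example values only):
 *
 *   float16x4_t v = vdup_n_f16(3.0);
 *   float16_t r = vfmah_lane_f16(1.0, 2.0, v, 0);   // 1 + 2 * 3 = 7
 */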
41353 #ifdef __LITTLE_ENDIAN__
41354 __ai float16_t vmaxh_f16(float16_t __p0, float16_t __p1) {
41355 float16_t __ret;
41356 __ret = (float16_t) __builtin_neon_vmaxh_f16(__p0, __p1);
41357 return __ret;
41358 }
41359 #else
41360 __ai float16_t vmaxh_f16(float16_t __p0, float16_t __p1) {
41361 float16_t __ret;
41362 __ret = (float16_t) __builtin_neon_vmaxh_f16(__p0, __p1);
41363 return __ret;
41364 }
41365 #endif
41366
41367 #ifdef __LITTLE_ENDIAN__
41368 __ai float16_t vmaxnmh_f16(float16_t __p0, float16_t __p1) {
41369 float16_t __ret;
41370 __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__p0, __p1);
41371 return __ret;
41372 }
41373 #else
41374 __ai float16_t vmaxnmh_f16(float16_t __p0, float16_t __p1) {
41375 float16_t __ret;
41376 __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__p0, __p1);
41377 return __ret;
41378 }
41379 #endif
41380
41381 #ifdef __LITTLE_ENDIAN__
41382 __ai float16_t vminh_f16(float16_t __p0, float16_t __p1) {
41383 float16_t __ret;
41384 __ret = (float16_t) __builtin_neon_vminh_f16(__p0, __p1);
41385 return __ret;
41386 }
41387 #else
41388 __ai float16_t vminh_f16(float16_t __p0, float16_t __p1) {
41389 float16_t __ret;
41390 __ret = (float16_t) __builtin_neon_vminh_f16(__p0, __p1);
41391 return __ret;
41392 }
41393 #endif
41394
41395 #ifdef __LITTLE_ENDIAN__
41396 __ai float16_t vminnmh_f16(float16_t __p0, float16_t __p1) {
41397 float16_t __ret;
41398 __ret = (float16_t) __builtin_neon_vminnmh_f16(__p0, __p1);
41399 return __ret;
41400 }
41401 #else
41402 __ai float16_t vminnmh_f16(float16_t __p0, float16_t __p1) {
41403 float16_t __ret;
41404 __ret = (float16_t) __builtin_neon_vminnmh_f16(__p0, __p1);
41405 return __ret;
41406 }
41407 #endif
41408
41409 #ifdef __LITTLE_ENDIAN__
41410 __ai float16_t vmulh_f16(float16_t __p0, float16_t __p1) {
41411 float16_t __ret;
41412 __ret = (float16_t) __builtin_neon_vmulh_f16(__p0, __p1);
41413 return __ret;
41414 }
41415 #else
41416 __ai float16_t vmulh_f16(float16_t __p0, float16_t __p1) {
41417 float16_t __ret;
41418 __ret = (float16_t) __builtin_neon_vmulh_f16(__p0, __p1);
41419 return __ret;
41420 }
41421 __ai float16_t __noswap_vmulh_f16(float16_t __p0, float16_t __p1) {
41422 float16_t __ret;
41423 __ret = (float16_t) __builtin_neon_vmulh_f16(__p0, __p1);
41424 return __ret;
41425 }
41426 #endif
41427
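/* The __noswap_ helpers have the same body as the public intrinsic but are
 * intended for operands that have already been lane-reversed; the big-endian
 * *_lane_* macros below reverse once and then call
 * __noswap_vmulh_f16/__noswap_vget_lane_f16 so elements are not swapped twice. */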
41428 #ifdef __LITTLE_ENDIAN__
41429 #define vmulh_lane_f16(__p0_0, __p1_0, __p2_0) __extension__ ({ \
41430 float16_t __s0_0 = __p0_0; \
41431 float16x4_t __s1_0 = __p1_0; \
41432 float16_t __ret_0; \
41433 __ret_0 = vmulh_f16(__s0_0, vget_lane_f16(__s1_0, __p2_0)); \
41434 __ret_0; \
41435 })
41436 #else
41437 #define vmulh_lane_f16(__p0_1, __p1_1, __p2_1) __extension__ ({ \
41438 float16_t __s0_1 = __p0_1; \
41439 float16x4_t __s1_1 = __p1_1; \
41440 float16x4_t __rev1_1; __rev1_1 = __builtin_shufflevector(__s1_1, __s1_1, 3, 2, 1, 0); \
41441 float16_t __ret_1; \
41442 __ret_1 = __noswap_vmulh_f16(__s0_1, __noswap_vget_lane_f16(__rev1_1, __p2_1)); \
41443 __ret_1; \
41444 })
41445 #endif
41446
41447 #ifdef __LITTLE_ENDIAN__
41448 #define vmulh_laneq_f16(__p0_2, __p1_2, __p2_2) __extension__ ({ \
41449 float16_t __s0_2 = __p0_2; \
41450 float16x8_t __s1_2 = __p1_2; \
41451 float16_t __ret_2; \
41452 __ret_2 = vmulh_f16(__s0_2, vgetq_lane_f16(__s1_2, __p2_2)); \
41453 __ret_2; \
41454 })
41455 #else
41456 #define vmulh_laneq_f16(__p0_3, __p1_3, __p2_3) __extension__ ({ \
41457 float16_t __s0_3 = __p0_3; \
41458 float16x8_t __s1_3 = __p1_3; \
41459 float16x8_t __rev1_3; __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \
41460 float16_t __ret_3; \
41461 __ret_3 = __noswap_vmulh_f16(__s0_3, __noswap_vgetq_lane_f16(__rev1_3, __p2_3)); \
41462 __ret_3; \
41463 })
41464 #endif
41465
41466 #ifdef __LITTLE_ENDIAN__
41467 __ai float16_t vmulxh_f16(float16_t __p0, float16_t __p1) {
41468 float16_t __ret;
41469 __ret = (float16_t) __builtin_neon_vmulxh_f16(__p0, __p1);
41470 return __ret;
41471 }
41472 #else
41473 __ai float16_t vmulxh_f16(float16_t __p0, float16_t __p1) {
41474 float16_t __ret;
41475 __ret = (float16_t) __builtin_neon_vmulxh_f16(__p0, __p1);
41476 return __ret;
41477 }
41478 __ai float16_t __noswap_vmulxh_f16(float16_t __p0, float16_t __p1) {
41479 float16_t __ret;
41480 __ret = (float16_t) __builtin_neon_vmulxh_f16(__p0, __p1);
41481 return __ret;
41482 }
41483 #endif
41484
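/* vmulxh_f16 maps to FMULX: like an ordinary multiply except that an operand
 * pair of zero and infinity yields +/-2.0 (with the sign of the product)
 * instead of NaN, which keeps reciprocal-estimate refinements well defined. */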
41485 #ifdef __LITTLE_ENDIAN__
41486 #define vmulxh_lane_f16(__p0_4, __p1_4, __p2_4) __extension__ ({ \
41487 float16_t __s0_4 = __p0_4; \
41488 float16x4_t __s1_4 = __p1_4; \
41489 float16_t __ret_4; \
41490 __ret_4 = vmulxh_f16(__s0_4, vget_lane_f16(__s1_4, __p2_4)); \
41491 __ret_4; \
41492 })
41493 #else
41494 #define vmulxh_lane_f16(__p0_5, __p1_5, __p2_5) __extension__ ({ \
41495 float16_t __s0_5 = __p0_5; \
41496 float16x4_t __s1_5 = __p1_5; \
41497 float16x4_t __rev1_5; __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \
41498 float16_t __ret_5; \
41499 __ret_5 = __noswap_vmulxh_f16(__s0_5, __noswap_vget_lane_f16(__rev1_5, __p2_5)); \
41500 __ret_5; \
41501 })
41502 #endif
41503
41504 #ifdef __LITTLE_ENDIAN__
41505 #define vmulxh_laneq_f16(__p0_6, __p1_6, __p2_6) __extension__ ({ \
41506 float16_t __s0_6 = __p0_6; \
41507 float16x8_t __s1_6 = __p1_6; \
41508 float16_t __ret_6; \
41509 __ret_6 = vmulxh_f16(__s0_6, vgetq_lane_f16(__s1_6, __p2_6)); \
41510 __ret_6; \
41511 })
41512 #else
41513 #define vmulxh_laneq_f16(__p0_7, __p1_7, __p2_7) __extension__ ({ \
41514 float16_t __s0_7 = __p0_7; \
41515 float16x8_t __s1_7 = __p1_7; \
41516 float16x8_t __rev1_7; __rev1_7 = __builtin_shufflevector(__s1_7, __s1_7, 7, 6, 5, 4, 3, 2, 1, 0); \
41517 float16_t __ret_7; \
41518 __ret_7 = __noswap_vmulxh_f16(__s0_7, __noswap_vgetq_lane_f16(__rev1_7, __p2_7)); \
41519 __ret_7; \
41520 })
41521 #endif
41522
41523 #ifdef __LITTLE_ENDIAN__
41524 __ai float16_t vnegh_f16(float16_t __p0) {
41525 float16_t __ret;
41526 __ret = (float16_t) __builtin_neon_vnegh_f16(__p0);
41527 return __ret;
41528 }
41529 #else
41530 __ai float16_t vnegh_f16(float16_t __p0) {
41531 float16_t __ret;
41532 __ret = (float16_t) __builtin_neon_vnegh_f16(__p0);
41533 return __ret;
41534 }
41535 #endif
41536
41537 #ifdef __LITTLE_ENDIAN__
41538 __ai float16_t vrecpeh_f16(float16_t __p0) {
41539 float16_t __ret;
41540 __ret = (float16_t) __builtin_neon_vrecpeh_f16(__p0);
41541 return __ret;
41542 }
41543 #else
41544 __ai float16_t vrecpeh_f16(float16_t __p0) {
41545 float16_t __ret;
41546 __ret = (float16_t) __builtin_neon_vrecpeh_f16(__p0);
41547 return __ret;
41548 }
41549 #endif
41550
41551 #ifdef __LITTLE_ENDIAN__
41552 __ai float16_t vrecpsh_f16(float16_t __p0, float16_t __p1) {
41553 float16_t __ret;
41554 __ret = (float16_t) __builtin_neon_vrecpsh_f16(__p0, __p1);
41555 return __ret;
41556 }
41557 #else
41558 __ai float16_t vrecpsh_f16(float16_t __p0, float16_t __p1) {
41559 float16_t __ret;
41560 __ret = (float16_t) __builtin_neon_vrecpsh_f16(__p0, __p1);
41561 return __ret;
41562 }
41563 #endif
41564
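/* vrecpeh_f16 is the reciprocal estimate and vrecpsh_f16 the matching
 * Newton-Raphson step (2 - a*b).  One refinement iteration (illustrative):
 *
 *   float16_t e = vrecpeh_f16(x);
 *   e = vmulh_f16(e, vrecpsh_f16(x, e));   // e is now closer to 1/x
 */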
41565 #ifdef __LITTLE_ENDIAN__
41566 __ai float16_t vrecpxh_f16(float16_t __p0) {
41567 float16_t __ret;
41568 __ret = (float16_t) __builtin_neon_vrecpxh_f16(__p0);
41569 return __ret;
41570 }
41571 #else
41572 __ai float16_t vrecpxh_f16(float16_t __p0) {
41573 float16_t __ret;
41574 __ret = (float16_t) __builtin_neon_vrecpxh_f16(__p0);
41575 return __ret;
41576 }
41577 #endif
41578
41579 #ifdef __LITTLE_ENDIAN__
41580 __ai float16_t vrndh_f16(float16_t __p0) {
41581 float16_t __ret;
41582 __ret = (float16_t) __builtin_neon_vrndh_f16(__p0);
41583 return __ret;
41584 }
41585 #else
41586 __ai float16_t vrndh_f16(float16_t __p0) {
41587 float16_t __ret;
41588 __ret = (float16_t) __builtin_neon_vrndh_f16(__p0);
41589 return __ret;
41590 }
41591 #endif
41592
41593 #ifdef __LITTLE_ENDIAN__
41594 __ai float16_t vrndah_f16(float16_t __p0) {
41595 float16_t __ret;
41596 __ret = (float16_t) __builtin_neon_vrndah_f16(__p0);
41597 return __ret;
41598 }
41599 #else
41600 __ai float16_t vrndah_f16(float16_t __p0) {
41601 float16_t __ret;
41602 __ret = (float16_t) __builtin_neon_vrndah_f16(__p0);
41603 return __ret;
41604 }
41605 #endif
41606
41607 #ifdef __LITTLE_ENDIAN__
41608 __ai float16_t vrndih_f16(float16_t __p0) {
41609 float16_t __ret;
41610 __ret = (float16_t) __builtin_neon_vrndih_f16(__p0);
41611 return __ret;
41612 }
41613 #else
41614 __ai float16_t vrndih_f16(float16_t __p0) {
41615 float16_t __ret;
41616 __ret = (float16_t) __builtin_neon_vrndih_f16(__p0);
41617 return __ret;
41618 }
41619 #endif
41620
41621 #ifdef __LITTLE_ENDIAN__
41622 __ai float16_t vrndmh_f16(float16_t __p0) {
41623 float16_t __ret;
41624 __ret = (float16_t) __builtin_neon_vrndmh_f16(__p0);
41625 return __ret;
41626 }
41627 #else
41628 __ai float16_t vrndmh_f16(float16_t __p0) {
41629 float16_t __ret;
41630 __ret = (float16_t) __builtin_neon_vrndmh_f16(__p0);
41631 return __ret;
41632 }
41633 #endif
41634
41635 #ifdef __LITTLE_ENDIAN__
41636 __ai float16_t vrndnh_f16(float16_t __p0) {
41637 float16_t __ret;
41638 __ret = (float16_t) __builtin_neon_vrndnh_f16(__p0);
41639 return __ret;
41640 }
41641 #else
41642 __ai float16_t vrndnh_f16(float16_t __p0) {
41643 float16_t __ret;
41644 __ret = (float16_t) __builtin_neon_vrndnh_f16(__p0);
41645 return __ret;
41646 }
41647 #endif
41648
41649 #ifdef __LITTLE_ENDIAN__
41650 __ai float16_t vrndph_f16(float16_t __p0) {
41651 float16_t __ret;
41652 __ret = (float16_t) __builtin_neon_vrndph_f16(__p0);
41653 return __ret;
41654 }
41655 #else
41656 __ai float16_t vrndph_f16(float16_t __p0) {
41657 float16_t __ret;
41658 __ret = (float16_t) __builtin_neon_vrndph_f16(__p0);
41659 return __ret;
41660 }
41661 #endif
41662
41663 #ifdef __LITTLE_ENDIAN__
41664 __ai float16_t vrndxh_f16(float16_t __p0) {
41665 float16_t __ret;
41666 __ret = (float16_t) __builtin_neon_vrndxh_f16(__p0);
41667 return __ret;
41668 }
41669 #else
41670 __ai float16_t vrndxh_f16(float16_t __p0) {
41671 float16_t __ret;
41672 __ret = (float16_t) __builtin_neon_vrndxh_f16(__p0);
41673 return __ret;
41674 }
41675 #endif
41676
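/* Rounding of the scalar vrnd*h_f16 helpers above: vrndh truncates toward
 * zero, vrndah rounds to nearest with ties away from zero, vrndih and vrndxh
 * use the current rounding mode (vrndxh may signal inexact), vrndmh rounds
 * toward minus infinity, vrndnh to nearest with ties to even, and vrndph
 * toward plus infinity. */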
41677 #ifdef __LITTLE_ENDIAN__
41678 __ai float16_t vrsqrteh_f16(float16_t __p0) {
41679 float16_t __ret;
41680 __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__p0);
41681 return __ret;
41682 }
41683 #else
41684 __ai float16_t vrsqrteh_f16(float16_t __p0) {
41685 float16_t __ret;
41686 __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__p0);
41687 return __ret;
41688 }
41689 #endif
41690
41691 #ifdef __LITTLE_ENDIAN__
41692 __ai float16_t vrsqrtsh_f16(float16_t __p0, float16_t __p1) {
41693 float16_t __ret;
41694 __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__p0, __p1);
41695 return __ret;
41696 }
41697 #else
41698 __ai float16_t vrsqrtsh_f16(float16_t __p0, float16_t __p1) {
41699 float16_t __ret;
41700 __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__p0, __p1);
41701 return __ret;
41702 }
41703 #endif
41704
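/* vrsqrteh_f16/vrsqrtsh_f16 are the reciprocal square-root estimate and its
 * refinement step ((3 - a*b) / 2), used the same way as the vrecpeh/vrecpsh
 * pair above. */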
41705 #ifdef __LITTLE_ENDIAN__
41706 __ai float16_t vsubh_f16(float16_t __p0, float16_t __p1) {
41707 float16_t __ret;
41708 __ret = (float16_t) __builtin_neon_vsubh_f16(__p0, __p1);
41709 return __ret;
41710 }
41711 #else
41712 __ai float16_t vsubh_f16(float16_t __p0, float16_t __p1) {
41713 float16_t __ret;
41714 __ret = (float16_t) __builtin_neon_vsubh_f16(__p0, __p1);
41715 return __ret;
41716 }
41717 #endif
41718
41719 #endif
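/* The block below provides the fixed-point scalar conversions vcvth_n_*.  The
 * second argument is a constant fractional-bit count: converting an integer to
 * float16 divides by 2^n, converting back multiplies by 2^n.  Illustrative use
 * (example values only):
 *
 *   float16_t h = vcvth_n_f16_s32(12, 3);   // 12 / 8  = 1.5
 *   int32_t  q  = vcvth_n_s32_f16(h, 3);    // 1.5 * 8 = 12
 */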
41720 #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__)
41721 #ifdef __LITTLE_ENDIAN__
41722 #define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
41723 int32_t __s0 = __p0; \
41724 float16_t __ret; \
41725 __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
41726 __ret; \
41727 })
41728 #else
41729 #define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
41730 int32_t __s0 = __p0; \
41731 float16_t __ret; \
41732 __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
41733 __ret; \
41734 })
41735 #endif
41736
41737 #ifdef __LITTLE_ENDIAN__
41738 #define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
41739 int64_t __s0 = __p0; \
41740 float16_t __ret; \
41741 __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
41742 __ret; \
41743 })
41744 #else
41745 #define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
41746 int64_t __s0 = __p0; \
41747 float16_t __ret; \
41748 __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
41749 __ret; \
41750 })
41751 #endif
41752
41753 #ifdef __LITTLE_ENDIAN__
41754 #define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
41755 int16_t __s0 = __p0; \
41756 float16_t __ret; \
41757 __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
41758 __ret; \
41759 })
41760 #else
41761 #define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
41762 int16_t __s0 = __p0; \
41763 float16_t __ret; \
41764 __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
41765 __ret; \
41766 })
41767 #endif
41768
41769 #ifdef __LITTLE_ENDIAN__
41770 #define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
41771 uint32_t __s0 = __p0; \
41772 float16_t __ret; \
41773 __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
41774 __ret; \
41775 })
41776 #else
41777 #define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
41778 uint32_t __s0 = __p0; \
41779 float16_t __ret; \
41780 __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
41781 __ret; \
41782 })
41783 #endif
41784
41785 #ifdef __LITTLE_ENDIAN__
41786 #define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
41787 uint64_t __s0 = __p0; \
41788 float16_t __ret; \
41789 __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
41790 __ret; \
41791 })
41792 #else
41793 #define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
41794 uint64_t __s0 = __p0; \
41795 float16_t __ret; \
41796 __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
41797 __ret; \
41798 })
41799 #endif
41800
41801 #ifdef __LITTLE_ENDIAN__
41802 #define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
41803 uint16_t __s0 = __p0; \
41804 float16_t __ret; \
41805 __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
41806 __ret; \
41807 })
41808 #else
41809 #define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
41810 uint16_t __s0 = __p0; \
41811 float16_t __ret; \
41812 __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
41813 __ret; \
41814 })
41815 #endif
41816
41817 #ifdef __LITTLE_ENDIAN__
41818 #define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
41819 float16_t __s0 = __p0; \
41820 int32_t __ret; \
41821 __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
41822 __ret; \
41823 })
41824 #else
41825 #define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
41826 float16_t __s0 = __p0; \
41827 int32_t __ret; \
41828 __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
41829 __ret; \
41830 })
41831 #endif
41832
41833 #ifdef __LITTLE_ENDIAN__
41834 #define vcvth_n_s32_s64(__p0, __p1) __extension__ ({ \
41835 int64_t __s0 = __p0; \
41836 int32_t __ret; \
41837 __ret = (int32_t) __builtin_neon_vcvth_n_s32_s64(__s0, __p1); \
41838 __ret; \
41839 })
41840 #else
41841 #define vcvth_n_s32_s64(__p0, __p1) __extension__ ({ \
41842 int64_t __s0 = __p0; \
41843 int32_t __ret; \
41844 __ret = (int32_t) __builtin_neon_vcvth_n_s32_s64(__s0, __p1); \
41845 __ret; \
41846 })
41847 #endif
41848
41849 #ifdef __LITTLE_ENDIAN__
41850 #define vcvth_n_s32_s16(__p0, __p1) __extension__ ({ \
41851 int16_t __s0 = __p0; \
41852 int32_t __ret; \
41853 __ret = (int32_t) __builtin_neon_vcvth_n_s32_s16(__s0, __p1); \
41854 __ret; \
41855 })
41856 #else
41857 #define vcvth_n_s32_s16(__p0, __p1) __extension__ ({ \
41858 int16_t __s0 = __p0; \
41859 int32_t __ret; \
41860 __ret = (int32_t) __builtin_neon_vcvth_n_s32_s16(__s0, __p1); \
41861 __ret; \
41862 })
41863 #endif
41864
41865 #ifdef __LITTLE_ENDIAN__
41866 #define vcvth_n_s32_u32(__p0, __p1) __extension__ ({ \
41867 uint32_t __s0 = __p0; \
41868 int32_t __ret; \
41869 __ret = (int32_t) __builtin_neon_vcvth_n_s32_u32(__s0, __p1); \
41870 __ret; \
41871 })
41872 #else
41873 #define vcvth_n_s32_u32(__p0, __p1) __extension__ ({ \
41874 uint32_t __s0 = __p0; \
41875 int32_t __ret; \
41876 __ret = (int32_t) __builtin_neon_vcvth_n_s32_u32(__s0, __p1); \
41877 __ret; \
41878 })
41879 #endif
41880
41881 #ifdef __LITTLE_ENDIAN__
41882 #define vcvth_n_s32_u64(__p0, __p1) __extension__ ({ \
41883 uint64_t __s0 = __p0; \
41884 int32_t __ret; \
41885 __ret = (int32_t) __builtin_neon_vcvth_n_s32_u64(__s0, __p1); \
41886 __ret; \
41887 })
41888 #else
41889 #define vcvth_n_s32_u64(__p0, __p1) __extension__ ({ \
41890 uint64_t __s0 = __p0; \
41891 int32_t __ret; \
41892 __ret = (int32_t) __builtin_neon_vcvth_n_s32_u64(__s0, __p1); \
41893 __ret; \
41894 })
41895 #endif
41896
41897 #ifdef __LITTLE_ENDIAN__
41898 #define vcvth_n_s32_u16(__p0, __p1) __extension__ ({ \
41899 uint16_t __s0 = __p0; \
41900 int32_t __ret; \
41901 __ret = (int32_t) __builtin_neon_vcvth_n_s32_u16(__s0, __p1); \
41902 __ret; \
41903 })
41904 #else
41905 #define vcvth_n_s32_u16(__p0, __p1) __extension__ ({ \
41906 uint16_t __s0 = __p0; \
41907 int32_t __ret; \
41908 __ret = (int32_t) __builtin_neon_vcvth_n_s32_u16(__s0, __p1); \
41909 __ret; \
41910 })
41911 #endif
41912
41913 #ifdef __LITTLE_ENDIAN__
41914 #define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
41915 float16_t __s0 = __p0; \
41916 int64_t __ret; \
41917 __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
41918 __ret; \
41919 })
41920 #else
41921 #define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
41922 float16_t __s0 = __p0; \
41923 int64_t __ret; \
41924 __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
41925 __ret; \
41926 })
41927 #endif
41928
41929 #ifdef __LITTLE_ENDIAN__
41930 #define vcvth_n_s64_s32(__p0, __p1) __extension__ ({ \
41931 int32_t __s0 = __p0; \
41932 int64_t __ret; \
41933 __ret = (int64_t) __builtin_neon_vcvth_n_s64_s32(__s0, __p1); \
41934 __ret; \
41935 })
41936 #else
41937 #define vcvth_n_s64_s32(__p0, __p1) __extension__ ({ \
41938 int32_t __s0 = __p0; \
41939 int64_t __ret; \
41940 __ret = (int64_t) __builtin_neon_vcvth_n_s64_s32(__s0, __p1); \
41941 __ret; \
41942 })
41943 #endif
41944
41945 #ifdef __LITTLE_ENDIAN__
41946 #define vcvth_n_s64_s16(__p0, __p1) __extension__ ({ \
41947 int16_t __s0 = __p0; \
41948 int64_t __ret; \
41949 __ret = (int64_t) __builtin_neon_vcvth_n_s64_s16(__s0, __p1); \
41950 __ret; \
41951 })
41952 #else
41953 #define vcvth_n_s64_s16(__p0, __p1) __extension__ ({ \
41954 int16_t __s0 = __p0; \
41955 int64_t __ret; \
41956 __ret = (int64_t) __builtin_neon_vcvth_n_s64_s16(__s0, __p1); \
41957 __ret; \
41958 })
41959 #endif
41960
41961 #ifdef __LITTLE_ENDIAN__
41962 #define vcvth_n_s64_u32(__p0, __p1) __extension__ ({ \
41963 uint32_t __s0 = __p0; \
41964 int64_t __ret; \
41965 __ret = (int64_t) __builtin_neon_vcvth_n_s64_u32(__s0, __p1); \
41966 __ret; \
41967 })
41968 #else
41969 #define vcvth_n_s64_u32(__p0, __p1) __extension__ ({ \
41970 uint32_t __s0 = __p0; \
41971 int64_t __ret; \
41972 __ret = (int64_t) __builtin_neon_vcvth_n_s64_u32(__s0, __p1); \
41973 __ret; \
41974 })
41975 #endif
41976
41977 #ifdef __LITTLE_ENDIAN__
41978 #define vcvth_n_s64_u64(__p0, __p1) __extension__ ({ \
41979 uint64_t __s0 = __p0; \
41980 int64_t __ret; \
41981 __ret = (int64_t) __builtin_neon_vcvth_n_s64_u64(__s0, __p1); \
41982 __ret; \
41983 })
41984 #else
41985 #define vcvth_n_s64_u64(__p0, __p1) __extension__ ({ \
41986 uint64_t __s0 = __p0; \
41987 int64_t __ret; \
41988 __ret = (int64_t) __builtin_neon_vcvth_n_s64_u64(__s0, __p1); \
41989 __ret; \
41990 })
41991 #endif
41992
41993 #ifdef __LITTLE_ENDIAN__
41994 #define vcvth_n_s64_u16(__p0, __p1) __extension__ ({ \
41995 uint16_t __s0 = __p0; \
41996 int64_t __ret; \
41997 __ret = (int64_t) __builtin_neon_vcvth_n_s64_u16(__s0, __p1); \
41998 __ret; \
41999 })
42000 #else
42001 #define vcvth_n_s64_u16(__p0, __p1) __extension__ ({ \
42002 uint16_t __s0 = __p0; \
42003 int64_t __ret; \
42004 __ret = (int64_t) __builtin_neon_vcvth_n_s64_u16(__s0, __p1); \
42005 __ret; \
42006 })
42007 #endif
42008
42009 #ifdef __LITTLE_ENDIAN__
42010 #define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
42011 float16_t __s0 = __p0; \
42012 int16_t __ret; \
42013 __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
42014 __ret; \
42015 })
42016 #else
42017 #define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
42018 float16_t __s0 = __p0; \
42019 int16_t __ret; \
42020 __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
42021 __ret; \
42022 })
42023 #endif
42024
42025 #ifdef __LITTLE_ENDIAN__
42026 #define vcvth_n_s16_s32(__p0, __p1) __extension__ ({ \
42027 int32_t __s0 = __p0; \
42028 int16_t __ret; \
42029 __ret = (int16_t) __builtin_neon_vcvth_n_s16_s32(__s0, __p1); \
42030 __ret; \
42031 })
42032 #else
42033 #define vcvth_n_s16_s32(__p0, __p1) __extension__ ({ \
42034 int32_t __s0 = __p0; \
42035 int16_t __ret; \
42036 __ret = (int16_t) __builtin_neon_vcvth_n_s16_s32(__s0, __p1); \
42037 __ret; \
42038 })
42039 #endif
42040
42041 #ifdef __LITTLE_ENDIAN__
42042 #define vcvth_n_s16_s64(__p0, __p1) __extension__ ({ \
42043 int64_t __s0 = __p0; \
42044 int16_t __ret; \
42045 __ret = (int16_t) __builtin_neon_vcvth_n_s16_s64(__s0, __p1); \
42046 __ret; \
42047 })
42048 #else
42049 #define vcvth_n_s16_s64(__p0, __p1) __extension__ ({ \
42050 int64_t __s0 = __p0; \
42051 int16_t __ret; \
42052 __ret = (int16_t) __builtin_neon_vcvth_n_s16_s64(__s0, __p1); \
42053 __ret; \
42054 })
42055 #endif
42056
42057 #ifdef __LITTLE_ENDIAN__
42058 #define vcvth_n_s16_u32(__p0, __p1) __extension__ ({ \
42059 uint32_t __s0 = __p0; \
42060 int16_t __ret; \
42061 __ret = (int16_t) __builtin_neon_vcvth_n_s16_u32(__s0, __p1); \
42062 __ret; \
42063 })
42064 #else
42065 #define vcvth_n_s16_u32(__p0, __p1) __extension__ ({ \
42066 uint32_t __s0 = __p0; \
42067 int16_t __ret; \
42068 __ret = (int16_t) __builtin_neon_vcvth_n_s16_u32(__s0, __p1); \
42069 __ret; \
42070 })
42071 #endif
42072
42073 #ifdef __LITTLE_ENDIAN__
42074 #define vcvth_n_s16_u64(__p0, __p1) __extension__ ({ \
42075 uint64_t __s0 = __p0; \
42076 int16_t __ret; \
42077 __ret = (int16_t) __builtin_neon_vcvth_n_s16_u64(__s0, __p1); \
42078 __ret; \
42079 })
42080 #else
42081 #define vcvth_n_s16_u64(__p0, __p1) __extension__ ({ \
42082 uint64_t __s0 = __p0; \
42083 int16_t __ret; \
42084 __ret = (int16_t) __builtin_neon_vcvth_n_s16_u64(__s0, __p1); \
42085 __ret; \
42086 })
42087 #endif
42088
42089 #ifdef __LITTLE_ENDIAN__
42090 #define vcvth_n_s16_u16(__p0, __p1) __extension__ ({ \
42091 uint16_t __s0 = __p0; \
42092 int16_t __ret; \
42093 __ret = (int16_t) __builtin_neon_vcvth_n_s16_u16(__s0, __p1); \
42094 __ret; \
42095 })
42096 #else
42097 #define vcvth_n_s16_u16(__p0, __p1) __extension__ ({ \
42098 uint16_t __s0 = __p0; \
42099 int16_t __ret; \
42100 __ret = (int16_t) __builtin_neon_vcvth_n_s16_u16(__s0, __p1); \
42101 __ret; \
42102 })
42103 #endif
42104
42105 #ifdef __LITTLE_ENDIAN__
42106 #define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
42107 float16_t __s0 = __p0; \
42108 uint32_t __ret; \
42109 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
42110 __ret; \
42111 })
42112 #else
42113 #define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
42114 float16_t __s0 = __p0; \
42115 uint32_t __ret; \
42116 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
42117 __ret; \
42118 })
42119 #endif
42120
42121 #ifdef __LITTLE_ENDIAN__
42122 #define vcvth_n_u32_s32(__p0, __p1) __extension__ ({ \
42123 int32_t __s0 = __p0; \
42124 uint32_t __ret; \
42125 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s32(__s0, __p1); \
42126 __ret; \
42127 })
42128 #else
42129 #define vcvth_n_u32_s32(__p0, __p1) __extension__ ({ \
42130 int32_t __s0 = __p0; \
42131 uint32_t __ret; \
42132 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s32(__s0, __p1); \
42133 __ret; \
42134 })
42135 #endif
42136
42137 #ifdef __LITTLE_ENDIAN__
42138 #define vcvth_n_u32_s64(__p0, __p1) __extension__ ({ \
42139 int64_t __s0 = __p0; \
42140 uint32_t __ret; \
42141 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s64(__s0, __p1); \
42142 __ret; \
42143 })
42144 #else
42145 #define vcvth_n_u32_s64(__p0, __p1) __extension__ ({ \
42146 int64_t __s0 = __p0; \
42147 uint32_t __ret; \
42148 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s64(__s0, __p1); \
42149 __ret; \
42150 })
42151 #endif
42152
42153 #ifdef __LITTLE_ENDIAN__
42154 #define vcvth_n_u32_s16(__p0, __p1) __extension__ ({ \
42155 int16_t __s0 = __p0; \
42156 uint32_t __ret; \
42157 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s16(__s0, __p1); \
42158 __ret; \
42159 })
42160 #else
42161 #define vcvth_n_u32_s16(__p0, __p1) __extension__ ({ \
42162 int16_t __s0 = __p0; \
42163 uint32_t __ret; \
42164 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s16(__s0, __p1); \
42165 __ret; \
42166 })
42167 #endif
42168
42169 #ifdef __LITTLE_ENDIAN__
42170 #define vcvth_n_u32_u64(__p0, __p1) __extension__ ({ \
42171 uint64_t __s0 = __p0; \
42172 uint32_t __ret; \
42173 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u64(__s0, __p1); \
42174 __ret; \
42175 })
42176 #else
42177 #define vcvth_n_u32_u64(__p0, __p1) __extension__ ({ \
42178 uint64_t __s0 = __p0; \
42179 uint32_t __ret; \
42180 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u64(__s0, __p1); \
42181 __ret; \
42182 })
42183 #endif
42184
42185 #ifdef __LITTLE_ENDIAN__
42186 #define vcvth_n_u32_u16(__p0, __p1) __extension__ ({ \
42187 uint16_t __s0 = __p0; \
42188 uint32_t __ret; \
42189 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u16(__s0, __p1); \
42190 __ret; \
42191 })
42192 #else
42193 #define vcvth_n_u32_u16(__p0, __p1) __extension__ ({ \
42194 uint16_t __s0 = __p0; \
42195 uint32_t __ret; \
42196 __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u16(__s0, __p1); \
42197 __ret; \
42198 })
42199 #endif
42200
42201 #ifdef __LITTLE_ENDIAN__
42202 #define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
42203 float16_t __s0 = __p0; \
42204 uint64_t __ret; \
42205 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
42206 __ret; \
42207 })
42208 #else
42209 #define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
42210 float16_t __s0 = __p0; \
42211 uint64_t __ret; \
42212 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
42213 __ret; \
42214 })
42215 #endif
42216
42217 #ifdef __LITTLE_ENDIAN__
42218 #define vcvth_n_u64_s32(__p0, __p1) __extension__ ({ \
42219 int32_t __s0 = __p0; \
42220 uint64_t __ret; \
42221 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s32(__s0, __p1); \
42222 __ret; \
42223 })
42224 #else
42225 #define vcvth_n_u64_s32(__p0, __p1) __extension__ ({ \
42226 int32_t __s0 = __p0; \
42227 uint64_t __ret; \
42228 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s32(__s0, __p1); \
42229 __ret; \
42230 })
42231 #endif
42232
42233 #ifdef __LITTLE_ENDIAN__
42234 #define vcvth_n_u64_s64(__p0, __p1) __extension__ ({ \
42235 int64_t __s0 = __p0; \
42236 uint64_t __ret; \
42237 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s64(__s0, __p1); \
42238 __ret; \
42239 })
42240 #else
42241 #define vcvth_n_u64_s64(__p0, __p1) __extension__ ({ \
42242 int64_t __s0 = __p0; \
42243 uint64_t __ret; \
42244 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s64(__s0, __p1); \
42245 __ret; \
42246 })
42247 #endif
42248
42249 #ifdef __LITTLE_ENDIAN__
42250 #define vcvth_n_u64_s16(__p0, __p1) __extension__ ({ \
42251 int16_t __s0 = __p0; \
42252 uint64_t __ret; \
42253 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s16(__s0, __p1); \
42254 __ret; \
42255 })
42256 #else
42257 #define vcvth_n_u64_s16(__p0, __p1) __extension__ ({ \
42258 int16_t __s0 = __p0; \
42259 uint64_t __ret; \
42260 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s16(__s0, __p1); \
42261 __ret; \
42262 })
42263 #endif
42264
42265 #ifdef __LITTLE_ENDIAN__
42266 #define vcvth_n_u64_u32(__p0, __p1) __extension__ ({ \
42267 uint32_t __s0 = __p0; \
42268 uint64_t __ret; \
42269 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u32(__s0, __p1); \
42270 __ret; \
42271 })
42272 #else
42273 #define vcvth_n_u64_u32(__p0, __p1) __extension__ ({ \
42274 uint32_t __s0 = __p0; \
42275 uint64_t __ret; \
42276 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u32(__s0, __p1); \
42277 __ret; \
42278 })
42279 #endif
42280
42281 #ifdef __LITTLE_ENDIAN__
42282 #define vcvth_n_u64_u16(__p0, __p1) __extension__ ({ \
42283 uint16_t __s0 = __p0; \
42284 uint64_t __ret; \
42285 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u16(__s0, __p1); \
42286 __ret; \
42287 })
42288 #else
42289 #define vcvth_n_u64_u16(__p0, __p1) __extension__ ({ \
42290 uint16_t __s0 = __p0; \
42291 uint64_t __ret; \
42292 __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u16(__s0, __p1); \
42293 __ret; \
42294 })
42295 #endif
42296
42297 #ifdef __LITTLE_ENDIAN__
42298 #define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
42299 float16_t __s0 = __p0; \
42300 uint16_t __ret; \
42301 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
42302 __ret; \
42303 })
42304 #else
42305 #define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
42306 float16_t __s0 = __p0; \
42307 uint16_t __ret; \
42308 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
42309 __ret; \
42310 })
42311 #endif
42312
42313 #ifdef __LITTLE_ENDIAN__
42314 #define vcvth_n_u16_s32(__p0, __p1) __extension__ ({ \
42315 int32_t __s0 = __p0; \
42316 uint16_t __ret; \
42317 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s32(__s0, __p1); \
42318 __ret; \
42319 })
42320 #else
42321 #define vcvth_n_u16_s32(__p0, __p1) __extension__ ({ \
42322 int32_t __s0 = __p0; \
42323 uint16_t __ret; \
42324 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s32(__s0, __p1); \
42325 __ret; \
42326 })
42327 #endif
42328
42329 #ifdef __LITTLE_ENDIAN__
42330 #define vcvth_n_u16_s64(__p0, __p1) __extension__ ({ \
42331 int64_t __s0 = __p0; \
42332 uint16_t __ret; \
42333 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s64(__s0, __p1); \
42334 __ret; \
42335 })
42336 #else
42337 #define vcvth_n_u16_s64(__p0, __p1) __extension__ ({ \
42338 int64_t __s0 = __p0; \
42339 uint16_t __ret; \
42340 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s64(__s0, __p1); \
42341 __ret; \
42342 })
42343 #endif
42344
42345 #ifdef __LITTLE_ENDIAN__
42346 #define vcvth_n_u16_s16(__p0, __p1) __extension__ ({ \
42347 int16_t __s0 = __p0; \
42348 uint16_t __ret; \
42349 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s16(__s0, __p1); \
42350 __ret; \
42351 })
42352 #else
42353 #define vcvth_n_u16_s16(__p0, __p1) __extension__ ({ \
42354 int16_t __s0 = __p0; \
42355 uint16_t __ret; \
42356 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s16(__s0, __p1); \
42357 __ret; \
42358 })
42359 #endif
42360
42361 #ifdef __LITTLE_ENDIAN__
42362 #define vcvth_n_u16_u32(__p0, __p1) __extension__ ({ \
42363 uint32_t __s0 = __p0; \
42364 uint16_t __ret; \
42365 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u32(__s0, __p1); \
42366 __ret; \
42367 })
42368 #else
42369 #define vcvth_n_u16_u32(__p0, __p1) __extension__ ({ \
42370 uint32_t __s0 = __p0; \
42371 uint16_t __ret; \
42372 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u32(__s0, __p1); \
42373 __ret; \
42374 })
42375 #endif
42376
42377 #ifdef __LITTLE_ENDIAN__
42378 #define vcvth_n_u16_u64(__p0, __p1) __extension__ ({ \
42379 uint64_t __s0 = __p0; \
42380 uint16_t __ret; \
42381 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u64(__s0, __p1); \
42382 __ret; \
42383 })
42384 #else
42385 #define vcvth_n_u16_u64(__p0, __p1) __extension__ ({ \
42386 uint64_t __s0 = __p0; \
42387 uint16_t __ret; \
42388 __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u64(__s0, __p1); \
42389 __ret; \
42390 })
42391 #endif
42392
42393 #ifdef __LITTLE_ENDIAN__
42394 __ai float16_t vdivh_f16(float16_t __p0, float16_t __p1) {
42395 float16_t __ret;
42396 __ret = (float16_t) __builtin_neon_vdivh_f16(__p0, __p1);
42397 return __ret;
42398 }
42399 #else
42400 __ai float16_t vdivh_f16(float16_t __p0, float16_t __p1) {
42401 float16_t __ret;
42402 __ret = (float16_t) __builtin_neon_vdivh_f16(__p0, __p1);
42403 return __ret;
42404 }
42405 #endif
42406
42407 #ifdef __LITTLE_ENDIAN__
42408 __ai float16_t vfmsh_f16(float16_t __p0, float16_t __p1, float16_t __p2) {
42409 float16_t __ret;
42410 __ret = (float16_t) __builtin_neon_vfmsh_f16(__p0, __p1, __p2);
42411 return __ret;
42412 }
42413 #else
42414 __ai float16_t vfmsh_f16(float16_t __p0, float16_t __p1, float16_t __p2) {
42415 float16_t __ret;
42416 __ret = (float16_t) __builtin_neon_vfmsh_f16(__p0, __p1, __p2);
42417 return __ret;
42418 }
42419 __ai float16_t __noswap_vfmsh_f16(float16_t __p0, float16_t __p1, float16_t __p2) {
42420 float16_t __ret;
42421 __ret = (float16_t) __builtin_neon_vfmsh_f16(__p0, __p1, __p2);
42422 return __ret;
42423 }
42424 #endif
42425
42426 #ifdef __LITTLE_ENDIAN__
42427 __ai float16_t vsqrth_f16(float16_t __p0) {
42428 float16_t __ret;
42429 __ret = (float16_t) __builtin_neon_vsqrth_f16(__p0);
42430 return __ret;
42431 }
42432 #else
42433 __ai float16_t vsqrth_f16(float16_t __p0) {
42434 float16_t __ret;
42435 __ret = (float16_t) __builtin_neon_vsqrth_f16(__p0);
42436 return __ret;
42437 }
42438 #endif
42439
42440 #endif
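/* The block below covers whole-vector half-precision arithmetic on
 * float16x4_t/float16x8_t.  Each intrinsic has a big-endian variant that
 * reverses the input lanes, invokes the same builtin, and reverses the result,
 * so callers observe the same lane order on either endianness. */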
42441 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
42442 #ifdef __LITTLE_ENDIAN__
42443 __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
42444 float16x8_t __ret;
42445 __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
42446 return __ret;
42447 }
42448 #else
42449 __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
42450 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42451 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42452 float16x8_t __ret;
42453 __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
42454 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42455 return __ret;
42456 }
42457 #endif
42458
42459 #ifdef __LITTLE_ENDIAN__
42460 __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
42461 float16x4_t __ret;
42462 __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
42463 return __ret;
42464 }
42465 #else
42466 __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
42467 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42468 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42469 float16x4_t __ret;
42470 __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
42471 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42472 return __ret;
42473 }
42474 #endif
42475
42476 #ifdef __LITTLE_ENDIAN__
42477 __ai float16x8_t vabsq_f16(float16x8_t __p0) {
42478 float16x8_t __ret;
42479 __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
42480 return __ret;
42481 }
42482 #else
42483 __ai float16x8_t vabsq_f16(float16x8_t __p0) {
42484 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42485 float16x8_t __ret;
42486 __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
42487 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42488 return __ret;
42489 }
42490 #endif
42491
42492 #ifdef __LITTLE_ENDIAN__
42493 __ai float16x4_t vabs_f16(float16x4_t __p0) {
42494 float16x4_t __ret;
42495 __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
42496 return __ret;
42497 }
42498 #else
42499 __ai float16x4_t vabs_f16(float16x4_t __p0) {
42500 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42501 float16x4_t __ret;
42502 __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
42503 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42504 return __ret;
42505 }
42506 #endif
42507
42508 #ifdef __LITTLE_ENDIAN__
42509 __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
42510 float16x8_t __ret;
42511 __ret = __p0 + __p1;
42512 return __ret;
42513 }
42514 #else
42515 __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
42516 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42517 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42518 float16x8_t __ret;
42519 __ret = __rev0 + __rev1;
42520 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42521 return __ret;
42522 }
42523 #endif
42524
42525 #ifdef __LITTLE_ENDIAN__
42526 __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
42527 float16x4_t __ret;
42528 __ret = __p0 + __p1;
42529 return __ret;
42530 }
42531 #else
42532 __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
42533 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42534 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42535 float16x4_t __ret;
42536 __ret = __rev0 + __rev1;
42537 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42538 return __ret;
42539 }
42540 #endif
42541
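/* vaddq_f16/vadd_f16 lower to the vector '+' operator rather than a builtin
 * call.  Illustrative use (example values only):
 *
 *   float16x4_t a = vdup_n_f16(1.0);
 *   float16x4_t b = vdup_n_f16(2.0);
 *   float16x4_t c = vadd_f16(a, b);   // every lane is 3.0
 */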
42542 #ifdef __LITTLE_ENDIAN__
42543 __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
42544 float16x8_t __ret;
42545 __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
42546 return __ret;
42547 }
42548 #else
42549 __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
42550 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42551 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42552 float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
42553 float16x8_t __ret;
42554 __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
42555 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42556 return __ret;
42557 }
42558 #endif
42559
42560 #ifdef __LITTLE_ENDIAN__
42561 __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
42562 float16x4_t __ret;
42563 __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
42564 return __ret;
42565 }
42566 #else
42567 __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
42568 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42569 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42570 float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
42571 float16x4_t __ret;
42572 __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
42573 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42574 return __ret;
42575 }
42576 #endif
42577
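/* vbslq_f16/vbsl_f16 are bitwise selects: result bits come from the second
 * argument where the mask bit is set and from the third otherwise.  A common
 * pattern feeds them a comparison mask (illustrative, x and y are any
 * float16x8_t values):
 *
 *   uint16x8_t m = vcgtq_f16(x, y);            // all-ones lanes where x > y
 *   float16x8_t max_xy = vbslq_f16(m, x, y);   // per-lane maximum
 */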
42578 #ifdef __LITTLE_ENDIAN__
42579 __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
42580 uint16x8_t __ret;
42581 __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
42582 return __ret;
42583 }
42584 #else
42585 __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
42586 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42587 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42588 uint16x8_t __ret;
42589 __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
42590 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42591 return __ret;
42592 }
42593 #endif
42594
42595 #ifdef __LITTLE_ENDIAN__
42596 __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
42597 uint16x4_t __ret;
42598 __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
42599 return __ret;
42600 }
42601 #else
42602 __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
42603 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42604 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42605 uint16x4_t __ret;
42606 __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
42607 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42608 return __ret;
42609 }
42610 #endif
42611
42612 #ifdef __LITTLE_ENDIAN__
42613 __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
42614 uint16x8_t __ret;
42615 __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
42616 return __ret;
42617 }
42618 #else
42619 __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
42620 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42621 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42622 uint16x8_t __ret;
42623 __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
42624 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42625 return __ret;
42626 }
42627 #endif
42628
42629 #ifdef __LITTLE_ENDIAN__
42630 __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
42631 uint16x4_t __ret;
42632 __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
42633 return __ret;
42634 }
42635 #else
42636 __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
42637 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42638 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42639 uint16x4_t __ret;
42640 __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
42641 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42642 return __ret;
42643 }
42644 #endif
42645
42646 #ifdef __LITTLE_ENDIAN__
42647 __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
42648 uint16x8_t __ret;
42649 __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
42650 return __ret;
42651 }
42652 #else
42653 __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
42654 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42655 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42656 uint16x8_t __ret;
42657 __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
42658 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42659 return __ret;
42660 }
42661 #endif
42662
42663 #ifdef __LITTLE_ENDIAN__
42664 __ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
42665 uint16x4_t __ret;
42666 __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
42667 return __ret;
42668 }
42669 #else
42670 __ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
42671 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42672 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42673 uint16x4_t __ret;
42674 __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
42675 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42676 return __ret;
42677 }
42678 #endif
42679
42680 #ifdef __LITTLE_ENDIAN__
42681 __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
42682 uint16x8_t __ret;
42683 __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
42684 return __ret;
42685 }
42686 #else
42687 __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
42688 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42689 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42690 uint16x8_t __ret;
42691 __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
42692 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42693 return __ret;
42694 }
42695 #endif
42696
42697 #ifdef __LITTLE_ENDIAN__
42698 __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
42699 uint16x4_t __ret;
42700 __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
42701 return __ret;
42702 }
42703 #else
42704 __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
42705 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42706 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42707 uint16x4_t __ret;
42708 __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
42709 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42710 return __ret;
42711 }
42712 #endif
42713
42714 #ifdef __LITTLE_ENDIAN__
42715 __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
42716 uint16x8_t __ret;
42717 __ret = (uint16x8_t)(__p0 == __p1);
42718 return __ret;
42719 }
42720 #else
42721 __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
42722 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42723 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42724 uint16x8_t __ret;
42725 __ret = (uint16x8_t)(__rev0 == __rev1);
42726 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42727 return __ret;
42728 }
42729 #endif
42730
42731 #ifdef __LITTLE_ENDIAN__
42732 __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
42733 uint16x4_t __ret;
42734 __ret = (uint16x4_t)(__p0 == __p1);
42735 return __ret;
42736 }
42737 #else
42738 __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
42739 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42740 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42741 uint16x4_t __ret;
42742 __ret = (uint16x4_t)(__rev0 == __rev1);
42743 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42744 return __ret;
42745 }
42746 #endif
42747
42748 #ifdef __LITTLE_ENDIAN__
42749 __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
42750 uint16x8_t __ret;
42751 __ret = (uint16x8_t)(__p0 >= __p1);
42752 return __ret;
42753 }
42754 #else
42755 __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
42756 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42757 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42758 uint16x8_t __ret;
42759 __ret = (uint16x8_t)(__rev0 >= __rev1);
42760 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42761 return __ret;
42762 }
42763 #endif
42764
42765 #ifdef __LITTLE_ENDIAN__
42766 __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
42767 uint16x4_t __ret;
42768 __ret = (uint16x4_t)(__p0 >= __p1);
42769 return __ret;
42770 }
42771 #else
42772 __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
42773 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42774 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42775 uint16x4_t __ret;
42776 __ret = (uint16x4_t)(__rev0 >= __rev1);
42777 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42778 return __ret;
42779 }
42780 #endif
42781
42782 #ifdef __LITTLE_ENDIAN__
42783 __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
42784 uint16x8_t __ret;
42785 __ret = (uint16x8_t)(__p0 > __p1);
42786 return __ret;
42787 }
42788 #else
42789 __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
42790 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42791 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42792 uint16x8_t __ret;
42793 __ret = (uint16x8_t)(__rev0 > __rev1);
42794 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42795 return __ret;
42796 }
42797 #endif
42798
42799 #ifdef __LITTLE_ENDIAN__
42800 __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
42801 uint16x4_t __ret;
42802 __ret = (uint16x4_t)(__p0 > __p1);
42803 return __ret;
42804 }
42805 #else
42806 __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
42807 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42808 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42809 uint16x4_t __ret;
42810 __ret = (uint16x4_t)(__rev0 > __rev1);
42811 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42812 return __ret;
42813 }
42814 #endif
42815
42816 #ifdef __LITTLE_ENDIAN__
42817 __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
42818 uint16x8_t __ret;
42819 __ret = (uint16x8_t)(__p0 <= __p1);
42820 return __ret;
42821 }
42822 #else
42823 __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
42824 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42825 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42826 uint16x8_t __ret;
42827 __ret = (uint16x8_t)(__rev0 <= __rev1);
42828 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42829 return __ret;
42830 }
42831 #endif
42832
42833 #ifdef __LITTLE_ENDIAN__
42834 __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
42835 uint16x4_t __ret;
42836 __ret = (uint16x4_t)(__p0 <= __p1);
42837 return __ret;
42838 }
42839 #else
42840 __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
42841 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42842 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42843 uint16x4_t __ret;
42844 __ret = (uint16x4_t)(__rev0 <= __rev1);
42845 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42846 return __ret;
42847 }
42848 #endif
42849
42850 #ifdef __LITTLE_ENDIAN__
42851 __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
42852 uint16x8_t __ret;
42853 __ret = (uint16x8_t)(__p0 < __p1);
42854 return __ret;
42855 }
42856 #else
42857 __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
42858 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42859 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
42860 uint16x8_t __ret;
42861 __ret = (uint16x8_t)(__rev0 < __rev1);
42862 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42863 return __ret;
42864 }
42865 #endif
42866
42867 #ifdef __LITTLE_ENDIAN__
42868 __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
42869 uint16x4_t __ret;
42870 __ret = (uint16x4_t)(__p0 < __p1);
42871 return __ret;
42872 }
42873 #else
42874 __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
42875 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42876 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
42877 uint16x4_t __ret;
42878 __ret = (uint16x4_t)(__rev0 < __rev1);
42879 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42880 return __ret;
42881 }
42882 #endif
42883
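/* The vceq/vcge/vcgt/vcle/vclt comparisons above return lane masks rather than
 * booleans: each uint16 lane is 0xFFFF where the predicate holds and 0x0000
 * where it does not. */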
42884 #ifdef __LITTLE_ENDIAN__
42885 __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
42886 float16x8_t __ret;
42887 __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
42888 return __ret;
42889 }
42890 #else
42891 __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
42892 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42893 float16x8_t __ret;
42894 __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
42895 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42896 return __ret;
42897 }
42898 #endif
42899
42900 #ifdef __LITTLE_ENDIAN__
42901 __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
42902 float16x8_t __ret;
42903 __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
42904 return __ret;
42905 }
42906 #else
42907 __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
42908 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
42909 float16x8_t __ret;
42910 __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
42911 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
42912 return __ret;
42913 }
42914 #endif
42915
42916 #ifdef __LITTLE_ENDIAN__
42917 __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
42918 float16x4_t __ret;
42919 __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
42920 return __ret;
42921 }
42922 #else
42923 __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
42924 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42925 float16x4_t __ret;
42926 __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
42927 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42928 return __ret;
42929 }
42930 #endif
42931
42932 #ifdef __LITTLE_ENDIAN__
42933 __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
42934 float16x4_t __ret;
42935 __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
42936 return __ret;
42937 }
42938 #else
42939 __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
42940 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
42941 float16x4_t __ret;
42942 __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
42943 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
42944 return __ret;
42945 }
42946 #endif
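/* The vcvt{q}_f16_{u16,s16} intrinsics above convert each 16-bit integer lane
 * to the nearest half-precision value. Illustrative example (placeholder names):
 *
 *   int16x4_t i = vdup_n_s16(-3);
 *   float16x4_t f = vcvt_f16_s16(i);   // every lane becomes -3.0
 */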
42947
42948 #ifdef __LITTLE_ENDIAN__
42949 #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
42950 uint16x8_t __s0 = __p0; \
42951 float16x8_t __ret; \
42952 __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
42953 __ret; \
42954 })
42955 #else
42956 #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
42957 uint16x8_t __s0 = __p0; \
42958 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
42959 float16x8_t __ret; \
42960 __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
42961 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
42962 __ret; \
42963 })
42964 #endif
42965
42966 #ifdef __LITTLE_ENDIAN__
42967 #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
42968 int16x8_t __s0 = __p0; \
42969 float16x8_t __ret; \
42970 __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
42971 __ret; \
42972 })
42973 #else
42974 #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
42975 int16x8_t __s0 = __p0; \
42976 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
42977 float16x8_t __ret; \
42978 __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
42979 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
42980 __ret; \
42981 })
42982 #endif
42983
42984 #ifdef __LITTLE_ENDIAN__
42985 #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
42986 uint16x4_t __s0 = __p0; \
42987 float16x4_t __ret; \
42988 __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
42989 __ret; \
42990 })
42991 #else
42992 #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
42993 uint16x4_t __s0 = __p0; \
42994 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
42995 float16x4_t __ret; \
42996 __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
42997 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
42998 __ret; \
42999 })
43000 #endif
43001
43002 #ifdef __LITTLE_ENDIAN__
43003 #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
43004 int16x4_t __s0 = __p0; \
43005 float16x4_t __ret; \
43006 __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
43007 __ret; \
43008 })
43009 #else
43010 #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
43011 int16x4_t __s0 = __p0; \
43012 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
43013 float16x4_t __ret; \
43014 __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
43015 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
43016 __ret; \
43017 })
43018 #endif
43019
43020 #ifdef __LITTLE_ENDIAN__
43021 #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
43022 float16x8_t __s0 = __p0; \
43023 int16x8_t __ret; \
43024 __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
43025 __ret; \
43026 })
43027 #else
43028 #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
43029 float16x8_t __s0 = __p0; \
43030 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
43031 int16x8_t __ret; \
43032 __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
43033 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
43034 __ret; \
43035 })
43036 #endif
43037
43038 #ifdef __LITTLE_ENDIAN__
43039 #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
43040 float16x4_t __s0 = __p0; \
43041 int16x4_t __ret; \
43042 __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
43043 __ret; \
43044 })
43045 #else
43046 #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
43047 float16x4_t __s0 = __p0; \
43048 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
43049 int16x4_t __ret; \
43050 __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
43051 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
43052 __ret; \
43053 })
43054 #endif
43055
43056 #ifdef __LITTLE_ENDIAN__
43057 #define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
43058 float16x8_t __s0 = __p0; \
43059 uint16x8_t __ret; \
43060 __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
43061 __ret; \
43062 })
43063 #else
43064 #define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
43065 float16x8_t __s0 = __p0; \
43066 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
43067 uint16x8_t __ret; \
43068 __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
43069 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
43070 __ret; \
43071 })
43072 #endif
43073
43074 #ifdef __LITTLE_ENDIAN__
43075 #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
43076 float16x4_t __s0 = __p0; \
43077 uint16x4_t __ret; \
43078 __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
43079 __ret; \
43080 })
43081 #else
43082 #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
43083 float16x4_t __s0 = __p0; \
43084 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
43085 uint16x4_t __ret; \
43086 __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
43087 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
43088 __ret; \
43089 })
43090 #endif
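/* The vcvt{q}_n_* macros above are fixed-point conversions: the immediate __p1
 * gives the number of fractional bits (1..16). Converting to float divides the
 * integer lane by 2^__p1; converting to integer multiplies by 2^__p1 and
 * saturates to the destination range. Illustrative example (placeholder names):
 *
 *   int16x4_t q88 = vdup_n_s16(0x180);        // 0x180 = 384 = 1.5 in Q8.8
 *   float16x4_t f = vcvt_n_f16_s16(q88, 8);   // 1.5 in every lane
 */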
43091
43092 #ifdef __LITTLE_ENDIAN__
43093 __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
43094 int16x8_t __ret;
43095 __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
43096 return __ret;
43097 }
43098 #else
43099 __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
43100 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43101 int16x8_t __ret;
43102 __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
43103 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43104 return __ret;
43105 }
43106 #endif
43107
43108 #ifdef __LITTLE_ENDIAN__
43109 __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
43110 int16x4_t __ret;
43111 __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
43112 return __ret;
43113 }
43114 #else
43115 __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
43116 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43117 int16x4_t __ret;
43118 __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
43119 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43120 return __ret;
43121 }
43122 #endif
43123
43124 #ifdef __LITTLE_ENDIAN__
43125 __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
43126 uint16x8_t __ret;
43127 __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
43128 return __ret;
43129 }
43130 #else
43131 __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
43132 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43133 uint16x8_t __ret;
43134 __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
43135 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43136 return __ret;
43137 }
43138 #endif
43139
43140 #ifdef __LITTLE_ENDIAN__
43141 __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
43142 uint16x4_t __ret;
43143 __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
43144 return __ret;
43145 }
43146 #else
43147 __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
43148 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43149 uint16x4_t __ret;
43150 __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
43151 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43152 return __ret;
43153 }
43154 #endif
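/* The plain vcvt{q}_{s16,u16}_f16 intrinsics above convert half-precision lanes
 * to integers by rounding toward zero (truncation); for example, applying
 * vcvt_s16_f16 to a vector of -1.7 values yields -1 in every lane.
 */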
43155
43156 #ifdef __LITTLE_ENDIAN__
43157 __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
43158 int16x8_t __ret;
43159 __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
43160 return __ret;
43161 }
43162 #else
43163 __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
43164 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43165 int16x8_t __ret;
43166 __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
43167 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43168 return __ret;
43169 }
43170 #endif
43171
43172 #ifdef __LITTLE_ENDIAN__
43173 __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
43174 int16x4_t __ret;
43175 __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
43176 return __ret;
43177 }
43178 #else
43179 __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
43180 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43181 int16x4_t __ret;
43182 __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
43183 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43184 return __ret;
43185 }
43186 #endif
43187
43188 #ifdef __LITTLE_ENDIAN__
43189 __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
43190 uint16x8_t __ret;
43191 __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
43192 return __ret;
43193 }
43194 #else
43195 __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
43196 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43197 uint16x8_t __ret;
43198 __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
43199 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43200 return __ret;
43201 }
43202 #endif
43203
43204 #ifdef __LITTLE_ENDIAN__
43205 __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
43206 uint16x4_t __ret;
43207 __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
43208 return __ret;
43209 }
43210 #else
43211 __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
43212 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43213 uint16x4_t __ret;
43214 __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
43215 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43216 return __ret;
43217 }
43218 #endif
43219
43220 #ifdef __LITTLE_ENDIAN__
43221 __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
43222 int16x8_t __ret;
43223 __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
43224 return __ret;
43225 }
43226 #else
43227 __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
43228 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43229 int16x8_t __ret;
43230 __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
43231 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43232 return __ret;
43233 }
43234 #endif
43235
43236 #ifdef __LITTLE_ENDIAN__
43237 __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
43238 int16x4_t __ret;
43239 __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
43240 return __ret;
43241 }
43242 #else
43243 __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
43244 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43245 int16x4_t __ret;
43246 __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
43247 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43248 return __ret;
43249 }
43250 #endif
43251
43252 #ifdef __LITTLE_ENDIAN__
43253 __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
43254 uint16x8_t __ret;
43255 __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
43256 return __ret;
43257 }
43258 #else
43259 __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
43260 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43261 uint16x8_t __ret;
43262 __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
43263 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43264 return __ret;
43265 }
43266 #endif
43267
43268 #ifdef __LITTLE_ENDIAN__
43269 __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
43270 uint16x4_t __ret;
43271 __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
43272 return __ret;
43273 }
43274 #else
43275 __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
43276 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43277 uint16x4_t __ret;
43278 __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
43279 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43280 return __ret;
43281 }
43282 #endif
43283
43284 #ifdef __LITTLE_ENDIAN__
43285 __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
43286 int16x8_t __ret;
43287 __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
43288 return __ret;
43289 }
43290 #else
43291 __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
43292 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43293 int16x8_t __ret;
43294 __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
43295 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43296 return __ret;
43297 }
43298 #endif
43299
43300 #ifdef __LITTLE_ENDIAN__
43301 __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
43302 int16x4_t __ret;
43303 __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
43304 return __ret;
43305 }
43306 #else
43307 __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
43308 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43309 int16x4_t __ret;
43310 __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
43311 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43312 return __ret;
43313 }
43314 #endif
43315
43316 #ifdef __LITTLE_ENDIAN__
43317 __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
43318 uint16x8_t __ret;
43319 __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
43320 return __ret;
43321 }
43322 #else
43323 __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
43324 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43325 uint16x8_t __ret;
43326 __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
43327 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43328 return __ret;
43329 }
43330 #endif
43331
43332 #ifdef __LITTLE_ENDIAN__
43333 __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
43334 uint16x4_t __ret;
43335 __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
43336 return __ret;
43337 }
43338 #else
43339 __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
43340 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43341 uint16x4_t __ret;
43342 __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
43343 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43344 return __ret;
43345 }
43346 #endif
43347
43348 #ifdef __LITTLE_ENDIAN__
43349 __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
43350 int16x8_t __ret;
43351 __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
43352 return __ret;
43353 }
43354 #else
43355 __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
43356 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43357 int16x8_t __ret;
43358 __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
43359 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43360 return __ret;
43361 }
43362 #endif
43363
43364 #ifdef __LITTLE_ENDIAN__
43365 __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
43366 int16x4_t __ret;
43367 __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
43368 return __ret;
43369 }
43370 #else
43371 __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
43372 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43373 int16x4_t __ret;
43374 __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
43375 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43376 return __ret;
43377 }
43378 #endif
43379
43380 #ifdef __LITTLE_ENDIAN__
43381 __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
43382 uint16x8_t __ret;
43383 __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
43384 return __ret;
43385 }
43386 #else
43387 __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
43388 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43389 uint16x8_t __ret;
43390 __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
43391 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43392 return __ret;
43393 }
43394 #endif
43395
43396 #ifdef __LITTLE_ENDIAN__
43397 __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
43398 uint16x4_t __ret;
43399 __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
43400 return __ret;
43401 }
43402 #else
43403 __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
43404 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43405 uint16x4_t __ret;
43406 __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
43407 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43408 return __ret;
43409 }
43410 #endif
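/* The vcvt{a,m,n,p}* intrinsics above convert half-precision lanes to integers
 * with an explicit rounding mode: A = to nearest, ties away from zero;
 * M = toward minus infinity; N = to nearest, ties to even; P = toward plus
 * infinity. Illustrative example (placeholder names):
 *
 *   float16x4_t v = vdup_n_f16(2.5);
 *   vcvta_s16_f16(v);   // 3 in every lane
 *   vcvtm_s16_f16(v);   // 2
 *   vcvtn_s16_f16(v);   // 2 (ties to even)
 *   vcvtp_s16_f16(v);   // 3
 */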
43411
43412 #ifdef __LITTLE_ENDIAN__
43413 #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
43414 float16x4_t __s0 = __p0; \
43415 float16_t __ret; \
43416 __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__s0, __p1); \
43417 __ret; \
43418 })
43419 #else
43420 #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
43421 float16x4_t __s0 = __p0; \
43422 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
43423 float16_t __ret; \
43424 __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__rev0, __p1); \
43425 __ret; \
43426 })
43427 #endif
43428
43429 #ifdef __LITTLE_ENDIAN__
43430 #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
43431 float16x8_t __s0 = __p0; \
43432 float16_t __ret; \
43433 __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__s0, __p1); \
43434 __ret; \
43435 })
43436 #else
43437 #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
43438 float16x8_t __s0 = __p0; \
43439 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
43440 float16_t __ret; \
43441 __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__rev0, __p1); \
43442 __ret; \
43443 })
43444 #endif
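/* vduph_lane_f16 / vduph_laneq_f16 above return a single half-precision scalar
 * taken from lane __p1 of a 64-bit or 128-bit vector; the lane index must be a
 * constant in range for the vector width (0..3 or 0..7, respectively).
 */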
43445
43446 #ifdef __LITTLE_ENDIAN__
43447 #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
43448 float16x8_t __s0 = __p0; \
43449 float16x8_t __s1 = __p1; \
43450 float16x8_t __ret; \
43451 __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
43452 __ret; \
43453 })
43454 #else
43455 #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
43456 float16x8_t __s0 = __p0; \
43457 float16x8_t __s1 = __p1; \
43458 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
43459 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
43460 float16x8_t __ret; \
43461 __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
43462 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
43463 __ret; \
43464 })
43465 #endif
43466
43467 #ifdef __LITTLE_ENDIAN__
43468 #define vext_f16(__p0, __p1, __p2) __extension__ ({ \
43469 float16x4_t __s0 = __p0; \
43470 float16x4_t __s1 = __p1; \
43471 float16x4_t __ret; \
43472 __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
43473 __ret; \
43474 })
43475 #else
43476 #define vext_f16(__p0, __p1, __p2) __extension__ ({ \
43477 float16x4_t __s0 = __p0; \
43478 float16x4_t __s1 = __p1; \
43479 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
43480 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
43481 float16x4_t __ret; \
43482 __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
43483 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
43484 __ret; \
43485 })
43486 #endif
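/* The vext{q}_f16 macros above extract a vector from the concatenation of the
 * two operands, starting __p2 lanes into the first operand. Illustrative
 * example (placeholder names): with a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3},
 * vext_f16(a, b, 1) yields {a1, a2, a3, b0}.
 */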
43487
43488 #ifdef __LITTLE_ENDIAN__
43489 __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
43490 float16x8_t __ret;
43491 __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
43492 return __ret;
43493 }
43494 #else
43495 __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
43496 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43497 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
43498 float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
43499 float16x8_t __ret;
43500 __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
43501 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43502 return __ret;
43503 }
43504 __ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
43505 float16x8_t __ret;
43506 __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
43507 return __ret;
43508 }
43509 #endif
43510
43511 #ifdef __LITTLE_ENDIAN__
43512 __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
43513 float16x4_t __ret;
43514 __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
43515 return __ret;
43516 }
43517 #else
43518 __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
43519 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43520 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43521 float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
43522 float16x4_t __ret;
43523 __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
43524 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43525 return __ret;
43526 }
43527 __ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
43528 float16x4_t __ret;
43529 __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
43530 return __ret;
43531 }
43532 #endif
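/* vfmaq_f16 / vfma_f16 above compute a fused multiply-add, __p0 + __p1 * __p2,
 * with a single rounding of the final result. The __noswap_* helpers are called
 * by other big-endian wrappers whose operands have already been lane-reversed.
 * Illustrative example (placeholder names):
 *
 *   float16x4_t acc = vfma_f16(acc0, x, y);   // acc0 + x * y, lane-wise
 */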
43533
43534 #ifdef __LITTLE_ENDIAN__
43535 __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
43536 float16x8_t __ret;
43537 __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
43538 return __ret;
43539 }
43540 #else
43541 __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
43542 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43543 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
43544 float16x8_t __ret;
43545 __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
43546 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43547 return __ret;
43548 }
43549 #endif
43550
43551 #ifdef __LITTLE_ENDIAN__
43552 __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
43553 float16x4_t __ret;
43554 __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43555 return __ret;
43556 }
43557 #else
43558 __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
43559 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43560 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43561 float16x4_t __ret;
43562 __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43563 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43564 return __ret;
43565 }
43566 #endif
43567
43568 #ifdef __LITTLE_ENDIAN__
43569 __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
43570 float16x8_t __ret;
43571 __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
43572 return __ret;
43573 }
43574 #else
43575 __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
43576 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43577 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
43578 float16x8_t __ret;
43579 __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
43580 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43581 return __ret;
43582 }
43583 #endif
43584
43585 #ifdef __LITTLE_ENDIAN__
43586 __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
43587 float16x4_t __ret;
43588 __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43589 return __ret;
43590 }
43591 #else
43592 __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
43593 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43594 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43595 float16x4_t __ret;
43596 __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43597 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43598 return __ret;
43599 }
43600 #endif
43601
43602 #ifdef __LITTLE_ENDIAN__
43603 __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
43604 float16x8_t __ret;
43605 __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
43606 return __ret;
43607 }
43608 #else
43609 __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
43610 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43611 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
43612 float16x8_t __ret;
43613 __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
43614 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43615 return __ret;
43616 }
43617 #endif
43618
43619 #ifdef __LITTLE_ENDIAN__
43620 __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
43621 float16x4_t __ret;
43622 __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43623 return __ret;
43624 }
43625 #else
43626 __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
43627 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43628 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43629 float16x4_t __ret;
43630 __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43631 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43632 return __ret;
43633 }
43634 #endif
43635
43636 #ifdef __LITTLE_ENDIAN__
43637 __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
43638 float16x8_t __ret;
43639 __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
43640 return __ret;
43641 }
43642 #else
43643 __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
43644 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43645 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
43646 float16x8_t __ret;
43647 __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
43648 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43649 return __ret;
43650 }
43651 #endif
43652
43653 #ifdef __LITTLE_ENDIAN__
43654 __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
43655 float16x4_t __ret;
43656 __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43657 return __ret;
43658 }
43659 #else
43660 __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
43661 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43662 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43663 float16x4_t __ret;
43664 __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43665 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43666 return __ret;
43667 }
43668 #endif
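/* vmax/vmin above select the lane-wise maximum/minimum; the vmaxnm/vminnm
 * variants follow the IEEE 754-2008 maxNum/minNum rules, returning the numeric
 * operand when exactly one input lane is a quiet NaN (plain vmax/vmin propagate
 * the NaN instead).
 */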
43669
43670 #ifdef __LITTLE_ENDIAN__
43671 __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
43672 float16x8_t __ret;
43673 __ret = __p0 * __p1;
43674 return __ret;
43675 }
43676 #else
43677 __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
43678 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43679 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
43680 float16x8_t __ret;
43681 __ret = __rev0 * __rev1;
43682 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43683 return __ret;
43684 }
43685 #endif
43686
43687 #ifdef __LITTLE_ENDIAN__
43688 __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
43689 float16x4_t __ret;
43690 __ret = __p0 * __p1;
43691 return __ret;
43692 }
43693 #else
43694 __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
43695 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43696 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43697 float16x4_t __ret;
43698 __ret = __rev0 * __rev1;
43699 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43700 return __ret;
43701 }
43702 #endif
43703
43704 #ifdef __LITTLE_ENDIAN__
43705 #define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
43706 float16x8_t __s0 = __p0; \
43707 float16x4_t __s1 = __p1; \
43708 float16x8_t __ret; \
43709 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
43710 __ret; \
43711 })
43712 #else
43713 #define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
43714 float16x8_t __s0 = __p0; \
43715 float16x4_t __s1 = __p1; \
43716 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
43717 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
43718 float16x8_t __ret; \
43719 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
43720 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
43721 __ret; \
43722 })
43723 #endif
43724
43725 #ifdef __LITTLE_ENDIAN__
43726 #define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
43727 float16x4_t __s0 = __p0; \
43728 float16x4_t __s1 = __p1; \
43729 float16x4_t __ret; \
43730 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
43731 __ret; \
43732 })
43733 #else
43734 #define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
43735 float16x4_t __s0 = __p0; \
43736 float16x4_t __s1 = __p1; \
43737 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
43738 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
43739 float16x4_t __ret; \
43740 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
43741 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
43742 __ret; \
43743 })
43744 #endif
43745
43746 #ifdef __LITTLE_ENDIAN__
43747 #define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
43748 float16x8_t __s0 = __p0; \
43749 float16x8_t __s1 = __p1; \
43750 float16x8_t __ret; \
43751 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
43752 __ret; \
43753 })
43754 #else
43755 #define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
43756 float16x8_t __s0 = __p0; \
43757 float16x8_t __s1 = __p1; \
43758 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
43759 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
43760 float16x8_t __ret; \
43761 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
43762 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
43763 __ret; \
43764 })
43765 #endif
43766
43767 #ifdef __LITTLE_ENDIAN__
43768 #define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
43769 float16x4_t __s0 = __p0; \
43770 float16x8_t __s1 = __p1; \
43771 float16x4_t __ret; \
43772 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
43773 __ret; \
43774 })
43775 #else
43776 #define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
43777 float16x4_t __s0 = __p0; \
43778 float16x8_t __s1 = __p1; \
43779 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
43780 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
43781 float16x4_t __ret; \
43782 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
43783 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
43784 __ret; \
43785 })
43786 #endif
43787
43788 #ifdef __LITTLE_ENDIAN__
43789 __ai float16x8_t vmulq_n_f16(float16x8_t __p0, float16_t __p1) {
43790 float16x8_t __ret;
43791 __ret = __p0 * (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
43792 return __ret;
43793 }
43794 #else
43795 __ai float16x8_t vmulq_n_f16(float16x8_t __p0, float16_t __p1) {
43796 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43797 float16x8_t __ret;
43798 __ret = __rev0 * (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
43799 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43800 return __ret;
43801 }
43802 #endif
43803
43804 #ifdef __LITTLE_ENDIAN__
43805 __ai float16x4_t vmul_n_f16(float16x4_t __p0, float16_t __p1) {
43806 float16x4_t __ret;
43807 __ret = __p0 * (float16x4_t) {__p1, __p1, __p1, __p1};
43808 return __ret;
43809 }
43810 #else
43811 __ai float16x4_t vmul_n_f16(float16x4_t __p0, float16_t __p1) {
43812 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43813 float16x4_t __ret;
43814 __ret = __rev0 * (float16x4_t) {__p1, __p1, __p1, __p1};
43815 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43816 return __ret;
43817 }
43818 #endif
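/* The multiply family above: vmul{q}_f16 is a plain lane-wise product; the
 * _lane/_laneq forms multiply every lane of the first operand by one
 * constant-indexed lane of the second (a 64-bit or 128-bit vector,
 * respectively); the _n forms broadcast a scalar. Illustrative example
 * (placeholder names, v is float16x8_t and coeffs is float16x4_t):
 *
 *   float16x8_t r = vmulq_lane_f16(v, coeffs, 2);   // v[i] * coeffs[2] per lane
 */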
43819
43820 #ifdef __LITTLE_ENDIAN__
43821 __ai float16x8_t vnegq_f16(float16x8_t __p0) {
43822 float16x8_t __ret;
43823 __ret = -__p0;
43824 return __ret;
43825 }
43826 #else
43827 __ai float16x8_t vnegq_f16(float16x8_t __p0) {
43828 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43829 float16x8_t __ret;
43830 __ret = -__rev0;
43831 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43832 return __ret;
43833 }
43834 #endif
43835
43836 #ifdef __LITTLE_ENDIAN__
43837 __ai float16x4_t vneg_f16(float16x4_t __p0) {
43838 float16x4_t __ret;
43839 __ret = -__p0;
43840 return __ret;
43841 }
43842 #else
43843 __ai float16x4_t vneg_f16(float16x4_t __p0) {
43844 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43845 float16x4_t __ret;
43846 __ret = -__rev0;
43847 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43848 return __ret;
43849 }
43850 #endif
43851
43852 #ifdef __LITTLE_ENDIAN__
43853 __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
43854 float16x4_t __ret;
43855 __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43856 return __ret;
43857 }
43858 #else
43859 __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
43860 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43861 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43862 float16x4_t __ret;
43863 __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43864 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43865 return __ret;
43866 }
43867 #endif
43868
43869 #ifdef __LITTLE_ENDIAN__
43870 __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
43871 float16x4_t __ret;
43872 __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43873 return __ret;
43874 }
43875 #else
43876 __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
43877 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43878 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43879 float16x4_t __ret;
43880 __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43881 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43882 return __ret;
43883 }
43884 #endif
43885
43886 #ifdef __LITTLE_ENDIAN__
43887 __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
43888 float16x4_t __ret;
43889 __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43890 return __ret;
43891 }
43892 #else
43893 __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
43894 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43895 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43896 float16x4_t __ret;
43897 __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43898 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43899 return __ret;
43900 }
43901 #endif
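/* vpadd/vpmax/vpmin above are pairwise operations over the concatenation of the
 * two 64-bit operands: result lanes 0-1 come from adjacent pairs of __p0 and
 * lanes 2-3 from adjacent pairs of __p1, e.g.
 * vpadd_f16(a, b) = {a0+a1, a2+a3, b0+b1, b2+b3}.
 */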
43902
43903 #ifdef __LITTLE_ENDIAN__
43904 __ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
43905 float16x8_t __ret;
43906 __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
43907 return __ret;
43908 }
43909 #else
43910 __ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
43911 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43912 float16x8_t __ret;
43913 __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
43914 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43915 return __ret;
43916 }
43917 #endif
43918
43919 #ifdef __LITTLE_ENDIAN__
43920 __ai float16x4_t vrecpe_f16(float16x4_t __p0) {
43921 float16x4_t __ret;
43922 __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
43923 return __ret;
43924 }
43925 #else
43926 __ai float16x4_t vrecpe_f16(float16x4_t __p0) {
43927 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43928 float16x4_t __ret;
43929 __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
43930 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43931 return __ret;
43932 }
43933 #endif
43934
43935 #ifdef __LITTLE_ENDIAN__
43936 __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
43937 float16x8_t __ret;
43938 __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
43939 return __ret;
43940 }
43941 #else
43942 __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
43943 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43944 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
43945 float16x8_t __ret;
43946 __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
43947 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43948 return __ret;
43949 }
43950 #endif
43951
43952 #ifdef __LITTLE_ENDIAN__
43953 __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
43954 float16x4_t __ret;
43955 __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
43956 return __ret;
43957 }
43958 #else
43959 __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
43960 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43961 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
43962 float16x4_t __ret;
43963 __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
43964 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43965 return __ret;
43966 }
43967 #endif
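/* vrecpe gives a low-precision (roughly 8-bit) per-lane estimate of 1/x, and
 * vrecps(a, b) computes 2 - a*b, the Newton-Raphson correction factor.
 * Illustrative refinement sketch (placeholder names):
 *
 *   float16x4_t x = vrecpe_f16(d);        // initial estimate of 1/d
 *   x = vmul_f16(x, vrecps_f16(d, x));    // one Newton-Raphson step
 */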
43968
43969 #ifdef __LITTLE_ENDIAN__
43970 __ai float16x8_t vrev64q_f16(float16x8_t __p0) {
43971 float16x8_t __ret;
43972 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
43973 return __ret;
43974 }
43975 #else
43976 __ai float16x8_t vrev64q_f16(float16x8_t __p0) {
43977 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
43978 float16x8_t __ret;
43979 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
43980 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
43981 return __ret;
43982 }
43983 #endif
43984
43985 #ifdef __LITTLE_ENDIAN__
43986 __ai float16x4_t vrev64_f16(float16x4_t __p0) {
43987 float16x4_t __ret;
43988 __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43989 return __ret;
43990 }
43991 #else
43992 __ai float16x4_t vrev64_f16(float16x4_t __p0) {
43993 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
43994 float16x4_t __ret;
43995 __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
43996 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
43997 return __ret;
43998 }
43999 #endif
44000
44001 #ifdef __LITTLE_ENDIAN__
44002 __ai float16x8_t vrndq_f16(float16x8_t __p0) {
44003 float16x8_t __ret;
44004 __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
44005 return __ret;
44006 }
44007 #else
44008 __ai float16x8_t vrndq_f16(float16x8_t __p0) {
44009 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44010 float16x8_t __ret;
44011 __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
44012 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44013 return __ret;
44014 }
44015 #endif
44016
44017 #ifdef __LITTLE_ENDIAN__
44018 __ai float16x4_t vrnd_f16(float16x4_t __p0) {
44019 float16x4_t __ret;
44020 __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
44021 return __ret;
44022 }
44023 #else
44024 __ai float16x4_t vrnd_f16(float16x4_t __p0) {
44025 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44026 float16x4_t __ret;
44027 __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
44028 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44029 return __ret;
44030 }
44031 #endif
44032
44033 #ifdef __LITTLE_ENDIAN__
44034 __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
44035 float16x8_t __ret;
44036 __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
44037 return __ret;
44038 }
44039 #else
44040 __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
44041 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44042 float16x8_t __ret;
44043 __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
44044 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44045 return __ret;
44046 }
44047 #endif
44048
44049 #ifdef __LITTLE_ENDIAN__
44050 __ai float16x4_t vrnda_f16(float16x4_t __p0) {
44051 float16x4_t __ret;
44052 __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
44053 return __ret;
44054 }
44055 #else
44056 __ai float16x4_t vrnda_f16(float16x4_t __p0) {
44057 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44058 float16x4_t __ret;
44059 __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
44060 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44061 return __ret;
44062 }
44063 #endif
44064
44065 #ifdef __LITTLE_ENDIAN__
44066 __ai float16x8_t vrndiq_f16(float16x8_t __p0) {
44067 float16x8_t __ret;
44068 __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
44069 return __ret;
44070 }
44071 #else
44072 __ai float16x8_t vrndiq_f16(float16x8_t __p0) {
44073 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44074 float16x8_t __ret;
44075 __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
44076 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44077 return __ret;
44078 }
44079 #endif
44080
44081 #ifdef __LITTLE_ENDIAN__
44082 __ai float16x4_t vrndi_f16(float16x4_t __p0) {
44083 float16x4_t __ret;
44084 __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
44085 return __ret;
44086 }
44087 #else
44088 __ai float16x4_t vrndi_f16(float16x4_t __p0) {
44089 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44090 float16x4_t __ret;
44091 __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
44092 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44093 return __ret;
44094 }
44095 #endif
44096
44097 #ifdef __LITTLE_ENDIAN__
44098 __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
44099 float16x8_t __ret;
44100 __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
44101 return __ret;
44102 }
44103 #else
44104 __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
44105 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44106 float16x8_t __ret;
44107 __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
44108 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44109 return __ret;
44110 }
44111 #endif
44112
44113 #ifdef __LITTLE_ENDIAN__
44114 __ai float16x4_t vrndm_f16(float16x4_t __p0) {
44115 float16x4_t __ret;
44116 __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
44117 return __ret;
44118 }
44119 #else
44120 __ai float16x4_t vrndm_f16(float16x4_t __p0) {
44121 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44122 float16x4_t __ret;
44123 __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
44124 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44125 return __ret;
44126 }
44127 #endif
44128
44129 #ifdef __LITTLE_ENDIAN__
44130 __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
44131 float16x8_t __ret;
44132 __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
44133 return __ret;
44134 }
44135 #else
44136 __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
44137 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44138 float16x8_t __ret;
44139 __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
44140 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44141 return __ret;
44142 }
44143 #endif
44144
44145 #ifdef __LITTLE_ENDIAN__
44146 __ai float16x4_t vrndn_f16(float16x4_t __p0) {
44147 float16x4_t __ret;
44148 __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
44149 return __ret;
44150 }
44151 #else
44152 __ai float16x4_t vrndn_f16(float16x4_t __p0) {
44153 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44154 float16x4_t __ret;
44155 __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
44156 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44157 return __ret;
44158 }
44159 #endif
44160
44161 #ifdef __LITTLE_ENDIAN__
44162 __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
44163 float16x8_t __ret;
44164 __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
44165 return __ret;
44166 }
44167 #else
44168 __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
44169 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44170 float16x8_t __ret;
44171 __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
44172 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44173 return __ret;
44174 }
44175 #endif
44176
44177 #ifdef __LITTLE_ENDIAN__
44178 __ai float16x4_t vrndp_f16(float16x4_t __p0) {
44179 float16x4_t __ret;
44180 __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
44181 return __ret;
44182 }
44183 #else
44184 __ai float16x4_t vrndp_f16(float16x4_t __p0) {
44185 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44186 float16x4_t __ret;
44187 __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
44188 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44189 return __ret;
44190 }
44191 #endif
44192
44193 #ifdef __LITTLE_ENDIAN__
44194 __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
44195 float16x8_t __ret;
44196 __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
44197 return __ret;
44198 }
44199 #else
44200 __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
44201 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44202 float16x8_t __ret;
44203 __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
44204 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44205 return __ret;
44206 }
44207 #endif
44208
44209 #ifdef __LITTLE_ENDIAN__
44210 __ai float16x4_t vrndx_f16(float16x4_t __p0) {
44211 float16x4_t __ret;
44212 __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
44213 return __ret;
44214 }
44215 #else
44216 __ai float16x4_t vrndx_f16(float16x4_t __p0) {
44217 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44218 float16x4_t __ret;
44219 __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
44220 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44221 return __ret;
44222 }
44223 #endif
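/* The vrnd* intrinsics above round each lane to an integral value in half
 * precision: vrnd = toward zero, vrnda = to nearest with ties away from zero,
 * vrndi = using the current FPCR rounding mode, vrndm = toward minus infinity,
 * vrndn = to nearest with ties to even, vrndp = toward plus infinity, and
 * vrndx = current mode while raising the Inexact exception when the value
 * changes.
 */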
44224
44225 #ifdef __LITTLE_ENDIAN__
44226 __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
44227 float16x8_t __ret;
44228 __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
44229 return __ret;
44230 }
44231 #else
44232 __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
44233 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44234 float16x8_t __ret;
44235 __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
44236 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44237 return __ret;
44238 }
44239 #endif
44240
44241 #ifdef __LITTLE_ENDIAN__
44242 __ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
44243 float16x4_t __ret;
44244 __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
44245 return __ret;
44246 }
44247 #else
44248 __ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
44249 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44250 float16x4_t __ret;
44251 __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
44252 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44253 return __ret;
44254 }
44255 #endif
44256
44257 #ifdef __LITTLE_ENDIAN__
44258 __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
44259 float16x8_t __ret;
44260 __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
44261 return __ret;
44262 }
44263 #else
44264 __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
44265 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44266 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44267 float16x8_t __ret;
44268 __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
44269 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44270 return __ret;
44271 }
44272 #endif
44273
44274 #ifdef __LITTLE_ENDIAN__
44275 __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
44276 float16x4_t __ret;
44277 __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
44278 return __ret;
44279 }
44280 #else
44281 __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
44282 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44283 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44284 float16x4_t __ret;
44285 __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
44286 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44287 return __ret;
44288 }
44289 #endif
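/* vrsqrte gives a low-precision per-lane estimate of 1/sqrt(x), and
 * vrsqrts(a, b) computes (3 - a*b)/2, the Newton-Raphson correction factor.
 * Illustrative refinement sketch (placeholder names):
 *
 *   float16x4_t x = vrsqrte_f16(d);                     // estimate of 1/sqrt(d)
 *   x = vmul_f16(x, vrsqrts_f16(vmul_f16(d, x), x));    // one refinement step
 */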
44290
44291 #ifdef __LITTLE_ENDIAN__
44292 __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
44293 float16x8_t __ret;
44294 __ret = __p0 - __p1;
44295 return __ret;
44296 }
44297 #else
44298 __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
44299 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44300 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44301 float16x8_t __ret;
44302 __ret = __rev0 - __rev1;
44303 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44304 return __ret;
44305 }
44306 #endif
44307
44308 #ifdef __LITTLE_ENDIAN__
44309 __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
44310 float16x4_t __ret;
44311 __ret = __p0 - __p1;
44312 return __ret;
44313 }
44314 #else
44315 __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
44316 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44317 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44318 float16x4_t __ret;
44319 __ret = __rev0 - __rev1;
44320 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44321 return __ret;
44322 }
44323 #endif
44324
44325 #ifdef __LITTLE_ENDIAN__
44326 __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
44327 float16x8x2_t __ret;
44328 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
44329 return __ret;
44330 }
44331 #else
44332 __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
44333 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44334 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44335 float16x8x2_t __ret;
44336 __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
44337
44338 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
44339 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
44340 return __ret;
44341 }
44342 #endif
44343
44344 #ifdef __LITTLE_ENDIAN__
44345 __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
44346 float16x4x2_t __ret;
44347 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
44348 return __ret;
44349 }
44350 #else
44351 __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
44352 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44353 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44354 float16x4x2_t __ret;
44355 __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
44356
44357 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
44358 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
44359 return __ret;
44360 }
44361 #endif
44362
44363 #ifdef __LITTLE_ENDIAN__
44364 __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
44365 float16x8_t __ret;
44366 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
44367 return __ret;
44368 }
44369 #else
44370 __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
44371 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44372 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44373 float16x8_t __ret;
44374 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
44375 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44376 return __ret;
44377 }
44378 #endif
44379
44380 #ifdef __LITTLE_ENDIAN__
44381 __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
44382 float16x4_t __ret;
44383 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
44384 return __ret;
44385 }
44386 #else
44387 __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
44388 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44389 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44390 float16x4_t __ret;
44391 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
44392 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44393 return __ret;
44394 }
44395 #endif
44396
44397 #ifdef __LITTLE_ENDIAN__
44398 __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
44399 float16x8_t __ret;
44400 __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
44401 return __ret;
44402 }
44403 #else
44404 __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
44405 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44406 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44407 float16x8_t __ret;
44408 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
44409 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44410 return __ret;
44411 }
44412 #endif
44413
44414 #ifdef __LITTLE_ENDIAN__
44415 __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
44416 float16x4_t __ret;
44417 __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
44418 return __ret;
44419 }
44420 #else
44421 __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
44422 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44423 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44424 float16x4_t __ret;
44425 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
44426 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44427 return __ret;
44428 }
44429 #endif
44430
44431 #ifdef __LITTLE_ENDIAN__
44432 __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
44433 float16x8x2_t __ret;
44434 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
44435 return __ret;
44436 }
44437 #else
44438 __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
44439 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44440 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44441 float16x8x2_t __ret;
44442 __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
44443
44444 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
44445 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
44446 return __ret;
44447 }
44448 #endif
44449
44450 #ifdef __LITTLE_ENDIAN__
44451 __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
44452 float16x4x2_t __ret;
44453 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
44454 return __ret;
44455 }
44456 #else
44457 __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
44458 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44459 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44460 float16x4x2_t __ret;
44461 __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
44462
44463 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
44464 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
44465 return __ret;
44466 }
44467 #endif
44468
44469 #ifdef __LITTLE_ENDIAN__
44470 __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
44471 float16x8_t __ret;
44472 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
44473 return __ret;
44474 }
44475 #else
44476 __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
44477 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44478 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44479 float16x8_t __ret;
44480 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
44481 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44482 return __ret;
44483 }
44484 #endif
44485
44486 #ifdef __LITTLE_ENDIAN__
44487 __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
44488 float16x4_t __ret;
44489 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
44490 return __ret;
44491 }
44492 #else
44493 __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
44494 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44495 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44496 float16x4_t __ret;
44497 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
44498 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44499 return __ret;
44500 }
44501 #endif
44502
44503 #ifdef __LITTLE_ENDIAN__
44504 __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
44505 float16x8_t __ret;
44506 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
44507 return __ret;
44508 }
44509 #else
44510 __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
44511 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44512 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44513 float16x8_t __ret;
44514 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
44515 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44516 return __ret;
44517 }
44518 #endif
44519
44520 #ifdef __LITTLE_ENDIAN__
44521 __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
44522 float16x4_t __ret;
44523 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
44524 return __ret;
44525 }
44526 #else
44527 __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
44528 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44529 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44530 float16x4_t __ret;
44531 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
44532 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44533 return __ret;
44534 }
44535 #endif
44536
44537 #ifdef __LITTLE_ENDIAN__
44538 __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
44539 float16x8x2_t __ret;
44540 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
44541 return __ret;
44542 }
44543 #else
44544 __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
44545 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44546 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44547 float16x8x2_t __ret;
44548 __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
44549
44550 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
44551 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
44552 return __ret;
44553 }
44554 #endif
44555
44556 #ifdef __LITTLE_ENDIAN__
44557 __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
44558 float16x4x2_t __ret;
44559 __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
44560 return __ret;
44561 }
44562 #else
44563 __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
44564 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44565 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44566 float16x4x2_t __ret;
44567 __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
44568
44569 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
44570 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
44571 return __ret;
44572 }
44573 #endif
44574
44575 #ifdef __LITTLE_ENDIAN__
44576 __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
44577 float16x8_t __ret;
44578 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
44579 return __ret;
44580 }
44581 #else
44582 __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
44583 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44584 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44585 float16x8_t __ret;
44586 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
44587 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44588 return __ret;
44589 }
44590 #endif
44591
44592 #ifdef __LITTLE_ENDIAN__
44593 __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
44594 float16x4_t __ret;
44595 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
44596 return __ret;
44597 }
44598 #else
44599 __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
44600 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44601 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44602 float16x4_t __ret;
44603 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
44604 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44605 return __ret;
44606 }
44607 #endif
44608
44609 #ifdef __LITTLE_ENDIAN__
44610 __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
44611 float16x8_t __ret;
44612 __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
44613 return __ret;
44614 }
44615 #else
44616 __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
44617 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44618 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44619 float16x8_t __ret;
44620 __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
44621 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44622 return __ret;
44623 }
44624 #endif
44625
44626 #ifdef __LITTLE_ENDIAN__
44627 __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
44628 float16x4_t __ret;
44629 __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
44630 return __ret;
44631 }
44632 #else
44633 __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
44634 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44635 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44636 float16x4_t __ret;
44637 __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
44638 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44639 return __ret;
44640 }
44641 #endif
44642
44643 #endif
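/* The intrinsics that follow are emitted only for AArch64 targets with
 * half-precision vector arithmetic (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC):
 * float16 compares against zero, division, lane/scalar fused multiply-add
 * and multiply-subtract, across-vector reductions, vmulx, pairwise
 * max/min, and square root. */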
44644 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
44645 #ifdef __LITTLE_ENDIAN__
44646 __ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
44647 uint16x8_t __ret;
44648 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
44649 return __ret;
44650 }
44651 #else
44652 __ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
44653 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44654 uint16x8_t __ret;
44655 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
44656 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44657 return __ret;
44658 }
44659 #endif
44660
44661 #ifdef __LITTLE_ENDIAN__
44662 __ai uint16x4_t vceqz_f16(float16x4_t __p0) {
44663 uint16x4_t __ret;
44664 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
44665 return __ret;
44666 }
44667 #else
44668 __ai uint16x4_t vceqz_f16(float16x4_t __p0) {
44669 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44670 uint16x4_t __ret;
44671 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
44672 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44673 return __ret;
44674 }
44675 #endif
44676
44677 #ifdef __LITTLE_ENDIAN__
44678 __ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
44679 uint16x8_t __ret;
44680 __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
44681 return __ret;
44682 }
44683 #else
44684 __ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
44685 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44686 uint16x8_t __ret;
44687 __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
44688 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44689 return __ret;
44690 }
44691 #endif
44692
44693 #ifdef __LITTLE_ENDIAN__
44694 __ai uint16x4_t vcgez_f16(float16x4_t __p0) {
44695 uint16x4_t __ret;
44696 __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
44697 return __ret;
44698 }
44699 #else
44700 __ai uint16x4_t vcgez_f16(float16x4_t __p0) {
44701 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44702 uint16x4_t __ret;
44703 __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
44704 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44705 return __ret;
44706 }
44707 #endif
44708
44709 #ifdef __LITTLE_ENDIAN__
44710 __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
44711 uint16x8_t __ret;
44712 __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
44713 return __ret;
44714 }
44715 #else
44716 __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
44717 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44718 uint16x8_t __ret;
44719 __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
44720 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44721 return __ret;
44722 }
44723 #endif
44724
44725 #ifdef __LITTLE_ENDIAN__
44726 __ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
44727 uint16x4_t __ret;
44728 __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
44729 return __ret;
44730 }
44731 #else
44732 __ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
44733 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44734 uint16x4_t __ret;
44735 __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
44736 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44737 return __ret;
44738 }
44739 #endif
44740
44741 #ifdef __LITTLE_ENDIAN__
44742 __ai uint16x8_t vclezq_f16(float16x8_t __p0) {
44743 uint16x8_t __ret;
44744 __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
44745 return __ret;
44746 }
44747 #else
44748 __ai uint16x8_t vclezq_f16(float16x8_t __p0) {
44749 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44750 uint16x8_t __ret;
44751 __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
44752 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44753 return __ret;
44754 }
44755 #endif
44756
44757 #ifdef __LITTLE_ENDIAN__
44758 __ai uint16x4_t vclez_f16(float16x4_t __p0) {
44759 uint16x4_t __ret;
44760 __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
44761 return __ret;
44762 }
44763 #else
44764 __ai uint16x4_t vclez_f16(float16x4_t __p0) {
44765 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44766 uint16x4_t __ret;
44767 __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
44768 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44769 return __ret;
44770 }
44771 #endif
44772
44773 #ifdef __LITTLE_ENDIAN__
44774 __ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
44775 uint16x8_t __ret;
44776 __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
44777 return __ret;
44778 }
44779 #else
44780 __ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
44781 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44782 uint16x8_t __ret;
44783 __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
44784 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44785 return __ret;
44786 }
44787 #endif
44788
44789 #ifdef __LITTLE_ENDIAN__
44790 __ai uint16x4_t vcltz_f16(float16x4_t __p0) {
44791 uint16x4_t __ret;
44792 __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
44793 return __ret;
44794 }
44795 #else
44796 __ai uint16x4_t vcltz_f16(float16x4_t __p0) {
44797 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44798 uint16x4_t __ret;
44799 __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
44800 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44801 return __ret;
44802 }
44803 #endif
44804
44805 #ifdef __LITTLE_ENDIAN__
44806 __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
44807 float16x8_t __ret;
44808 __ret = __p0 / __p1;
44809 return __ret;
44810 }
44811 #else
44812 __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
44813 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44814 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44815 float16x8_t __ret;
44816 __ret = __rev0 / __rev1;
44817 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44818 return __ret;
44819 }
44820 #endif
44821
44822 #ifdef __LITTLE_ENDIAN__
44823 __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
44824 float16x4_t __ret;
44825 __ret = __p0 / __p1;
44826 return __ret;
44827 }
44828 #else
44829 __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
44830 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44831 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44832 float16x4_t __ret;
44833 __ret = __rev0 / __rev1;
44834 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44835 return __ret;
44836 }
44837 #endif
44838
44839 #ifdef __LITTLE_ENDIAN__
44840 #define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44841 float16x8_t __s0 = __p0; \
44842 float16x8_t __s1 = __p1; \
44843 float16x4_t __s2 = __p2; \
44844 float16x8_t __ret; \
44845 __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
44846 __ret; \
44847 })
44848 #else
44849 #define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44850 float16x8_t __s0 = __p0; \
44851 float16x8_t __s1 = __p1; \
44852 float16x4_t __s2 = __p2; \
44853 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
44854 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
44855 float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
44856 float16x8_t __ret; \
44857 __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
44858 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
44859 __ret; \
44860 })
44861 #define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44862 float16x8_t __s0 = __p0; \
44863 float16x8_t __s1 = __p1; \
44864 float16x4_t __s2 = __p2; \
44865 float16x8_t __ret; \
44866 __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
44867 __ret; \
44868 })
44869 #endif
44870
44871 #ifdef __LITTLE_ENDIAN__
44872 #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44873 float16x4_t __s0 = __p0; \
44874 float16x4_t __s1 = __p1; \
44875 float16x4_t __s2 = __p2; \
44876 float16x4_t __ret; \
44877 __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
44878 __ret; \
44879 })
44880 #else
44881 #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44882 float16x4_t __s0 = __p0; \
44883 float16x4_t __s1 = __p1; \
44884 float16x4_t __s2 = __p2; \
44885 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
44886 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
44887 float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
44888 float16x4_t __ret; \
44889 __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
44890 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
44891 __ret; \
44892 })
44893 #define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44894 float16x4_t __s0 = __p0; \
44895 float16x4_t __s1 = __p1; \
44896 float16x4_t __s2 = __p2; \
44897 float16x4_t __ret; \
44898 __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
44899 __ret; \
44900 })
44901 #endif
44902
44903 #ifdef __LITTLE_ENDIAN__
44904 #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44905 float16x8_t __s0 = __p0; \
44906 float16x8_t __s1 = __p1; \
44907 float16x8_t __s2 = __p2; \
44908 float16x8_t __ret; \
44909 __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
44910 __ret; \
44911 })
44912 #else
44913 #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44914 float16x8_t __s0 = __p0; \
44915 float16x8_t __s1 = __p1; \
44916 float16x8_t __s2 = __p2; \
44917 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
44918 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
44919 float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
44920 float16x8_t __ret; \
44921 __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
44922 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
44923 __ret; \
44924 })
44925 #define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44926 float16x8_t __s0 = __p0; \
44927 float16x8_t __s1 = __p1; \
44928 float16x8_t __s2 = __p2; \
44929 float16x8_t __ret; \
44930 __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
44931 __ret; \
44932 })
44933 #endif
44934
44935 #ifdef __LITTLE_ENDIAN__
44936 #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44937 float16x4_t __s0 = __p0; \
44938 float16x4_t __s1 = __p1; \
44939 float16x8_t __s2 = __p2; \
44940 float16x4_t __ret; \
44941 __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
44942 __ret; \
44943 })
44944 #else
44945 #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44946 float16x4_t __s0 = __p0; \
44947 float16x4_t __s1 = __p1; \
44948 float16x8_t __s2 = __p2; \
44949 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
44950 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
44951 float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
44952 float16x4_t __ret; \
44953 __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
44954 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
44955 __ret; \
44956 })
44957 #define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
44958 float16x4_t __s0 = __p0; \
44959 float16x4_t __s1 = __p1; \
44960 float16x8_t __s2 = __p2; \
44961 float16x4_t __ret; \
44962 __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
44963 __ret; \
44964 })
44965 #endif
44966
44967 #ifdef __LITTLE_ENDIAN__
44968 __ai float16x8_t vfmaq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) {
44969 float16x8_t __ret;
44970 __ret = vfmaq_f16(__p0, __p1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2});
44971 return __ret;
44972 }
44973 #else
44974 __ai float16x8_t vfmaq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) {
44975 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
44976 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
44977 float16x8_t __ret;
44978 __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2});
44979 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
44980 return __ret;
44981 }
44982 #endif
44983
44984 #ifdef __LITTLE_ENDIAN__
44985 __ai float16x4_t vfma_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) {
44986 float16x4_t __ret;
44987 __ret = vfma_f16(__p0, __p1, (float16x4_t) {__p2, __p2, __p2, __p2});
44988 return __ret;
44989 }
44990 #else
44991 __ai float16x4_t vfma_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) {
44992 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
44993 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
44994 float16x4_t __ret;
44995 __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__p2, __p2, __p2, __p2});
44996 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
44997 return __ret;
44998 }
44999 #endif
45000
45001 #ifdef __LITTLE_ENDIAN__
45002 __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
45003 float16x8_t __ret;
45004 __ret = vfmaq_f16(__p0, -__p1, __p2);
45005 return __ret;
45006 }
45007 #else
45008 __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
45009 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45010 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
45011 float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
45012 float16x8_t __ret;
45013 __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
45014 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45015 return __ret;
45016 }
45017 #endif
45018
45019 #ifdef __LITTLE_ENDIAN__
45020 __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
45021 float16x4_t __ret;
45022 __ret = vfma_f16(__p0, -__p1, __p2);
45023 return __ret;
45024 }
45025 #else
45026 __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
45027 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45028 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45029 float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
45030 float16x4_t __ret;
45031 __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
45032 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45033 return __ret;
45034 }
45035 #endif
45036
45037 #ifdef __LITTLE_ENDIAN__
45038 #define vfmsq_lane_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
45039 float16x8_t __s0_8 = __p0_8; \
45040 float16x8_t __s1_8 = __p1_8; \
45041 float16x4_t __s2_8 = __p2_8; \
45042 float16x8_t __ret_8; \
45043 __ret_8 = vfmaq_lane_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \
45044 __ret_8; \
45045 })
45046 #else
45047 #define vfmsq_lane_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
45048 float16x8_t __s0_9 = __p0_9; \
45049 float16x8_t __s1_9 = __p1_9; \
45050 float16x4_t __s2_9 = __p2_9; \
45051 float16x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
45052 float16x8_t __rev1_9; __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \
45053 float16x4_t __rev2_9; __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 3, 2, 1, 0); \
45054 float16x8_t __ret_9; \
45055 __ret_9 = __noswap_vfmaq_lane_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \
45056 __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \
45057 __ret_9; \
45058 })
45059 #endif
45060
45061 #ifdef __LITTLE_ENDIAN__
45062 #define vfms_lane_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
45063 float16x4_t __s0_10 = __p0_10; \
45064 float16x4_t __s1_10 = __p1_10; \
45065 float16x4_t __s2_10 = __p2_10; \
45066 float16x4_t __ret_10; \
45067 __ret_10 = vfma_lane_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \
45068 __ret_10; \
45069 })
45070 #else
45071 #define vfms_lane_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
45072 float16x4_t __s0_11 = __p0_11; \
45073 float16x4_t __s1_11 = __p1_11; \
45074 float16x4_t __s2_11 = __p2_11; \
45075 float16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \
45076 float16x4_t __rev1_11; __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \
45077 float16x4_t __rev2_11; __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 3, 2, 1, 0); \
45078 float16x4_t __ret_11; \
45079 __ret_11 = __noswap_vfma_lane_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \
45080 __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
45081 __ret_11; \
45082 })
45083 #endif
45084
45085 #ifdef __LITTLE_ENDIAN__
45086 #define vfmsq_laneq_f16(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
45087 float16x8_t __s0_12 = __p0_12; \
45088 float16x8_t __s1_12 = __p1_12; \
45089 float16x8_t __s2_12 = __p2_12; \
45090 float16x8_t __ret_12; \
45091 __ret_12 = vfmaq_laneq_f16(__s0_12, -__s1_12, __s2_12, __p3_12); \
45092 __ret_12; \
45093 })
45094 #else
45095 #define vfmsq_laneq_f16(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
45096 float16x8_t __s0_13 = __p0_13; \
45097 float16x8_t __s1_13 = __p1_13; \
45098 float16x8_t __s2_13 = __p2_13; \
45099 float16x8_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 7, 6, 5, 4, 3, 2, 1, 0); \
45100 float16x8_t __rev1_13; __rev1_13 = __builtin_shufflevector(__s1_13, __s1_13, 7, 6, 5, 4, 3, 2, 1, 0); \
45101 float16x8_t __rev2_13; __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
45102 float16x8_t __ret_13; \
45103 __ret_13 = __noswap_vfmaq_laneq_f16(__rev0_13, -__rev1_13, __rev2_13, __p3_13); \
45104 __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 7, 6, 5, 4, 3, 2, 1, 0); \
45105 __ret_13; \
45106 })
45107 #endif
45108
45109 #ifdef __LITTLE_ENDIAN__
45110 #define vfms_laneq_f16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
45111 float16x4_t __s0_14 = __p0_14; \
45112 float16x4_t __s1_14 = __p1_14; \
45113 float16x8_t __s2_14 = __p2_14; \
45114 float16x4_t __ret_14; \
45115 __ret_14 = vfma_laneq_f16(__s0_14, -__s1_14, __s2_14, __p3_14); \
45116 __ret_14; \
45117 })
45118 #else
45119 #define vfms_laneq_f16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
45120 float16x4_t __s0_15 = __p0_15; \
45121 float16x4_t __s1_15 = __p1_15; \
45122 float16x8_t __s2_15 = __p2_15; \
45123 float16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
45124 float16x4_t __rev1_15; __rev1_15 = __builtin_shufflevector(__s1_15, __s1_15, 3, 2, 1, 0); \
45125 float16x8_t __rev2_15; __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 7, 6, 5, 4, 3, 2, 1, 0); \
45126 float16x4_t __ret_15; \
45127 __ret_15 = __noswap_vfma_laneq_f16(__rev0_15, -__rev1_15, __rev2_15, __p3_15); \
45128 __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 3, 2, 1, 0); \
45129 __ret_15; \
45130 })
45131 #endif
45132
45133 #ifdef __LITTLE_ENDIAN__
45134 __ai float16x8_t vfmsq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) {
45135 float16x8_t __ret;
45136 __ret = vfmaq_f16(__p0, -__p1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2});
45137 return __ret;
45138 }
45139 #else
45140 __ai float16x8_t vfmsq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) {
45141 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45142 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
45143 float16x8_t __ret;
45144 __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2});
45145 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45146 return __ret;
45147 }
45148 #endif
45149
45150 #ifdef __LITTLE_ENDIAN__
45151 __ai float16x4_t vfms_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) {
45152 float16x4_t __ret;
45153 __ret = vfma_f16(__p0, -__p1, (float16x4_t) {__p2, __p2, __p2, __p2});
45154 return __ret;
45155 }
45156 #else
45157 __ai float16x4_t vfms_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) {
45158 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45159 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45160 float16x4_t __ret;
45161 __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__p2, __p2, __p2, __p2});
45162 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45163 return __ret;
45164 }
45165 #endif
45166
45167 #ifdef __LITTLE_ENDIAN__
45168 __ai float16_t vmaxnmvq_f16(float16x8_t __p0) {
45169 float16_t __ret;
45170 __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__p0);
45171 return __ret;
45172 }
45173 #else
45174 __ai float16_t vmaxnmvq_f16(float16x8_t __p0) {
45175 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45176 float16_t __ret;
45177 __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0);
45178 return __ret;
45179 }
45180 #endif
45181
45182 #ifdef __LITTLE_ENDIAN__
45183 __ai float16_t vmaxnmv_f16(float16x4_t __p0) {
45184 float16_t __ret;
45185 __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__p0);
45186 return __ret;
45187 }
45188 #else
45189 __ai float16_t vmaxnmv_f16(float16x4_t __p0) {
45190 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45191 float16_t __ret;
45192 __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0);
45193 return __ret;
45194 }
45195 #endif
45196
45197 #ifdef __LITTLE_ENDIAN__
45198 __ai float16_t vmaxvq_f16(float16x8_t __p0) {
45199 float16_t __ret;
45200 __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__p0);
45201 return __ret;
45202 }
45203 #else
45204 __ai float16_t vmaxvq_f16(float16x8_t __p0) {
45205 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45206 float16_t __ret;
45207 __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0);
45208 return __ret;
45209 }
45210 #endif
45211
45212 #ifdef __LITTLE_ENDIAN__
45213 __ai float16_t vmaxv_f16(float16x4_t __p0) {
45214 float16_t __ret;
45215 __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__p0);
45216 return __ret;
45217 }
45218 #else
45219 __ai float16_t vmaxv_f16(float16x4_t __p0) {
45220 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45221 float16_t __ret;
45222 __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0);
45223 return __ret;
45224 }
45225 #endif
45226
45227 #ifdef __LITTLE_ENDIAN__
45228 __ai float16_t vminnmvq_f16(float16x8_t __p0) {
45229 float16_t __ret;
45230 __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__p0);
45231 return __ret;
45232 }
45233 #else
45234 __ai float16_t vminnmvq_f16(float16x8_t __p0) {
45235 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45236 float16_t __ret;
45237 __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0);
45238 return __ret;
45239 }
45240 #endif
45241
45242 #ifdef __LITTLE_ENDIAN__
45243 __ai float16_t vminnmv_f16(float16x4_t __p0) {
45244 float16_t __ret;
45245 __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__p0);
45246 return __ret;
45247 }
45248 #else
45249 __ai float16_t vminnmv_f16(float16x4_t __p0) {
45250 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45251 float16_t __ret;
45252 __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0);
45253 return __ret;
45254 }
45255 #endif
45256
45257 #ifdef __LITTLE_ENDIAN__
45258 __ai float16_t vminvq_f16(float16x8_t __p0) {
45259 float16_t __ret;
45260 __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__p0);
45261 return __ret;
45262 }
45263 #else
45264 __ai float16_t vminvq_f16(float16x8_t __p0) {
45265 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45266 float16_t __ret;
45267 __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0);
45268 return __ret;
45269 }
45270 #endif
45271
45272 #ifdef __LITTLE_ENDIAN__
45273 __ai float16_t vminv_f16(float16x4_t __p0) {
45274 float16_t __ret;
45275 __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__p0);
45276 return __ret;
45277 }
45278 #else
45279 __ai float16_t vminv_f16(float16x4_t __p0) {
45280 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45281 float16_t __ret;
45282 __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0);
45283 return __ret;
45284 }
45285 #endif
45286
45287 #ifdef __LITTLE_ENDIAN__
45288 __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
45289 float16x8_t __ret;
45290 __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
45291 return __ret;
45292 }
45293 #else
45294 __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
45295 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45296 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
45297 float16x8_t __ret;
45298 __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
45299 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45300 return __ret;
45301 }
45302 __ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
45303 float16x8_t __ret;
45304 __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
45305 return __ret;
45306 }
45307 #endif
45308
45309 #ifdef __LITTLE_ENDIAN__
45310 __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
45311 float16x4_t __ret;
45312 __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
45313 return __ret;
45314 }
45315 #else
45316 __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
45317 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45318 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45319 float16x4_t __ret;
45320 __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
45321 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45322 return __ret;
45323 }
45324 __ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
45325 float16x4_t __ret;
45326 __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
45327 return __ret;
45328 }
45329 #endif
45330
45331 #ifdef __LITTLE_ENDIAN__
45332 #define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
45333 float16x8_t __s0 = __p0; \
45334 float16x4_t __s1 = __p1; \
45335 float16x8_t __ret; \
45336 __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
45337 __ret; \
45338 })
45339 #else
45340 #define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
45341 float16x8_t __s0 = __p0; \
45342 float16x4_t __s1 = __p1; \
45343 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45344 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45345 float16x8_t __ret; \
45346 __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
45347 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45348 __ret; \
45349 })
45350 #endif
45351
45352 #ifdef __LITTLE_ENDIAN__
45353 #define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
45354 float16x4_t __s0 = __p0; \
45355 float16x4_t __s1 = __p1; \
45356 float16x4_t __ret; \
45357 __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
45358 __ret; \
45359 })
45360 #else
45361 #define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
45362 float16x4_t __s0 = __p0; \
45363 float16x4_t __s1 = __p1; \
45364 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45365 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45366 float16x4_t __ret; \
45367 __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
45368 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45369 __ret; \
45370 })
45371 #endif
45372
45373 #ifdef __LITTLE_ENDIAN__
45374 #define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
45375 float16x8_t __s0 = __p0; \
45376 float16x8_t __s1 = __p1; \
45377 float16x8_t __ret; \
45378 __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
45379 __ret; \
45380 })
45381 #else
45382 #define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
45383 float16x8_t __s0 = __p0; \
45384 float16x8_t __s1 = __p1; \
45385 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45386 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
45387 float16x8_t __ret; \
45388 __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
45389 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45390 __ret; \
45391 })
45392 #endif
45393
45394 #ifdef __LITTLE_ENDIAN__
45395 #define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
45396 float16x4_t __s0 = __p0; \
45397 float16x8_t __s1 = __p1; \
45398 float16x4_t __ret; \
45399 __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
45400 __ret; \
45401 })
45402 #else
45403 #define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
45404 float16x4_t __s0 = __p0; \
45405 float16x8_t __s1 = __p1; \
45406 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45407 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
45408 float16x4_t __ret; \
45409 __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
45410 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45411 __ret; \
45412 })
45413 #endif
45414
45415 #ifdef __LITTLE_ENDIAN__
45416 __ai float16x8_t vmulxq_n_f16(float16x8_t __p0, float16_t __p1) {
45417 float16x8_t __ret;
45418 __ret = vmulxq_f16(__p0, (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
45419 return __ret;
45420 }
45421 #else
45422 __ai float16x8_t vmulxq_n_f16(float16x8_t __p0, float16_t __p1) {
45423 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45424 float16x8_t __ret;
45425 __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
45426 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45427 return __ret;
45428 }
45429 #endif
45430
45431 #ifdef __LITTLE_ENDIAN__
45432 __ai float16x4_t vmulx_n_f16(float16x4_t __p0, float16_t __p1) {
45433 float16x4_t __ret;
45434 __ret = vmulx_f16(__p0, (float16x4_t) {__p1, __p1, __p1, __p1});
45435 return __ret;
45436 }
45437 #else
45438 __ai float16x4_t vmulx_n_f16(float16x4_t __p0, float16_t __p1) {
45439 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45440 float16x4_t __ret;
45441 __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__p1, __p1, __p1, __p1});
45442 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45443 return __ret;
45444 }
45445 #endif
45446
45447 #ifdef __LITTLE_ENDIAN__
45448 __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
45449 float16x8_t __ret;
45450 __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
45451 return __ret;
45452 }
45453 #else
45454 __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
45455 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45456 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
45457 float16x8_t __ret;
45458 __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
45459 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45460 return __ret;
45461 }
45462 #endif
45463
45464 #ifdef __LITTLE_ENDIAN__
45465 __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
45466 float16x4_t __ret;
45467 __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
45468 return __ret;
45469 }
45470 #else
45471 __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
45472 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45473 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45474 float16x4_t __ret;
45475 __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
45476 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45477 return __ret;
45478 }
45479 #endif
45480
45481 #ifdef __LITTLE_ENDIAN__
45482 __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
45483 float16x8_t __ret;
45484 __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
45485 return __ret;
45486 }
45487 #else
45488 __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
45489 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45490 float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
45491 float16x8_t __ret;
45492 __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
45493 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45494 return __ret;
45495 }
45496 #endif
45497
45498 #ifdef __LITTLE_ENDIAN__
45499 __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
45500 float16x4_t __ret;
45501 __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
45502 return __ret;
45503 }
45504 #else
45505 __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
45506 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45507 float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45508 float16x4_t __ret;
45509 __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
45510 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45511 return __ret;
45512 }
45513 #endif
45514
45515 #ifdef __LITTLE_ENDIAN__
45516 __ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
45517 float16x8_t __ret;
45518 __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
45519 return __ret;
45520 }
45521 #else
45522 __ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
45523 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45524 float16x8_t __ret;
45525 __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
45526 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45527 return __ret;
45528 }
45529 #endif
45530
45531 #ifdef __LITTLE_ENDIAN__
45532 __ai float16x4_t vsqrt_f16(float16x4_t __p0) {
45533 float16x4_t __ret;
45534 __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
45535 return __ret;
45536 }
45537 #else
45538 __ai float16x4_t vsqrt_f16(float16x4_t __p0) {
45539 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45540 float16x4_t __ret;
45541 __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
45542 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45543 return __ret;
45544 }
45545 #endif
45546
45547 #endif
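/* Saturating rounding doubling multiply-accumulate/subtract intrinsics
 * (vqrdmlah/vqrdmlsh and their lane forms), guarded by __ARM_FEATURE_QRDMX;
 * each is built from vqadd/vqsub combined with vqrdmulh. */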
45548 #if defined(__ARM_FEATURE_QRDMX)
45549 #ifdef __LITTLE_ENDIAN__
45550 __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
45551 int32x4_t __ret;
45552 __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
45553 return __ret;
45554 }
45555 #else
45556 __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
45557 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45558 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45559 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
45560 int32x4_t __ret;
45561 __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
45562 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45563 return __ret;
45564 }
45565 #endif
45566
45567 #ifdef __LITTLE_ENDIAN__
45568 __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
45569 int16x8_t __ret;
45570 __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
45571 return __ret;
45572 }
45573 #else
45574 __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
45575 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45576 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
45577 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
45578 int16x8_t __ret;
45579 __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
45580 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45581 return __ret;
45582 }
45583 #endif
45584
45585 #ifdef __LITTLE_ENDIAN__
45586 __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
45587 int32x2_t __ret;
45588 __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
45589 return __ret;
45590 }
45591 #else
45592 __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
45593 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
45594 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
45595 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
45596 int32x2_t __ret;
45597 __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
45598 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
45599 return __ret;
45600 }
45601 #endif
45602
45603 #ifdef __LITTLE_ENDIAN__
45604 __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
45605 int16x4_t __ret;
45606 __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
45607 return __ret;
45608 }
45609 #else
45610 __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
45611 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45612 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45613 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
45614 int16x4_t __ret;
45615 __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
45616 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45617 return __ret;
45618 }
45619 #endif
45620
45621 #ifdef __LITTLE_ENDIAN__
45622 #define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45623 int32x4_t __s0 = __p0; \
45624 int32x4_t __s1 = __p1; \
45625 int32x2_t __s2 = __p2; \
45626 int32x4_t __ret; \
45627 __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
45628 __ret; \
45629 })
45630 #else
45631 #define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45632 int32x4_t __s0 = __p0; \
45633 int32x4_t __s1 = __p1; \
45634 int32x2_t __s2 = __p2; \
45635 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45636 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45637 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
45638 int32x4_t __ret; \
45639 __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
45640 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45641 __ret; \
45642 })
45643 #endif
45644
45645 #ifdef __LITTLE_ENDIAN__
45646 #define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45647 int16x8_t __s0 = __p0; \
45648 int16x8_t __s1 = __p1; \
45649 int16x4_t __s2 = __p2; \
45650 int16x8_t __ret; \
45651 __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
45652 __ret; \
45653 })
45654 #else
45655 #define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45656 int16x8_t __s0 = __p0; \
45657 int16x8_t __s1 = __p1; \
45658 int16x4_t __s2 = __p2; \
45659 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45660 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
45661 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
45662 int16x8_t __ret; \
45663 __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
45664 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45665 __ret; \
45666 })
45667 #endif
45668
45669 #ifdef __LITTLE_ENDIAN__
45670 #define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45671 int32x2_t __s0 = __p0; \
45672 int32x2_t __s1 = __p1; \
45673 int32x2_t __s2 = __p2; \
45674 int32x2_t __ret; \
45675 __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
45676 __ret; \
45677 })
45678 #else
45679 #define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45680 int32x2_t __s0 = __p0; \
45681 int32x2_t __s1 = __p1; \
45682 int32x2_t __s2 = __p2; \
45683 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45684 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
45685 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
45686 int32x2_t __ret; \
45687 __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
45688 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45689 __ret; \
45690 })
45691 #endif
45692
45693 #ifdef __LITTLE_ENDIAN__
45694 #define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45695 int16x4_t __s0 = __p0; \
45696 int16x4_t __s1 = __p1; \
45697 int16x4_t __s2 = __p2; \
45698 int16x4_t __ret; \
45699 __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
45700 __ret; \
45701 })
45702 #else
45703 #define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45704 int16x4_t __s0 = __p0; \
45705 int16x4_t __s1 = __p1; \
45706 int16x4_t __s2 = __p2; \
45707 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45708 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45709 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
45710 int16x4_t __ret; \
45711 __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
45712 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45713 __ret; \
45714 })
45715 #endif
45716
45717 #ifdef __LITTLE_ENDIAN__
45718 __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
45719 int32x4_t __ret;
45720 __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
45721 return __ret;
45722 }
45723 #else
45724 __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
45725 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45726 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45727 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
45728 int32x4_t __ret;
45729 __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
45730 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45731 return __ret;
45732 }
45733 #endif
45734
45735 #ifdef __LITTLE_ENDIAN__
45736 __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
45737 int16x8_t __ret;
45738 __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
45739 return __ret;
45740 }
45741 #else
45742 __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
45743 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
45744 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
45745 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
45746 int16x8_t __ret;
45747 __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
45748 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
45749 return __ret;
45750 }
45751 #endif
45752
45753 #ifdef __LITTLE_ENDIAN__
45754 __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
45755 int32x2_t __ret;
45756 __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
45757 return __ret;
45758 }
45759 #else
45760 __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
45761 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
45762 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
45763 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
45764 int32x2_t __ret;
45765 __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
45766 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
45767 return __ret;
45768 }
45769 #endif
45770
45771 #ifdef __LITTLE_ENDIAN__
45772 __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
45773 int16x4_t __ret;
45774 __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
45775 return __ret;
45776 }
45777 #else
45778 __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
45779 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
45780 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
45781 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
45782 int16x4_t __ret;
45783 __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
45784 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
45785 return __ret;
45786 }
45787 #endif
45788
45789 #ifdef __LITTLE_ENDIAN__
45790 #define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45791 int32x4_t __s0 = __p0; \
45792 int32x4_t __s1 = __p1; \
45793 int32x2_t __s2 = __p2; \
45794 int32x4_t __ret; \
45795 __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
45796 __ret; \
45797 })
45798 #else
45799 #define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45800 int32x4_t __s0 = __p0; \
45801 int32x4_t __s1 = __p1; \
45802 int32x2_t __s2 = __p2; \
45803 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45804 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45805 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
45806 int32x4_t __ret; \
45807 __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
45808 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45809 __ret; \
45810 })
45811 #endif
45812
45813 #ifdef __LITTLE_ENDIAN__
45814 #define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45815 int16x8_t __s0 = __p0; \
45816 int16x8_t __s1 = __p1; \
45817 int16x4_t __s2 = __p2; \
45818 int16x8_t __ret; \
45819 __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
45820 __ret; \
45821 })
45822 #else
45823 #define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45824 int16x8_t __s0 = __p0; \
45825 int16x8_t __s1 = __p1; \
45826 int16x4_t __s2 = __p2; \
45827 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45828 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
45829 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
45830 int16x8_t __ret; \
45831 __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
45832 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45833 __ret; \
45834 })
45835 #endif
45836
45837 #ifdef __LITTLE_ENDIAN__
45838 #define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45839 int32x2_t __s0 = __p0; \
45840 int32x2_t __s1 = __p1; \
45841 int32x2_t __s2 = __p2; \
45842 int32x2_t __ret; \
45843 __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
45844 __ret; \
45845 })
45846 #else
45847 #define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45848 int32x2_t __s0 = __p0; \
45849 int32x2_t __s1 = __p1; \
45850 int32x2_t __s2 = __p2; \
45851 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45852 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
45853 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
45854 int32x2_t __ret; \
45855 __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
45856 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45857 __ret; \
45858 })
45859 #endif
45860
45861 #ifdef __LITTLE_ENDIAN__
45862 #define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45863 int16x4_t __s0 = __p0; \
45864 int16x4_t __s1 = __p1; \
45865 int16x4_t __s2 = __p2; \
45866 int16x4_t __ret; \
45867 __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
45868 __ret; \
45869 })
45870 #else
45871 #define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45872 int16x4_t __s0 = __p0; \
45873 int16x4_t __s1 = __p1; \
45874 int16x4_t __s2 = __p2; \
45875 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45876 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45877 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
45878 int16x4_t __ret; \
45879 __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
45880 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45881 __ret; \
45882 })
45883 #endif
45884
45885 #endif
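/*
 * _laneq_ variants: the same multiply-accumulate/subtract operations, but
 * the lane index selects an element from a 128-bit vector (e.g. int32x4_t
 * __s2 below) instead of the 64-bit vector used by the _lane_ forms above.
 * These require both __ARM_FEATURE_QRDMX and __aarch64__.
 */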
45886 #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
45887 #ifdef __LITTLE_ENDIAN__
45888 #define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45889 int32x4_t __s0 = __p0; \
45890 int32x4_t __s1 = __p1; \
45891 int32x4_t __s2 = __p2; \
45892 int32x4_t __ret; \
45893 __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
45894 __ret; \
45895 })
45896 #else
45897 #define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45898 int32x4_t __s0 = __p0; \
45899 int32x4_t __s1 = __p1; \
45900 int32x4_t __s2 = __p2; \
45901 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45902 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45903 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
45904 int32x4_t __ret; \
45905 __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
45906 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45907 __ret; \
45908 })
45909 #endif
45910
45911 #ifdef __LITTLE_ENDIAN__
45912 #define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45913 int16x8_t __s0 = __p0; \
45914 int16x8_t __s1 = __p1; \
45915 int16x8_t __s2 = __p2; \
45916 int16x8_t __ret; \
45917 __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
45918 __ret; \
45919 })
45920 #else
45921 #define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45922 int16x8_t __s0 = __p0; \
45923 int16x8_t __s1 = __p1; \
45924 int16x8_t __s2 = __p2; \
45925 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
45926 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
45927 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
45928 int16x8_t __ret; \
45929 __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
45930 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
45931 __ret; \
45932 })
45933 #endif
45934
45935 #ifdef __LITTLE_ENDIAN__
45936 #define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45937 int32x2_t __s0 = __p0; \
45938 int32x2_t __s1 = __p1; \
45939 int32x4_t __s2 = __p2; \
45940 int32x2_t __ret; \
45941 __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
45942 __ret; \
45943 })
45944 #else
45945 #define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45946 int32x2_t __s0 = __p0; \
45947 int32x2_t __s1 = __p1; \
45948 int32x4_t __s2 = __p2; \
45949 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
45950 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
45951 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
45952 int32x2_t __ret; \
45953 __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
45954 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
45955 __ret; \
45956 })
45957 #endif
45958
45959 #ifdef __LITTLE_ENDIAN__
45960 #define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45961 int16x4_t __s0 = __p0; \
45962 int16x4_t __s1 = __p1; \
45963 int16x8_t __s2 = __p2; \
45964 int16x4_t __ret; \
45965 __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
45966 __ret; \
45967 })
45968 #else
45969 #define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
45970 int16x4_t __s0 = __p0; \
45971 int16x4_t __s1 = __p1; \
45972 int16x8_t __s2 = __p2; \
45973 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45974 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45975 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
45976 int16x4_t __ret; \
45977 __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
45978 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
45979 __ret; \
45980 })
45981 #endif
45982
45983 #ifdef __LITTLE_ENDIAN__
45984 #define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45985 int32x4_t __s0 = __p0; \
45986 int32x4_t __s1 = __p1; \
45987 int32x4_t __s2 = __p2; \
45988 int32x4_t __ret; \
45989 __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
45990 __ret; \
45991 })
45992 #else
45993 #define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
45994 int32x4_t __s0 = __p0; \
45995 int32x4_t __s1 = __p1; \
45996 int32x4_t __s2 = __p2; \
45997 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
45998 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
45999 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
46000 int32x4_t __ret; \
46001 __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
46002 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
46003 __ret; \
46004 })
46005 #endif
46006
46007 #ifdef __LITTLE_ENDIAN__
46008 #define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
46009 int16x8_t __s0 = __p0; \
46010 int16x8_t __s1 = __p1; \
46011 int16x8_t __s2 = __p2; \
46012 int16x8_t __ret; \
46013 __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
46014 __ret; \
46015 })
46016 #else
46017 #define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
46018 int16x8_t __s0 = __p0; \
46019 int16x8_t __s1 = __p1; \
46020 int16x8_t __s2 = __p2; \
46021 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
46022 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
46023 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
46024 int16x8_t __ret; \
46025 __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
46026 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
46027 __ret; \
46028 })
46029 #endif
46030
46031 #ifdef __LITTLE_ENDIAN__
46032 #define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
46033 int32x2_t __s0 = __p0; \
46034 int32x2_t __s1 = __p1; \
46035 int32x4_t __s2 = __p2; \
46036 int32x2_t __ret; \
46037 __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
46038 __ret; \
46039 })
46040 #else
46041 #define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
46042 int32x2_t __s0 = __p0; \
46043 int32x2_t __s1 = __p1; \
46044 int32x4_t __s2 = __p2; \
46045 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
46046 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
46047 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
46048 int32x2_t __ret; \
46049 __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
46050 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
46051 __ret; \
46052 })
46053 #endif
46054
46055 #ifdef __LITTLE_ENDIAN__
46056 #define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
46057 int16x4_t __s0 = __p0; \
46058 int16x4_t __s1 = __p1; \
46059 int16x8_t __s2 = __p2; \
46060 int16x4_t __ret; \
46061 __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
46062 __ret; \
46063 })
46064 #else
46065 #define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
46066 int16x4_t __s0 = __p0; \
46067 int16x4_t __s1 = __p1; \
46068 int16x8_t __s2 = __p2; \
46069 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
46070 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
46071 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
46072 int16x4_t __ret; \
46073 __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
46074 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
46075 __ret; \
46076 })
46077 #endif
46078
46079 #endif
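/*
 * AArch64-only intrinsics begin here, starting with the absolute-difference
 * family for types that only exist on AArch64: vabdq_f64/vabd_f64 on
 * float64 vectors (FABD) plus the scalar forms vabdd_f64 and vabds_f32.
 */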
46080 #if defined(__aarch64__)
46081 #ifdef __LITTLE_ENDIAN__
46082 __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
46083 float64x2_t __ret;
46084 __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
46085 return __ret;
46086 }
46087 #else
46088 __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
46089 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46090 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46091 float64x2_t __ret;
46092 __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
46093 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46094 return __ret;
46095 }
46096 #endif
46097
46098 #ifdef __LITTLE_ENDIAN__
46099 __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
46100 float64x1_t __ret;
46101 __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
46102 return __ret;
46103 }
46104 #else
46105 __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
46106 float64x1_t __ret;
46107 __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
46108 return __ret;
46109 }
46110 #endif
46111
46112 #ifdef __LITTLE_ENDIAN__
46113 __ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
46114 float64_t __ret;
46115 __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
46116 return __ret;
46117 }
46118 #else
46119 __ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
46120 float64_t __ret;
46121 __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
46122 return __ret;
46123 }
46124 #endif
46125
46126 #ifdef __LITTLE_ENDIAN__
46127 __ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
46128 float32_t __ret;
46129 __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
46130 return __ret;
46131 }
46132 #else
46133 __ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
46134 float32_t __ret;
46135 __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
46136 return __ret;
46137 }
46138 #endif
46139
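/*
 * Absolute value for AArch64-only element types: vabsq_f64/vabs_f64 on
 * float64 vectors, vabsq_s64/vabs_s64 on 64-bit signed integer vectors,
 * and the scalar vabsd_s64.
 */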
46140 #ifdef __LITTLE_ENDIAN__
46141 __ai float64x2_t vabsq_f64(float64x2_t __p0) {
46142 float64x2_t __ret;
46143 __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
46144 return __ret;
46145 }
46146 #else
46147 __ai float64x2_t vabsq_f64(float64x2_t __p0) {
46148 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46149 float64x2_t __ret;
46150 __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
46151 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46152 return __ret;
46153 }
46154 #endif
46155
46156 #ifdef __LITTLE_ENDIAN__
46157 __ai int64x2_t vabsq_s64(int64x2_t __p0) {
46158 int64x2_t __ret;
46159 __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
46160 return __ret;
46161 }
46162 #else
46163 __ai int64x2_t vabsq_s64(int64x2_t __p0) {
46164 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46165 int64x2_t __ret;
46166 __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
46167 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46168 return __ret;
46169 }
46170 #endif
46171
46172 #ifdef __LITTLE_ENDIAN__
46173 __ai float64x1_t vabs_f64(float64x1_t __p0) {
46174 float64x1_t __ret;
46175 __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
46176 return __ret;
46177 }
46178 #else
46179 __ai float64x1_t vabs_f64(float64x1_t __p0) {
46180 float64x1_t __ret;
46181 __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
46182 return __ret;
46183 }
46184 #endif
46185
46186 #ifdef __LITTLE_ENDIAN__
46187 __ai int64x1_t vabs_s64(int64x1_t __p0) {
46188 int64x1_t __ret;
46189 __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
46190 return __ret;
46191 }
46192 #else
46193 __ai int64x1_t vabs_s64(int64x1_t __p0) {
46194 int64x1_t __ret;
46195 __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
46196 return __ret;
46197 }
46198 #endif
46199
46200 #ifdef __LITTLE_ENDIAN__
46201 __ai int64_t vabsd_s64(int64_t __p0) {
46202 int64_t __ret;
46203 __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
46204 return __ret;
46205 }
46206 #else
46207 __ai int64_t vabsd_s64(int64_t __p0) {
46208 int64_t __ret;
46209 __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
46210 return __ret;
46211 }
46212 #endif
46213
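/*
 * Addition: the float64 vector adds are expressed directly with the +
 * operator on the vector types, while vaddd_u64/vaddd_s64 are scalar
 * 64-bit adds that go through the corresponding builtins.
 */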
46214 #ifdef __LITTLE_ENDIAN__
46215 __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
46216 float64x2_t __ret;
46217 __ret = __p0 + __p1;
46218 return __ret;
46219 }
46220 #else
46221 __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
46222 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46223 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46224 float64x2_t __ret;
46225 __ret = __rev0 + __rev1;
46226 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46227 return __ret;
46228 }
46229 #endif
46230
46231 #ifdef __LITTLE_ENDIAN__
46232 __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
46233 float64x1_t __ret;
46234 __ret = __p0 + __p1;
46235 return __ret;
46236 }
46237 #else
46238 __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
46239 float64x1_t __ret;
46240 __ret = __p0 + __p1;
46241 return __ret;
46242 }
46243 #endif
46244
46245 #ifdef __LITTLE_ENDIAN__
46246 __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
46247 uint64_t __ret;
46248 __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
46249 return __ret;
46250 }
46251 #else
46252 __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
46253 uint64_t __ret;
46254 __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
46255 return __ret;
46256 }
46257 #endif
46258
46259 #ifdef __LITTLE_ENDIAN__
46260 __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
46261 int64_t __ret;
46262 __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
46263 return __ret;
46264 }
46265 #else
46266 __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
46267 int64_t __ret;
46268 __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
46269 return __ret;
46270 }
46271 #endif
46272
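/*
 * vaddhn_high_*: add two wide vectors, keep the high half of each element
 * of the sum (vaddhn), and pack that narrowed result into the upper half
 * of the destination, with __p0 supplying the lower half via vcombine.
 */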
46273 #ifdef __LITTLE_ENDIAN__
46274 __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
46275 uint16x8_t __ret;
46276 __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
46277 return __ret;
46278 }
46279 #else
46280 __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
46281 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46282 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
46283 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
46284 uint16x8_t __ret;
46285 __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
46286 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
46287 return __ret;
46288 }
46289 #endif
46290
46291 #ifdef __LITTLE_ENDIAN__
46292 __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
46293 uint32x4_t __ret;
46294 __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
46295 return __ret;
46296 }
46297 #else
46298 __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
46299 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46300 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46301 uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
46302 uint32x4_t __ret;
46303 __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
46304 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
46305 return __ret;
46306 }
46307 #endif
46308
46309 #ifdef __LITTLE_ENDIAN__
46310 __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
46311 uint8x16_t __ret;
46312 __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
46313 return __ret;
46314 }
46315 #else
46316 __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
46317 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46318 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
46319 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
46320 uint8x16_t __ret;
46321 __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
46322 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
46323 return __ret;
46324 }
46325 #endif
46326
46327 #ifdef __LITTLE_ENDIAN__
46328 __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
46329 int16x8_t __ret;
46330 __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
46331 return __ret;
46332 }
46333 #else
46334 __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
46335 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46336 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
46337 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
46338 int16x8_t __ret;
46339 __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
46340 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
46341 return __ret;
46342 }
46343 #endif
46344
46345 #ifdef __LITTLE_ENDIAN__
46346 __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
46347 int32x4_t __ret;
46348 __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
46349 return __ret;
46350 }
46351 #else
46352 __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
46353 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46354 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46355 int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
46356 int32x4_t __ret;
46357 __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
46358 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
46359 return __ret;
46360 }
46361 #endif
46362
46363 #ifdef __LITTLE_ENDIAN__
46364 __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
46365 int8x16_t __ret;
46366 __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
46367 return __ret;
46368 }
46369 #else
46370 __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
46371 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46372 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
46373 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
46374 int8x16_t __ret;
46375 __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
46376 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
46377 return __ret;
46378 }
46379 #endif
46380
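/*
 * vaddlv(q)_*: widening add across all lanes of a vector; the result is a
 * single scalar of twice the element width (e.g. vaddlvq_u8 sums sixteen
 * uint8_t lanes into a uint16_t).
 */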
46381 #ifdef __LITTLE_ENDIAN__
46382 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
46383 uint16_t __ret;
46384 __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
46385 return __ret;
46386 }
46387 #else
46388 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
46389 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
46390 uint16_t __ret;
46391 __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
46392 return __ret;
46393 }
46394 #endif
46395
46396 #ifdef __LITTLE_ENDIAN__
46397 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
46398 uint64_t __ret;
46399 __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
46400 return __ret;
46401 }
46402 #else
46403 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
46404 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46405 uint64_t __ret;
46406 __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
46407 return __ret;
46408 }
46409 #endif
46410
46411 #ifdef __LITTLE_ENDIAN__
46412 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
46413 uint32_t __ret;
46414 __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
46415 return __ret;
46416 }
46417 #else
46418 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
46419 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46420 uint32_t __ret;
46421 __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
46422 return __ret;
46423 }
46424 #endif
46425
46426 #ifdef __LITTLE_ENDIAN__
46427 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
46428 int16_t __ret;
46429 __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
46430 return __ret;
46431 }
46432 #else
46433 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
46434 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
46435 int16_t __ret;
46436 __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
46437 return __ret;
46438 }
46439 #endif
46440
46441 #ifdef __LITTLE_ENDIAN__
46442 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
46443 int64_t __ret;
46444 __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
46445 return __ret;
46446 }
46447 #else
46448 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
46449 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46450 int64_t __ret;
46451 __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
46452 return __ret;
46453 }
46454 #endif
46455
46456 #ifdef __LITTLE_ENDIAN__
46457 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
46458 int32_t __ret;
46459 __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
46460 return __ret;
46461 }
46462 #else
46463 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
46464 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46465 int32_t __ret;
46466 __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
46467 return __ret;
46468 }
46469 #endif
46470
46471 #ifdef __LITTLE_ENDIAN__
46472 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
46473 uint16_t __ret;
46474 __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
46475 return __ret;
46476 }
46477 #else
46478 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
46479 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46480 uint16_t __ret;
46481 __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
46482 return __ret;
46483 }
46484 #endif
46485
46486 #ifdef __LITTLE_ENDIAN__
46487 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
46488 uint64_t __ret;
46489 __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
46490 return __ret;
46491 }
46492 #else
46493 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
46494 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46495 uint64_t __ret;
46496 __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
46497 return __ret;
46498 }
46499 #endif
46500
46501 #ifdef __LITTLE_ENDIAN__
46502 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
46503 uint32_t __ret;
46504 __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
46505 return __ret;
46506 }
46507 #else
46508 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
46509 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46510 uint32_t __ret;
46511 __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
46512 return __ret;
46513 }
46514 #endif
46515
46516 #ifdef __LITTLE_ENDIAN__
46517 __ai int16_t vaddlv_s8(int8x8_t __p0) {
46518 int16_t __ret;
46519 __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
46520 return __ret;
46521 }
46522 #else
46523 __ai int16_t vaddlv_s8(int8x8_t __p0) {
46524 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46525 int16_t __ret;
46526 __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
46527 return __ret;
46528 }
46529 #endif
46530
46531 #ifdef __LITTLE_ENDIAN__
46532 __ai int64_t vaddlv_s32(int32x2_t __p0) {
46533 int64_t __ret;
46534 __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
46535 return __ret;
46536 }
46537 #else
46538 __ai int64_t vaddlv_s32(int32x2_t __p0) {
46539 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46540 int64_t __ret;
46541 __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
46542 return __ret;
46543 }
46544 #endif
46545
46546 #ifdef __LITTLE_ENDIAN__
46547 __ai int32_t vaddlv_s16(int16x4_t __p0) {
46548 int32_t __ret;
46549 __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
46550 return __ret;
46551 }
46552 #else
46553 __ai int32_t vaddlv_s16(int16x4_t __p0) {
46554 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46555 int32_t __ret;
46556 __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
46557 return __ret;
46558 }
46559 #endif
46560
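/*
 * vaddv(q)_*: add across all lanes of a vector, returning a scalar of the
 * same element width. Illustrative use (not part of the generated header;
 * assumes the standard vdupq_n_u32 intrinsic):
 *
 *   uint32x4_t v = vdupq_n_u32(3);
 *   uint32_t sum = vaddvq_u32(v);   // sum == 12
 */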
46561 #ifdef __LITTLE_ENDIAN__
46562 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
46563 uint8_t __ret;
46564 __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
46565 return __ret;
46566 }
46567 #else
46568 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
46569 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
46570 uint8_t __ret;
46571 __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
46572 return __ret;
46573 }
46574 #endif
46575
46576 #ifdef __LITTLE_ENDIAN__
46577 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
46578 uint32_t __ret;
46579 __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
46580 return __ret;
46581 }
46582 #else
46583 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
46584 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46585 uint32_t __ret;
46586 __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
46587 return __ret;
46588 }
46589 #endif
46590
46591 #ifdef __LITTLE_ENDIAN__
46592 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
46593 uint64_t __ret;
46594 __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
46595 return __ret;
46596 }
46597 #else
46598 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
46599 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46600 uint64_t __ret;
46601 __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
46602 return __ret;
46603 }
46604 #endif
46605
46606 #ifdef __LITTLE_ENDIAN__
46607 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
46608 uint16_t __ret;
46609 __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
46610 return __ret;
46611 }
46612 #else
46613 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
46614 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46615 uint16_t __ret;
46616 __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
46617 return __ret;
46618 }
46619 #endif
46620
46621 #ifdef __LITTLE_ENDIAN__
46622 __ai int8_t vaddvq_s8(int8x16_t __p0) {
46623 int8_t __ret;
46624 __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
46625 return __ret;
46626 }
46627 #else
46628 __ai int8_t vaddvq_s8(int8x16_t __p0) {
46629 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
46630 int8_t __ret;
46631 __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
46632 return __ret;
46633 }
46634 #endif
46635
46636 #ifdef __LITTLE_ENDIAN__
46637 __ai float64_t vaddvq_f64(float64x2_t __p0) {
46638 float64_t __ret;
46639 __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
46640 return __ret;
46641 }
46642 #else
46643 __ai float64_t vaddvq_f64(float64x2_t __p0) {
46644 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46645 float64_t __ret;
46646 __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
46647 return __ret;
46648 }
46649 #endif
46650
46651 #ifdef __LITTLE_ENDIAN__
46652 __ai float32_t vaddvq_f32(float32x4_t __p0) {
46653 float32_t __ret;
46654 __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
46655 return __ret;
46656 }
46657 #else
46658 __ai float32_t vaddvq_f32(float32x4_t __p0) {
46659 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46660 float32_t __ret;
46661 __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
46662 return __ret;
46663 }
46664 #endif
46665
46666 #ifdef __LITTLE_ENDIAN__
46667 __ai int32_t vaddvq_s32(int32x4_t __p0) {
46668 int32_t __ret;
46669 __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
46670 return __ret;
46671 }
46672 #else
46673 __ai int32_t vaddvq_s32(int32x4_t __p0) {
46674 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46675 int32_t __ret;
46676 __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
46677 return __ret;
46678 }
46679 #endif
46680
46681 #ifdef __LITTLE_ENDIAN__
46682 __ai int64_t vaddvq_s64(int64x2_t __p0) {
46683 int64_t __ret;
46684 __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
46685 return __ret;
46686 }
46687 #else
46688 __ai int64_t vaddvq_s64(int64x2_t __p0) {
46689 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46690 int64_t __ret;
46691 __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
46692 return __ret;
46693 }
46694 #endif
46695
46696 #ifdef __LITTLE_ENDIAN__
46697 __ai int16_t vaddvq_s16(int16x8_t __p0) {
46698 int16_t __ret;
46699 __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
46700 return __ret;
46701 }
46702 #else
46703 __ai int16_t vaddvq_s16(int16x8_t __p0) {
46704 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46705 int16_t __ret;
46706 __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
46707 return __ret;
46708 }
46709 #endif
46710
46711 #ifdef __LITTLE_ENDIAN__
46712 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
46713 uint8_t __ret;
46714 __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
46715 return __ret;
46716 }
46717 #else
46718 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
46719 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46720 uint8_t __ret;
46721 __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
46722 return __ret;
46723 }
46724 #endif
46725
46726 #ifdef __LITTLE_ENDIAN__
46727 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
46728 uint32_t __ret;
46729 __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
46730 return __ret;
46731 }
46732 #else
46733 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
46734 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46735 uint32_t __ret;
46736 __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
46737 return __ret;
46738 }
46739 #endif
46740
46741 #ifdef __LITTLE_ENDIAN__
46742 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
46743 uint16_t __ret;
46744 __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
46745 return __ret;
46746 }
46747 #else
46748 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
46749 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46750 uint16_t __ret;
46751 __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
46752 return __ret;
46753 }
46754 #endif
46755
46756 #ifdef __LITTLE_ENDIAN__
46757 __ai int8_t vaddv_s8(int8x8_t __p0) {
46758 int8_t __ret;
46759 __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
46760 return __ret;
46761 }
46762 #else
46763 __ai int8_t vaddv_s8(int8x8_t __p0) {
46764 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
46765 int8_t __ret;
46766 __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
46767 return __ret;
46768 }
46769 #endif
46770
46771 #ifdef __LITTLE_ENDIAN__
46772 __ai float32_t vaddv_f32(float32x2_t __p0) {
46773 float32_t __ret;
46774 __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
46775 return __ret;
46776 }
46777 #else
46778 __ai float32_t vaddv_f32(float32x2_t __p0) {
46779 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46780 float32_t __ret;
46781 __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
46782 return __ret;
46783 }
46784 #endif
46785
46786 #ifdef __LITTLE_ENDIAN__
46787 __ai int32_t vaddv_s32(int32x2_t __p0) {
46788 int32_t __ret;
46789 __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
46790 return __ret;
46791 }
46792 #else
46793 __ai int32_t vaddv_s32(int32x2_t __p0) {
46794 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46795 int32_t __ret;
46796 __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
46797 return __ret;
46798 }
46799 #endif
46800
46801 #ifdef __LITTLE_ENDIAN__
46802 __ai int16_t vaddv_s16(int16x4_t __p0) {
46803 int16_t __ret;
46804 __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
46805 return __ret;
46806 }
46807 #else
46808 __ai int16_t vaddv_s16(int16x4_t __p0) {
46809 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
46810 int16_t __ret;
46811 __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
46812 return __ret;
46813 }
46814 #endif
46815
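/*
 * Bitwise select for poly64/float64: in vbsl[q](mask, a, b) each result bit
 * is taken from a where the corresponding mask bit is set and from b where
 * it is clear (BSL). The one-lane poly64x1/float64x1 forms need no lane
 * reversal, so their little- and big-endian bodies are identical.
 */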
46816 #ifdef __LITTLE_ENDIAN__
46817 __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
46818 poly64x1_t __ret;
46819 __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
46820 return __ret;
46821 }
46822 #else
46823 __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
46824 poly64x1_t __ret;
46825 __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
46826 return __ret;
46827 }
46828 #endif
46829
46830 #ifdef __LITTLE_ENDIAN__
46831 __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
46832 poly64x2_t __ret;
46833 __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
46834 return __ret;
46835 }
46836 #else
46837 __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
46838 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46839 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46840 poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
46841 poly64x2_t __ret;
46842 __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
46843 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46844 return __ret;
46845 }
46846 #endif
46847
46848 #ifdef __LITTLE_ENDIAN__
46849 __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46850 float64x2_t __ret;
46851 __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
46852 return __ret;
46853 }
46854 #else
46855 __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
46856 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46857 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46858 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
46859 float64x2_t __ret;
46860 __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
46861 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46862 return __ret;
46863 }
46864 #endif
46865
46866 #ifdef __LITTLE_ENDIAN__
46867 __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
46868 float64x1_t __ret;
46869 __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
46870 return __ret;
46871 }
46872 #else
46873 __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
46874 float64x1_t __ret;
46875 __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
46876 return __ret;
46877 }
46878 #endif
46879
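/*
 * Absolute comparisons on float64, plus the float32/float64 scalar forms:
 * vcage/vcagt test |a| >= |b| and |a| > |b| (FACGE/FACGT), and vcale/vcalt
 * are the same tests with the operands reversed. Each result lane (or the
 * scalar result) is all ones when the comparison holds and zero otherwise.
 */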
46880 #ifdef __LITTLE_ENDIAN__
46881 __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
46882 uint64x2_t __ret;
46883 __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
46884 return __ret;
46885 }
46886 #else
46887 __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
46888 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46889 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46890 uint64x2_t __ret;
46891 __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
46892 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46893 return __ret;
46894 }
46895 #endif
46896
46897 #ifdef __LITTLE_ENDIAN__
46898 __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
46899 uint64x1_t __ret;
46900 __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
46901 return __ret;
46902 }
46903 #else
46904 __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
46905 uint64x1_t __ret;
46906 __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
46907 return __ret;
46908 }
46909 #endif
46910
46911 #ifdef __LITTLE_ENDIAN__
46912 __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
46913 uint64_t __ret;
46914 __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
46915 return __ret;
46916 }
46917 #else
46918 __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
46919 uint64_t __ret;
46920 __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
46921 return __ret;
46922 }
46923 #endif
46924
46925 #ifdef __LITTLE_ENDIAN__
46926 __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
46927 uint32_t __ret;
46928 __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
46929 return __ret;
46930 }
46931 #else
46932 __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
46933 uint32_t __ret;
46934 __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
46935 return __ret;
46936 }
46937 #endif
46938
46939 #ifdef __LITTLE_ENDIAN__
46940 __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
46941 uint64x2_t __ret;
46942 __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
46943 return __ret;
46944 }
46945 #else
46946 __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
46947 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
46948 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
46949 uint64x2_t __ret;
46950 __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
46951 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
46952 return __ret;
46953 }
46954 #endif
46955
46956 #ifdef __LITTLE_ENDIAN__
46957 __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
46958 uint64x1_t __ret;
46959 __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
46960 return __ret;
46961 }
46962 #else
46963 __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
46964 uint64x1_t __ret;
46965 __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
46966 return __ret;
46967 }
46968 #endif
46969
46970 #ifdef __LITTLE_ENDIAN__
46971 __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
46972 uint64_t __ret;
46973 __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
46974 return __ret;
46975 }
46976 #else
46977 __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
46978 uint64_t __ret;
46979 __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
46980 return __ret;
46981 }
46982 #endif
46983
46984 #ifdef __LITTLE_ENDIAN__
46985 __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
46986 uint32_t __ret;
46987 __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
46988 return __ret;
46989 }
46990 #else
46991 __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
46992 uint32_t __ret;
46993 __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
46994 return __ret;
46995 }
46996 #endif
46997
46998 #ifdef __LITTLE_ENDIAN__
46999 __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
47000 uint64x2_t __ret;
47001 __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
47002 return __ret;
47003 }
47004 #else
47005 __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
47006 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47007 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47008 uint64x2_t __ret;
47009 __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
47010 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47011 return __ret;
47012 }
47013 #endif
47014
47015 #ifdef __LITTLE_ENDIAN__
47016 __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
47017 uint64x1_t __ret;
47018 __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
47019 return __ret;
47020 }
47021 #else
47022 __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
47023 uint64x1_t __ret;
47024 __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
47025 return __ret;
47026 }
47027 #endif
47028
47029 #ifdef __LITTLE_ENDIAN__
47030 __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
47031 uint64_t __ret;
47032 __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
47033 return __ret;
47034 }
47035 #else
47036 __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
47037 uint64_t __ret;
47038 __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
47039 return __ret;
47040 }
47041 #endif
47042
47043 #ifdef __LITTLE_ENDIAN__
47044 __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
47045 uint32_t __ret;
47046 __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
47047 return __ret;
47048 }
47049 #else
47050 __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
47051 uint32_t __ret;
47052 __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
47053 return __ret;
47054 }
47055 #endif
47056
47057 #ifdef __LITTLE_ENDIAN__
47058 __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
47059 uint64x2_t __ret;
47060 __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
47061 return __ret;
47062 }
47063 #else
47064 __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
47065 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47066 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47067 uint64x2_t __ret;
47068 __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
47069 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47070 return __ret;
47071 }
47072 #endif
47073
47074 #ifdef __LITTLE_ENDIAN__
47075 __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
47076 uint64x1_t __ret;
47077 __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
47078 return __ret;
47079 }
47080 #else
47081 __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
47082 uint64x1_t __ret;
47083 __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
47084 return __ret;
47085 }
47086 #endif
47087
47088 #ifdef __LITTLE_ENDIAN__
47089 __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
47090 uint64_t __ret;
47091 __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
47092 return __ret;
47093 }
47094 #else
47095 __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
47096 uint64_t __ret;
47097 __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
47098 return __ret;
47099 }
47100 #endif
47101
47102 #ifdef __LITTLE_ENDIAN__
47103 __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
47104 uint32_t __ret;
47105 __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
47106 return __ret;
47107 }
47108 #else
47109 __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
47110 uint32_t __ret;
47111 __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
47112 return __ret;
47113 }
47114 #endif
47115
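/*
 * Equality comparisons for the 64-bit element types added on AArch64
 * (poly64, uint64, int64, float64). Each lane of the result is all ones
 * when the operands compare equal and zero otherwise, expressed here with
 * the == operator on the vector types.
 */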
47116 #ifdef __LITTLE_ENDIAN__
47117 __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
47118 uint64x1_t __ret;
47119 __ret = (uint64x1_t)(__p0 == __p1);
47120 return __ret;
47121 }
47122 #else
47123 __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
47124 uint64x1_t __ret;
47125 __ret = (uint64x1_t)(__p0 == __p1);
47126 return __ret;
47127 }
47128 #endif
47129
47130 #ifdef __LITTLE_ENDIAN__
47131 __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
47132 uint64x2_t __ret;
47133 __ret = (uint64x2_t)(__p0 == __p1);
47134 return __ret;
47135 }
47136 #else
47137 __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
47138 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47139 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47140 uint64x2_t __ret;
47141 __ret = (uint64x2_t)(__rev0 == __rev1);
47142 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47143 return __ret;
47144 }
47145 #endif
47146
47147 #ifdef __LITTLE_ENDIAN__
47148 __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
47149 uint64x2_t __ret;
47150 __ret = (uint64x2_t)(__p0 == __p1);
47151 return __ret;
47152 }
47153 #else
47154 __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
47155 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47156 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47157 uint64x2_t __ret;
47158 __ret = (uint64x2_t)(__rev0 == __rev1);
47159 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47160 return __ret;
47161 }
47162 #endif
47163
47164 #ifdef __LITTLE_ENDIAN__
47165 __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
47166 uint64x2_t __ret;
47167 __ret = (uint64x2_t)(__p0 == __p1);
47168 return __ret;
47169 }
47170 #else
47171 __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
47172 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47173 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47174 uint64x2_t __ret;
47175 __ret = (uint64x2_t)(__rev0 == __rev1);
47176 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47177 return __ret;
47178 }
47179 #endif
47180
47181 #ifdef __LITTLE_ENDIAN__
47182 __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
47183 uint64x2_t __ret;
47184 __ret = (uint64x2_t)(__p0 == __p1);
47185 return __ret;
47186 }
47187 #else
47188 __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
47189 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47190 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47191 uint64x2_t __ret;
47192 __ret = (uint64x2_t)(__rev0 == __rev1);
47193 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47194 return __ret;
47195 }
47196 #endif
47197
47198 #ifdef __LITTLE_ENDIAN__
47199 __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
47200 uint64x1_t __ret;
47201 __ret = (uint64x1_t)(__p0 == __p1);
47202 return __ret;
47203 }
47204 #else
47205 __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
47206 uint64x1_t __ret;
47207 __ret = (uint64x1_t)(__p0 == __p1);
47208 return __ret;
47209 }
47210 #endif
47211
47212 #ifdef __LITTLE_ENDIAN__
47213 __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
47214 uint64x1_t __ret;
47215 __ret = (uint64x1_t)(__p0 == __p1);
47216 return __ret;
47217 }
47218 #else
47219 __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
47220 uint64x1_t __ret;
47221 __ret = (uint64x1_t)(__p0 == __p1);
47222 return __ret;
47223 }
47224 #endif
47225
47226 #ifdef __LITTLE_ENDIAN__
47227 __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
47228 uint64x1_t __ret;
47229 __ret = (uint64x1_t)(__p0 == __p1);
47230 return __ret;
47231 }
47232 #else
47233 __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
47234 uint64x1_t __ret;
47235 __ret = (uint64x1_t)(__p0 == __p1);
47236 return __ret;
47237 }
47238 #endif
47239
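/*
 * Scalar compare-equal forms: vceqd_u64/vceqd_s64/vceqd_f64 and vceqs_f32
 * return an all-ones integer when the two scalars are equal, zero otherwise.
 */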
47240 #ifdef __LITTLE_ENDIAN__
47241 __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
47242 uint64_t __ret;
47243 __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
47244 return __ret;
47245 }
47246 #else
47247 __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
47248 uint64_t __ret;
47249 __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
47250 return __ret;
47251 }
47252 #endif
47253
47254 #ifdef __LITTLE_ENDIAN__
47255 __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
47256 int64_t __ret;
47257 __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
47258 return __ret;
47259 }
47260 #else
47261 __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
47262 int64_t __ret;
47263 __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
47264 return __ret;
47265 }
47266 #endif
47267
47268 #ifdef __LITTLE_ENDIAN__
47269 __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
47270 uint64_t __ret;
47271 __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
47272 return __ret;
47273 }
47274 #else
47275 __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
47276 uint64_t __ret;
47277 __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
47278 return __ret;
47279 }
47280 #endif
47281
47282 #ifdef __LITTLE_ENDIAN__
47283 __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
47284 uint32_t __ret;
47285 __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
47286 return __ret;
47287 }
47288 #else
47289 __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
47290 uint32_t __ret;
47291 __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
47292 return __ret;
47293 }
47294 #endif
47295
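/*
 * vceqz(q)_*: compare each element against zero, producing the same
 * all-ones/all-zero masks as the two-operand vceq forms above.
 */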
47296 #ifdef __LITTLE_ENDIAN__
47297 __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
47298 uint8x8_t __ret;
47299 __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
47300 return __ret;
47301 }
47302 #else
47303 __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
47304 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
47305 uint8x8_t __ret;
47306 __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
47307 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
47308 return __ret;
47309 }
47310 #endif
47311
47312 #ifdef __LITTLE_ENDIAN__
47313 __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
47314 uint64x1_t __ret;
47315 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47316 return __ret;
47317 }
47318 #else
47319 __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
47320 uint64x1_t __ret;
47321 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47322 return __ret;
47323 }
47324 #endif
47325
47326 #ifdef __LITTLE_ENDIAN__
47327 __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
47328 uint16x4_t __ret;
47329 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
47330 return __ret;
47331 }
47332 #else
47333 __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
47334 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47335 uint16x4_t __ret;
47336 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
47337 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47338 return __ret;
47339 }
47340 #endif
47341
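/*
 * The trailing integer argument to the __builtin_neon_vceqz_v and
 * __builtin_neon_vceqzq_v calls encodes the NEON vector type being operated
 * on.  As used in this header, 16/17/18/19 select the 64-bit
 * uint8x8/uint16x4/uint32x2/uint64x1 forms and 48/49/50/51 the 128-bit
 * uint8x16/uint16x8/uint32x4/uint64x2 forms; the same encoding recurs in the
 * other comparison builtins below.
 */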
47342 #ifdef __LITTLE_ENDIAN__
47343 __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
47344 uint8x16_t __ret;
47345 __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
47346 return __ret;
47347 }
47348 #else
47349 __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
47350 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47351 uint8x16_t __ret;
47352 __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
47353 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47354 return __ret;
47355 }
47356 #endif
47357
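/*
 * The #else (big-endian) bodies follow a common pattern throughout this
 * header: the argument's lanes are reversed with __builtin_shufflevector
 * before the underlying builtin is invoked, and the result's lanes are
 * reversed again on the way out, evidently so that the builtin always sees
 * lanes in the ordering used on little-endian targets while the caller keeps
 * its own element order.  Vectors with a single 64-bit lane skip the
 * shuffles entirely.
 */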
47358 #ifdef __LITTLE_ENDIAN__
47359 __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
47360 uint64x2_t __ret;
47361 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
47362 return __ret;
47363 }
47364 #else
47365 __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
47366 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47367 uint64x2_t __ret;
47368 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
47369 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47370 return __ret;
47371 }
47372 #endif
47373
47374 #ifdef __LITTLE_ENDIAN__
47375 __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
47376 uint16x8_t __ret;
47377 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
47378 return __ret;
47379 }
47380 #else
47381 __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
47382 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
47383 uint16x8_t __ret;
47384 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
47385 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
47386 return __ret;
47387 }
47388 #endif
47389
47390 #ifdef __LITTLE_ENDIAN__
47391 __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
47392 uint8x16_t __ret;
47393 __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
47394 return __ret;
47395 }
47396 #else
47397 __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
47398 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47399 uint8x16_t __ret;
47400 __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
47401 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47402 return __ret;
47403 }
47404 #endif
47405
47406 #ifdef __LITTLE_ENDIAN__
47407 __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
47408 uint32x4_t __ret;
47409 __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
47410 return __ret;
47411 }
47412 #else
47413 __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
47414 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47415 uint32x4_t __ret;
47416 __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
47417 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47418 return __ret;
47419 }
47420 #endif
47421
47422 #ifdef __LITTLE_ENDIAN__
47423 __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
47424 uint64x2_t __ret;
47425 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
47426 return __ret;
47427 }
47428 #else
47429 __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
47430 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47431 uint64x2_t __ret;
47432 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
47433 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47434 return __ret;
47435 }
47436 #endif
47437
47438 #ifdef __LITTLE_ENDIAN__
47439 __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
47440 uint16x8_t __ret;
47441 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
47442 return __ret;
47443 }
47444 #else
47445 __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
47446 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
47447 uint16x8_t __ret;
47448 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
47449 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
47450 return __ret;
47451 }
47452 #endif
47453
47454 #ifdef __LITTLE_ENDIAN__
47455 __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
47456 uint8x16_t __ret;
47457 __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
47458 return __ret;
47459 }
47460 #else
47461 __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
47462 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47463 uint8x16_t __ret;
47464 __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
47465 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47466 return __ret;
47467 }
47468 #endif
47469
47470 #ifdef __LITTLE_ENDIAN__
47471 __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
47472 uint64x2_t __ret;
47473 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
47474 return __ret;
47475 }
47476 #else
47477 __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
47478 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47479 uint64x2_t __ret;
47480 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
47481 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47482 return __ret;
47483 }
47484 #endif
47485
47486 #ifdef __LITTLE_ENDIAN__
47487 __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
47488 uint32x4_t __ret;
47489 __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
47490 return __ret;
47491 }
47492 #else
47493 __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
47494 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47495 uint32x4_t __ret;
47496 __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
47497 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47498 return __ret;
47499 }
47500 #endif
47501
47502 #ifdef __LITTLE_ENDIAN__
47503 __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
47504 uint32x4_t __ret;
47505 __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
47506 return __ret;
47507 }
47508 #else
47509 __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
47510 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47511 uint32x4_t __ret;
47512 __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
47513 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47514 return __ret;
47515 }
47516 #endif
47517
47518 #ifdef __LITTLE_ENDIAN__
47519 __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
47520 uint64x2_t __ret;
47521 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
47522 return __ret;
47523 }
47524 #else
47525 __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
47526 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47527 uint64x2_t __ret;
47528 __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
47529 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47530 return __ret;
47531 }
47532 #endif
47533
47534 #ifdef __LITTLE_ENDIAN__
47535 __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
47536 uint16x8_t __ret;
47537 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
47538 return __ret;
47539 }
47540 #else
47541 __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
47542 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
47543 uint16x8_t __ret;
47544 __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
47545 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
47546 return __ret;
47547 }
47548 #endif
47549
47550 #ifdef __LITTLE_ENDIAN__
47551 __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
47552 uint8x8_t __ret;
47553 __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
47554 return __ret;
47555 }
47556 #else
47557 __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
47558 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
47559 uint8x8_t __ret;
47560 __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
47561 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
47562 return __ret;
47563 }
47564 #endif
47565
47566 #ifdef __LITTLE_ENDIAN__
47567 __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
47568 uint32x2_t __ret;
47569 __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
47570 return __ret;
47571 }
47572 #else
47573 __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
47574 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47575 uint32x2_t __ret;
47576 __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
47577 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47578 return __ret;
47579 }
47580 #endif
47581
47582 #ifdef __LITTLE_ENDIAN__
47583 __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
47584 uint64x1_t __ret;
47585 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47586 return __ret;
47587 }
47588 #else
47589 __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
47590 uint64x1_t __ret;
47591 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47592 return __ret;
47593 }
47594 #endif
47595
47596 #ifdef __LITTLE_ENDIAN__
47597 __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
47598 uint16x4_t __ret;
47599 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
47600 return __ret;
47601 }
47602 #else
47603 __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
47604 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47605 uint16x4_t __ret;
47606 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
47607 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47608 return __ret;
47609 }
47610 #endif
47611
47612 #ifdef __LITTLE_ENDIAN__
47613 __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
47614 uint8x8_t __ret;
47615 __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
47616 return __ret;
47617 }
47618 #else
47619 __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
47620 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
47621 uint8x8_t __ret;
47622 __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
47623 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
47624 return __ret;
47625 }
47626 #endif
47627
47628 #ifdef __LITTLE_ENDIAN__
47629 __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
47630 uint64x1_t __ret;
47631 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47632 return __ret;
47633 }
47634 #else
47635 __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
47636 uint64x1_t __ret;
47637 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47638 return __ret;
47639 }
47640 #endif
47641
47642 #ifdef __LITTLE_ENDIAN__
47643 __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
47644 uint32x2_t __ret;
47645 __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
47646 return __ret;
47647 }
47648 #else
47649 __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
47650 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47651 uint32x2_t __ret;
47652 __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
47653 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47654 return __ret;
47655 }
47656 #endif
47657
47658 #ifdef __LITTLE_ENDIAN__
47659 __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
47660 uint32x2_t __ret;
47661 __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
47662 return __ret;
47663 }
47664 #else
47665 __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
47666 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47667 uint32x2_t __ret;
47668 __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
47669 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47670 return __ret;
47671 }
47672 #endif
47673
47674 #ifdef __LITTLE_ENDIAN__
47675 __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
47676 uint64x1_t __ret;
47677 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47678 return __ret;
47679 }
47680 #else
47681 __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
47682 uint64x1_t __ret;
47683 __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
47684 return __ret;
47685 }
47686 #endif
47687
47688 #ifdef __LITTLE_ENDIAN__
47689 __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
47690 uint16x4_t __ret;
47691 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
47692 return __ret;
47693 }
47694 #else
47695 __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
47696 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47697 uint16x4_t __ret;
47698 __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
47699 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47700 return __ret;
47701 }
47702 #endif
47703
47704 #ifdef __LITTLE_ENDIAN__
47705 __ai uint64_t vceqzd_u64(uint64_t __p0) {
47706 uint64_t __ret;
47707 __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
47708 return __ret;
47709 }
47710 #else
47711 __ai uint64_t vceqzd_u64(uint64_t __p0) {
47712 uint64_t __ret;
47713 __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
47714 return __ret;
47715 }
47716 #endif
47717
47718 #ifdef __LITTLE_ENDIAN__
47719 __ai int64_t vceqzd_s64(int64_t __p0) {
47720 int64_t __ret;
47721 __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
47722 return __ret;
47723 }
47724 #else
47725 __ai int64_t vceqzd_s64(int64_t __p0) {
47726 int64_t __ret;
47727 __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
47728 return __ret;
47729 }
47730 #endif
47731
47732 #ifdef __LITTLE_ENDIAN__
47733 __ai uint64_t vceqzd_f64(float64_t __p0) {
47734 uint64_t __ret;
47735 __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
47736 return __ret;
47737 }
47738 #else
47739 __ai uint64_t vceqzd_f64(float64_t __p0) {
47740 uint64_t __ret;
47741 __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
47742 return __ret;
47743 }
47744 #endif
47745
47746 #ifdef __LITTLE_ENDIAN__
47747 __ai uint32_t vceqzs_f32(float32_t __p0) {
47748 uint32_t __ret;
47749 __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
47750 return __ret;
47751 }
47752 #else
47753 __ai uint32_t vceqzs_f32(float32_t __p0) {
47754 uint32_t __ret;
47755 __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
47756 return __ret;
47757 }
47758 #endif
47759
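/*
 * vcgeq_* and vcge_* implement the lane-wise ">=" comparison by lowering the
 * C vector operator directly, just like vceq above; each result lane is
 * all-ones when the comparison holds and zero otherwise.  The floating-point
 * forms perform an ordinary IEEE compare, so a lane containing NaN always
 * yields a zero (false) mask.  A small illustrative sketch, assuming
 * vdupq_n_f64 (defined elsewhere in this header) is available:
 *
 *   float64x2_t x = vdupq_n_f64(2.0);
 *   float64x2_t y = vdupq_n_f64(1.0);
 *   uint64x2_t  m = vcgeq_f64(x, y);   // both lanes: 0xFFFFFFFFFFFFFFFF
 */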
47760 #ifdef __LITTLE_ENDIAN__
47761 __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
47762 uint64x2_t __ret;
47763 __ret = (uint64x2_t)(__p0 >= __p1);
47764 return __ret;
47765 }
47766 #else
47767 __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
47768 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47769 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47770 uint64x2_t __ret;
47771 __ret = (uint64x2_t)(__rev0 >= __rev1);
47772 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47773 return __ret;
47774 }
47775 #endif
47776
47777 #ifdef __LITTLE_ENDIAN__
47778 __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
47779 uint64x2_t __ret;
47780 __ret = (uint64x2_t)(__p0 >= __p1);
47781 return __ret;
47782 }
47783 #else
47784 __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
47785 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47786 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47787 uint64x2_t __ret;
47788 __ret = (uint64x2_t)(__rev0 >= __rev1);
47789 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47790 return __ret;
47791 }
47792 #endif
47793
47794 #ifdef __LITTLE_ENDIAN__
47795 __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
47796 uint64x2_t __ret;
47797 __ret = (uint64x2_t)(__p0 >= __p1);
47798 return __ret;
47799 }
47800 #else
47801 __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
47802 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47803 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
47804 uint64x2_t __ret;
47805 __ret = (uint64x2_t)(__rev0 >= __rev1);
47806 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47807 return __ret;
47808 }
47809 #endif
47810
47811 #ifdef __LITTLE_ENDIAN__
47812 __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
47813 uint64x1_t __ret;
47814 __ret = (uint64x1_t)(__p0 >= __p1);
47815 return __ret;
47816 }
47817 #else
47818 __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
47819 uint64x1_t __ret;
47820 __ret = (uint64x1_t)(__p0 >= __p1);
47821 return __ret;
47822 }
47823 #endif
47824
47825 #ifdef __LITTLE_ENDIAN__
47826 __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
47827 uint64x1_t __ret;
47828 __ret = (uint64x1_t)(__p0 >= __p1);
47829 return __ret;
47830 }
47831 #else
47832 __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
47833 uint64x1_t __ret;
47834 __ret = (uint64x1_t)(__p0 >= __p1);
47835 return __ret;
47836 }
47837 #endif
47838
47839 #ifdef __LITTLE_ENDIAN__
47840 __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
47841 uint64x1_t __ret;
47842 __ret = (uint64x1_t)(__p0 >= __p1);
47843 return __ret;
47844 }
47845 #else
47846 __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
47847 uint64x1_t __ret;
47848 __ret = (uint64x1_t)(__p0 >= __p1);
47849 return __ret;
47850 }
47851 #endif
47852
47853 #ifdef __LITTLE_ENDIAN__
47854 __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
47855 int64_t __ret;
47856 __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
47857 return __ret;
47858 }
47859 #else
47860 __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
47861 int64_t __ret;
47862 __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
47863 return __ret;
47864 }
47865 #endif
47866
47867 #ifdef __LITTLE_ENDIAN__
47868 __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
47869 uint64_t __ret;
47870 __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
47871 return __ret;
47872 }
47873 #else
47874 __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
47875 uint64_t __ret;
47876 __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
47877 return __ret;
47878 }
47879 #endif
47880
47881 #ifdef __LITTLE_ENDIAN__
47882 __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
47883 uint64_t __ret;
47884 __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
47885 return __ret;
47886 }
47887 #else
47888 __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
47889 uint64_t __ret;
47890 __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
47891 return __ret;
47892 }
47893 #endif
47894
47895 #ifdef __LITTLE_ENDIAN__
47896 __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
47897 uint32_t __ret;
47898 __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
47899 return __ret;
47900 }
47901 #else
47902 __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
47903 uint32_t __ret;
47904 __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
47905 return __ret;
47906 }
47907 #endif
47908
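/*
 * The vcgez, vcgtz, vclez and vcltz families that follow are the
 * single-operand "compare against zero" forms (>= 0, > 0, <= 0, < 0).  They
 * all funnel into the corresponding __builtin_neon_vcgez_v, vcgtz_v, vclez_v
 * and vcltz_v builtins (and their q-register counterparts) using the same
 * type-code convention noted above, and each family also provides scalar
 * _s64, _f64 and _f32 variants (e.g. vcgezd_s64, vcgezd_f64, vcgezs_f32).
 */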
47909 #ifdef __LITTLE_ENDIAN__
47910 __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
47911 uint8x16_t __ret;
47912 __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
47913 return __ret;
47914 }
47915 #else
47916 __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
47917 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47918 uint8x16_t __ret;
47919 __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
47920 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
47921 return __ret;
47922 }
47923 #endif
47924
47925 #ifdef __LITTLE_ENDIAN__
47926 __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
47927 uint64x2_t __ret;
47928 __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
47929 return __ret;
47930 }
47931 #else
47932 __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
47933 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47934 uint64x2_t __ret;
47935 __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
47936 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47937 return __ret;
47938 }
47939 #endif
47940
47941 #ifdef __LITTLE_ENDIAN__
47942 __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
47943 uint32x4_t __ret;
47944 __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
47945 return __ret;
47946 }
47947 #else
47948 __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
47949 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47950 uint32x4_t __ret;
47951 __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
47952 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47953 return __ret;
47954 }
47955 #endif
47956
47957 #ifdef __LITTLE_ENDIAN__
47958 __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
47959 uint32x4_t __ret;
47960 __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
47961 return __ret;
47962 }
47963 #else
47964 __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
47965 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
47966 uint32x4_t __ret;
47967 __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
47968 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
47969 return __ret;
47970 }
47971 #endif
47972
47973 #ifdef __LITTLE_ENDIAN__
47974 __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
47975 uint64x2_t __ret;
47976 __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
47977 return __ret;
47978 }
47979 #else
47980 __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
47981 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
47982 uint64x2_t __ret;
47983 __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
47984 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
47985 return __ret;
47986 }
47987 #endif
47988
47989 #ifdef __LITTLE_ENDIAN__
47990 __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
47991 uint16x8_t __ret;
47992 __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
47993 return __ret;
47994 }
47995 #else
47996 __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
47997 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
47998 uint16x8_t __ret;
47999 __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
48000 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
48001 return __ret;
48002 }
48003 #endif
48004
48005 #ifdef __LITTLE_ENDIAN__
48006 __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
48007 uint8x8_t __ret;
48008 __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
48009 return __ret;
48010 }
48011 #else
48012 __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
48013 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
48014 uint8x8_t __ret;
48015 __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
48016 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
48017 return __ret;
48018 }
48019 #endif
48020
48021 #ifdef __LITTLE_ENDIAN__
48022 __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
48023 uint64x1_t __ret;
48024 __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
48025 return __ret;
48026 }
48027 #else
48028 __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
48029 uint64x1_t __ret;
48030 __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
48031 return __ret;
48032 }
48033 #endif
48034
48035 #ifdef __LITTLE_ENDIAN__
48036 __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
48037 uint32x2_t __ret;
48038 __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
48039 return __ret;
48040 }
48041 #else
48042 __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
48043 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48044 uint32x2_t __ret;
48045 __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
48046 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48047 return __ret;
48048 }
48049 #endif
48050
48051 #ifdef __LITTLE_ENDIAN__
48052 __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
48053 uint32x2_t __ret;
48054 __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
48055 return __ret;
48056 }
48057 #else
48058 __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
48059 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48060 uint32x2_t __ret;
48061 __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
48062 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48063 return __ret;
48064 }
48065 #endif
48066
48067 #ifdef __LITTLE_ENDIAN__
48068 __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
48069 uint64x1_t __ret;
48070 __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
48071 return __ret;
48072 }
48073 #else
48074 __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
48075 uint64x1_t __ret;
48076 __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
48077 return __ret;
48078 }
48079 #endif
48080
48081 #ifdef __LITTLE_ENDIAN__
48082 __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
48083 uint16x4_t __ret;
48084 __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
48085 return __ret;
48086 }
48087 #else
48088 __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
48089 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
48090 uint16x4_t __ret;
48091 __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
48092 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
48093 return __ret;
48094 }
48095 #endif
48096
48097 #ifdef __LITTLE_ENDIAN__
48098 __ai int64_t vcgezd_s64(int64_t __p0) {
48099 int64_t __ret;
48100 __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
48101 return __ret;
48102 }
48103 #else
48104 __ai int64_t vcgezd_s64(int64_t __p0) {
48105 int64_t __ret;
48106 __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
48107 return __ret;
48108 }
48109 #endif
48110
48111 #ifdef __LITTLE_ENDIAN__
48112 __ai uint64_t vcgezd_f64(float64_t __p0) {
48113 uint64_t __ret;
48114 __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
48115 return __ret;
48116 }
48117 #else
48118 __ai uint64_t vcgezd_f64(float64_t __p0) {
48119 uint64_t __ret;
48120 __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
48121 return __ret;
48122 }
48123 #endif
48124
48125 #ifdef __LITTLE_ENDIAN__
48126 __ai uint32_t vcgezs_f32(float32_t __p0) {
48127 uint32_t __ret;
48128 __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
48129 return __ret;
48130 }
48131 #else
48132 __ai uint32_t vcgezs_f32(float32_t __p0) {
48133 uint32_t __ret;
48134 __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
48135 return __ret;
48136 }
48137 #endif
48138
48139 #ifdef __LITTLE_ENDIAN__
48140 __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
48141 uint64x2_t __ret;
48142 __ret = (uint64x2_t)(__p0 > __p1);
48143 return __ret;
48144 }
48145 #else
48146 __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
48147 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48148 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48149 uint64x2_t __ret;
48150 __ret = (uint64x2_t)(__rev0 > __rev1);
48151 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48152 return __ret;
48153 }
48154 #endif
48155
48156 #ifdef __LITTLE_ENDIAN__
48157 __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
48158 uint64x2_t __ret;
48159 __ret = (uint64x2_t)(__p0 > __p1);
48160 return __ret;
48161 }
48162 #else
48163 __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
48164 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48165 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48166 uint64x2_t __ret;
48167 __ret = (uint64x2_t)(__rev0 > __rev1);
48168 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48169 return __ret;
48170 }
48171 #endif
48172
48173 #ifdef __LITTLE_ENDIAN__
48174 __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
48175 uint64x2_t __ret;
48176 __ret = (uint64x2_t)(__p0 > __p1);
48177 return __ret;
48178 }
48179 #else
48180 __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
48181 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48182 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48183 uint64x2_t __ret;
48184 __ret = (uint64x2_t)(__rev0 > __rev1);
48185 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48186 return __ret;
48187 }
48188 #endif
48189
48190 #ifdef __LITTLE_ENDIAN__
48191 __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
48192 uint64x1_t __ret;
48193 __ret = (uint64x1_t)(__p0 > __p1);
48194 return __ret;
48195 }
48196 #else
48197 __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
48198 uint64x1_t __ret;
48199 __ret = (uint64x1_t)(__p0 > __p1);
48200 return __ret;
48201 }
48202 #endif
48203
48204 #ifdef __LITTLE_ENDIAN__
48205 __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
48206 uint64x1_t __ret;
48207 __ret = (uint64x1_t)(__p0 > __p1);
48208 return __ret;
48209 }
48210 #else
48211 __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
48212 uint64x1_t __ret;
48213 __ret = (uint64x1_t)(__p0 > __p1);
48214 return __ret;
48215 }
48216 #endif
48217
48218 #ifdef __LITTLE_ENDIAN__
48219 __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
48220 uint64x1_t __ret;
48221 __ret = (uint64x1_t)(__p0 > __p1);
48222 return __ret;
48223 }
48224 #else
48225 __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
48226 uint64x1_t __ret;
48227 __ret = (uint64x1_t)(__p0 > __p1);
48228 return __ret;
48229 }
48230 #endif
48231
48232 #ifdef __LITTLE_ENDIAN__
48233 __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
48234 int64_t __ret;
48235 __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
48236 return __ret;
48237 }
48238 #else
48239 __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
48240 int64_t __ret;
48241 __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
48242 return __ret;
48243 }
48244 #endif
48245
48246 #ifdef __LITTLE_ENDIAN__
48247 __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
48248 uint64_t __ret;
48249 __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
48250 return __ret;
48251 }
48252 #else
48253 __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
48254 uint64_t __ret;
48255 __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
48256 return __ret;
48257 }
48258 #endif
48259
48260 #ifdef __LITTLE_ENDIAN__
48261 __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
48262 uint64_t __ret;
48263 __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
48264 return __ret;
48265 }
48266 #else
48267 __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
48268 uint64_t __ret;
48269 __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
48270 return __ret;
48271 }
48272 #endif
48273
48274 #ifdef __LITTLE_ENDIAN__
48275 __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
48276 uint32_t __ret;
48277 __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
48278 return __ret;
48279 }
48280 #else
48281 __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
48282 uint32_t __ret;
48283 __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
48284 return __ret;
48285 }
48286 #endif
48287
48288 #ifdef __LITTLE_ENDIAN__
48289 __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
48290 uint8x16_t __ret;
48291 __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
48292 return __ret;
48293 }
48294 #else
48295 __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
48296 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
48297 uint8x16_t __ret;
48298 __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
48299 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
48300 return __ret;
48301 }
48302 #endif
48303
48304 #ifdef __LITTLE_ENDIAN__
48305 __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
48306 uint64x2_t __ret;
48307 __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
48308 return __ret;
48309 }
48310 #else
48311 __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
48312 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48313 uint64x2_t __ret;
48314 __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
48315 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48316 return __ret;
48317 }
48318 #endif
48319
48320 #ifdef __LITTLE_ENDIAN__
48321 __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
48322 uint32x4_t __ret;
48323 __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
48324 return __ret;
48325 }
48326 #else
48327 __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
48328 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
48329 uint32x4_t __ret;
48330 __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
48331 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
48332 return __ret;
48333 }
48334 #endif
48335
48336 #ifdef __LITTLE_ENDIAN__
48337 __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
48338 uint32x4_t __ret;
48339 __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
48340 return __ret;
48341 }
48342 #else
48343 __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
48344 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
48345 uint32x4_t __ret;
48346 __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
48347 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
48348 return __ret;
48349 }
48350 #endif
48351
48352 #ifdef __LITTLE_ENDIAN__
48353 __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
48354 uint64x2_t __ret;
48355 __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
48356 return __ret;
48357 }
48358 #else
48359 __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
48360 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48361 uint64x2_t __ret;
48362 __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
48363 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48364 return __ret;
48365 }
48366 #endif
48367
48368 #ifdef __LITTLE_ENDIAN__
48369 __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
48370 uint16x8_t __ret;
48371 __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
48372 return __ret;
48373 }
48374 #else
48375 __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
48376 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
48377 uint16x8_t __ret;
48378 __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
48379 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
48380 return __ret;
48381 }
48382 #endif
48383
48384 #ifdef __LITTLE_ENDIAN__
48385 __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
48386 uint8x8_t __ret;
48387 __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
48388 return __ret;
48389 }
48390 #else
48391 __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
48392 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
48393 uint8x8_t __ret;
48394 __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
48395 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
48396 return __ret;
48397 }
48398 #endif
48399
48400 #ifdef __LITTLE_ENDIAN__
48401 __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
48402 uint64x1_t __ret;
48403 __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
48404 return __ret;
48405 }
48406 #else
48407 __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
48408 uint64x1_t __ret;
48409 __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
48410 return __ret;
48411 }
48412 #endif
48413
48414 #ifdef __LITTLE_ENDIAN__
48415 __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
48416 uint32x2_t __ret;
48417 __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
48418 return __ret;
48419 }
48420 #else
48421 __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
48422 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48423 uint32x2_t __ret;
48424 __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
48425 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48426 return __ret;
48427 }
48428 #endif
48429
48430 #ifdef __LITTLE_ENDIAN__
48431 __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
48432 uint32x2_t __ret;
48433 __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
48434 return __ret;
48435 }
48436 #else
48437 __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
48438 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48439 uint32x2_t __ret;
48440 __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
48441 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48442 return __ret;
48443 }
48444 #endif
48445
48446 #ifdef __LITTLE_ENDIAN__
48447 __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
48448 uint64x1_t __ret;
48449 __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
48450 return __ret;
48451 }
48452 #else
48453 __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
48454 uint64x1_t __ret;
48455 __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
48456 return __ret;
48457 }
48458 #endif
48459
48460 #ifdef __LITTLE_ENDIAN__
48461 __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
48462 uint16x4_t __ret;
48463 __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
48464 return __ret;
48465 }
48466 #else
48467 __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
48468 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
48469 uint16x4_t __ret;
48470 __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
48471 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
48472 return __ret;
48473 }
48474 #endif
48475
48476 #ifdef __LITTLE_ENDIAN__
48477 __ai int64_t vcgtzd_s64(int64_t __p0) {
48478 int64_t __ret;
48479 __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
48480 return __ret;
48481 }
48482 #else
48483 __ai int64_t vcgtzd_s64(int64_t __p0) {
48484 int64_t __ret;
48485 __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
48486 return __ret;
48487 }
48488 #endif
48489
48490 #ifdef __LITTLE_ENDIAN__
48491 __ai uint64_t vcgtzd_f64(float64_t __p0) {
48492 uint64_t __ret;
48493 __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
48494 return __ret;
48495 }
48496 #else
48497 __ai uint64_t vcgtzd_f64(float64_t __p0) {
48498 uint64_t __ret;
48499 __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
48500 return __ret;
48501 }
48502 #endif
48503
48504 #ifdef __LITTLE_ENDIAN__
48505 __ai uint32_t vcgtzs_f32(float32_t __p0) {
48506 uint32_t __ret;
48507 __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
48508 return __ret;
48509 }
48510 #else
48511 __ai uint32_t vcgtzs_f32(float32_t __p0) {
48512 uint32_t __ret;
48513 __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
48514 return __ret;
48515 }
48516 #endif
48517
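/*
 * The vcle/vcleq "<=" forms below mirror vcge/vcgeq with the sense of the
 * comparison reversed: semantically, vcle_u64(a, b) yields the same mask as
 * vcge_u64(b, a).  The endianness handling is structurally identical to the
 * families above.
 */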
48518 #ifdef __LITTLE_ENDIAN__
48519 __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
48520 uint64x2_t __ret;
48521 __ret = (uint64x2_t)(__p0 <= __p1);
48522 return __ret;
48523 }
48524 #else
48525 __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
48526 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48527 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48528 uint64x2_t __ret;
48529 __ret = (uint64x2_t)(__rev0 <= __rev1);
48530 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48531 return __ret;
48532 }
48533 #endif
48534
48535 #ifdef __LITTLE_ENDIAN__
48536 __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
48537 uint64x2_t __ret;
48538 __ret = (uint64x2_t)(__p0 <= __p1);
48539 return __ret;
48540 }
48541 #else
48542 __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
48543 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48544 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48545 uint64x2_t __ret;
48546 __ret = (uint64x2_t)(__rev0 <= __rev1);
48547 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48548 return __ret;
48549 }
48550 #endif
48551
48552 #ifdef __LITTLE_ENDIAN__
48553 __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
48554 uint64x2_t __ret;
48555 __ret = (uint64x2_t)(__p0 <= __p1);
48556 return __ret;
48557 }
48558 #else
48559 __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
48560 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48561 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48562 uint64x2_t __ret;
48563 __ret = (uint64x2_t)(__rev0 <= __rev1);
48564 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48565 return __ret;
48566 }
48567 #endif
48568
48569 #ifdef __LITTLE_ENDIAN__
48570 __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
48571 uint64x1_t __ret;
48572 __ret = (uint64x1_t)(__p0 <= __p1);
48573 return __ret;
48574 }
48575 #else
48576 __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
48577 uint64x1_t __ret;
48578 __ret = (uint64x1_t)(__p0 <= __p1);
48579 return __ret;
48580 }
48581 #endif
48582
48583 #ifdef __LITTLE_ENDIAN__
48584 __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
48585 uint64x1_t __ret;
48586 __ret = (uint64x1_t)(__p0 <= __p1);
48587 return __ret;
48588 }
48589 #else
48590 __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
48591 uint64x1_t __ret;
48592 __ret = (uint64x1_t)(__p0 <= __p1);
48593 return __ret;
48594 }
48595 #endif
48596
48597 #ifdef __LITTLE_ENDIAN__
48598 __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
48599 uint64x1_t __ret;
48600 __ret = (uint64x1_t)(__p0 <= __p1);
48601 return __ret;
48602 }
48603 #else
48604 __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
48605 uint64x1_t __ret;
48606 __ret = (uint64x1_t)(__p0 <= __p1);
48607 return __ret;
48608 }
48609 #endif
48610
48611 #ifdef __LITTLE_ENDIAN__
48612 __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
48613 uint64_t __ret;
48614 __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
48615 return __ret;
48616 }
48617 #else
48618 __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
48619 uint64_t __ret;
48620 __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
48621 return __ret;
48622 }
48623 #endif
48624
48625 #ifdef __LITTLE_ENDIAN__
48626 __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
48627 int64_t __ret;
48628 __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
48629 return __ret;
48630 }
48631 #else
48632 __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
48633 int64_t __ret;
48634 __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
48635 return __ret;
48636 }
48637 #endif
48638
48639 #ifdef __LITTLE_ENDIAN__
48640 __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
48641 uint64_t __ret;
48642 __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
48643 return __ret;
48644 }
48645 #else
48646 __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
48647 uint64_t __ret;
48648 __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
48649 return __ret;
48650 }
48651 #endif
48652
48653 #ifdef __LITTLE_ENDIAN__
48654 __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
48655 uint32_t __ret;
48656 __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
48657 return __ret;
48658 }
48659 #else
48660 __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
48661 uint32_t __ret;
48662 __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
48663 return __ret;
48664 }
48665 #endif
48666
48667 #ifdef __LITTLE_ENDIAN__
48668 __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
48669 uint8x16_t __ret;
48670 __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
48671 return __ret;
48672 }
48673 #else
48674 __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
48675 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
48676 uint8x16_t __ret;
48677 __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
48678 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
48679 return __ret;
48680 }
48681 #endif
48682
48683 #ifdef __LITTLE_ENDIAN__
48684 __ai uint64x2_t vclezq_f64(float64x2_t __p0) {
48685 uint64x2_t __ret;
48686 __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
48687 return __ret;
48688 }
48689 #else
48690 __ai uint64x2_t vclezq_f64(float64x2_t __p0) {
48691 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48692 uint64x2_t __ret;
48693 __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
48694 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48695 return __ret;
48696 }
48697 #endif
48698
48699 #ifdef __LITTLE_ENDIAN__
48700 __ai uint32x4_t vclezq_f32(float32x4_t __p0) {
48701 uint32x4_t __ret;
48702 __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
48703 return __ret;
48704 }
48705 #else
48706 __ai uint32x4_t vclezq_f32(float32x4_t __p0) {
48707 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
48708 uint32x4_t __ret;
48709 __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
48710 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
48711 return __ret;
48712 }
48713 #endif
48714
48715 #ifdef __LITTLE_ENDIAN__
48716 __ai uint32x4_t vclezq_s32(int32x4_t __p0) {
48717 uint32x4_t __ret;
48718 __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
48719 return __ret;
48720 }
48721 #else
48722 __ai uint32x4_t vclezq_s32(int32x4_t __p0) {
48723 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
48724 uint32x4_t __ret;
48725 __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
48726 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
48727 return __ret;
48728 }
48729 #endif
48730
48731 #ifdef __LITTLE_ENDIAN__
48732 __ai uint64x2_t vclezq_s64(int64x2_t __p0) {
48733 uint64x2_t __ret;
48734 __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
48735 return __ret;
48736 }
48737 #else
48738 __ai uint64x2_t vclezq_s64(int64x2_t __p0) {
48739 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48740 uint64x2_t __ret;
48741 __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
48742 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48743 return __ret;
48744 }
48745 #endif
48746
48747 #ifdef __LITTLE_ENDIAN__
48748 __ai uint16x8_t vclezq_s16(int16x8_t __p0) {
48749 uint16x8_t __ret;
48750 __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
48751 return __ret;
48752 }
48753 #else
48754 __ai uint16x8_t vclezq_s16(int16x8_t __p0) {
48755 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
48756 uint16x8_t __ret;
48757 __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
48758 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
48759 return __ret;
48760 }
48761 #endif
48762
48763 #ifdef __LITTLE_ENDIAN__
48764 __ai uint8x8_t vclez_s8(int8x8_t __p0) {
48765 uint8x8_t __ret;
48766 __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
48767 return __ret;
48768 }
48769 #else
48770 __ai uint8x8_t vclez_s8(int8x8_t __p0) {
48771 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
48772 uint8x8_t __ret;
48773 __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
48774 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
48775 return __ret;
48776 }
48777 #endif
48778
48779 #ifdef __LITTLE_ENDIAN__
48780 __ai uint64x1_t vclez_f64(float64x1_t __p0) {
48781 uint64x1_t __ret;
48782 __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
48783 return __ret;
48784 }
48785 #else
48786 __ai uint64x1_t vclez_f64(float64x1_t __p0) {
48787 uint64x1_t __ret;
48788 __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
48789 return __ret;
48790 }
48791 #endif
48792
48793 #ifdef __LITTLE_ENDIAN__
48794 __ai uint32x2_t vclez_f32(float32x2_t __p0) {
48795 uint32x2_t __ret;
48796 __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
48797 return __ret;
48798 }
48799 #else
48800 __ai uint32x2_t vclez_f32(float32x2_t __p0) {
48801 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48802 uint32x2_t __ret;
48803 __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
48804 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48805 return __ret;
48806 }
48807 #endif
48808
48809 #ifdef __LITTLE_ENDIAN__
48810 __ai uint32x2_t vclez_s32(int32x2_t __p0) {
48811 uint32x2_t __ret;
48812 __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
48813 return __ret;
48814 }
48815 #else
48816 __ai uint32x2_t vclez_s32(int32x2_t __p0) {
48817 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48818 uint32x2_t __ret;
48819 __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
48820 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48821 return __ret;
48822 }
48823 #endif
48824
48825 #ifdef __LITTLE_ENDIAN__
48826 __ai uint64x1_t vclez_s64(int64x1_t __p0) {
48827 uint64x1_t __ret;
48828 __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
48829 return __ret;
48830 }
48831 #else
48832 __ai uint64x1_t vclez_s64(int64x1_t __p0) {
48833 uint64x1_t __ret;
48834 __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
48835 return __ret;
48836 }
48837 #endif
48838
48839 #ifdef __LITTLE_ENDIAN__
48840 __ai uint16x4_t vclez_s16(int16x4_t __p0) {
48841 uint16x4_t __ret;
48842 __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
48843 return __ret;
48844 }
48845 #else
48846 __ai uint16x4_t vclez_s16(int16x4_t __p0) {
48847 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
48848 uint16x4_t __ret;
48849 __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
48850 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
48851 return __ret;
48852 }
48853 #endif
48854
48855 #ifdef __LITTLE_ENDIAN__
48856 __ai int64_t vclezd_s64(int64_t __p0) {
48857 int64_t __ret;
48858 __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
48859 return __ret;
48860 }
48861 #else
48862 __ai int64_t vclezd_s64(int64_t __p0) {
48863 int64_t __ret;
48864 __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
48865 return __ret;
48866 }
48867 #endif
48868
48869 #ifdef __LITTLE_ENDIAN__
48870 __ai uint64_t vclezd_f64(float64_t __p0) {
48871 uint64_t __ret;
48872 __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
48873 return __ret;
48874 }
48875 #else
48876 __ai uint64_t vclezd_f64(float64_t __p0) {
48877 uint64_t __ret;
48878 __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
48879 return __ret;
48880 }
48881 #endif
48882
48883 #ifdef __LITTLE_ENDIAN__
48884 __ai uint32_t vclezs_f32(float32_t __p0) {
48885 uint32_t __ret;
48886 __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
48887 return __ret;
48888 }
48889 #else
48890 __ai uint32_t vclezs_f32(float32_t __p0) {
48891 uint32_t __ret;
48892 __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
48893 return __ret;
48894 }
48895 #endif
48896
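/*
 * vclt/vcltq provide the strict "<" comparison.  The resulting all-ones/zero
 * masks are typically consumed by a bitwise select; a small illustrative
 * sketch, assuming vdupq_n_s64 and vbslq_s64 (both defined elsewhere in this
 * header) are available:
 *
 *   int64x2_t  a  = vdupq_n_s64(-5);
 *   int64x2_t  b  = vdupq_n_s64(7);
 *   uint64x2_t lt = vcltq_s64(a, b);        // all-ones where a[i] < b[i]
 *   int64x2_t  lo = vbslq_s64(lt, a, b);    // per-lane minimum of a and b
 */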
48897 #ifdef __LITTLE_ENDIAN__
48898 __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
48899 uint64x2_t __ret;
48900 __ret = (uint64x2_t)(__p0 < __p1);
48901 return __ret;
48902 }
48903 #else
48904 __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
48905 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48906 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48907 uint64x2_t __ret;
48908 __ret = (uint64x2_t)(__rev0 < __rev1);
48909 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48910 return __ret;
48911 }
48912 #endif
48913
48914 #ifdef __LITTLE_ENDIAN__
48915 __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
48916 uint64x2_t __ret;
48917 __ret = (uint64x2_t)(__p0 < __p1);
48918 return __ret;
48919 }
48920 #else
48921 __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
48922 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48923 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48924 uint64x2_t __ret;
48925 __ret = (uint64x2_t)(__rev0 < __rev1);
48926 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48927 return __ret;
48928 }
48929 #endif
48930
48931 #ifdef __LITTLE_ENDIAN__
48932 __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
48933 uint64x2_t __ret;
48934 __ret = (uint64x2_t)(__p0 < __p1);
48935 return __ret;
48936 }
48937 #else
48938 __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
48939 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
48940 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
48941 uint64x2_t __ret;
48942 __ret = (uint64x2_t)(__rev0 < __rev1);
48943 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
48944 return __ret;
48945 }
48946 #endif
48947
48948 #ifdef __LITTLE_ENDIAN__
48949 __ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
48950 uint64x1_t __ret;
48951 __ret = (uint64x1_t)(__p0 < __p1);
48952 return __ret;
48953 }
48954 #else
48955 __ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
48956 uint64x1_t __ret;
48957 __ret = (uint64x1_t)(__p0 < __p1);
48958 return __ret;
48959 }
48960 #endif
48961
48962 #ifdef __LITTLE_ENDIAN__
48963 __ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
48964 uint64x1_t __ret;
48965 __ret = (uint64x1_t)(__p0 < __p1);
48966 return __ret;
48967 }
48968 #else
48969 __ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
48970 uint64x1_t __ret;
48971 __ret = (uint64x1_t)(__p0 < __p1);
48972 return __ret;
48973 }
48974 #endif
48975
48976 #ifdef __LITTLE_ENDIAN__
48977 __ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
48978 uint64x1_t __ret;
48979 __ret = (uint64x1_t)(__p0 < __p1);
48980 return __ret;
48981 }
48982 #else
48983 __ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
48984 uint64x1_t __ret;
48985 __ret = (uint64x1_t)(__p0 < __p1);
48986 return __ret;
48987 }
48988 #endif
48989
48990 #ifdef __LITTLE_ENDIAN__
48991 __ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
48992 uint64_t __ret;
48993 __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
48994 return __ret;
48995 }
48996 #else
48997 __ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
48998 uint64_t __ret;
48999 __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
49000 return __ret;
49001 }
49002 #endif
49003
49004 #ifdef __LITTLE_ENDIAN__
49005 __ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
49006 int64_t __ret;
49007 __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
49008 return __ret;
49009 }
49010 #else
49011 __ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
49012 int64_t __ret;
49013 __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
49014 return __ret;
49015 }
49016 #endif
49017
49018 #ifdef __LITTLE_ENDIAN__
49019 __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
49020 uint64_t __ret;
49021 __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
49022 return __ret;
49023 }
49024 #else
49025 __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
49026 uint64_t __ret;
49027 __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
49028 return __ret;
49029 }
49030 #endif
49031
49032 #ifdef __LITTLE_ENDIAN__
49033 __ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
49034 uint32_t __ret;
49035 __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
49036 return __ret;
49037 }
49038 #else
49039 __ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
49040 uint32_t __ret;
49041 __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
49042 return __ret;
49043 }
49044 #endif
49045
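/*
 * vcltz/vcltzq test each lane for "< 0".  For the signed-integer forms this
 * amounts to broadcasting the lane's sign bit across the whole lane
 * (all-ones for negative values, zero otherwise); the floating-point forms
 * treat a NaN lane as not-less-than-zero and produce a zero mask for it.
 */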
49046 #ifdef __LITTLE_ENDIAN__
49047 __ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
49048 uint8x16_t __ret;
49049 __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
49050 return __ret;
49051 }
49052 #else
49053 __ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
49054 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
49055 uint8x16_t __ret;
49056 __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
49057 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
49058 return __ret;
49059 }
49060 #endif
49061
49062 #ifdef __LITTLE_ENDIAN__
49063 __ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
49064 uint64x2_t __ret;
49065 __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
49066 return __ret;
49067 }
49068 #else
49069 __ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
49070 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
49071 uint64x2_t __ret;
49072 __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
49073 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
49074 return __ret;
49075 }
49076 #endif
49077
49078 #ifdef __LITTLE_ENDIAN__
49079 __ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
49080 uint32x4_t __ret;
49081 __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
49082 return __ret;
49083 }
49084 #else
49085 __ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
49086 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
49087 uint32x4_t __ret;
49088 __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
49089 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
49090 return __ret;
49091 }
49092 #endif
49093
49094 #ifdef __LITTLE_ENDIAN__
49095 __ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
49096 uint32x4_t __ret;
49097 __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
49098 return __ret;
49099 }
49100 #else
49101 __ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
49102 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
49103 uint32x4_t __ret;
49104 __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
49105 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
49106 return __ret;
49107 }
49108 #endif
49109
49110 #ifdef __LITTLE_ENDIAN__
49111 __ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
49112 uint64x2_t __ret;
49113 __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
49114 return __ret;
49115 }
49116 #else
49117 __ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
49118 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
49119 uint64x2_t __ret;
49120 __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
49121 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
49122 return __ret;
49123 }
49124 #endif
49125
49126 #ifdef __LITTLE_ENDIAN__
49127 __ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
49128 uint16x8_t __ret;
49129 __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
49130 return __ret;
49131 }
49132 #else
49133 __ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
49134 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
49135 uint16x8_t __ret;
49136 __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
49137 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
49138 return __ret;
49139 }
49140 #endif
49141
49142 #ifdef __LITTLE_ENDIAN__
49143 __ai uint8x8_t vcltz_s8(int8x8_t __p0) {
49144 uint8x8_t __ret;
49145 __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
49146 return __ret;
49147 }
49148 #else
49149 __ai uint8x8_t vcltz_s8(int8x8_t __p0) {
49150 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
49151 uint8x8_t __ret;
49152 __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
49153 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
49154 return __ret;
49155 }
49156 #endif
49157
49158 #ifdef __LITTLE_ENDIAN__
49159 __ai uint64x1_t vcltz_f64(float64x1_t __p0) {
49160 uint64x1_t __ret;
49161 __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
49162 return __ret;
49163 }
49164 #else
49165 __ai uint64x1_t vcltz_f64(float64x1_t __p0) {
49166 uint64x1_t __ret;
49167 __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
49168 return __ret;
49169 }
49170 #endif
49171
49172 #ifdef __LITTLE_ENDIAN__
49173 __ai uint32x2_t vcltz_f32(float32x2_t __p0) {
49174 uint32x2_t __ret;
49175 __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
49176 return __ret;
49177 }
49178 #else
49179 __ai uint32x2_t vcltz_f32(float32x2_t __p0) {
49180 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
49181 uint32x2_t __ret;
49182 __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
49183 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
49184 return __ret;
49185 }
49186 #endif
49187
49188 #ifdef __LITTLE_ENDIAN__
49189 __ai uint32x2_t vcltz_s32(int32x2_t __p0) {
49190 uint32x2_t __ret;
49191 __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
49192 return __ret;
49193 }
49194 #else
49195 __ai uint32x2_t vcltz_s32(int32x2_t __p0) {
49196 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
49197 uint32x2_t __ret;
49198 __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
49199 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
49200 return __ret;
49201 }
49202 #endif
49203
49204 #ifdef __LITTLE_ENDIAN__
49205 __ai uint64x1_t vcltz_s64(int64x1_t __p0) {
49206 uint64x1_t __ret;
49207 __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
49208 return __ret;
49209 }
49210 #else
49211 __ai uint64x1_t vcltz_s64(int64x1_t __p0) {
49212 uint64x1_t __ret;
49213 __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
49214 return __ret;
49215 }
49216 #endif
49217
49218 #ifdef __LITTLE_ENDIAN__
49219 __ai uint16x4_t vcltz_s16(int16x4_t __p0) {
49220 uint16x4_t __ret;
49221 __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
49222 return __ret;
49223 }
49224 #else
49225 __ai uint16x4_t vcltz_s16(int16x4_t __p0) {
49226 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
49227 uint16x4_t __ret;
49228 __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
49229 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
49230 return __ret;
49231 }
49232 #endif
49233
49234 #ifdef __LITTLE_ENDIAN__
49235 __ai int64_t vcltzd_s64(int64_t __p0) {
49236 int64_t __ret;
49237 __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
49238 return __ret;
49239 }
49240 #else
49241 __ai int64_t vcltzd_s64(int64_t __p0) {
49242 int64_t __ret;
49243 __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
49244 return __ret;
49245 }
49246 #endif
49247
49248 #ifdef __LITTLE_ENDIAN__
49249 __ai uint64_t vcltzd_f64(float64_t __p0) {
49250 uint64_t __ret;
49251 __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
49252 return __ret;
49253 }
49254 #else
49255 __ai uint64_t vcltzd_f64(float64_t __p0) {
49256 uint64_t __ret;
49257 __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
49258 return __ret;
49259 }
49260 #endif
49261
49262 #ifdef __LITTLE_ENDIAN__
49263 __ai uint32_t vcltzs_f32(float32_t __p0) {
49264 uint32_t __ret;
49265 __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
49266 return __ret;
49267 }
49268 #else
49269 __ai uint32_t vcltzs_f32(float32_t __p0) {
49270 uint32_t __ret;
49271 __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
49272 return __ret;
49273 }
49274 #endif
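/* Editor's note (illustrative comment): the vcltz* group compares each lane
 * (or scalar) against zero. A minimal sketch with a hypothetical helper name:
 *
 *   static inline uint32x4_t negative_mask(int32x4_t v) {
 *       // Each result lane is 0xFFFFFFFF when the source lane is < 0.
 *       return vcltzq_s32(v);
 *   }
 */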
49275
49276 #ifdef __LITTLE_ENDIAN__
49277 __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
49278 poly64x2_t __ret;
49279 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
49280 return __ret;
49281 }
49282 #else
49283 __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
49284 poly64x2_t __ret;
49285 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
49286 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
49287 return __ret;
49288 }
49289 #endif
49290
49291 #ifdef __LITTLE_ENDIAN__
49292 __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
49293 float64x2_t __ret;
49294 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
49295 return __ret;
49296 }
49297 #else
49298 __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
49299 float64x2_t __ret;
49300 __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
49301 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
49302 return __ret;
49303 }
49304 #endif
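/* Editor's note (illustrative comment): vcombine_* joins two 64-bit vectors
 * into one 128-bit vector, with the first operand filling the low half.
 * A minimal sketch; vdup_n_f64 is defined elsewhere in this header and the
 * helper name is made up:
 *
 *   static inline float64x2_t make_pair(float64_t lo, float64_t hi) {
 *       // Lane 0 comes from the first operand, lane 1 from the second.
 *       return vcombine_f64(vdup_n_f64(lo), vdup_n_f64(hi));
 *   }
 */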
49305
49306 #ifdef __LITTLE_ENDIAN__
49307 #define vcopyq_lane_p8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
49308 poly8x16_t __s0_16 = __p0_16; \
49309 poly8x8_t __s2_16 = __p2_16; \
49310 poly8x16_t __ret_16; \
49311 __ret_16 = vsetq_lane_p8(vget_lane_p8(__s2_16, __p3_16), __s0_16, __p1_16); \
49312 __ret_16; \
49313 })
49314 #else
49315 #define vcopyq_lane_p8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
49316 poly8x16_t __s0_17 = __p0_17; \
49317 poly8x8_t __s2_17 = __p2_17; \
49318 poly8x16_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49319 poly8x8_t __rev2_17; __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \
49320 poly8x16_t __ret_17; \
49321 __ret_17 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_17, __p3_17), __rev0_17, __p1_17); \
49322 __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49323 __ret_17; \
49324 })
49325 #endif
49326
49327 #ifdef __LITTLE_ENDIAN__
49328 #define vcopyq_lane_p16(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
49329 poly16x8_t __s0_18 = __p0_18; \
49330 poly16x4_t __s2_18 = __p2_18; \
49331 poly16x8_t __ret_18; \
49332 __ret_18 = vsetq_lane_p16(vget_lane_p16(__s2_18, __p3_18), __s0_18, __p1_18); \
49333 __ret_18; \
49334 })
49335 #else
49336 #define vcopyq_lane_p16(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
49337 poly16x8_t __s0_19 = __p0_19; \
49338 poly16x4_t __s2_19 = __p2_19; \
49339 poly16x8_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 7, 6, 5, 4, 3, 2, 1, 0); \
49340 poly16x4_t __rev2_19; __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 3, 2, 1, 0); \
49341 poly16x8_t __ret_19; \
49342 __ret_19 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_19, __p3_19), __rev0_19, __p1_19); \
49343 __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 7, 6, 5, 4, 3, 2, 1, 0); \
49344 __ret_19; \
49345 })
49346 #endif
49347
49348 #ifdef __LITTLE_ENDIAN__
49349 #define vcopyq_lane_u8(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
49350 uint8x16_t __s0_20 = __p0_20; \
49351 uint8x8_t __s2_20 = __p2_20; \
49352 uint8x16_t __ret_20; \
49353 __ret_20 = vsetq_lane_u8(vget_lane_u8(__s2_20, __p3_20), __s0_20, __p1_20); \
49354 __ret_20; \
49355 })
49356 #else
49357 #define vcopyq_lane_u8(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
49358 uint8x16_t __s0_21 = __p0_21; \
49359 uint8x8_t __s2_21 = __p2_21; \
49360 uint8x16_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49361 uint8x8_t __rev2_21; __rev2_21 = __builtin_shufflevector(__s2_21, __s2_21, 7, 6, 5, 4, 3, 2, 1, 0); \
49362 uint8x16_t __ret_21; \
49363 __ret_21 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_21, __p3_21), __rev0_21, __p1_21); \
49364 __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49365 __ret_21; \
49366 })
49367 #endif
49368
49369 #ifdef __LITTLE_ENDIAN__
49370 #define vcopyq_lane_u32(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
49371 uint32x4_t __s0_22 = __p0_22; \
49372 uint32x2_t __s2_22 = __p2_22; \
49373 uint32x4_t __ret_22; \
49374 __ret_22 = vsetq_lane_u32(vget_lane_u32(__s2_22, __p3_22), __s0_22, __p1_22); \
49375 __ret_22; \
49376 })
49377 #else
49378 #define vcopyq_lane_u32(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
49379 uint32x4_t __s0_23 = __p0_23; \
49380 uint32x2_t __s2_23 = __p2_23; \
49381 uint32x4_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 3, 2, 1, 0); \
49382 uint32x2_t __rev2_23; __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 1, 0); \
49383 uint32x4_t __ret_23; \
49384 __ret_23 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_23, __p3_23), __rev0_23, __p1_23); \
49385 __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \
49386 __ret_23; \
49387 })
49388 #endif
49389
49390 #ifdef __LITTLE_ENDIAN__
49391 #define vcopyq_lane_u64(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
49392 uint64x2_t __s0_24 = __p0_24; \
49393 uint64x1_t __s2_24 = __p2_24; \
49394 uint64x2_t __ret_24; \
49395 __ret_24 = vsetq_lane_u64(vget_lane_u64(__s2_24, __p3_24), __s0_24, __p1_24); \
49396 __ret_24; \
49397 })
49398 #else
49399 #define vcopyq_lane_u64(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
49400 uint64x2_t __s0_25 = __p0_25; \
49401 uint64x1_t __s2_25 = __p2_25; \
49402 uint64x2_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 1, 0); \
49403 uint64x2_t __ret_25; \
49404 __ret_25 = __noswap_vsetq_lane_u64(__noswap_vget_lane_u64(__s2_25, __p3_25), __rev0_25, __p1_25); \
49405 __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \
49406 __ret_25; \
49407 })
49408 #endif
49409
49410 #ifdef __LITTLE_ENDIAN__
49411 #define vcopyq_lane_u16(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
49412 uint16x8_t __s0_26 = __p0_26; \
49413 uint16x4_t __s2_26 = __p2_26; \
49414 uint16x8_t __ret_26; \
49415 __ret_26 = vsetq_lane_u16(vget_lane_u16(__s2_26, __p3_26), __s0_26, __p1_26); \
49416 __ret_26; \
49417 })
49418 #else
49419 #define vcopyq_lane_u16(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
49420 uint16x8_t __s0_27 = __p0_27; \
49421 uint16x4_t __s2_27 = __p2_27; \
49422 uint16x8_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
49423 uint16x4_t __rev2_27; __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 3, 2, 1, 0); \
49424 uint16x8_t __ret_27; \
49425 __ret_27 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_27, __p3_27), __rev0_27, __p1_27); \
49426 __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
49427 __ret_27; \
49428 })
49429 #endif
49430
49431 #ifdef __LITTLE_ENDIAN__
49432 #define vcopyq_lane_s8(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
49433 int8x16_t __s0_28 = __p0_28; \
49434 int8x8_t __s2_28 = __p2_28; \
49435 int8x16_t __ret_28; \
49436 __ret_28 = vsetq_lane_s8(vget_lane_s8(__s2_28, __p3_28), __s0_28, __p1_28); \
49437 __ret_28; \
49438 })
49439 #else
49440 #define vcopyq_lane_s8(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
49441 int8x16_t __s0_29 = __p0_29; \
49442 int8x8_t __s2_29 = __p2_29; \
49443 int8x16_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49444 int8x8_t __rev2_29; __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 7, 6, 5, 4, 3, 2, 1, 0); \
49445 int8x16_t __ret_29; \
49446 __ret_29 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_29, __p3_29), __rev0_29, __p1_29); \
49447 __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49448 __ret_29; \
49449 })
49450 #endif
49451
49452 #ifdef __LITTLE_ENDIAN__
49453 #define vcopyq_lane_f32(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
49454 float32x4_t __s0_30 = __p0_30; \
49455 float32x2_t __s2_30 = __p2_30; \
49456 float32x4_t __ret_30; \
49457 __ret_30 = vsetq_lane_f32(vget_lane_f32(__s2_30, __p3_30), __s0_30, __p1_30); \
49458 __ret_30; \
49459 })
49460 #else
49461 #define vcopyq_lane_f32(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
49462 float32x4_t __s0_31 = __p0_31; \
49463 float32x2_t __s2_31 = __p2_31; \
49464 float32x4_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 3, 2, 1, 0); \
49465 float32x2_t __rev2_31; __rev2_31 = __builtin_shufflevector(__s2_31, __s2_31, 1, 0); \
49466 float32x4_t __ret_31; \
49467 __ret_31 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_31, __p3_31), __rev0_31, __p1_31); \
49468 __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 3, 2, 1, 0); \
49469 __ret_31; \
49470 })
49471 #endif
49472
49473 #ifdef __LITTLE_ENDIAN__
49474 #define vcopyq_lane_s32(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
49475 int32x4_t __s0_32 = __p0_32; \
49476 int32x2_t __s2_32 = __p2_32; \
49477 int32x4_t __ret_32; \
49478 __ret_32 = vsetq_lane_s32(vget_lane_s32(__s2_32, __p3_32), __s0_32, __p1_32); \
49479 __ret_32; \
49480 })
49481 #else
49482 #define vcopyq_lane_s32(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
49483 int32x4_t __s0_33 = __p0_33; \
49484 int32x2_t __s2_33 = __p2_33; \
49485 int32x4_t __rev0_33; __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 3, 2, 1, 0); \
49486 int32x2_t __rev2_33; __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 1, 0); \
49487 int32x4_t __ret_33; \
49488 __ret_33 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_33, __p3_33), __rev0_33, __p1_33); \
49489 __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 3, 2, 1, 0); \
49490 __ret_33; \
49491 })
49492 #endif
49493
49494 #ifdef __LITTLE_ENDIAN__
49495 #define vcopyq_lane_s64(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
49496 int64x2_t __s0_34 = __p0_34; \
49497 int64x1_t __s2_34 = __p2_34; \
49498 int64x2_t __ret_34; \
49499 __ret_34 = vsetq_lane_s64(vget_lane_s64(__s2_34, __p3_34), __s0_34, __p1_34); \
49500 __ret_34; \
49501 })
49502 #else
49503 #define vcopyq_lane_s64(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
49504 int64x2_t __s0_35 = __p0_35; \
49505 int64x1_t __s2_35 = __p2_35; \
49506 int64x2_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 1, 0); \
49507 int64x2_t __ret_35; \
49508 __ret_35 = __noswap_vsetq_lane_s64(__noswap_vget_lane_s64(__s2_35, __p3_35), __rev0_35, __p1_35); \
49509 __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 1, 0); \
49510 __ret_35; \
49511 })
49512 #endif
49513
49514 #ifdef __LITTLE_ENDIAN__
49515 #define vcopyq_lane_s16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
49516 int16x8_t __s0_36 = __p0_36; \
49517 int16x4_t __s2_36 = __p2_36; \
49518 int16x8_t __ret_36; \
49519 __ret_36 = vsetq_lane_s16(vget_lane_s16(__s2_36, __p3_36), __s0_36, __p1_36); \
49520 __ret_36; \
49521 })
49522 #else
49523 #define vcopyq_lane_s16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
49524 int16x8_t __s0_37 = __p0_37; \
49525 int16x4_t __s2_37 = __p2_37; \
49526 int16x8_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 7, 6, 5, 4, 3, 2, 1, 0); \
49527 int16x4_t __rev2_37; __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \
49528 int16x8_t __ret_37; \
49529 __ret_37 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_37, __p3_37), __rev0_37, __p1_37); \
49530 __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 7, 6, 5, 4, 3, 2, 1, 0); \
49531 __ret_37; \
49532 })
49533 #endif
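/* Editor's note (illustrative comment): vcopyq_lane_X(dst, dst_lane, src,
 * src_lane) inserts lane src_lane of a 64-bit source vector into lane
 * dst_lane of a 128-bit destination; both lane indices must be compile-time
 * constants. A minimal sketch with a hypothetical helper name:
 *
 *   static inline uint32x4_t patch_lane0(uint32x4_t acc, uint32x2_t src) {
 *       // Copy lane 1 of the 64-bit source into lane 0 of the result.
 *       return vcopyq_lane_u32(acc, 0, src, 1);
 *   }
 */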
49534
49535 #ifdef __LITTLE_ENDIAN__
49536 #define vcopy_lane_p8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
49537 poly8x8_t __s0_38 = __p0_38; \
49538 poly8x8_t __s2_38 = __p2_38; \
49539 poly8x8_t __ret_38; \
49540 __ret_38 = vset_lane_p8(vget_lane_p8(__s2_38, __p3_38), __s0_38, __p1_38); \
49541 __ret_38; \
49542 })
49543 #else
49544 #define vcopy_lane_p8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
49545 poly8x8_t __s0_39 = __p0_39; \
49546 poly8x8_t __s2_39 = __p2_39; \
49547 poly8x8_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \
49548 poly8x8_t __rev2_39; __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \
49549 poly8x8_t __ret_39; \
49550 __ret_39 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_39, __p3_39), __rev0_39, __p1_39); \
49551 __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \
49552 __ret_39; \
49553 })
49554 #endif
49555
49556 #ifdef __LITTLE_ENDIAN__
49557 #define vcopy_lane_p16(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
49558 poly16x4_t __s0_40 = __p0_40; \
49559 poly16x4_t __s2_40 = __p2_40; \
49560 poly16x4_t __ret_40; \
49561 __ret_40 = vset_lane_p16(vget_lane_p16(__s2_40, __p3_40), __s0_40, __p1_40); \
49562 __ret_40; \
49563 })
49564 #else
49565 #define vcopy_lane_p16(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
49566 poly16x4_t __s0_41 = __p0_41; \
49567 poly16x4_t __s2_41 = __p2_41; \
49568 poly16x4_t __rev0_41; __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
49569 poly16x4_t __rev2_41; __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 3, 2, 1, 0); \
49570 poly16x4_t __ret_41; \
49571 __ret_41 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_41, __p3_41), __rev0_41, __p1_41); \
49572 __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
49573 __ret_41; \
49574 })
49575 #endif
49576
49577 #ifdef __LITTLE_ENDIAN__
49578 #define vcopy_lane_u8(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
49579 uint8x8_t __s0_42 = __p0_42; \
49580 uint8x8_t __s2_42 = __p2_42; \
49581 uint8x8_t __ret_42; \
49582 __ret_42 = vset_lane_u8(vget_lane_u8(__s2_42, __p3_42), __s0_42, __p1_42); \
49583 __ret_42; \
49584 })
49585 #else
49586 #define vcopy_lane_u8(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
49587 uint8x8_t __s0_43 = __p0_43; \
49588 uint8x8_t __s2_43 = __p2_43; \
49589 uint8x8_t __rev0_43; __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 7, 6, 5, 4, 3, 2, 1, 0); \
49590 uint8x8_t __rev2_43; __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 7, 6, 5, 4, 3, 2, 1, 0); \
49591 uint8x8_t __ret_43; \
49592 __ret_43 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_43, __p3_43), __rev0_43, __p1_43); \
49593 __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 7, 6, 5, 4, 3, 2, 1, 0); \
49594 __ret_43; \
49595 })
49596 #endif
49597
49598 #ifdef __LITTLE_ENDIAN__
49599 #define vcopy_lane_u32(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
49600 uint32x2_t __s0_44 = __p0_44; \
49601 uint32x2_t __s2_44 = __p2_44; \
49602 uint32x2_t __ret_44; \
49603 __ret_44 = vset_lane_u32(vget_lane_u32(__s2_44, __p3_44), __s0_44, __p1_44); \
49604 __ret_44; \
49605 })
49606 #else
49607 #define vcopy_lane_u32(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
49608 uint32x2_t __s0_45 = __p0_45; \
49609 uint32x2_t __s2_45 = __p2_45; \
49610 uint32x2_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 1, 0); \
49611 uint32x2_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 1, 0); \
49612 uint32x2_t __ret_45; \
49613 __ret_45 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_45, __p3_45), __rev0_45, __p1_45); \
49614 __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 1, 0); \
49615 __ret_45; \
49616 })
49617 #endif
49618
49619 #ifdef __LITTLE_ENDIAN__
49620 #define vcopy_lane_u64(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
49621 uint64x1_t __s0_46 = __p0_46; \
49622 uint64x1_t __s2_46 = __p2_46; \
49623 uint64x1_t __ret_46; \
49624 __ret_46 = vset_lane_u64(vget_lane_u64(__s2_46, __p3_46), __s0_46, __p1_46); \
49625 __ret_46; \
49626 })
49627 #else
49628 #define vcopy_lane_u64(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
49629 uint64x1_t __s0_47 = __p0_47; \
49630 uint64x1_t __s2_47 = __p2_47; \
49631 uint64x1_t __ret_47; \
49632 __ret_47 = __noswap_vset_lane_u64(__noswap_vget_lane_u64(__s2_47, __p3_47), __s0_47, __p1_47); \
49633 __ret_47; \
49634 })
49635 #endif
49636
49637 #ifdef __LITTLE_ENDIAN__
49638 #define vcopy_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
49639 uint16x4_t __s0_48 = __p0_48; \
49640 uint16x4_t __s2_48 = __p2_48; \
49641 uint16x4_t __ret_48; \
49642 __ret_48 = vset_lane_u16(vget_lane_u16(__s2_48, __p3_48), __s0_48, __p1_48); \
49643 __ret_48; \
49644 })
49645 #else
49646 #define vcopy_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
49647 uint16x4_t __s0_49 = __p0_49; \
49648 uint16x4_t __s2_49 = __p2_49; \
49649 uint16x4_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
49650 uint16x4_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 3, 2, 1, 0); \
49651 uint16x4_t __ret_49; \
49652 __ret_49 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_49, __p3_49), __rev0_49, __p1_49); \
49653 __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
49654 __ret_49; \
49655 })
49656 #endif
49657
49658 #ifdef __LITTLE_ENDIAN__
49659 #define vcopy_lane_s8(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
49660 int8x8_t __s0_50 = __p0_50; \
49661 int8x8_t __s2_50 = __p2_50; \
49662 int8x8_t __ret_50; \
49663 __ret_50 = vset_lane_s8(vget_lane_s8(__s2_50, __p3_50), __s0_50, __p1_50); \
49664 __ret_50; \
49665 })
49666 #else
49667 #define vcopy_lane_s8(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
49668 int8x8_t __s0_51 = __p0_51; \
49669 int8x8_t __s2_51 = __p2_51; \
49670 int8x8_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
49671 int8x8_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 7, 6, 5, 4, 3, 2, 1, 0); \
49672 int8x8_t __ret_51; \
49673 __ret_51 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_51, __p3_51), __rev0_51, __p1_51); \
49674 __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
49675 __ret_51; \
49676 })
49677 #endif
49678
49679 #ifdef __LITTLE_ENDIAN__
49680 #define vcopy_lane_f32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
49681 float32x2_t __s0_52 = __p0_52; \
49682 float32x2_t __s2_52 = __p2_52; \
49683 float32x2_t __ret_52; \
49684 __ret_52 = vset_lane_f32(vget_lane_f32(__s2_52, __p3_52), __s0_52, __p1_52); \
49685 __ret_52; \
49686 })
49687 #else
49688 #define vcopy_lane_f32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
49689 float32x2_t __s0_53 = __p0_53; \
49690 float32x2_t __s2_53 = __p2_53; \
49691 float32x2_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
49692 float32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
49693 float32x2_t __ret_53; \
49694 __ret_53 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_53, __p3_53), __rev0_53, __p1_53); \
49695 __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
49696 __ret_53; \
49697 })
49698 #endif
49699
49700 #ifdef __LITTLE_ENDIAN__
49701 #define vcopy_lane_s32(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
49702 int32x2_t __s0_54 = __p0_54; \
49703 int32x2_t __s2_54 = __p2_54; \
49704 int32x2_t __ret_54; \
49705 __ret_54 = vset_lane_s32(vget_lane_s32(__s2_54, __p3_54), __s0_54, __p1_54); \
49706 __ret_54; \
49707 })
49708 #else
49709 #define vcopy_lane_s32(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
49710 int32x2_t __s0_55 = __p0_55; \
49711 int32x2_t __s2_55 = __p2_55; \
49712 int32x2_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 1, 0); \
49713 int32x2_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 1, 0); \
49714 int32x2_t __ret_55; \
49715 __ret_55 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_55, __p3_55), __rev0_55, __p1_55); \
49716 __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 1, 0); \
49717 __ret_55; \
49718 })
49719 #endif
49720
49721 #ifdef __LITTLE_ENDIAN__
49722 #define vcopy_lane_s64(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
49723 int64x1_t __s0_56 = __p0_56; \
49724 int64x1_t __s2_56 = __p2_56; \
49725 int64x1_t __ret_56; \
49726 __ret_56 = vset_lane_s64(vget_lane_s64(__s2_56, __p3_56), __s0_56, __p1_56); \
49727 __ret_56; \
49728 })
49729 #else
49730 #define vcopy_lane_s64(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
49731 int64x1_t __s0_57 = __p0_57; \
49732 int64x1_t __s2_57 = __p2_57; \
49733 int64x1_t __ret_57; \
49734 __ret_57 = __noswap_vset_lane_s64(__noswap_vget_lane_s64(__s2_57, __p3_57), __s0_57, __p1_57); \
49735 __ret_57; \
49736 })
49737 #endif
49738
49739 #ifdef __LITTLE_ENDIAN__
49740 #define vcopy_lane_s16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
49741 int16x4_t __s0_58 = __p0_58; \
49742 int16x4_t __s2_58 = __p2_58; \
49743 int16x4_t __ret_58; \
49744 __ret_58 = vset_lane_s16(vget_lane_s16(__s2_58, __p3_58), __s0_58, __p1_58); \
49745 __ret_58; \
49746 })
49747 #else
49748 #define vcopy_lane_s16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
49749 int16x4_t __s0_59 = __p0_59; \
49750 int16x4_t __s2_59 = __p2_59; \
49751 int16x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \
49752 int16x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \
49753 int16x4_t __ret_59; \
49754 __ret_59 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_59, __p3_59), __rev0_59, __p1_59); \
49755 __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \
49756 __ret_59; \
49757 })
49758 #endif
49759
49760 #ifdef __LITTLE_ENDIAN__
49761 #define vcopyq_laneq_p8(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
49762 poly8x16_t __s0_60 = __p0_60; \
49763 poly8x16_t __s2_60 = __p2_60; \
49764 poly8x16_t __ret_60; \
49765 __ret_60 = vsetq_lane_p8(vgetq_lane_p8(__s2_60, __p3_60), __s0_60, __p1_60); \
49766 __ret_60; \
49767 })
49768 #else
49769 #define vcopyq_laneq_p8(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
49770 poly8x16_t __s0_61 = __p0_61; \
49771 poly8x16_t __s2_61 = __p2_61; \
49772 poly8x16_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49773 poly8x16_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49774 poly8x16_t __ret_61; \
49775 __ret_61 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_61, __p3_61), __rev0_61, __p1_61); \
49776 __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49777 __ret_61; \
49778 })
49779 #endif
49780
49781 #ifdef __LITTLE_ENDIAN__
49782 #define vcopyq_laneq_p16(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
49783 poly16x8_t __s0_62 = __p0_62; \
49784 poly16x8_t __s2_62 = __p2_62; \
49785 poly16x8_t __ret_62; \
49786 __ret_62 = vsetq_lane_p16(vgetq_lane_p16(__s2_62, __p3_62), __s0_62, __p1_62); \
49787 __ret_62; \
49788 })
49789 #else
49790 #define vcopyq_laneq_p16(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
49791 poly16x8_t __s0_63 = __p0_63; \
49792 poly16x8_t __s2_63 = __p2_63; \
49793 poly16x8_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 7, 6, 5, 4, 3, 2, 1, 0); \
49794 poly16x8_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 7, 6, 5, 4, 3, 2, 1, 0); \
49795 poly16x8_t __ret_63; \
49796 __ret_63 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_63, __p3_63), __rev0_63, __p1_63); \
49797 __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 7, 6, 5, 4, 3, 2, 1, 0); \
49798 __ret_63; \
49799 })
49800 #endif
49801
49802 #ifdef __LITTLE_ENDIAN__
49803 #define vcopyq_laneq_u8(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
49804 uint8x16_t __s0_64 = __p0_64; \
49805 uint8x16_t __s2_64 = __p2_64; \
49806 uint8x16_t __ret_64; \
49807 __ret_64 = vsetq_lane_u8(vgetq_lane_u8(__s2_64, __p3_64), __s0_64, __p1_64); \
49808 __ret_64; \
49809 })
49810 #else
49811 #define vcopyq_laneq_u8(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
49812 uint8x16_t __s0_65 = __p0_65; \
49813 uint8x16_t __s2_65 = __p2_65; \
49814 uint8x16_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49815 uint8x16_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49816 uint8x16_t __ret_65; \
49817 __ret_65 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_65, __p3_65), __rev0_65, __p1_65); \
49818 __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49819 __ret_65; \
49820 })
49821 #endif
49822
49823 #ifdef __LITTLE_ENDIAN__
49824 #define vcopyq_laneq_u32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
49825 uint32x4_t __s0_66 = __p0_66; \
49826 uint32x4_t __s2_66 = __p2_66; \
49827 uint32x4_t __ret_66; \
49828 __ret_66 = vsetq_lane_u32(vgetq_lane_u32(__s2_66, __p3_66), __s0_66, __p1_66); \
49829 __ret_66; \
49830 })
49831 #else
49832 #define vcopyq_laneq_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
49833 uint32x4_t __s0_67 = __p0_67; \
49834 uint32x4_t __s2_67 = __p2_67; \
49835 uint32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
49836 uint32x4_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 3, 2, 1, 0); \
49837 uint32x4_t __ret_67; \
49838 __ret_67 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_67, __p3_67), __rev0_67, __p1_67); \
49839 __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
49840 __ret_67; \
49841 })
49842 #endif
49843
49844 #ifdef __LITTLE_ENDIAN__
49845 #define vcopyq_laneq_u64(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
49846 uint64x2_t __s0_68 = __p0_68; \
49847 uint64x2_t __s2_68 = __p2_68; \
49848 uint64x2_t __ret_68; \
49849 __ret_68 = vsetq_lane_u64(vgetq_lane_u64(__s2_68, __p3_68), __s0_68, __p1_68); \
49850 __ret_68; \
49851 })
49852 #else
49853 #define vcopyq_laneq_u64(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
49854 uint64x2_t __s0_69 = __p0_69; \
49855 uint64x2_t __s2_69 = __p2_69; \
49856 uint64x2_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 1, 0); \
49857 uint64x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
49858 uint64x2_t __ret_69; \
49859 __ret_69 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_69, __p3_69), __rev0_69, __p1_69); \
49860 __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 1, 0); \
49861 __ret_69; \
49862 })
49863 #endif
49864
49865 #ifdef __LITTLE_ENDIAN__
49866 #define vcopyq_laneq_u16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
49867 uint16x8_t __s0_70 = __p0_70; \
49868 uint16x8_t __s2_70 = __p2_70; \
49869 uint16x8_t __ret_70; \
49870 __ret_70 = vsetq_lane_u16(vgetq_lane_u16(__s2_70, __p3_70), __s0_70, __p1_70); \
49871 __ret_70; \
49872 })
49873 #else
49874 #define vcopyq_laneq_u16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
49875 uint16x8_t __s0_71 = __p0_71; \
49876 uint16x8_t __s2_71 = __p2_71; \
49877 uint16x8_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
49878 uint16x8_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 7, 6, 5, 4, 3, 2, 1, 0); \
49879 uint16x8_t __ret_71; \
49880 __ret_71 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_71, __p3_71), __rev0_71, __p1_71); \
49881 __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
49882 __ret_71; \
49883 })
49884 #endif
49885
49886 #ifdef __LITTLE_ENDIAN__
49887 #define vcopyq_laneq_s8(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
49888 int8x16_t __s0_72 = __p0_72; \
49889 int8x16_t __s2_72 = __p2_72; \
49890 int8x16_t __ret_72; \
49891 __ret_72 = vsetq_lane_s8(vgetq_lane_s8(__s2_72, __p3_72), __s0_72, __p1_72); \
49892 __ret_72; \
49893 })
49894 #else
49895 #define vcopyq_laneq_s8(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
49896 int8x16_t __s0_73 = __p0_73; \
49897 int8x16_t __s2_73 = __p2_73; \
49898 int8x16_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49899 int8x16_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49900 int8x16_t __ret_73; \
49901 __ret_73 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_73, __p3_73), __rev0_73, __p1_73); \
49902 __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
49903 __ret_73; \
49904 })
49905 #endif
49906
49907 #ifdef __LITTLE_ENDIAN__
49908 #define vcopyq_laneq_f32(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
49909 float32x4_t __s0_74 = __p0_74; \
49910 float32x4_t __s2_74 = __p2_74; \
49911 float32x4_t __ret_74; \
49912 __ret_74 = vsetq_lane_f32(vgetq_lane_f32(__s2_74, __p3_74), __s0_74, __p1_74); \
49913 __ret_74; \
49914 })
49915 #else
49916 #define vcopyq_laneq_f32(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
49917 float32x4_t __s0_75 = __p0_75; \
49918 float32x4_t __s2_75 = __p2_75; \
49919 float32x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
49920 float32x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
49921 float32x4_t __ret_75; \
49922 __ret_75 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_75, __p3_75), __rev0_75, __p1_75); \
49923 __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
49924 __ret_75; \
49925 })
49926 #endif
49927
49928 #ifdef __LITTLE_ENDIAN__
49929 #define vcopyq_laneq_s32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
49930 int32x4_t __s0_76 = __p0_76; \
49931 int32x4_t __s2_76 = __p2_76; \
49932 int32x4_t __ret_76; \
49933 __ret_76 = vsetq_lane_s32(vgetq_lane_s32(__s2_76, __p3_76), __s0_76, __p1_76); \
49934 __ret_76; \
49935 })
49936 #else
49937 #define vcopyq_laneq_s32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
49938 int32x4_t __s0_77 = __p0_77; \
49939 int32x4_t __s2_77 = __p2_77; \
49940 int32x4_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 3, 2, 1, 0); \
49941 int32x4_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 3, 2, 1, 0); \
49942 int32x4_t __ret_77; \
49943 __ret_77 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_77, __p3_77), __rev0_77, __p1_77); \
49944 __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 3, 2, 1, 0); \
49945 __ret_77; \
49946 })
49947 #endif
49948
49949 #ifdef __LITTLE_ENDIAN__
49950 #define vcopyq_laneq_s64(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
49951 int64x2_t __s0_78 = __p0_78; \
49952 int64x2_t __s2_78 = __p2_78; \
49953 int64x2_t __ret_78; \
49954 __ret_78 = vsetq_lane_s64(vgetq_lane_s64(__s2_78, __p3_78), __s0_78, __p1_78); \
49955 __ret_78; \
49956 })
49957 #else
49958 #define vcopyq_laneq_s64(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
49959 int64x2_t __s0_79 = __p0_79; \
49960 int64x2_t __s2_79 = __p2_79; \
49961 int64x2_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
49962 int64x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
49963 int64x2_t __ret_79; \
49964 __ret_79 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_79, __p3_79), __rev0_79, __p1_79); \
49965 __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
49966 __ret_79; \
49967 })
49968 #endif
49969
49970 #ifdef __LITTLE_ENDIAN__
49971 #define vcopyq_laneq_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
49972 int16x8_t __s0_80 = __p0_80; \
49973 int16x8_t __s2_80 = __p2_80; \
49974 int16x8_t __ret_80; \
49975 __ret_80 = vsetq_lane_s16(vgetq_lane_s16(__s2_80, __p3_80), __s0_80, __p1_80); \
49976 __ret_80; \
49977 })
49978 #else
49979 #define vcopyq_laneq_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
49980 int16x8_t __s0_81 = __p0_81; \
49981 int16x8_t __s2_81 = __p2_81; \
49982 int16x8_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 7, 6, 5, 4, 3, 2, 1, 0); \
49983 int16x8_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 7, 6, 5, 4, 3, 2, 1, 0); \
49984 int16x8_t __ret_81; \
49985 __ret_81 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_81, __p3_81), __rev0_81, __p1_81); \
49986 __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 7, 6, 5, 4, 3, 2, 1, 0); \
49987 __ret_81; \
49988 })
49989 #endif
49990
49991 #ifdef __LITTLE_ENDIAN__
49992 #define vcopy_laneq_p8(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
49993 poly8x8_t __s0_82 = __p0_82; \
49994 poly8x16_t __s2_82 = __p2_82; \
49995 poly8x8_t __ret_82; \
49996 __ret_82 = vset_lane_p8(vgetq_lane_p8(__s2_82, __p3_82), __s0_82, __p1_82); \
49997 __ret_82; \
49998 })
49999 #else
50000 #define vcopy_laneq_p8(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
50001 poly8x8_t __s0_83 = __p0_83; \
50002 poly8x16_t __s2_83 = __p2_83; \
50003 poly8x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 7, 6, 5, 4, 3, 2, 1, 0); \
50004 poly8x16_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50005 poly8x8_t __ret_83; \
50006 __ret_83 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_83, __p3_83), __rev0_83, __p1_83); \
50007 __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 7, 6, 5, 4, 3, 2, 1, 0); \
50008 __ret_83; \
50009 })
50010 #endif
50011
50012 #ifdef __LITTLE_ENDIAN__
50013 #define vcopy_laneq_p16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
50014 poly16x4_t __s0_84 = __p0_84; \
50015 poly16x8_t __s2_84 = __p2_84; \
50016 poly16x4_t __ret_84; \
50017 __ret_84 = vset_lane_p16(vgetq_lane_p16(__s2_84, __p3_84), __s0_84, __p1_84); \
50018 __ret_84; \
50019 })
50020 #else
50021 #define vcopy_laneq_p16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
50022 poly16x4_t __s0_85 = __p0_85; \
50023 poly16x8_t __s2_85 = __p2_85; \
50024 poly16x4_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 3, 2, 1, 0); \
50025 poly16x8_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 7, 6, 5, 4, 3, 2, 1, 0); \
50026 poly16x4_t __ret_85; \
50027 __ret_85 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_85, __p3_85), __rev0_85, __p1_85); \
50028 __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \
50029 __ret_85; \
50030 })
50031 #endif
50032
50033 #ifdef __LITTLE_ENDIAN__
50034 #define vcopy_laneq_u8(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
50035 uint8x8_t __s0_86 = __p0_86; \
50036 uint8x16_t __s2_86 = __p2_86; \
50037 uint8x8_t __ret_86; \
50038 __ret_86 = vset_lane_u8(vgetq_lane_u8(__s2_86, __p3_86), __s0_86, __p1_86); \
50039 __ret_86; \
50040 })
50041 #else
50042 #define vcopy_laneq_u8(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
50043 uint8x8_t __s0_87 = __p0_87; \
50044 uint8x16_t __s2_87 = __p2_87; \
50045 uint8x8_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 7, 6, 5, 4, 3, 2, 1, 0); \
50046 uint8x16_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50047 uint8x8_t __ret_87; \
50048 __ret_87 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_87, __p3_87), __rev0_87, __p1_87); \
50049 __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 7, 6, 5, 4, 3, 2, 1, 0); \
50050 __ret_87; \
50051 })
50052 #endif
50053
50054 #ifdef __LITTLE_ENDIAN__
50055 #define vcopy_laneq_u32(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
50056 uint32x2_t __s0_88 = __p0_88; \
50057 uint32x4_t __s2_88 = __p2_88; \
50058 uint32x2_t __ret_88; \
50059 __ret_88 = vset_lane_u32(vgetq_lane_u32(__s2_88, __p3_88), __s0_88, __p1_88); \
50060 __ret_88; \
50061 })
50062 #else
50063 #define vcopy_laneq_u32(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
50064 uint32x2_t __s0_89 = __p0_89; \
50065 uint32x4_t __s2_89 = __p2_89; \
50066 uint32x2_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 1, 0); \
50067 uint32x4_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 3, 2, 1, 0); \
50068 uint32x2_t __ret_89; \
50069 __ret_89 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_89, __p3_89), __rev0_89, __p1_89); \
50070 __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 1, 0); \
50071 __ret_89; \
50072 })
50073 #endif
50074
50075 #ifdef __LITTLE_ENDIAN__
50076 #define vcopy_laneq_u64(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
50077 uint64x1_t __s0_90 = __p0_90; \
50078 uint64x2_t __s2_90 = __p2_90; \
50079 uint64x1_t __ret_90; \
50080 __ret_90 = vset_lane_u64(vgetq_lane_u64(__s2_90, __p3_90), __s0_90, __p1_90); \
50081 __ret_90; \
50082 })
50083 #else
50084 #define vcopy_laneq_u64(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
50085 uint64x1_t __s0_91 = __p0_91; \
50086 uint64x2_t __s2_91 = __p2_91; \
50087 uint64x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \
50088 uint64x1_t __ret_91; \
50089 __ret_91 = __noswap_vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_91, __p3_91), __s0_91, __p1_91); \
50090 __ret_91; \
50091 })
50092 #endif
50093
50094 #ifdef __LITTLE_ENDIAN__
50095 #define vcopy_laneq_u16(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
50096 uint16x4_t __s0_92 = __p0_92; \
50097 uint16x8_t __s2_92 = __p2_92; \
50098 uint16x4_t __ret_92; \
50099 __ret_92 = vset_lane_u16(vgetq_lane_u16(__s2_92, __p3_92), __s0_92, __p1_92); \
50100 __ret_92; \
50101 })
50102 #else
50103 #define vcopy_laneq_u16(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
50104 uint16x4_t __s0_93 = __p0_93; \
50105 uint16x8_t __s2_93 = __p2_93; \
50106 uint16x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \
50107 uint16x8_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 7, 6, 5, 4, 3, 2, 1, 0); \
50108 uint16x4_t __ret_93; \
50109 __ret_93 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_93, __p3_93), __rev0_93, __p1_93); \
50110 __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \
50111 __ret_93; \
50112 })
50113 #endif
50114
50115 #ifdef __LITTLE_ENDIAN__
50116 #define vcopy_laneq_s8(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
50117 int8x8_t __s0_94 = __p0_94; \
50118 int8x16_t __s2_94 = __p2_94; \
50119 int8x8_t __ret_94; \
50120 __ret_94 = vset_lane_s8(vgetq_lane_s8(__s2_94, __p3_94), __s0_94, __p1_94); \
50121 __ret_94; \
50122 })
50123 #else
50124 #define vcopy_laneq_s8(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
50125 int8x8_t __s0_95 = __p0_95; \
50126 int8x16_t __s2_95 = __p2_95; \
50127 int8x8_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \
50128 int8x16_t __rev2_95; __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
50129 int8x8_t __ret_95; \
50130 __ret_95 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_95, __p3_95), __rev0_95, __p1_95); \
50131 __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \
50132 __ret_95; \
50133 })
50134 #endif
50135
50136 #ifdef __LITTLE_ENDIAN__
50137 #define vcopy_laneq_f32(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
50138 float32x2_t __s0_96 = __p0_96; \
50139 float32x4_t __s2_96 = __p2_96; \
50140 float32x2_t __ret_96; \
50141 __ret_96 = vset_lane_f32(vgetq_lane_f32(__s2_96, __p3_96), __s0_96, __p1_96); \
50142 __ret_96; \
50143 })
50144 #else
50145 #define vcopy_laneq_f32(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
50146 float32x2_t __s0_97 = __p0_97; \
50147 float32x4_t __s2_97 = __p2_97; \
50148 float32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
50149 float32x4_t __rev2_97; __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 3, 2, 1, 0); \
50150 float32x2_t __ret_97; \
50151 __ret_97 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_97, __p3_97), __rev0_97, __p1_97); \
50152 __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
50153 __ret_97; \
50154 })
50155 #endif
50156
50157 #ifdef __LITTLE_ENDIAN__
50158 #define vcopy_laneq_s32(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
50159 int32x2_t __s0_98 = __p0_98; \
50160 int32x4_t __s2_98 = __p2_98; \
50161 int32x2_t __ret_98; \
50162 __ret_98 = vset_lane_s32(vgetq_lane_s32(__s2_98, __p3_98), __s0_98, __p1_98); \
50163 __ret_98; \
50164 })
50165 #else
50166 #define vcopy_laneq_s32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
50167 int32x2_t __s0_99 = __p0_99; \
50168 int32x4_t __s2_99 = __p2_99; \
50169 int32x2_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
50170 int32x4_t __rev2_99; __rev2_99 = __builtin_shufflevector(__s2_99, __s2_99, 3, 2, 1, 0); \
50171 int32x2_t __ret_99; \
50172 __ret_99 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_99, __p3_99), __rev0_99, __p1_99); \
50173 __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
50174 __ret_99; \
50175 })
50176 #endif
50177
50178 #ifdef __LITTLE_ENDIAN__
50179 #define vcopy_laneq_s64(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
50180 int64x1_t __s0_100 = __p0_100; \
50181 int64x2_t __s2_100 = __p2_100; \
50182 int64x1_t __ret_100; \
50183 __ret_100 = vset_lane_s64(vgetq_lane_s64(__s2_100, __p3_100), __s0_100, __p1_100); \
50184 __ret_100; \
50185 })
50186 #else
50187 #define vcopy_laneq_s64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
50188 int64x1_t __s0_101 = __p0_101; \
50189 int64x2_t __s2_101 = __p2_101; \
50190 int64x2_t __rev2_101; __rev2_101 = __builtin_shufflevector(__s2_101, __s2_101, 1, 0); \
50191 int64x1_t __ret_101; \
50192 __ret_101 = __noswap_vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_101, __p3_101), __s0_101, __p1_101); \
50193 __ret_101; \
50194 })
50195 #endif
50196
50197 #ifdef __LITTLE_ENDIAN__
50198 #define vcopy_laneq_s16(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
50199 int16x4_t __s0_102 = __p0_102; \
50200 int16x8_t __s2_102 = __p2_102; \
50201 int16x4_t __ret_102; \
50202 __ret_102 = vset_lane_s16(vgetq_lane_s16(__s2_102, __p3_102), __s0_102, __p1_102); \
50203 __ret_102; \
50204 })
50205 #else
50206 #define vcopy_laneq_s16(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
50207 int16x4_t __s0_103 = __p0_103; \
50208 int16x8_t __s2_103 = __p2_103; \
50209 int16x4_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 3, 2, 1, 0); \
50210 int16x8_t __rev2_103; __rev2_103 = __builtin_shufflevector(__s2_103, __s2_103, 7, 6, 5, 4, 3, 2, 1, 0); \
50211 int16x4_t __ret_103; \
50212 __ret_103 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_103, __p3_103), __rev0_103, __p1_103); \
50213 __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 3, 2, 1, 0); \
50214 __ret_103; \
50215 })
50216 #endif
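/* Editor's note (illustrative comment): across the vcopy family, the _lane
 * forms read from a 64-bit source vector and the _laneq forms read from a
 * 128-bit source; a leading q (vcopyq_*) marks a 128-bit destination.
 * A minimal sketch with a hypothetical helper name:
 *
 *   static inline int16x4_t pick_high(int16x4_t dst, int16x8_t src) {
 *       // Lane 7 of the 128-bit source replaces lane 0 of the 64-bit result.
 *       return vcopy_laneq_s16(dst, 0, src, 7);
 *   }
 */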
50217
50218 #ifdef __LITTLE_ENDIAN__
50219 __ai poly64x1_t vcreate_p64(uint64_t __p0) {
50220 poly64x1_t __ret;
50221 __ret = (poly64x1_t)(__p0);
50222 return __ret;
50223 }
50224 #else
50225 __ai poly64x1_t vcreate_p64(uint64_t __p0) {
50226 poly64x1_t __ret;
50227 __ret = (poly64x1_t)(__p0);
50228 return __ret;
50229 }
50230 #endif
50231
50232 #ifdef __LITTLE_ENDIAN__
50233 __ai float64x1_t vcreate_f64(uint64_t __p0) {
50234 float64x1_t __ret;
50235 __ret = (float64x1_t)(__p0);
50236 return __ret;
50237 }
50238 #else
50239 __ai float64x1_t vcreate_f64(uint64_t __p0) {
50240 float64x1_t __ret;
50241 __ret = (float64x1_t)(__p0);
50242 return __ret;
50243 }
50244 #endif
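/* Editor's note (illustrative comment): vcreate_* reinterprets a 64-bit
 * integer bit pattern as a one-element vector; no numeric conversion is
 * performed. A minimal sketch with a hypothetical helper name:
 *
 *   static inline float64x1_t f64_from_bits(uint64_t bits) {
 *       // The resulting double shares its bit pattern with `bits`.
 *       return vcreate_f64(bits);
 *   }
 */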
50245
50246 #ifdef __LITTLE_ENDIAN__
50247 __ai float32_t vcvts_f32_s32(int32_t __p0) {
50248 float32_t __ret;
50249 __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
50250 return __ret;
50251 }
50252 #else
50253 __ai float32_t vcvts_f32_s32(int32_t __p0) {
50254 float32_t __ret;
50255 __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
50256 return __ret;
50257 }
50258 #endif
50259
50260 #ifdef __LITTLE_ENDIAN__
50261 __ai float32_t vcvts_f32_u32(uint32_t __p0) {
50262 float32_t __ret;
50263 __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
50264 return __ret;
50265 }
50266 #else
50267 __ai float32_t vcvts_f32_u32(uint32_t __p0) {
50268 float32_t __ret;
50269 __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
50270 return __ret;
50271 }
50272 #endif
50273
50274 #ifdef __LITTLE_ENDIAN__
50275 __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
50276 float32x2_t __ret;
50277 __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
50278 return __ret;
50279 }
50280 #else
50281 __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
50282 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50283 float32x2_t __ret;
50284 __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
50285 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50286 return __ret;
50287 }
50288 __ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
50289 float32x2_t __ret;
50290 __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
50291 return __ret;
50292 }
50293 #endif
50294
50295 #ifdef __LITTLE_ENDIAN__
50296 __ai float64_t vcvtd_f64_s64(int64_t __p0) {
50297 float64_t __ret;
50298 __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
50299 return __ret;
50300 }
50301 #else
50302 __ai float64_t vcvtd_f64_s64(int64_t __p0) {
50303 float64_t __ret;
50304 __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
50305 return __ret;
50306 }
50307 #endif
50308
50309 #ifdef __LITTLE_ENDIAN__
50310 __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
50311 float64_t __ret;
50312 __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
50313 return __ret;
50314 }
50315 #else
50316 __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
50317 float64_t __ret;
50318 __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
50319 return __ret;
50320 }
50321 #endif
50322
50323 #ifdef __LITTLE_ENDIAN__
50324 __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
50325 float64x2_t __ret;
50326 __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
50327 return __ret;
50328 }
50329 #else
50330 __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
50331 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50332 float64x2_t __ret;
50333 __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
50334 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50335 return __ret;
50336 }
50337 #endif
50338
50339 #ifdef __LITTLE_ENDIAN__
50340 __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
50341 float64x2_t __ret;
50342 __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
50343 return __ret;
50344 }
50345 #else
50346 __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
50347 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50348 float64x2_t __ret;
50349 __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
50350 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50351 return __ret;
50352 }
50353 #endif
50354
50355 #ifdef __LITTLE_ENDIAN__
50356 __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
50357 float64x1_t __ret;
50358 __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
50359 return __ret;
50360 }
50361 #else
50362 __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
50363 float64x1_t __ret;
50364 __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
50365 return __ret;
50366 }
50367 #endif
50368
50369 #ifdef __LITTLE_ENDIAN__
50370 __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
50371 float64x1_t __ret;
50372 __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
50373 return __ret;
50374 }
50375 #else
50376 __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
50377 float64x1_t __ret;
50378 __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
50379 return __ret;
50380 }
50381 #endif
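/* Editor's note (illustrative comment): the vcvt*_f64_* and vcvt*_f32_*
 * intrinsics above convert integer lanes (or scalars) to floating point.
 * A minimal sketch with a hypothetical helper name:
 *
 *   static inline float64x2_t to_double(int64x2_t v) {
 *       // Each signed 64-bit lane is converted to double precision.
 *       return vcvtq_f64_s64(v);
 *   }
 */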
50382
50383 #ifdef __LITTLE_ENDIAN__
50384 __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
50385 float64x2_t __ret;
50386 __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
50387 return __ret;
50388 }
50389 #else
50390 __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
50391 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50392 float64x2_t __ret;
50393 __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
50394 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50395 return __ret;
50396 }
50397 __ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
50398 float64x2_t __ret;
50399 __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
50400 return __ret;
50401 }
50402 #endif
50403
50404 #ifdef __LITTLE_ENDIAN__
50405 __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
50406 float16x8_t __ret;
50407 __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
50408 return __ret;
50409 }
50410 #else
50411 __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
50412 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50413 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
50414 float16x8_t __ret;
50415 __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
50416 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
50417 return __ret;
50418 }
50419 #endif
50420
50421 #ifdef __LITTLE_ENDIAN__
50422 __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
50423 float32x4_t __ret;
50424 __ret = vcvt_f32_f16(vget_high_f16(__p0));
50425 return __ret;
50426 }
50427 #else
50428 __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
50429 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
50430 float32x4_t __ret;
50431 __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
50432 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
50433 return __ret;
50434 }
50435 #endif
50436
50437 #ifdef __LITTLE_ENDIAN__
50438 __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
50439 float32x4_t __ret;
50440 __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
50441 return __ret;
50442 }
50443 #else
50444 __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
50445 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50446 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
50447 float32x4_t __ret;
50448 __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
50449 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
50450 return __ret;
50451 }
50452 #endif
50453
50454 #ifdef __LITTLE_ENDIAN__
50455 __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
50456 float64x2_t __ret;
50457 __ret = vcvt_f64_f32(vget_high_f32(__p0));
50458 return __ret;
50459 }
50460 #else
50461 __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
50462 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
50463 float64x2_t __ret;
50464 __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
50465 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50466 return __ret;
50467 }
50468 #endif
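/* The vcvt_high_* forms work on the upper half of a 128-bit vector:
 * vcvt_high_f16_f32 and vcvt_high_f32_f64 narrow __p1 and pack the result
 * into the high half above __p0, while vcvt_high_f32_f16 and
 * vcvt_high_f64_f32 widen the high lanes of their argument.  Illustrative
 * sketch:
 *   float64x2_t d  = {1.0, 2.0};
 *   float32x2_t lo = {3.0f, 4.0f};
 *   float32x4_t r  = vcvt_high_f32_f64(lo, d);  // r = {3.0f, 4.0f, 1.0f, 2.0f}
 */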
50469
50470 #ifdef __LITTLE_ENDIAN__
50471 #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
50472 uint32_t __s0 = __p0; \
50473 float32_t __ret; \
50474 __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
50475 __ret; \
50476 })
50477 #else
50478 #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
50479 uint32_t __s0 = __p0; \
50480 float32_t __ret; \
50481 __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
50482 __ret; \
50483 })
50484 #endif
50485
50486 #ifdef __LITTLE_ENDIAN__
50487 #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
50488 int32_t __s0 = __p0; \
50489 float32_t __ret; \
50490 __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
50491 __ret; \
50492 })
50493 #else
50494 #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
50495 int32_t __s0 = __p0; \
50496 float32_t __ret; \
50497 __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
50498 __ret; \
50499 })
50500 #endif
50501
50502 #ifdef __LITTLE_ENDIAN__
50503 #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
50504 uint64x2_t __s0 = __p0; \
50505 float64x2_t __ret; \
50506 __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
50507 __ret; \
50508 })
50509 #else
50510 #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
50511 uint64x2_t __s0 = __p0; \
50512 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
50513 float64x2_t __ret; \
50514 __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
50515 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
50516 __ret; \
50517 })
50518 #endif
50519
50520 #ifdef __LITTLE_ENDIAN__
50521 #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
50522 int64x2_t __s0 = __p0; \
50523 float64x2_t __ret; \
50524 __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
50525 __ret; \
50526 })
50527 #else
50528 #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
50529 int64x2_t __s0 = __p0; \
50530 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
50531 float64x2_t __ret; \
50532 __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
50533 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
50534 __ret; \
50535 })
50536 #endif
50537
50538 #ifdef __LITTLE_ENDIAN__
50539 #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
50540 uint64x1_t __s0 = __p0; \
50541 float64x1_t __ret; \
50542 __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
50543 __ret; \
50544 })
50545 #else
50546 #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
50547 uint64x1_t __s0 = __p0; \
50548 float64x1_t __ret; \
50549 __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
50550 __ret; \
50551 })
50552 #endif
50553
50554 #ifdef __LITTLE_ENDIAN__
50555 #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
50556 int64x1_t __s0 = __p0; \
50557 float64x1_t __ret; \
50558 __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
50559 __ret; \
50560 })
50561 #else
50562 #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
50563 int64x1_t __s0 = __p0; \
50564 float64x1_t __ret; \
50565 __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
50566 __ret; \
50567 })
50568 #endif
50569
50570 #ifdef __LITTLE_ENDIAN__
50571 #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
50572 uint64_t __s0 = __p0; \
50573 float64_t __ret; \
50574 __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
50575 __ret; \
50576 })
50577 #else
50578 #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
50579 uint64_t __s0 = __p0; \
50580 float64_t __ret; \
50581 __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
50582 __ret; \
50583 })
50584 #endif
50585
50586 #ifdef __LITTLE_ENDIAN__
50587 #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
50588 int64_t __s0 = __p0; \
50589 float64_t __ret; \
50590 __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
50591 __ret; \
50592 })
50593 #else
50594 #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
50595 int64_t __s0 = __p0; \
50596 float64_t __ret; \
50597 __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
50598 __ret; \
50599 })
50600 #endif
50601
50602 #ifdef __LITTLE_ENDIAN__
50603 #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
50604 float32_t __s0 = __p0; \
50605 int32_t __ret; \
50606 __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
50607 __ret; \
50608 })
50609 #else
50610 #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
50611 float32_t __s0 = __p0; \
50612 int32_t __ret; \
50613 __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
50614 __ret; \
50615 })
50616 #endif
50617
50618 #ifdef __LITTLE_ENDIAN__
50619 #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
50620 float64x2_t __s0 = __p0; \
50621 int64x2_t __ret; \
50622 __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
50623 __ret; \
50624 })
50625 #else
50626 #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
50627 float64x2_t __s0 = __p0; \
50628 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
50629 int64x2_t __ret; \
50630 __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
50631 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
50632 __ret; \
50633 })
50634 #endif
50635
50636 #ifdef __LITTLE_ENDIAN__
50637 #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
50638 float64x1_t __s0 = __p0; \
50639 int64x1_t __ret; \
50640 __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
50641 __ret; \
50642 })
50643 #else
50644 #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
50645 float64x1_t __s0 = __p0; \
50646 int64x1_t __ret; \
50647 __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
50648 __ret; \
50649 })
50650 #endif
50651
50652 #ifdef __LITTLE_ENDIAN__
50653 #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
50654 float64_t __s0 = __p0; \
50655 int64_t __ret; \
50656 __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
50657 __ret; \
50658 })
50659 #else
50660 #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
50661 float64_t __s0 = __p0; \
50662 int64_t __ret; \
50663 __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
50664 __ret; \
50665 })
50666 #endif
50667
50668 #ifdef __LITTLE_ENDIAN__
50669 #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
50670 float32_t __s0 = __p0; \
50671 uint32_t __ret; \
50672 __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
50673 __ret; \
50674 })
50675 #else
50676 #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
50677 float32_t __s0 = __p0; \
50678 uint32_t __ret; \
50679 __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
50680 __ret; \
50681 })
50682 #endif
50683
50684 #ifdef __LITTLE_ENDIAN__
50685 #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
50686 float64x2_t __s0 = __p0; \
50687 uint64x2_t __ret; \
50688 __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
50689 __ret; \
50690 })
50691 #else
50692 #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
50693 float64x2_t __s0 = __p0; \
50694 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
50695 uint64x2_t __ret; \
50696 __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
50697 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
50698 __ret; \
50699 })
50700 #endif
50701
50702 #ifdef __LITTLE_ENDIAN__
50703 #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
50704 float64x1_t __s0 = __p0; \
50705 uint64x1_t __ret; \
50706 __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
50707 __ret; \
50708 })
50709 #else
50710 #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
50711 float64x1_t __s0 = __p0; \
50712 uint64x1_t __ret; \
50713 __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
50714 __ret; \
50715 })
50716 #endif
50717
50718 #ifdef __LITTLE_ENDIAN__
50719 #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
50720 float64_t __s0 = __p0; \
50721 uint64_t __ret; \
50722 __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
50723 __ret; \
50724 })
50725 #else
50726 #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
50727 float64_t __s0 = __p0; \
50728 uint64_t __ret; \
50729 __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
50730 __ret; \
50731 })
50732 #endif
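/* The _n_ macros above are fixed-point conversions: the immediate __p1 gives
 * the number of fractional bits, so converting to floating point divides by
 * 2^__p1 and converting from floating point multiplies by 2^__p1.  They are
 * macros because the fractional-bit count must be a compile-time constant.
 * Illustrative sketch:
 *   float32_t f = vcvts_n_f32_u32(256u, 8);   // 256 / 2^8 = 1.0f
 *   int32_t   i = vcvts_n_s32_f32(1.0f, 8);   // 1.0 * 2^8 = 256
 */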
50733
50734 #ifdef __LITTLE_ENDIAN__
50735 __ai int32_t vcvts_s32_f32(float32_t __p0) {
50736 int32_t __ret;
50737 __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
50738 return __ret;
50739 }
50740 #else
50741 __ai int32_t vcvts_s32_f32(float32_t __p0) {
50742 int32_t __ret;
50743 __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
50744 return __ret;
50745 }
50746 #endif
50747
50748 #ifdef __LITTLE_ENDIAN__
50749 __ai int64_t vcvtd_s64_f64(float64_t __p0) {
50750 int64_t __ret;
50751 __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
50752 return __ret;
50753 }
50754 #else
50755 __ai int64_t vcvtd_s64_f64(float64_t __p0) {
50756 int64_t __ret;
50757 __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
50758 return __ret;
50759 }
50760 #endif
50761
50762 #ifdef __LITTLE_ENDIAN__
50763 __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
50764 int64x2_t __ret;
50765 __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
50766 return __ret;
50767 }
50768 #else
50769 __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
50770 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50771 int64x2_t __ret;
50772 __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
50773 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50774 return __ret;
50775 }
50776 #endif
50777
50778 #ifdef __LITTLE_ENDIAN__
50779 __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
50780 int64x1_t __ret;
50781 __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
50782 return __ret;
50783 }
50784 #else
50785 __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
50786 int64x1_t __ret;
50787 __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
50788 return __ret;
50789 }
50790 #endif
50791
50792 #ifdef __LITTLE_ENDIAN__
50793 __ai uint32_t vcvts_u32_f32(float32_t __p0) {
50794 uint32_t __ret;
50795 __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
50796 return __ret;
50797 }
50798 #else
50799 __ai uint32_t vcvts_u32_f32(float32_t __p0) {
50800 uint32_t __ret;
50801 __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
50802 return __ret;
50803 }
50804 #endif
50805
50806 #ifdef __LITTLE_ENDIAN__
50807 __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
50808 uint64_t __ret;
50809 __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
50810 return __ret;
50811 }
50812 #else
50813 __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
50814 uint64_t __ret;
50815 __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
50816 return __ret;
50817 }
50818 #endif
50819
50820 #ifdef __LITTLE_ENDIAN__
50821 __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
50822 uint64x2_t __ret;
50823 __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
50824 return __ret;
50825 }
50826 #else
50827 __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
50828 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
50829 uint64x2_t __ret;
50830 __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
50831 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
50832 return __ret;
50833 }
50834 #endif
50835
50836 #ifdef __LITTLE_ENDIAN__
50837 __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
50838 uint64x1_t __ret;
50839 __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
50840 return __ret;
50841 }
50842 #else
50843 __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
50844 uint64x1_t __ret;
50845 __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
50846 return __ret;
50847 }
50848 #endif
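/* The plain vcvt*_s64/u64_f64 and scalar vcvts/vcvtd conversions above round
 * toward zero (truncation), matching a C cast.  Illustrative sketch:
 *   int64_t  a = vcvtd_s64_f64(-2.7);   // -2
 *   uint32_t b = vcvts_u32_f32(3.9f);   //  3
 */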
50849
50850 #ifdef __LITTLE_ENDIAN__
50851 __ai int32_t vcvtas_s32_f32(float32_t __p0) {
50852 int32_t __ret;
50853 __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
50854 return __ret;
50855 }
50856 #else
50857 __ai int32_t vcvtas_s32_f32(float32_t __p0) {
50858 int32_t __ret;
50859 __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
50860 return __ret;
50861 }
50862 #endif
50863
50864 #ifdef __LITTLE_ENDIAN__
50865 __ai int64_t vcvtad_s64_f64(float64_t __p0) {
50866 int64_t __ret;
50867 __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
50868 return __ret;
50869 }
50870 #else
50871 __ai int64_t vcvtad_s64_f64(float64_t __p0) {
50872 int64_t __ret;
50873 __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
50874 return __ret;
50875 }
50876 #endif
50877
50878 #ifdef __LITTLE_ENDIAN__
50879 __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
50880 uint32_t __ret;
50881 __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
50882 return __ret;
50883 }
50884 #else
50885 __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
50886 uint32_t __ret;
50887 __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
50888 return __ret;
50889 }
50890 #endif
50891
50892 #ifdef __LITTLE_ENDIAN__
50893 __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
50894 uint64_t __ret;
50895 __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
50896 return __ret;
50897 }
50898 #else
50899 __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
50900 uint64_t __ret;
50901 __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
50902 return __ret;
50903 }
50904 #endif
50905
50906 #ifdef __LITTLE_ENDIAN__
50907 __ai int32_t vcvtms_s32_f32(float32_t __p0) {
50908 int32_t __ret;
50909 __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
50910 return __ret;
50911 }
50912 #else
50913 __ai int32_t vcvtms_s32_f32(float32_t __p0) {
50914 int32_t __ret;
50915 __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
50916 return __ret;
50917 }
50918 #endif
50919
50920 #ifdef __LITTLE_ENDIAN__
50921 __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
50922 int64_t __ret;
50923 __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
50924 return __ret;
50925 }
50926 #else
50927 __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
50928 int64_t __ret;
50929 __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
50930 return __ret;
50931 }
50932 #endif
50933
50934 #ifdef __LITTLE_ENDIAN__
50935 __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
50936 uint32_t __ret;
50937 __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
50938 return __ret;
50939 }
50940 #else
50941 __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
50942 uint32_t __ret;
50943 __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
50944 return __ret;
50945 }
50946 #endif
50947
50948 #ifdef __LITTLE_ENDIAN__
50949 __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
50950 uint64_t __ret;
50951 __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
50952 return __ret;
50953 }
50954 #else
50955 __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
50956 uint64_t __ret;
50957 __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
50958 return __ret;
50959 }
50960 #endif
50961
50962 #ifdef __LITTLE_ENDIAN__
50963 __ai int32_t vcvtns_s32_f32(float32_t __p0) {
50964 int32_t __ret;
50965 __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
50966 return __ret;
50967 }
50968 #else
50969 __ai int32_t vcvtns_s32_f32(float32_t __p0) {
50970 int32_t __ret;
50971 __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
50972 return __ret;
50973 }
50974 #endif
50975
50976 #ifdef __LITTLE_ENDIAN__
50977 __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
50978 int64_t __ret;
50979 __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
50980 return __ret;
50981 }
50982 #else
50983 __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
50984 int64_t __ret;
50985 __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
50986 return __ret;
50987 }
50988 #endif
50989
50990 #ifdef __LITTLE_ENDIAN__
50991 __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
50992 uint32_t __ret;
50993 __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
50994 return __ret;
50995 }
50996 #else
50997 __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
50998 uint32_t __ret;
50999 __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
51000 return __ret;
51001 }
51002 #endif
51003
51004 #ifdef __LITTLE_ENDIAN__
51005 __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
51006 uint64_t __ret;
51007 __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
51008 return __ret;
51009 }
51010 #else
51011 __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
51012 uint64_t __ret;
51013 __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
51014 return __ret;
51015 }
51016 #endif
51017
51018 #ifdef __LITTLE_ENDIAN__
51019 __ai int32_t vcvtps_s32_f32(float32_t __p0) {
51020 int32_t __ret;
51021 __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
51022 return __ret;
51023 }
51024 #else
51025 __ai int32_t vcvtps_s32_f32(float32_t __p0) {
51026 int32_t __ret;
51027 __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
51028 return __ret;
51029 }
51030 #endif
51031
51032 #ifdef __LITTLE_ENDIAN__
51033 __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
51034 int64_t __ret;
51035 __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
51036 return __ret;
51037 }
51038 #else
51039 __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
51040 int64_t __ret;
51041 __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
51042 return __ret;
51043 }
51044 #endif
51045
51046 #ifdef __LITTLE_ENDIAN__
51047 __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
51048 uint32_t __ret;
51049 __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
51050 return __ret;
51051 }
51052 #else
51053 __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
51054 uint32_t __ret;
51055 __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
51056 return __ret;
51057 }
51058 #endif
51059
51060 #ifdef __LITTLE_ENDIAN__
51061 __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
51062 uint64_t __ret;
51063 __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
51064 return __ret;
51065 }
51066 #else
51067 __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
51068 uint64_t __ret;
51069 __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
51070 return __ret;
51071 }
51072 #endif
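/* The vcvta*, vcvtm*, vcvtn* and vcvtp* scalar conversions select an explicit
 * rounding mode: 'a' rounds to nearest with ties away from zero, 'm' toward
 * minus infinity (floor), 'n' to nearest with ties to even, and 'p' toward
 * plus infinity (ceiling).  Illustrative sketch:
 *   vcvtas_s32_f32(2.5f);    //  3 (ties away)
 *   vcvtns_s32_f32(2.5f);    //  2 (ties to even)
 *   vcvtms_s32_f32(-1.2f);   // -2 (floor)
 *   vcvtps_s32_f32(-1.2f);   // -1 (ceiling)
 */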
51073
51074 #ifdef __LITTLE_ENDIAN__
51075 __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
51076 float32_t __ret;
51077 __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
51078 return __ret;
51079 }
51080 #else
51081 __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
51082 float32_t __ret;
51083 __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
51084 return __ret;
51085 }
51086 #endif
51087
51088 #ifdef __LITTLE_ENDIAN__
51089 __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
51090 float32x2_t __ret;
51091 __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
51092 return __ret;
51093 }
51094 #else
51095 __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
51096 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51097 float32x2_t __ret;
51098 __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
51099 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51100 return __ret;
51101 }
51102 __ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
51103 float32x2_t __ret;
51104 __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
51105 return __ret;
51106 }
51107 #endif
51108
51109 #ifdef __LITTLE_ENDIAN__
51110 __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
51111 float32x4_t __ret;
51112 __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
51113 return __ret;
51114 }
51115 #else
51116 __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
51117 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51118 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51119 float32x4_t __ret;
51120 __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
51121 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
51122 return __ret;
51123 }
51124 #endif
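/* vcvtxd_f32_f64, vcvtx_f32_f64 and vcvtx_high_f32_f64 narrow double to
 * single precision using round-to-odd, which avoids double-rounding error if
 * the result is later narrowed again (e.g. to half precision). */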
51125
51126 #ifdef __LITTLE_ENDIAN__
51127 __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
51128 float64x2_t __ret;
51129 __ret = __p0 / __p1;
51130 return __ret;
51131 }
51132 #else
51133 __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
51134 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51135 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51136 float64x2_t __ret;
51137 __ret = __rev0 / __rev1;
51138 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51139 return __ret;
51140 }
51141 #endif
51142
51143 #ifdef __LITTLE_ENDIAN__
51144 __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
51145 float32x4_t __ret;
51146 __ret = __p0 / __p1;
51147 return __ret;
51148 }
51149 #else
51150 __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
51151 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
51152 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
51153 float32x4_t __ret;
51154 __ret = __rev0 / __rev1;
51155 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
51156 return __ret;
51157 }
51158 #endif
51159
51160 #ifdef __LITTLE_ENDIAN__
51161 __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
51162 float64x1_t __ret;
51163 __ret = __p0 / __p1;
51164 return __ret;
51165 }
51166 #else
51167 __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
51168 float64x1_t __ret;
51169 __ret = __p0 / __p1;
51170 return __ret;
51171 }
51172 #endif
51173
51174 #ifdef __LITTLE_ENDIAN__
51175 __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
51176 float32x2_t __ret;
51177 __ret = __p0 / __p1;
51178 return __ret;
51179 }
51180 #else
51181 __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
51182 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
51183 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
51184 float32x2_t __ret;
51185 __ret = __rev0 / __rev1;
51186 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
51187 return __ret;
51188 }
51189 #endif
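/* vdivq_f64, vdivq_f32, vdiv_f64 and vdiv_f32 perform lane-wise floating-
 * point division; they appear only in the AArch64 portion of this header
 * because 32-bit NEON has no vector divide.  Illustrative sketch:
 *   float32x2_t q = vdiv_f32((float32x2_t){6.0f, 9.0f},
 *                            (float32x2_t){2.0f, 3.0f});   // {3.0f, 3.0f}
 */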
51190
51191 #ifdef __LITTLE_ENDIAN__
51192 #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
51193 poly8x8_t __s0 = __p0; \
51194 poly8_t __ret; \
51195 __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
51196 __ret; \
51197 })
51198 #else
51199 #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
51200 poly8x8_t __s0 = __p0; \
51201 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51202 poly8_t __ret; \
51203 __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
51204 __ret; \
51205 })
51206 #endif
51207
51208 #ifdef __LITTLE_ENDIAN__
51209 #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
51210 poly16x4_t __s0 = __p0; \
51211 poly16_t __ret; \
51212 __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
51213 __ret; \
51214 })
51215 #else
51216 #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
51217 poly16x4_t __s0 = __p0; \
51218 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51219 poly16_t __ret; \
51220 __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
51221 __ret; \
51222 })
51223 #endif
51224
51225 #ifdef __LITTLE_ENDIAN__
51226 #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
51227 uint8x8_t __s0 = __p0; \
51228 uint8_t __ret; \
51229 __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
51230 __ret; \
51231 })
51232 #else
51233 #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
51234 uint8x8_t __s0 = __p0; \
51235 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51236 uint8_t __ret; \
51237 __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
51238 __ret; \
51239 })
51240 #endif
51241
51242 #ifdef __LITTLE_ENDIAN__
51243 #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
51244 uint32x2_t __s0 = __p0; \
51245 uint32_t __ret; \
51246 __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
51247 __ret; \
51248 })
51249 #else
51250 #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
51251 uint32x2_t __s0 = __p0; \
51252 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51253 uint32_t __ret; \
51254 __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
51255 __ret; \
51256 })
51257 #endif
51258
51259 #ifdef __LITTLE_ENDIAN__
51260 #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
51261 uint64x1_t __s0 = __p0; \
51262 uint64_t __ret; \
51263 __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
51264 __ret; \
51265 })
51266 #else
51267 #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
51268 uint64x1_t __s0 = __p0; \
51269 uint64_t __ret; \
51270 __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
51271 __ret; \
51272 })
51273 #endif
51274
51275 #ifdef __LITTLE_ENDIAN__
51276 #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
51277 uint16x4_t __s0 = __p0; \
51278 uint16_t __ret; \
51279 __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
51280 __ret; \
51281 })
51282 #else
51283 #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
51284 uint16x4_t __s0 = __p0; \
51285 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51286 uint16_t __ret; \
51287 __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
51288 __ret; \
51289 })
51290 #endif
51291
51292 #ifdef __LITTLE_ENDIAN__
51293 #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
51294 int8x8_t __s0 = __p0; \
51295 int8_t __ret; \
51296 __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
51297 __ret; \
51298 })
51299 #else
51300 #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
51301 int8x8_t __s0 = __p0; \
51302 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51303 int8_t __ret; \
51304 __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
51305 __ret; \
51306 })
51307 #endif
51308
51309 #ifdef __LITTLE_ENDIAN__
51310 #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
51311 float64x1_t __s0 = __p0; \
51312 float64_t __ret; \
51313 __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
51314 __ret; \
51315 })
51316 #else
51317 #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
51318 float64x1_t __s0 = __p0; \
51319 float64_t __ret; \
51320 __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
51321 __ret; \
51322 })
51323 #endif
51324
51325 #ifdef __LITTLE_ENDIAN__
51326 #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
51327 float32x2_t __s0 = __p0; \
51328 float32_t __ret; \
51329 __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
51330 __ret; \
51331 })
51332 #else
51333 #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
51334 float32x2_t __s0 = __p0; \
51335 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51336 float32_t __ret; \
51337 __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
51338 __ret; \
51339 })
51340 #endif
51341
51342 #ifdef __LITTLE_ENDIAN__
51343 #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
51344 int32x2_t __s0 = __p0; \
51345 int32_t __ret; \
51346 __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
51347 __ret; \
51348 })
51349 #else
51350 #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
51351 int32x2_t __s0 = __p0; \
51352 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51353 int32_t __ret; \
51354 __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
51355 __ret; \
51356 })
51357 #endif
51358
51359 #ifdef __LITTLE_ENDIAN__
51360 #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
51361 int64x1_t __s0 = __p0; \
51362 int64_t __ret; \
51363 __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
51364 __ret; \
51365 })
51366 #else
51367 #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
51368 int64x1_t __s0 = __p0; \
51369 int64_t __ret; \
51370 __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
51371 __ret; \
51372 })
51373 #endif
51374
51375 #ifdef __LITTLE_ENDIAN__
51376 #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
51377 int16x4_t __s0 = __p0; \
51378 int16_t __ret; \
51379 __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
51380 __ret; \
51381 })
51382 #else
51383 #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
51384 int16x4_t __s0 = __p0; \
51385 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51386 int16_t __ret; \
51387 __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
51388 __ret; \
51389 })
51390 #endif
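/* The vdupb/vduph/vdups/vdupd _lane_ macros above read one lane of a 64-bit
 * vector and return it as a scalar; the suffix letter encodes the element
 * width (b = 8, h = 16, s = 32, d = 64 bits), and the lane index must be a
 * compile-time constant.  Illustrative sketch:
 *   int32x2_t v = {10, 20};
 *   int32_t   x = vdups_lane_s32(v, 1);   // 20
 */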
51391
51392 #ifdef __LITTLE_ENDIAN__
51393 #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
51394 poly64x1_t __s0 = __p0; \
51395 poly64x1_t __ret; \
51396 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
51397 __ret; \
51398 })
51399 #else
51400 #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
51401 poly64x1_t __s0 = __p0; \
51402 poly64x1_t __ret; \
51403 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
51404 __ret; \
51405 })
51406 #endif
51407
51408 #ifdef __LITTLE_ENDIAN__
51409 #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
51410 poly64x1_t __s0 = __p0; \
51411 poly64x2_t __ret; \
51412 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51413 __ret; \
51414 })
51415 #else
51416 #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
51417 poly64x1_t __s0 = __p0; \
51418 poly64x2_t __ret; \
51419 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51420 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51421 __ret; \
51422 })
51423 #endif
51424
51425 #ifdef __LITTLE_ENDIAN__
51426 #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
51427 float64x1_t __s0 = __p0; \
51428 float64x2_t __ret; \
51429 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51430 __ret; \
51431 })
51432 #else
51433 #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
51434 float64x1_t __s0 = __p0; \
51435 float64x2_t __ret; \
51436 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51437 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51438 __ret; \
51439 })
51440 #endif
51441
51442 #ifdef __LITTLE_ENDIAN__
51443 #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
51444 float16x4_t __s0 = __p0; \
51445 float16x8_t __ret; \
51446 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51447 __ret; \
51448 })
51449 #else
51450 #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
51451 float16x4_t __s0 = __p0; \
51452 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51453 float16x8_t __ret; \
51454 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51455 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51456 __ret; \
51457 })
51458 #endif
51459
51460 #ifdef __LITTLE_ENDIAN__
51461 #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
51462 float64x1_t __s0 = __p0; \
51463 float64x1_t __ret; \
51464 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
51465 __ret; \
51466 })
51467 #else
51468 #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
51469 float64x1_t __s0 = __p0; \
51470 float64x1_t __ret; \
51471 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
51472 __ret; \
51473 })
51474 #endif
51475
51476 #ifdef __LITTLE_ENDIAN__
51477 #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
51478 float16x4_t __s0 = __p0; \
51479 float16x4_t __ret; \
51480 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
51481 __ret; \
51482 })
51483 #else
51484 #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
51485 float16x4_t __s0 = __p0; \
51486 float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51487 float16x4_t __ret; \
51488 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
51489 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51490 __ret; \
51491 })
51492 #endif
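/* vdup_lane_* and vdupq_lane_* broadcast one lane of a 64-bit source vector
 * across every lane of the 64- or 128-bit result.  Illustrative sketch:
 *   float64x1_t s = {2.5};
 *   float64x2_t v = vdupq_lane_f64(s, 0);   // {2.5, 2.5}
 */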
51493
51494 #ifdef __LITTLE_ENDIAN__
51495 #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
51496 poly8x16_t __s0 = __p0; \
51497 poly8_t __ret; \
51498 __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
51499 __ret; \
51500 })
51501 #else
51502 #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
51503 poly8x16_t __s0 = __p0; \
51504 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51505 poly8_t __ret; \
51506 __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
51507 __ret; \
51508 })
51509 #endif
51510
51511 #ifdef __LITTLE_ENDIAN__
51512 #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
51513 poly16x8_t __s0 = __p0; \
51514 poly16_t __ret; \
51515 __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
51516 __ret; \
51517 })
51518 #else
51519 #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
51520 poly16x8_t __s0 = __p0; \
51521 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51522 poly16_t __ret; \
51523 __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
51524 __ret; \
51525 })
51526 #endif
51527
51528 #ifdef __LITTLE_ENDIAN__
51529 #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
51530 uint8x16_t __s0 = __p0; \
51531 uint8_t __ret; \
51532 __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
51533 __ret; \
51534 })
51535 #else
51536 #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
51537 uint8x16_t __s0 = __p0; \
51538 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51539 uint8_t __ret; \
51540 __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
51541 __ret; \
51542 })
51543 #endif
51544
51545 #ifdef __LITTLE_ENDIAN__
51546 #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
51547 uint32x4_t __s0 = __p0; \
51548 uint32_t __ret; \
51549 __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
51550 __ret; \
51551 })
51552 #else
51553 #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
51554 uint32x4_t __s0 = __p0; \
51555 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51556 uint32_t __ret; \
51557 __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
51558 __ret; \
51559 })
51560 #endif
51561
51562 #ifdef __LITTLE_ENDIAN__
51563 #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
51564 uint64x2_t __s0 = __p0; \
51565 uint64_t __ret; \
51566 __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
51567 __ret; \
51568 })
51569 #else
51570 #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
51571 uint64x2_t __s0 = __p0; \
51572 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51573 uint64_t __ret; \
51574 __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
51575 __ret; \
51576 })
51577 #endif
51578
51579 #ifdef __LITTLE_ENDIAN__
51580 #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
51581 uint16x8_t __s0 = __p0; \
51582 uint16_t __ret; \
51583 __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
51584 __ret; \
51585 })
51586 #else
51587 #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
51588 uint16x8_t __s0 = __p0; \
51589 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51590 uint16_t __ret; \
51591 __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
51592 __ret; \
51593 })
51594 #endif
51595
51596 #ifdef __LITTLE_ENDIAN__
51597 #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
51598 int8x16_t __s0 = __p0; \
51599 int8_t __ret; \
51600 __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
51601 __ret; \
51602 })
51603 #else
51604 #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
51605 int8x16_t __s0 = __p0; \
51606 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51607 int8_t __ret; \
51608 __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
51609 __ret; \
51610 })
51611 #endif
51612
51613 #ifdef __LITTLE_ENDIAN__
51614 #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
51615 float64x2_t __s0 = __p0; \
51616 float64_t __ret; \
51617 __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
51618 __ret; \
51619 })
51620 #else
51621 #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
51622 float64x2_t __s0 = __p0; \
51623 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51624 float64_t __ret; \
51625 __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
51626 __ret; \
51627 })
51628 #endif
51629
51630 #ifdef __LITTLE_ENDIAN__
51631 #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
51632 float32x4_t __s0 = __p0; \
51633 float32_t __ret; \
51634 __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
51635 __ret; \
51636 })
51637 #else
51638 #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
51639 float32x4_t __s0 = __p0; \
51640 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51641 float32_t __ret; \
51642 __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
51643 __ret; \
51644 })
51645 #endif
51646
51647 #ifdef __LITTLE_ENDIAN__
51648 #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
51649 int32x4_t __s0 = __p0; \
51650 int32_t __ret; \
51651 __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
51652 __ret; \
51653 })
51654 #else
51655 #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
51656 int32x4_t __s0 = __p0; \
51657 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51658 int32_t __ret; \
51659 __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
51660 __ret; \
51661 })
51662 #endif
51663
51664 #ifdef __LITTLE_ENDIAN__
51665 #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
51666 int64x2_t __s0 = __p0; \
51667 int64_t __ret; \
51668 __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
51669 __ret; \
51670 })
51671 #else
51672 #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
51673 int64x2_t __s0 = __p0; \
51674 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51675 int64_t __ret; \
51676 __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
51677 __ret; \
51678 })
51679 #endif
51680
51681 #ifdef __LITTLE_ENDIAN__
51682 #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
51683 int16x8_t __s0 = __p0; \
51684 int16_t __ret; \
51685 __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
51686 __ret; \
51687 })
51688 #else
51689 #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
51690 int16x8_t __s0 = __p0; \
51691 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51692 int16_t __ret; \
51693 __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
51694 __ret; \
51695 })
51696 #endif
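/* The _laneq_ scalar forms are the 128-bit-source counterparts of the _lane_
 * macros above: they extract one element of a "q" register as a scalar.
 * Illustrative sketch:
 *   int16x8_t v = {1, 2, 3, 4, 5, 6, 7, 8};
 *   int16_t   x = vduph_laneq_s16(v, 7);   // 8
 */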
51697
51698 #ifdef __LITTLE_ENDIAN__
51699 #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
51700 poly8x16_t __s0 = __p0; \
51701 poly8x8_t __ret; \
51702 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51703 __ret; \
51704 })
51705 #else
51706 #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
51707 poly8x16_t __s0 = __p0; \
51708 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51709 poly8x8_t __ret; \
51710 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51711 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51712 __ret; \
51713 })
51714 #endif
51715
51716 #ifdef __LITTLE_ENDIAN__
51717 #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
51718 poly64x2_t __s0 = __p0; \
51719 poly64x1_t __ret; \
51720 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
51721 __ret; \
51722 })
51723 #else
51724 #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
51725 poly64x2_t __s0 = __p0; \
51726 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51727 poly64x1_t __ret; \
51728 __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
51729 __ret; \
51730 })
51731 #endif
51732
51733 #ifdef __LITTLE_ENDIAN__
51734 #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
51735 poly16x8_t __s0 = __p0; \
51736 poly16x4_t __ret; \
51737 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
51738 __ret; \
51739 })
51740 #else
51741 #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
51742 poly16x8_t __s0 = __p0; \
51743 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51744 poly16x4_t __ret; \
51745 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
51746 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51747 __ret; \
51748 })
51749 #endif
51750
51751 #ifdef __LITTLE_ENDIAN__
51752 #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
51753 poly8x16_t __s0 = __p0; \
51754 poly8x16_t __ret; \
51755 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51756 __ret; \
51757 })
51758 #else
51759 #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
51760 poly8x16_t __s0 = __p0; \
51761 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51762 poly8x16_t __ret; \
51763 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51764 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51765 __ret; \
51766 })
51767 #endif
51768
51769 #ifdef __LITTLE_ENDIAN__
51770 #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
51771 poly64x2_t __s0 = __p0; \
51772 poly64x2_t __ret; \
51773 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51774 __ret; \
51775 })
51776 #else
51777 #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
51778 poly64x2_t __s0 = __p0; \
51779 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51780 poly64x2_t __ret; \
51781 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
51782 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51783 __ret; \
51784 })
51785 #endif
51786
51787 #ifdef __LITTLE_ENDIAN__
51788 #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
51789 poly16x8_t __s0 = __p0; \
51790 poly16x8_t __ret; \
51791 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51792 __ret; \
51793 })
51794 #else
51795 #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
51796 poly16x8_t __s0 = __p0; \
51797 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51798 poly16x8_t __ret; \
51799 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51800 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51801 __ret; \
51802 })
51803 #endif
51804
51805 #ifdef __LITTLE_ENDIAN__
51806 #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
51807 uint8x16_t __s0 = __p0; \
51808 uint8x16_t __ret; \
51809 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51810 __ret; \
51811 })
51812 #else
51813 #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
51814 uint8x16_t __s0 = __p0; \
51815 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51816 uint8x16_t __ret; \
51817 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51818 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51819 __ret; \
51820 })
51821 #endif
51822
51823 #ifdef __LITTLE_ENDIAN__
51824 #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
51825 uint32x4_t __s0 = __p0; \
51826 uint32x4_t __ret; \
51827 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
51828 __ret; \
51829 })
51830 #else
51831 #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
51832 uint32x4_t __s0 = __p0; \
51833 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51834 uint32x4_t __ret; \
51835 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
51836 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51837 __ret; \
51838 })
51839 #endif
51840
51841 #ifdef __LITTLE_ENDIAN__
51842 #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
51843 uint64x2_t __s0 = __p0; \
51844 uint64x2_t __ret; \
51845 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51846 __ret; \
51847 })
51848 #else
51849 #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
51850 uint64x2_t __s0 = __p0; \
51851 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51852 uint64x2_t __ret; \
51853 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
51854 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51855 __ret; \
51856 })
51857 #endif
51858
51859 #ifdef __LITTLE_ENDIAN__
51860 #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
51861 uint16x8_t __s0 = __p0; \
51862 uint16x8_t __ret; \
51863 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51864 __ret; \
51865 })
51866 #else
51867 #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
51868 uint16x8_t __s0 = __p0; \
51869 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51870 uint16x8_t __ret; \
51871 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51872 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51873 __ret; \
51874 })
51875 #endif
51876
51877 #ifdef __LITTLE_ENDIAN__
51878 #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
51879 int8x16_t __s0 = __p0; \
51880 int8x16_t __ret; \
51881 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51882 __ret; \
51883 })
51884 #else
51885 #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
51886 int8x16_t __s0 = __p0; \
51887 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51888 int8x16_t __ret; \
51889 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51890 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
51891 __ret; \
51892 })
51893 #endif
51894
51895 #ifdef __LITTLE_ENDIAN__
51896 #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
51897 float64x2_t __s0 = __p0; \
51898 float64x2_t __ret; \
51899 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51900 __ret; \
51901 })
51902 #else
51903 #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
51904 float64x2_t __s0 = __p0; \
51905 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51906 float64x2_t __ret; \
51907 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
51908 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51909 __ret; \
51910 })
51911 #endif
51912
51913 #ifdef __LITTLE_ENDIAN__
51914 #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
51915 float32x4_t __s0 = __p0; \
51916 float32x4_t __ret; \
51917 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
51918 __ret; \
51919 })
51920 #else
51921 #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
51922 float32x4_t __s0 = __p0; \
51923 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51924 float32x4_t __ret; \
51925 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
51926 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51927 __ret; \
51928 })
51929 #endif
51930
51931 #ifdef __LITTLE_ENDIAN__
51932 #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
51933 float16x8_t __s0 = __p0; \
51934 float16x8_t __ret; \
51935 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51936 __ret; \
51937 })
51938 #else
51939 #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
51940 float16x8_t __s0 = __p0; \
51941 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51942 float16x8_t __ret; \
51943 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51944 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51945 __ret; \
51946 })
51947 #endif
51948
51949 #ifdef __LITTLE_ENDIAN__
51950 #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
51951 int32x4_t __s0 = __p0; \
51952 int32x4_t __ret; \
51953 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
51954 __ret; \
51955 })
51956 #else
51957 #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
51958 int32x4_t __s0 = __p0; \
51959 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
51960 int32x4_t __ret; \
51961 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
51962 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
51963 __ret; \
51964 })
51965 #endif
51966
51967 #ifdef __LITTLE_ENDIAN__
51968 #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
51969 int64x2_t __s0 = __p0; \
51970 int64x2_t __ret; \
51971 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
51972 __ret; \
51973 })
51974 #else
51975 #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
51976 int64x2_t __s0 = __p0; \
51977 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
51978 int64x2_t __ret; \
51979 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
51980 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
51981 __ret; \
51982 })
51983 #endif
51984
51985 #ifdef __LITTLE_ENDIAN__
51986 #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
51987 int16x8_t __s0 = __p0; \
51988 int16x8_t __ret; \
51989 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51990 __ret; \
51991 })
51992 #else
51993 #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
51994 int16x8_t __s0 = __p0; \
51995 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
51996 int16x8_t __ret; \
51997 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
51998 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
51999 __ret; \
52000 })
52001 #endif
52002
52003 #ifdef __LITTLE_ENDIAN__
52004 #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
52005 uint8x16_t __s0 = __p0; \
52006 uint8x8_t __ret; \
52007 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
52008 __ret; \
52009 })
52010 #else
52011 #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
52012 uint8x16_t __s0 = __p0; \
52013 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
52014 uint8x8_t __ret; \
52015 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
52016 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
52017 __ret; \
52018 })
52019 #endif
52020
52021 #ifdef __LITTLE_ENDIAN__
52022 #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
52023 uint32x4_t __s0 = __p0; \
52024 uint32x2_t __ret; \
52025 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
52026 __ret; \
52027 })
52028 #else
52029 #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
52030 uint32x4_t __s0 = __p0; \
52031 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52032 uint32x2_t __ret; \
52033 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
52034 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52035 __ret; \
52036 })
52037 #endif
52038
52039 #ifdef __LITTLE_ENDIAN__
52040 #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
52041 uint64x2_t __s0 = __p0; \
52042 uint64x1_t __ret; \
52043 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
52044 __ret; \
52045 })
52046 #else
52047 #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
52048 uint64x2_t __s0 = __p0; \
52049 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52050 uint64x1_t __ret; \
52051 __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
52052 __ret; \
52053 })
52054 #endif
52055
52056 #ifdef __LITTLE_ENDIAN__
52057 #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
52058 uint16x8_t __s0 = __p0; \
52059 uint16x4_t __ret; \
52060 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
52061 __ret; \
52062 })
52063 #else
52064 #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
52065 uint16x8_t __s0 = __p0; \
52066 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
52067 uint16x4_t __ret; \
52068 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
52069 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52070 __ret; \
52071 })
52072 #endif
52073
52074 #ifdef __LITTLE_ENDIAN__
52075 #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
52076 int8x16_t __s0 = __p0; \
52077 int8x8_t __ret; \
52078 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
52079 __ret; \
52080 })
52081 #else
52082 #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
52083 int8x16_t __s0 = __p0; \
52084 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
52085 int8x8_t __ret; \
52086 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
52087 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
52088 __ret; \
52089 })
52090 #endif
52091
52092 #ifdef __LITTLE_ENDIAN__
52093 #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
52094 float64x2_t __s0 = __p0; \
52095 float64x1_t __ret; \
52096 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
52097 __ret; \
52098 })
52099 #else
52100 #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
52101 float64x2_t __s0 = __p0; \
52102 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52103 float64x1_t __ret; \
52104 __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
52105 __ret; \
52106 })
52107 #endif
52108
52109 #ifdef __LITTLE_ENDIAN__
52110 #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
52111 float32x4_t __s0 = __p0; \
52112 float32x2_t __ret; \
52113 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
52114 __ret; \
52115 })
52116 #else
52117 #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
52118 float32x4_t __s0 = __p0; \
52119 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52120 float32x2_t __ret; \
52121 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
52122 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52123 __ret; \
52124 })
52125 #endif
52126
52127 #ifdef __LITTLE_ENDIAN__
52128 #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
52129 float16x8_t __s0 = __p0; \
52130 float16x4_t __ret; \
52131 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
52132 __ret; \
52133 })
52134 #else
52135 #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
52136 float16x8_t __s0 = __p0; \
52137 float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
52138 float16x4_t __ret; \
52139 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
52140 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52141 __ret; \
52142 })
52143 #endif
52144
52145 #ifdef __LITTLE_ENDIAN__
52146 #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
52147 int32x4_t __s0 = __p0; \
52148 int32x2_t __ret; \
52149 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
52150 __ret; \
52151 })
52152 #else
52153 #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
52154 int32x4_t __s0 = __p0; \
52155 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52156 int32x2_t __ret; \
52157 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
52158 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52159 __ret; \
52160 })
52161 #endif
52162
52163 #ifdef __LITTLE_ENDIAN__
52164 #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
52165 int64x2_t __s0 = __p0; \
52166 int64x1_t __ret; \
52167 __ret = __builtin_shufflevector(__s0, __s0, __p1); \
52168 __ret; \
52169 })
52170 #else
52171 #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
52172 int64x2_t __s0 = __p0; \
52173 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52174 int64x1_t __ret; \
52175 __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
52176 __ret; \
52177 })
52178 #endif
52179
52180 #ifdef __LITTLE_ENDIAN__
52181 #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
52182 int16x8_t __s0 = __p0; \
52183 int16x4_t __ret; \
52184 __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
52185 __ret; \
52186 })
52187 #else
52188 #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
52189 int16x8_t __s0 = __p0; \
52190 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
52191 int16x4_t __ret; \
52192 __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
52193 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52194 __ret; \
52195 })
52196 #endif
52197
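/* vdup_n_p64 / vdupq_n_p64 / vdupq_n_f64 / vdup_n_f64: build a vector with
 * every lane set to the given scalar, e.g. (illustrative only)
 *   float64x2_t v = vdupq_n_f64(1.0);   // -> {1.0, 1.0}
 * The big-endian variants of the 128-bit forms end with the header's usual
 * lane reversal so the result stays in the internal lane order. */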
52198 #ifdef __LITTLE_ENDIAN__
52199 __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
52200 poly64x1_t __ret;
52201 __ret = (poly64x1_t) {__p0};
52202 return __ret;
52203 }
52204 #else
52205 __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
52206 poly64x1_t __ret;
52207 __ret = (poly64x1_t) {__p0};
52208 return __ret;
52209 }
52210 #endif
52211
52212 #ifdef __LITTLE_ENDIAN__
52213 __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
52214 poly64x2_t __ret;
52215 __ret = (poly64x2_t) {__p0, __p0};
52216 return __ret;
52217 }
52218 #else
52219 __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
52220 poly64x2_t __ret;
52221 __ret = (poly64x2_t) {__p0, __p0};
52222 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52223 return __ret;
52224 }
52225 #endif
52226
52227 #ifdef __LITTLE_ENDIAN__
52228 __ai float64x2_t vdupq_n_f64(float64_t __p0) {
52229 float64x2_t __ret;
52230 __ret = (float64x2_t) {__p0, __p0};
52231 return __ret;
52232 }
52233 #else
52234 __ai float64x2_t vdupq_n_f64(float64_t __p0) {
52235 float64x2_t __ret;
52236 __ret = (float64x2_t) {__p0, __p0};
52237 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52238 return __ret;
52239 }
52240 #endif
52241
52242 #ifdef __LITTLE_ENDIAN__
52243 __ai float64x1_t vdup_n_f64(float64_t __p0) {
52244 float64x1_t __ret;
52245 __ret = (float64x1_t) {__p0};
52246 return __ret;
52247 }
52248 #else
52249 __ai float64x1_t vdup_n_f64(float64_t __p0) {
52250 float64x1_t __ret;
52251 __ret = (float64x1_t) {__p0};
52252 return __ret;
52253 }
52254 #endif
52255
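/* vext_p64 / vextq_p64 / vextq_f64 / vext_f64: extract a vector starting at
 * lane __p2 of the concatenation of the two operands (ACLE VEXT), e.g.
 * (illustrative only) vextq_f64(a, b, 1) -> {a[1], b[0]}.  The trailing
 * integer passed to the builtin is an internal element-type code, not a user
 * argument.  Big-endian 128-bit variants reverse the inputs and the result. */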
52256 #ifdef __LITTLE_ENDIAN__
52257 #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
52258 poly64x1_t __s0 = __p0; \
52259 poly64x1_t __s1 = __p1; \
52260 poly64x1_t __ret; \
52261 __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
52262 __ret; \
52263 })
52264 #else
52265 #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
52266 poly64x1_t __s0 = __p0; \
52267 poly64x1_t __s1 = __p1; \
52268 poly64x1_t __ret; \
52269 __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
52270 __ret; \
52271 })
52272 #endif
52273
52274 #ifdef __LITTLE_ENDIAN__
52275 #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
52276 poly64x2_t __s0 = __p0; \
52277 poly64x2_t __s1 = __p1; \
52278 poly64x2_t __ret; \
52279 __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
52280 __ret; \
52281 })
52282 #else
52283 #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
52284 poly64x2_t __s0 = __p0; \
52285 poly64x2_t __s1 = __p1; \
52286 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52287 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52288 poly64x2_t __ret; \
52289 __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
52290 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52291 __ret; \
52292 })
52293 #endif
52294
52295 #ifdef __LITTLE_ENDIAN__
52296 #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
52297 float64x2_t __s0 = __p0; \
52298 float64x2_t __s1 = __p1; \
52299 float64x2_t __ret; \
52300 __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
52301 __ret; \
52302 })
52303 #else
52304 #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
52305 float64x2_t __s0 = __p0; \
52306 float64x2_t __s1 = __p1; \
52307 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52308 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52309 float64x2_t __ret; \
52310 __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
52311 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52312 __ret; \
52313 })
52314 #endif
52315
52316 #ifdef __LITTLE_ENDIAN__
52317 #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
52318 float64x1_t __s0 = __p0; \
52319 float64x1_t __s1 = __p1; \
52320 float64x1_t __ret; \
52321 __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
52322 __ret; \
52323 })
52324 #else
52325 #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
52326 float64x1_t __s0 = __p0; \
52327 float64x1_t __s1 = __p1; \
52328 float64x1_t __ret; \
52329 __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
52330 __ret; \
52331 })
52332 #endif
52333
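/* vfmaq_f64 / vfma_f64: fused multiply-add, computing __p0 + __p1 * __p2 per
 * lane with a single rounding.  The __noswap_ variants are internal helpers
 * reused by other big-endian wrappers whose operands are already reversed. */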
52334 #ifdef __LITTLE_ENDIAN__
52335 __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
52336 float64x2_t __ret;
52337 __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
52338 return __ret;
52339 }
52340 #else
52341 __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
52342 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52343 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52344 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
52345 float64x2_t __ret;
52346 __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
52347 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52348 return __ret;
52349 }
52350 __ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
52351 float64x2_t __ret;
52352 __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
52353 return __ret;
52354 }
52355 #endif
52356
52357 #ifdef __LITTLE_ENDIAN__
52358 __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
52359 float64x1_t __ret;
52360 __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
52361 return __ret;
52362 }
52363 #else
52364 __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
52365 float64x1_t __ret;
52366 __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
52367 return __ret;
52368 }
52369 __ai float64x1_t __noswap_vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
52370 float64x1_t __ret;
52371 __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
52372 return __ret;
52373 }
52374 #endif
52375
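/* vfma*_lane_*: fused multiply-add where the multiplier is taken from lane
 * __p3 of a 64-bit vector (__p2); the "d"/"s" forms operate on float64/float32
 * scalars, the q forms on whole vectors.  Big-endian variants reverse any
 * multi-lane operands before the builtin call and the result after it. */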
52376 #ifdef __LITTLE_ENDIAN__
52377 #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52378 float64_t __s0 = __p0; \
52379 float64_t __s1 = __p1; \
52380 float64x1_t __s2 = __p2; \
52381 float64_t __ret; \
52382 __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
52383 __ret; \
52384 })
52385 #else
52386 #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52387 float64_t __s0 = __p0; \
52388 float64_t __s1 = __p1; \
52389 float64x1_t __s2 = __p2; \
52390 float64_t __ret; \
52391 __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
52392 __ret; \
52393 })
52394 #define __noswap_vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52395 float64_t __s0 = __p0; \
52396 float64_t __s1 = __p1; \
52397 float64x1_t __s2 = __p2; \
52398 float64_t __ret; \
52399 __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
52400 __ret; \
52401 })
52402 #endif
52403
52404 #ifdef __LITTLE_ENDIAN__
52405 #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52406 float32_t __s0 = __p0; \
52407 float32_t __s1 = __p1; \
52408 float32x2_t __s2 = __p2; \
52409 float32_t __ret; \
52410 __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
52411 __ret; \
52412 })
52413 #else
52414 #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52415 float32_t __s0 = __p0; \
52416 float32_t __s1 = __p1; \
52417 float32x2_t __s2 = __p2; \
52418 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52419 float32_t __ret; \
52420 __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
52421 __ret; \
52422 })
52423 #define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52424 float32_t __s0 = __p0; \
52425 float32_t __s1 = __p1; \
52426 float32x2_t __s2 = __p2; \
52427 float32_t __ret; \
52428 __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
52429 __ret; \
52430 })
52431 #endif
52432
52433 #ifdef __LITTLE_ENDIAN__
52434 #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52435 float64x2_t __s0 = __p0; \
52436 float64x2_t __s1 = __p1; \
52437 float64x1_t __s2 = __p2; \
52438 float64x2_t __ret; \
52439 __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
52440 __ret; \
52441 })
52442 #else
52443 #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52444 float64x2_t __s0 = __p0; \
52445 float64x2_t __s1 = __p1; \
52446 float64x1_t __s2 = __p2; \
52447 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52448 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52449 float64x2_t __ret; \
52450 __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
52451 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52452 __ret; \
52453 })
52454 #define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52455 float64x2_t __s0 = __p0; \
52456 float64x2_t __s1 = __p1; \
52457 float64x1_t __s2 = __p2; \
52458 float64x2_t __ret; \
52459 __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
52460 __ret; \
52461 })
52462 #endif
52463
52464 #ifdef __LITTLE_ENDIAN__
52465 #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52466 float32x4_t __s0 = __p0; \
52467 float32x4_t __s1 = __p1; \
52468 float32x2_t __s2 = __p2; \
52469 float32x4_t __ret; \
52470 __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
52471 __ret; \
52472 })
52473 #else
52474 #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52475 float32x4_t __s0 = __p0; \
52476 float32x4_t __s1 = __p1; \
52477 float32x2_t __s2 = __p2; \
52478 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52479 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52480 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52481 float32x4_t __ret; \
52482 __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
52483 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52484 __ret; \
52485 })
52486 #define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52487 float32x4_t __s0 = __p0; \
52488 float32x4_t __s1 = __p1; \
52489 float32x2_t __s2 = __p2; \
52490 float32x4_t __ret; \
52491 __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
52492 __ret; \
52493 })
52494 #endif
52495
52496 #ifdef __LITTLE_ENDIAN__
52497 #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52498 float64x1_t __s0 = __p0; \
52499 float64x1_t __s1 = __p1; \
52500 float64x1_t __s2 = __p2; \
52501 float64x1_t __ret; \
52502 __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
52503 __ret; \
52504 })
52505 #else
52506 #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52507 float64x1_t __s0 = __p0; \
52508 float64x1_t __s1 = __p1; \
52509 float64x1_t __s2 = __p2; \
52510 float64x1_t __ret; \
52511 __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
52512 __ret; \
52513 })
52514 #define __noswap_vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52515 float64x1_t __s0 = __p0; \
52516 float64x1_t __s1 = __p1; \
52517 float64x1_t __s2 = __p2; \
52518 float64x1_t __ret; \
52519 __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
52520 __ret; \
52521 })
52522 #endif
52523
52524 #ifdef __LITTLE_ENDIAN__
52525 #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52526 float32x2_t __s0 = __p0; \
52527 float32x2_t __s1 = __p1; \
52528 float32x2_t __s2 = __p2; \
52529 float32x2_t __ret; \
52530 __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
52531 __ret; \
52532 })
52533 #else
52534 #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52535 float32x2_t __s0 = __p0; \
52536 float32x2_t __s1 = __p1; \
52537 float32x2_t __s2 = __p2; \
52538 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52539 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52540 float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52541 float32x2_t __ret; \
52542 __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
52543 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52544 __ret; \
52545 })
52546 #define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52547 float32x2_t __s0 = __p0; \
52548 float32x2_t __s1 = __p1; \
52549 float32x2_t __s2 = __p2; \
52550 float32x2_t __ret; \
52551 __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
52552 __ret; \
52553 })
52554 #endif
52555
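/* vfma*_laneq_*: same as the _lane forms above, except the lane is selected
 * from a 128-bit ("q") vector. */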
52556 #ifdef __LITTLE_ENDIAN__
52557 #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52558 float64_t __s0 = __p0; \
52559 float64_t __s1 = __p1; \
52560 float64x2_t __s2 = __p2; \
52561 float64_t __ret; \
52562 __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
52563 __ret; \
52564 })
52565 #else
52566 #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52567 float64_t __s0 = __p0; \
52568 float64_t __s1 = __p1; \
52569 float64x2_t __s2 = __p2; \
52570 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52571 float64_t __ret; \
52572 __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
52573 __ret; \
52574 })
52575 #define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52576 float64_t __s0 = __p0; \
52577 float64_t __s1 = __p1; \
52578 float64x2_t __s2 = __p2; \
52579 float64_t __ret; \
52580 __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
52581 __ret; \
52582 })
52583 #endif
52584
52585 #ifdef __LITTLE_ENDIAN__
52586 #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52587 float32_t __s0 = __p0; \
52588 float32_t __s1 = __p1; \
52589 float32x4_t __s2 = __p2; \
52590 float32_t __ret; \
52591 __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
52592 __ret; \
52593 })
52594 #else
52595 #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52596 float32_t __s0 = __p0; \
52597 float32_t __s1 = __p1; \
52598 float32x4_t __s2 = __p2; \
52599 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52600 float32_t __ret; \
52601 __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
52602 __ret; \
52603 })
52604 #define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52605 float32_t __s0 = __p0; \
52606 float32_t __s1 = __p1; \
52607 float32x4_t __s2 = __p2; \
52608 float32_t __ret; \
52609 __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
52610 __ret; \
52611 })
52612 #endif
52613
52614 #ifdef __LITTLE_ENDIAN__
52615 #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52616 float64x2_t __s0 = __p0; \
52617 float64x2_t __s1 = __p1; \
52618 float64x2_t __s2 = __p2; \
52619 float64x2_t __ret; \
52620 __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
52621 __ret; \
52622 })
52623 #else
52624 #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52625 float64x2_t __s0 = __p0; \
52626 float64x2_t __s1 = __p1; \
52627 float64x2_t __s2 = __p2; \
52628 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52629 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52630 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52631 float64x2_t __ret; \
52632 __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
52633 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52634 __ret; \
52635 })
52636 #define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52637 float64x2_t __s0 = __p0; \
52638 float64x2_t __s1 = __p1; \
52639 float64x2_t __s2 = __p2; \
52640 float64x2_t __ret; \
52641 __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
52642 __ret; \
52643 })
52644 #endif
52645
52646 #ifdef __LITTLE_ENDIAN__
52647 #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52648 float32x4_t __s0 = __p0; \
52649 float32x4_t __s1 = __p1; \
52650 float32x4_t __s2 = __p2; \
52651 float32x4_t __ret; \
52652 __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
52653 __ret; \
52654 })
52655 #else
52656 #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52657 float32x4_t __s0 = __p0; \
52658 float32x4_t __s1 = __p1; \
52659 float32x4_t __s2 = __p2; \
52660 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
52661 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
52662 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52663 float32x4_t __ret; \
52664 __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
52665 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
52666 __ret; \
52667 })
52668 #define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52669 float32x4_t __s0 = __p0; \
52670 float32x4_t __s1 = __p1; \
52671 float32x4_t __s2 = __p2; \
52672 float32x4_t __ret; \
52673 __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
52674 __ret; \
52675 })
52676 #endif
52677
52678 #ifdef __LITTLE_ENDIAN__
52679 #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52680 float64x1_t __s0 = __p0; \
52681 float64x1_t __s1 = __p1; \
52682 float64x2_t __s2 = __p2; \
52683 float64x1_t __ret; \
52684 __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
52685 __ret; \
52686 })
52687 #else
52688 #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52689 float64x1_t __s0 = __p0; \
52690 float64x1_t __s1 = __p1; \
52691 float64x2_t __s2 = __p2; \
52692 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
52693 float64x1_t __ret; \
52694 __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
52695 __ret; \
52696 })
52697 #define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
52698 float64x1_t __s0 = __p0; \
52699 float64x1_t __s1 = __p1; \
52700 float64x2_t __s2 = __p2; \
52701 float64x1_t __ret; \
52702 __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
52703 __ret; \
52704 })
52705 #endif
52706
52707 #ifdef __LITTLE_ENDIAN__
52708 #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52709 float32x2_t __s0 = __p0; \
52710 float32x2_t __s1 = __p1; \
52711 float32x4_t __s2 = __p2; \
52712 float32x2_t __ret; \
52713 __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
52714 __ret; \
52715 })
52716 #else
52717 #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52718 float32x2_t __s0 = __p0; \
52719 float32x2_t __s1 = __p1; \
52720 float32x4_t __s2 = __p2; \
52721 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
52722 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
52723 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
52724 float32x2_t __ret; \
52725 __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
52726 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
52727 __ret; \
52728 })
52729 #define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
52730 float32x2_t __s0 = __p0; \
52731 float32x2_t __s1 = __p1; \
52732 float32x4_t __s2 = __p2; \
52733 float32x2_t __ret; \
52734 __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
52735 __ret; \
52736 })
52737 #endif
52738
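/* vfma(q)_n_*: fused multiply-add with a scalar multiplier; the scalar is
 * broadcast into a vector literal and forwarded to vfma(q)_*.  The big-endian
 * paths call the __noswap_ helpers so operands are not reversed twice. */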
52739 #ifdef __LITTLE_ENDIAN__
52740 __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
52741 float64x2_t __ret;
52742 __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
52743 return __ret;
52744 }
52745 #else
52746 __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
52747 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52748 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52749 float64x2_t __ret;
52750 __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
52751 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52752 return __ret;
52753 }
52754 #endif
52755
52756 #ifdef __LITTLE_ENDIAN__
52757 __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
52758 float32x4_t __ret;
52759 __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
52760 return __ret;
52761 }
52762 #else
52763 __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
52764 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
52765 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
52766 float32x4_t __ret;
52767 __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
52768 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
52769 return __ret;
52770 }
52771 #endif
52772
52773 #ifdef __LITTLE_ENDIAN__
52774 __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
52775 float32x2_t __ret;
52776 __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
52777 return __ret;
52778 }
52779 #else
52780 __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
52781 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52782 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52783 float32x2_t __ret;
52784 __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
52785 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52786 return __ret;
52787 }
52788 #endif
52789
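/* vfmsq_f64 / vfms_f64: fused multiply-subtract, __p0 - __p1 * __p2,
 * implemented by negating the second operand and reusing vfma. */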
52790 #ifdef __LITTLE_ENDIAN__
52791 __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
52792 float64x2_t __ret;
52793 __ret = vfmaq_f64(__p0, -__p1, __p2);
52794 return __ret;
52795 }
52796 #else
52797 __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
52798 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
52799 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
52800 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
52801 float64x2_t __ret;
52802 __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
52803 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
52804 return __ret;
52805 }
52806 #endif
52807
52808 #ifdef __LITTLE_ENDIAN__
52809 __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
52810 float64x1_t __ret;
52811 __ret = vfma_f64(__p0, -__p1, __p2);
52812 return __ret;
52813 }
52814 #else
52815 __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
52816 float64x1_t __ret;
52817 __ret = __noswap_vfma_f64(__p0, -__p1, __p2);
52818 return __ret;
52819 }
52820 #endif
52821
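/* The vfms*_lane_* and vfms*_laneq_* macros below follow the same pattern:
 * negate the multiplicand and forward to the corresponding vfma*_lane(q)
 * form.  The numbered temporaries (__s0_104, __ret_105, ...) only keep the
 * locals of nested macro expansions from colliding; the numbers carry no
 * other meaning. */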
52822 #ifdef __LITTLE_ENDIAN__
52823 #define vfmsd_lane_f64(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
52824 float64_t __s0_104 = __p0_104; \
52825 float64_t __s1_104 = __p1_104; \
52826 float64x1_t __s2_104 = __p2_104; \
52827 float64_t __ret_104; \
52828 __ret_104 = vfmad_lane_f64(__s0_104, -__s1_104, __s2_104, __p3_104); \
52829 __ret_104; \
52830 })
52831 #else
52832 #define vfmsd_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
52833 float64_t __s0_105 = __p0_105; \
52834 float64_t __s1_105 = __p1_105; \
52835 float64x1_t __s2_105 = __p2_105; \
52836 float64_t __ret_105; \
52837 __ret_105 = __noswap_vfmad_lane_f64(__s0_105, -__s1_105, __s2_105, __p3_105); \
52838 __ret_105; \
52839 })
52840 #endif
52841
52842 #ifdef __LITTLE_ENDIAN__
52843 #define vfmss_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
52844 float32_t __s0_106 = __p0_106; \
52845 float32_t __s1_106 = __p1_106; \
52846 float32x2_t __s2_106 = __p2_106; \
52847 float32_t __ret_106; \
52848 __ret_106 = vfmas_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \
52849 __ret_106; \
52850 })
52851 #else
52852 #define vfmss_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
52853 float32_t __s0_107 = __p0_107; \
52854 float32_t __s1_107 = __p1_107; \
52855 float32x2_t __s2_107 = __p2_107; \
52856 float32x2_t __rev2_107; __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \
52857 float32_t __ret_107; \
52858 __ret_107 = __noswap_vfmas_lane_f32(__s0_107, -__s1_107, __rev2_107, __p3_107); \
52859 __ret_107; \
52860 })
52861 #endif
52862
52863 #ifdef __LITTLE_ENDIAN__
52864 #define vfmsq_lane_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
52865 float64x2_t __s0_108 = __p0_108; \
52866 float64x2_t __s1_108 = __p1_108; \
52867 float64x1_t __s2_108 = __p2_108; \
52868 float64x2_t __ret_108; \
52869 __ret_108 = vfmaq_lane_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \
52870 __ret_108; \
52871 })
52872 #else
52873 #define vfmsq_lane_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
52874 float64x2_t __s0_109 = __p0_109; \
52875 float64x2_t __s1_109 = __p1_109; \
52876 float64x1_t __s2_109 = __p2_109; \
52877 float64x2_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 1, 0); \
52878 float64x2_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 1, 0); \
52879 float64x2_t __ret_109; \
52880 __ret_109 = __noswap_vfmaq_lane_f64(__rev0_109, -__rev1_109, __s2_109, __p3_109); \
52881 __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 1, 0); \
52882 __ret_109; \
52883 })
52884 #endif
52885
52886 #ifdef __LITTLE_ENDIAN__
52887 #define vfmsq_lane_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
52888 float32x4_t __s0_110 = __p0_110; \
52889 float32x4_t __s1_110 = __p1_110; \
52890 float32x2_t __s2_110 = __p2_110; \
52891 float32x4_t __ret_110; \
52892 __ret_110 = vfmaq_lane_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \
52893 __ret_110; \
52894 })
52895 #else
52896 #define vfmsq_lane_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
52897 float32x4_t __s0_111 = __p0_111; \
52898 float32x4_t __s1_111 = __p1_111; \
52899 float32x2_t __s2_111 = __p2_111; \
52900 float32x4_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 3, 2, 1, 0); \
52901 float32x4_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 3, 2, 1, 0); \
52902 float32x2_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
52903 float32x4_t __ret_111; \
52904 __ret_111 = __noswap_vfmaq_lane_f32(__rev0_111, -__rev1_111, __rev2_111, __p3_111); \
52905 __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 3, 2, 1, 0); \
52906 __ret_111; \
52907 })
52908 #endif
52909
52910 #ifdef __LITTLE_ENDIAN__
52911 #define vfms_lane_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
52912 float64x1_t __s0_112 = __p0_112; \
52913 float64x1_t __s1_112 = __p1_112; \
52914 float64x1_t __s2_112 = __p2_112; \
52915 float64x1_t __ret_112; \
52916 __ret_112 = vfma_lane_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \
52917 __ret_112; \
52918 })
52919 #else
52920 #define vfms_lane_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
52921 float64x1_t __s0_113 = __p0_113; \
52922 float64x1_t __s1_113 = __p1_113; \
52923 float64x1_t __s2_113 = __p2_113; \
52924 float64x1_t __ret_113; \
52925 __ret_113 = __noswap_vfma_lane_f64(__s0_113, -__s1_113, __s2_113, __p3_113); \
52926 __ret_113; \
52927 })
52928 #endif
52929
52930 #ifdef __LITTLE_ENDIAN__
52931 #define vfms_lane_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
52932 float32x2_t __s0_114 = __p0_114; \
52933 float32x2_t __s1_114 = __p1_114; \
52934 float32x2_t __s2_114 = __p2_114; \
52935 float32x2_t __ret_114; \
52936 __ret_114 = vfma_lane_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \
52937 __ret_114; \
52938 })
52939 #else
52940 #define vfms_lane_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
52941 float32x2_t __s0_115 = __p0_115; \
52942 float32x2_t __s1_115 = __p1_115; \
52943 float32x2_t __s2_115 = __p2_115; \
52944 float32x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
52945 float32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
52946 float32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
52947 float32x2_t __ret_115; \
52948 __ret_115 = __noswap_vfma_lane_f32(__rev0_115, -__rev1_115, __rev2_115, __p3_115); \
52949 __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
52950 __ret_115; \
52951 })
52952 #endif
52953
52954 #ifdef __LITTLE_ENDIAN__
52955 #define vfmsd_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
52956 float64_t __s0_116 = __p0_116; \
52957 float64_t __s1_116 = __p1_116; \
52958 float64x2_t __s2_116 = __p2_116; \
52959 float64_t __ret_116; \
52960 __ret_116 = vfmad_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \
52961 __ret_116; \
52962 })
52963 #else
52964 #define vfmsd_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
52965 float64_t __s0_117 = __p0_117; \
52966 float64_t __s1_117 = __p1_117; \
52967 float64x2_t __s2_117 = __p2_117; \
52968 float64x2_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \
52969 float64_t __ret_117; \
52970 __ret_117 = __noswap_vfmad_laneq_f64(__s0_117, -__s1_117, __rev2_117, __p3_117); \
52971 __ret_117; \
52972 })
52973 #endif
52974
52975 #ifdef __LITTLE_ENDIAN__
52976 #define vfmss_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
52977 float32_t __s0_118 = __p0_118; \
52978 float32_t __s1_118 = __p1_118; \
52979 float32x4_t __s2_118 = __p2_118; \
52980 float32_t __ret_118; \
52981 __ret_118 = vfmas_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \
52982 __ret_118; \
52983 })
52984 #else
52985 #define vfmss_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
52986 float32_t __s0_119 = __p0_119; \
52987 float32_t __s1_119 = __p1_119; \
52988 float32x4_t __s2_119 = __p2_119; \
52989 float32x4_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \
52990 float32_t __ret_119; \
52991 __ret_119 = __noswap_vfmas_laneq_f32(__s0_119, -__s1_119, __rev2_119, __p3_119); \
52992 __ret_119; \
52993 })
52994 #endif
52995
52996 #ifdef __LITTLE_ENDIAN__
52997 #define vfmsq_laneq_f64(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \
52998 float64x2_t __s0_120 = __p0_120; \
52999 float64x2_t __s1_120 = __p1_120; \
53000 float64x2_t __s2_120 = __p2_120; \
53001 float64x2_t __ret_120; \
53002 __ret_120 = vfmaq_laneq_f64(__s0_120, -__s1_120, __s2_120, __p3_120); \
53003 __ret_120; \
53004 })
53005 #else
53006 #define vfmsq_laneq_f64(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \
53007 float64x2_t __s0_121 = __p0_121; \
53008 float64x2_t __s1_121 = __p1_121; \
53009 float64x2_t __s2_121 = __p2_121; \
53010 float64x2_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 1, 0); \
53011 float64x2_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 1, 0); \
53012 float64x2_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 1, 0); \
53013 float64x2_t __ret_121; \
53014 __ret_121 = __noswap_vfmaq_laneq_f64(__rev0_121, -__rev1_121, __rev2_121, __p3_121); \
53015 __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 1, 0); \
53016 __ret_121; \
53017 })
53018 #endif
53019
53020 #ifdef __LITTLE_ENDIAN__
53021 #define vfmsq_laneq_f32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \
53022 float32x4_t __s0_122 = __p0_122; \
53023 float32x4_t __s1_122 = __p1_122; \
53024 float32x4_t __s2_122 = __p2_122; \
53025 float32x4_t __ret_122; \
53026 __ret_122 = vfmaq_laneq_f32(__s0_122, -__s1_122, __s2_122, __p3_122); \
53027 __ret_122; \
53028 })
53029 #else
53030 #define vfmsq_laneq_f32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \
53031 float32x4_t __s0_123 = __p0_123; \
53032 float32x4_t __s1_123 = __p1_123; \
53033 float32x4_t __s2_123 = __p2_123; \
53034 float32x4_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
53035 float32x4_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 3, 2, 1, 0); \
53036 float32x4_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, 3, 2, 1, 0); \
53037 float32x4_t __ret_123; \
53038 __ret_123 = __noswap_vfmaq_laneq_f32(__rev0_123, -__rev1_123, __rev2_123, __p3_123); \
53039 __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 3, 2, 1, 0); \
53040 __ret_123; \
53041 })
53042 #endif
53043
53044 #ifdef __LITTLE_ENDIAN__
53045 #define vfms_laneq_f64(__p0_124, __p1_124, __p2_124, __p3_124) __extension__ ({ \
53046 float64x1_t __s0_124 = __p0_124; \
53047 float64x1_t __s1_124 = __p1_124; \
53048 float64x2_t __s2_124 = __p2_124; \
53049 float64x1_t __ret_124; \
53050 __ret_124 = vfma_laneq_f64(__s0_124, -__s1_124, __s2_124, __p3_124); \
53051 __ret_124; \
53052 })
53053 #else
53054 #define vfms_laneq_f64(__p0_125, __p1_125, __p2_125, __p3_125) __extension__ ({ \
53055 float64x1_t __s0_125 = __p0_125; \
53056 float64x1_t __s1_125 = __p1_125; \
53057 float64x2_t __s2_125 = __p2_125; \
53058 float64x2_t __rev2_125; __rev2_125 = __builtin_shufflevector(__s2_125, __s2_125, 1, 0); \
53059 float64x1_t __ret_125; \
53060 __ret_125 = __noswap_vfma_laneq_f64(__s0_125, -__s1_125, __rev2_125, __p3_125); \
53061 __ret_125; \
53062 })
53063 #endif
53064
53065 #ifdef __LITTLE_ENDIAN__
53066 #define vfms_laneq_f32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \
53067 float32x2_t __s0_126 = __p0_126; \
53068 float32x2_t __s1_126 = __p1_126; \
53069 float32x4_t __s2_126 = __p2_126; \
53070 float32x2_t __ret_126; \
53071 __ret_126 = vfma_laneq_f32(__s0_126, -__s1_126, __s2_126, __p3_126); \
53072 __ret_126; \
53073 })
53074 #else
53075 #define vfms_laneq_f32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \
53076 float32x2_t __s0_127 = __p0_127; \
53077 float32x2_t __s1_127 = __p1_127; \
53078 float32x4_t __s2_127 = __p2_127; \
53079 float32x2_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 1, 0); \
53080 float32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
53081 float32x4_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 3, 2, 1, 0); \
53082 float32x2_t __ret_127; \
53083 __ret_127 = __noswap_vfma_laneq_f32(__rev0_127, -__rev1_127, __rev2_127, __p3_127); \
53084 __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 1, 0); \
53085 __ret_127; \
53086 })
53087 #endif
53088
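/* vfms(q)_n_*: fused multiply-subtract with a broadcast scalar multiplier,
 * again expressed as vfma with a negated multiplicand. */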
53089 #ifdef __LITTLE_ENDIAN__
53090 __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
53091 float64x2_t __ret;
53092 __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
53093 return __ret;
53094 }
53095 #else
53096 __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
53097 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53098 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
53099 float64x2_t __ret;
53100 __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
53101 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53102 return __ret;
53103 }
53104 #endif
53105
53106 #ifdef __LITTLE_ENDIAN__
53107 __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
53108 float32x4_t __ret;
53109 __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
53110 return __ret;
53111 }
53112 #else
53113 __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
53114 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
53115 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
53116 float32x4_t __ret;
53117 __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
53118 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
53119 return __ret;
53120 }
53121 #endif
53122
53123 #ifdef __LITTLE_ENDIAN__
53124 __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
53125 float32x2_t __ret;
53126 __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
53127 return __ret;
53128 }
53129 #else
53130 __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
53131 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53132 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
53133 float32x2_t __ret;
53134 __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
53135 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
53136 return __ret;
53137 }
53138 #endif
53139
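/* vget_high_p64 / vget_high_f64: return the upper 64-bit half (lane 1) of a
 * 128-bit vector. */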
53140 #ifdef __LITTLE_ENDIAN__
53141 __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
53142 poly64x1_t __ret;
53143 __ret = __builtin_shufflevector(__p0, __p0, 1);
53144 return __ret;
53145 }
53146 #else
53147 __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
53148 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53149 poly64x1_t __ret;
53150 __ret = __builtin_shufflevector(__rev0, __rev0, 1);
53151 return __ret;
53152 }
53153 __ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
53154 poly64x1_t __ret;
53155 __ret = __builtin_shufflevector(__p0, __p0, 1);
53156 return __ret;
53157 }
53158 #endif
53159
53160 #ifdef __LITTLE_ENDIAN__
53161 __ai float64x1_t vget_high_f64(float64x2_t __p0) {
53162 float64x1_t __ret;
53163 __ret = __builtin_shufflevector(__p0, __p0, 1);
53164 return __ret;
53165 }
53166 #else
53167 __ai float64x1_t vget_high_f64(float64x2_t __p0) {
53168 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53169 float64x1_t __ret;
53170 __ret = __builtin_shufflevector(__rev0, __rev0, 1);
53171 return __ret;
53172 }
53173 #endif
53174
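/* vget_lane / vgetq_lane for p64/f64: read lane __p1 of a vector as a scalar,
 * e.g. (illustrative only) float64_t x = vgetq_lane_f64(v, 1);
 * The big-endian q forms reverse the vector first so the index names the same
 * architectural lane as on little-endian targets. */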
53175 #ifdef __LITTLE_ENDIAN__
53176 #define vget_lane_p64(__p0, __p1) __extension__ ({ \
53177 poly64x1_t __s0 = __p0; \
53178 poly64_t __ret; \
53179 __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
53180 __ret; \
53181 })
53182 #else
53183 #define vget_lane_p64(__p0, __p1) __extension__ ({ \
53184 poly64x1_t __s0 = __p0; \
53185 poly64_t __ret; \
53186 __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
53187 __ret; \
53188 })
53189 #define __noswap_vget_lane_p64(__p0, __p1) __extension__ ({ \
53190 poly64x1_t __s0 = __p0; \
53191 poly64_t __ret; \
53192 __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
53193 __ret; \
53194 })
53195 #endif
53196
53197 #ifdef __LITTLE_ENDIAN__
53198 #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
53199 poly64x2_t __s0 = __p0; \
53200 poly64_t __ret; \
53201 __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
53202 __ret; \
53203 })
53204 #else
53205 #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
53206 poly64x2_t __s0 = __p0; \
53207 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53208 poly64_t __ret; \
53209 __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
53210 __ret; \
53211 })
53212 #define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
53213 poly64x2_t __s0 = __p0; \
53214 poly64_t __ret; \
53215 __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
53216 __ret; \
53217 })
53218 #endif
53219
53220 #ifdef __LITTLE_ENDIAN__
53221 #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
53222 float64x2_t __s0 = __p0; \
53223 float64_t __ret; \
53224 __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
53225 __ret; \
53226 })
53227 #else
53228 #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
53229 float64x2_t __s0 = __p0; \
53230 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
53231 float64_t __ret; \
53232 __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
53233 __ret; \
53234 })
53235 #define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
53236 float64x2_t __s0 = __p0; \
53237 float64_t __ret; \
53238 __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
53239 __ret; \
53240 })
53241 #endif
53242
53243 #ifdef __LITTLE_ENDIAN__
53244 #define vget_lane_f64(__p0, __p1) __extension__ ({ \
53245 float64x1_t __s0 = __p0; \
53246 float64_t __ret; \
53247 __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
53248 __ret; \
53249 })
53250 #else
53251 #define vget_lane_f64(__p0, __p1) __extension__ ({ \
53252 float64x1_t __s0 = __p0; \
53253 float64_t __ret; \
53254 __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
53255 __ret; \
53256 })
53257 #define __noswap_vget_lane_f64(__p0, __p1) __extension__ ({ \
53258 float64x1_t __s0 = __p0; \
53259 float64_t __ret; \
53260 __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
53261 __ret; \
53262 })
53263 #endif
53264
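/* vget_low_p64 / vget_low_f64: return the lower 64-bit half (lane 0) of a
 * 128-bit vector. */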
53265 #ifdef __LITTLE_ENDIAN__
53266 __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
53267 poly64x1_t __ret;
53268 __ret = __builtin_shufflevector(__p0, __p0, 0);
53269 return __ret;
53270 }
53271 #else
53272 __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
53273 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53274 poly64x1_t __ret;
53275 __ret = __builtin_shufflevector(__rev0, __rev0, 0);
53276 return __ret;
53277 }
53278 #endif
53279
53280 #ifdef __LITTLE_ENDIAN__
53281 __ai float64x1_t vget_low_f64(float64x2_t __p0) {
53282 float64x1_t __ret;
53283 __ret = __builtin_shufflevector(__p0, __p0, 0);
53284 return __ret;
53285 }
53286 #else
53287 __ai float64x1_t vget_low_f64(float64x2_t __p0) {
53288 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
53289 float64x1_t __ret;
53290 __ret = __builtin_shufflevector(__rev0, __rev0, 0);
53291 return __ret;
53292 }
53293 #endif
53294
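/* vld1 / vld1q for p64/f64: load one 64- or 128-bit vector from the pointer,
 * e.g. (illustrative only) float64x2_t v = vld1q_f64(p);  // {p[0], p[1]}
 * As elsewhere in this header, the final integer argument to the builtin is
 * an internal element-type code. */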
53295 #ifdef __LITTLE_ENDIAN__
53296 #define vld1_p64(__p0) __extension__ ({ \
53297 poly64x1_t __ret; \
53298 __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
53299 __ret; \
53300 })
53301 #else
53302 #define vld1_p64(__p0) __extension__ ({ \
53303 poly64x1_t __ret; \
53304 __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
53305 __ret; \
53306 })
53307 #endif
53308
53309 #ifdef __LITTLE_ENDIAN__
53310 #define vld1q_p64(__p0) __extension__ ({ \
53311 poly64x2_t __ret; \
53312 __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
53313 __ret; \
53314 })
53315 #else
53316 #define vld1q_p64(__p0) __extension__ ({ \
53317 poly64x2_t __ret; \
53318 __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
53319 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53320 __ret; \
53321 })
53322 #endif
53323
53324 #ifdef __LITTLE_ENDIAN__
53325 #define vld1q_f64(__p0) __extension__ ({ \
53326 float64x2_t __ret; \
53327 __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
53328 __ret; \
53329 })
53330 #else
53331 #define vld1q_f64(__p0) __extension__ ({ \
53332 float64x2_t __ret; \
53333 __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
53334 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53335 __ret; \
53336 })
53337 #endif
53338
53339 #ifdef __LITTLE_ENDIAN__
53340 #define vld1_f64(__p0) __extension__ ({ \
53341 float64x1_t __ret; \
53342 __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
53343 __ret; \
53344 })
53345 #else
53346 #define vld1_f64(__p0) __extension__ ({ \
53347 float64x1_t __ret; \
53348 __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
53349 __ret; \
53350 })
53351 #endif
53352
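/* vld1_dup / vld1q_dup: load a single element and replicate it into every
 * lane, e.g. (illustrative only) vld1q_dup_f64(p) -> {*p, *p}. */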
53353 #ifdef __LITTLE_ENDIAN__
53354 #define vld1_dup_p64(__p0) __extension__ ({ \
53355 poly64x1_t __ret; \
53356 __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
53357 __ret; \
53358 })
53359 #else
53360 #define vld1_dup_p64(__p0) __extension__ ({ \
53361 poly64x1_t __ret; \
53362 __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
53363 __ret; \
53364 })
53365 #endif
53366
53367 #ifdef __LITTLE_ENDIAN__
53368 #define vld1q_dup_p64(__p0) __extension__ ({ \
53369 poly64x2_t __ret; \
53370 __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
53371 __ret; \
53372 })
53373 #else
53374 #define vld1q_dup_p64(__p0) __extension__ ({ \
53375 poly64x2_t __ret; \
53376 __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
53377 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53378 __ret; \
53379 })
53380 #endif
53381
53382 #ifdef __LITTLE_ENDIAN__
53383 #define vld1q_dup_f64(__p0) __extension__ ({ \
53384 float64x2_t __ret; \
53385 __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
53386 __ret; \
53387 })
53388 #else
53389 #define vld1q_dup_f64(__p0) __extension__ ({ \
53390 float64x2_t __ret; \
53391 __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
53392 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53393 __ret; \
53394 })
53395 #endif
53396
53397 #ifdef __LITTLE_ENDIAN__
53398 #define vld1_dup_f64(__p0) __extension__ ({ \
53399 float64x1_t __ret; \
53400 __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
53401 __ret; \
53402 })
53403 #else
53404 #define vld1_dup_f64(__p0) __extension__ ({ \
53405 float64x1_t __ret; \
53406 __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
53407 __ret; \
53408 })
53409 #endif
53410
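/* vld1_lane / vld1q_lane: load one element from memory into lane __p2 of the
 * vector given by __p1, leaving the other lanes unchanged. */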
53411 #ifdef __LITTLE_ENDIAN__
53412 #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
53413 poly64x1_t __s1 = __p1; \
53414 poly64x1_t __ret; \
53415 __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
53416 __ret; \
53417 })
53418 #else
53419 #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
53420 poly64x1_t __s1 = __p1; \
53421 poly64x1_t __ret; \
53422 __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
53423 __ret; \
53424 })
53425 #endif
53426
53427 #ifdef __LITTLE_ENDIAN__
53428 #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
53429 poly64x2_t __s1 = __p1; \
53430 poly64x2_t __ret; \
53431 __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
53432 __ret; \
53433 })
53434 #else
53435 #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
53436 poly64x2_t __s1 = __p1; \
53437 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
53438 poly64x2_t __ret; \
53439 __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
53440 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53441 __ret; \
53442 })
53443 #endif
53444
53445 #ifdef __LITTLE_ENDIAN__
53446 #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
53447 float64x2_t __s1 = __p1; \
53448 float64x2_t __ret; \
53449 __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
53450 __ret; \
53451 })
53452 #else
53453 #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
53454 float64x2_t __s1 = __p1; \
53455 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
53456 float64x2_t __ret; \
53457 __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
53458 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
53459 __ret; \
53460 })
53461 #endif
53462
53463 #ifdef __LITTLE_ENDIAN__
53464 #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
53465 float64x1_t __s1 = __p1; \
53466 float64x1_t __ret; \
53467 __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
53468 __ret; \
53469 })
53470 #else
53471 #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
53472 float64x1_t __s1 = __p1; \
53473 float64x1_t __ret; \
53474 __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
53475 __ret; \
53476 })
53477 #endif
53478
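/* vld1_*_x2 / vld1q_*_x2: load two consecutive vectors from memory into the
 * val[0]/val[1] fields of an x2 structure; the big-endian variants reverse
 * each field after the load. */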
53479 #ifdef __LITTLE_ENDIAN__
53480 #define vld1_p8_x2(__p0) __extension__ ({ \
53481 poly8x8x2_t __ret; \
53482 __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
53483 __ret; \
53484 })
53485 #else
53486 #define vld1_p8_x2(__p0) __extension__ ({ \
53487 poly8x8x2_t __ret; \
53488 __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
53489 \
53490 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53491 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53492 __ret; \
53493 })
53494 #endif
53495
53496 #ifdef __LITTLE_ENDIAN__
53497 #define vld1_p64_x2(__p0) __extension__ ({ \
53498 poly64x1x2_t __ret; \
53499 __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
53500 __ret; \
53501 })
53502 #else
53503 #define vld1_p64_x2(__p0) __extension__ ({ \
53504 poly64x1x2_t __ret; \
53505 __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
53506 __ret; \
53507 })
53508 #endif
53509
53510 #ifdef __LITTLE_ENDIAN__
53511 #define vld1_p16_x2(__p0) __extension__ ({ \
53512 poly16x4x2_t __ret; \
53513 __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
53514 __ret; \
53515 })
53516 #else
53517 #define vld1_p16_x2(__p0) __extension__ ({ \
53518 poly16x4x2_t __ret; \
53519 __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
53520 \
53521 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53522 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53523 __ret; \
53524 })
53525 #endif
53526
53527 #ifdef __LITTLE_ENDIAN__
53528 #define vld1q_p8_x2(__p0) __extension__ ({ \
53529 poly8x16x2_t __ret; \
53530 __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
53531 __ret; \
53532 })
53533 #else
53534 #define vld1q_p8_x2(__p0) __extension__ ({ \
53535 poly8x16x2_t __ret; \
53536 __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
53537 \
53538 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
53539 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
53540 __ret; \
53541 })
53542 #endif
53543
53544 #ifdef __LITTLE_ENDIAN__
53545 #define vld1q_p64_x2(__p0) __extension__ ({ \
53546 poly64x2x2_t __ret; \
53547 __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
53548 __ret; \
53549 })
53550 #else
53551 #define vld1q_p64_x2(__p0) __extension__ ({ \
53552 poly64x2x2_t __ret; \
53553 __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
53554 \
53555 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
53556 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
53557 __ret; \
53558 })
53559 #endif
53560
53561 #ifdef __LITTLE_ENDIAN__
53562 #define vld1q_p16_x2(__p0) __extension__ ({ \
53563 poly16x8x2_t __ret; \
53564 __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
53565 __ret; \
53566 })
53567 #else
53568 #define vld1q_p16_x2(__p0) __extension__ ({ \
53569 poly16x8x2_t __ret; \
53570 __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
53571 \
53572 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53573 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53574 __ret; \
53575 })
53576 #endif
53577
53578 #ifdef __LITTLE_ENDIAN__
53579 #define vld1q_u8_x2(__p0) __extension__ ({ \
53580 uint8x16x2_t __ret; \
53581 __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
53582 __ret; \
53583 })
53584 #else
53585 #define vld1q_u8_x2(__p0) __extension__ ({ \
53586 uint8x16x2_t __ret; \
53587 __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
53588 \
53589 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
53590 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
53591 __ret; \
53592 })
53593 #endif
53594
53595 #ifdef __LITTLE_ENDIAN__
53596 #define vld1q_u32_x2(__p0) __extension__ ({ \
53597 uint32x4x2_t __ret; \
53598 __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
53599 __ret; \
53600 })
53601 #else
53602 #define vld1q_u32_x2(__p0) __extension__ ({ \
53603 uint32x4x2_t __ret; \
53604 __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
53605 \
53606 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53607 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53608 __ret; \
53609 })
53610 #endif
53611
53612 #ifdef __LITTLE_ENDIAN__
53613 #define vld1q_u64_x2(__p0) __extension__ ({ \
53614 uint64x2x2_t __ret; \
53615 __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
53616 __ret; \
53617 })
53618 #else
53619 #define vld1q_u64_x2(__p0) __extension__ ({ \
53620 uint64x2x2_t __ret; \
53621 __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
53622 \
53623 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
53624 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
53625 __ret; \
53626 })
53627 #endif
53628
53629 #ifdef __LITTLE_ENDIAN__
53630 #define vld1q_u16_x2(__p0) __extension__ ({ \
53631 uint16x8x2_t __ret; \
53632 __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
53633 __ret; \
53634 })
53635 #else
53636 #define vld1q_u16_x2(__p0) __extension__ ({ \
53637 uint16x8x2_t __ret; \
53638 __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
53639 \
53640 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53641 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53642 __ret; \
53643 })
53644 #endif
53645
53646 #ifdef __LITTLE_ENDIAN__
53647 #define vld1q_s8_x2(__p0) __extension__ ({ \
53648 int8x16x2_t __ret; \
53649 __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
53650 __ret; \
53651 })
53652 #else
53653 #define vld1q_s8_x2(__p0) __extension__ ({ \
53654 int8x16x2_t __ret; \
53655 __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
53656 \
53657 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
53658 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
53659 __ret; \
53660 })
53661 #endif
53662
53663 #ifdef __LITTLE_ENDIAN__
53664 #define vld1q_f64_x2(__p0) __extension__ ({ \
53665 float64x2x2_t __ret; \
53666 __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
53667 __ret; \
53668 })
53669 #else
53670 #define vld1q_f64_x2(__p0) __extension__ ({ \
53671 float64x2x2_t __ret; \
53672 __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
53673 \
53674 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
53675 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
53676 __ret; \
53677 })
53678 #endif
53679
53680 #ifdef __LITTLE_ENDIAN__
53681 #define vld1q_f32_x2(__p0) __extension__ ({ \
53682 float32x4x2_t __ret; \
53683 __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
53684 __ret; \
53685 })
53686 #else
53687 #define vld1q_f32_x2(__p0) __extension__ ({ \
53688 float32x4x2_t __ret; \
53689 __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
53690 \
53691 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53692 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53693 __ret; \
53694 })
53695 #endif
53696
53697 #ifdef __LITTLE_ENDIAN__
53698 #define vld1q_f16_x2(__p0) __extension__ ({ \
53699 float16x8x2_t __ret; \
53700 __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
53701 __ret; \
53702 })
53703 #else
53704 #define vld1q_f16_x2(__p0) __extension__ ({ \
53705 float16x8x2_t __ret; \
53706 __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
53707 \
53708 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53709 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53710 __ret; \
53711 })
53712 #endif
53713
53714 #ifdef __LITTLE_ENDIAN__
53715 #define vld1q_s32_x2(__p0) __extension__ ({ \
53716 int32x4x2_t __ret; \
53717 __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
53718 __ret; \
53719 })
53720 #else
53721 #define vld1q_s32_x2(__p0) __extension__ ({ \
53722 int32x4x2_t __ret; \
53723 __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
53724 \
53725 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53726 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53727 __ret; \
53728 })
53729 #endif
53730
53731 #ifdef __LITTLE_ENDIAN__
53732 #define vld1q_s64_x2(__p0) __extension__ ({ \
53733 int64x2x2_t __ret; \
53734 __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
53735 __ret; \
53736 })
53737 #else
53738 #define vld1q_s64_x2(__p0) __extension__ ({ \
53739 int64x2x2_t __ret; \
53740 __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
53741 \
53742 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
53743 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
53744 __ret; \
53745 })
53746 #endif
53747
53748 #ifdef __LITTLE_ENDIAN__
53749 #define vld1q_s16_x2(__p0) __extension__ ({ \
53750 int16x8x2_t __ret; \
53751 __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
53752 __ret; \
53753 })
53754 #else
53755 #define vld1q_s16_x2(__p0) __extension__ ({ \
53756 int16x8x2_t __ret; \
53757 __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
53758 \
53759 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53760 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53761 __ret; \
53762 })
53763 #endif
53764
53765 #ifdef __LITTLE_ENDIAN__
53766 #define vld1_u8_x2(__p0) __extension__ ({ \
53767 uint8x8x2_t __ret; \
53768 __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
53769 __ret; \
53770 })
53771 #else
53772 #define vld1_u8_x2(__p0) __extension__ ({ \
53773 uint8x8x2_t __ret; \
53774 __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
53775 \
53776 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53777 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53778 __ret; \
53779 })
53780 #endif
53781
53782 #ifdef __LITTLE_ENDIAN__
53783 #define vld1_u32_x2(__p0) __extension__ ({ \
53784 uint32x2x2_t __ret; \
53785 __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
53786 __ret; \
53787 })
53788 #else
53789 #define vld1_u32_x2(__p0) __extension__ ({ \
53790 uint32x2x2_t __ret; \
53791 __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
53792 \
53793 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
53794 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
53795 __ret; \
53796 })
53797 #endif
53798
53799 #ifdef __LITTLE_ENDIAN__
53800 #define vld1_u64_x2(__p0) __extension__ ({ \
53801 uint64x1x2_t __ret; \
53802 __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
53803 __ret; \
53804 })
53805 #else
53806 #define vld1_u64_x2(__p0) __extension__ ({ \
53807 uint64x1x2_t __ret; \
53808 __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
53809 __ret; \
53810 })
53811 #endif
53812
53813 #ifdef __LITTLE_ENDIAN__
53814 #define vld1_u16_x2(__p0) __extension__ ({ \
53815 uint16x4x2_t __ret; \
53816 __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
53817 __ret; \
53818 })
53819 #else
53820 #define vld1_u16_x2(__p0) __extension__ ({ \
53821 uint16x4x2_t __ret; \
53822 __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
53823 \
53824 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53825 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53826 __ret; \
53827 })
53828 #endif
53829
53830 #ifdef __LITTLE_ENDIAN__
53831 #define vld1_s8_x2(__p0) __extension__ ({ \
53832 int8x8x2_t __ret; \
53833 __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
53834 __ret; \
53835 })
53836 #else
53837 #define vld1_s8_x2(__p0) __extension__ ({ \
53838 int8x8x2_t __ret; \
53839 __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
53840 \
53841 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53842 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53843 __ret; \
53844 })
53845 #endif
53846
53847 #ifdef __LITTLE_ENDIAN__
53848 #define vld1_f64_x2(__p0) __extension__ ({ \
53849 float64x1x2_t __ret; \
53850 __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
53851 __ret; \
53852 })
53853 #else
53854 #define vld1_f64_x2(__p0) __extension__ ({ \
53855 float64x1x2_t __ret; \
53856 __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
53857 __ret; \
53858 })
53859 #endif
53860
53861 #ifdef __LITTLE_ENDIAN__
53862 #define vld1_f32_x2(__p0) __extension__ ({ \
53863 float32x2x2_t __ret; \
53864 __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
53865 __ret; \
53866 })
53867 #else
53868 #define vld1_f32_x2(__p0) __extension__ ({ \
53869 float32x2x2_t __ret; \
53870 __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
53871 \
53872 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
53873 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
53874 __ret; \
53875 })
53876 #endif
53877
53878 #ifdef __LITTLE_ENDIAN__
53879 #define vld1_f16_x2(__p0) __extension__ ({ \
53880 float16x4x2_t __ret; \
53881 __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
53882 __ret; \
53883 })
53884 #else
53885 #define vld1_f16_x2(__p0) __extension__ ({ \
53886 float16x4x2_t __ret; \
53887 __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
53888 \
53889 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53890 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53891 __ret; \
53892 })
53893 #endif
53894
53895 #ifdef __LITTLE_ENDIAN__
53896 #define vld1_s32_x2(__p0) __extension__ ({ \
53897 int32x2x2_t __ret; \
53898 __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
53899 __ret; \
53900 })
53901 #else
53902 #define vld1_s32_x2(__p0) __extension__ ({ \
53903 int32x2x2_t __ret; \
53904 __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
53905 \
53906 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
53907 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
53908 __ret; \
53909 })
53910 #endif
53911
53912 #ifdef __LITTLE_ENDIAN__
53913 #define vld1_s64_x2(__p0) __extension__ ({ \
53914 int64x1x2_t __ret; \
53915 __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
53916 __ret; \
53917 })
53918 #else
53919 #define vld1_s64_x2(__p0) __extension__ ({ \
53920 int64x1x2_t __ret; \
53921 __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
53922 __ret; \
53923 })
53924 #endif
53925
53926 #ifdef __LITTLE_ENDIAN__
53927 #define vld1_s16_x2(__p0) __extension__ ({ \
53928 int16x4x2_t __ret; \
53929 __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
53930 __ret; \
53931 })
53932 #else
53933 #define vld1_s16_x2(__p0) __extension__ ({ \
53934 int16x4x2_t __ret; \
53935 __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
53936 \
53937 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53938 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53939 __ret; \
53940 })
53941 #endif
53942
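/*
 * Usage sketch for the _x2 forms (illustrative only; the pointer name is
 * hypothetical and assumes at least 32 readable bytes on a little-endian
 * AArch64 target):
 *
 *   const uint8_t *src = ...;
 *   uint8x16x2_t pair = vld1q_u8_x2(src);   // pair.val[0] <- src[0..15]
 *                                           // pair.val[1] <- src[16..31]
 *
 * The vld1_<type>_x3 macros that follow use the same pattern but fill a
 * <type>x3_t aggregate with three consecutive vectors.
 */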
53943 #ifdef __LITTLE_ENDIAN__
53944 #define vld1_p8_x3(__p0) __extension__ ({ \
53945 poly8x8x3_t __ret; \
53946 __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
53947 __ret; \
53948 })
53949 #else
53950 #define vld1_p8_x3(__p0) __extension__ ({ \
53951 poly8x8x3_t __ret; \
53952 __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
53953 \
53954 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
53955 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
53956 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
53957 __ret; \
53958 })
53959 #endif
53960
53961 #ifdef __LITTLE_ENDIAN__
53962 #define vld1_p64_x3(__p0) __extension__ ({ \
53963 poly64x1x3_t __ret; \
53964 __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
53965 __ret; \
53966 })
53967 #else
53968 #define vld1_p64_x3(__p0) __extension__ ({ \
53969 poly64x1x3_t __ret; \
53970 __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
53971 __ret; \
53972 })
53973 #endif
53974
53975 #ifdef __LITTLE_ENDIAN__
53976 #define vld1_p16_x3(__p0) __extension__ ({ \
53977 poly16x4x3_t __ret; \
53978 __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
53979 __ret; \
53980 })
53981 #else
53982 #define vld1_p16_x3(__p0) __extension__ ({ \
53983 poly16x4x3_t __ret; \
53984 __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
53985 \
53986 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
53987 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
53988 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
53989 __ret; \
53990 })
53991 #endif
53992
53993 #ifdef __LITTLE_ENDIAN__
53994 #define vld1q_p8_x3(__p0) __extension__ ({ \
53995 poly8x16x3_t __ret; \
53996 __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
53997 __ret; \
53998 })
53999 #else
54000 #define vld1q_p8_x3(__p0) __extension__ ({ \
54001 poly8x16x3_t __ret; \
54002 __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
54003 \
54004 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54005 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54006 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54007 __ret; \
54008 })
54009 #endif
54010
54011 #ifdef __LITTLE_ENDIAN__
54012 #define vld1q_p64_x3(__p0) __extension__ ({ \
54013 poly64x2x3_t __ret; \
54014 __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
54015 __ret; \
54016 })
54017 #else
54018 #define vld1q_p64_x3(__p0) __extension__ ({ \
54019 poly64x2x3_t __ret; \
54020 __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
54021 \
54022 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54023 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54024 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54025 __ret; \
54026 })
54027 #endif
54028
54029 #ifdef __LITTLE_ENDIAN__
54030 #define vld1q_p16_x3(__p0) __extension__ ({ \
54031 poly16x8x3_t __ret; \
54032 __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
54033 __ret; \
54034 })
54035 #else
54036 #define vld1q_p16_x3(__p0) __extension__ ({ \
54037 poly16x8x3_t __ret; \
54038 __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
54039 \
54040 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54041 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54042 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54043 __ret; \
54044 })
54045 #endif
54046
54047 #ifdef __LITTLE_ENDIAN__
54048 #define vld1q_u8_x3(__p0) __extension__ ({ \
54049 uint8x16x3_t __ret; \
54050 __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
54051 __ret; \
54052 })
54053 #else
54054 #define vld1q_u8_x3(__p0) __extension__ ({ \
54055 uint8x16x3_t __ret; \
54056 __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
54057 \
54058 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54059 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54060 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54061 __ret; \
54062 })
54063 #endif
54064
54065 #ifdef __LITTLE_ENDIAN__
54066 #define vld1q_u32_x3(__p0) __extension__ ({ \
54067 uint32x4x3_t __ret; \
54068 __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
54069 __ret; \
54070 })
54071 #else
54072 #define vld1q_u32_x3(__p0) __extension__ ({ \
54073 uint32x4x3_t __ret; \
54074 __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
54075 \
54076 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54077 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54078 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54079 __ret; \
54080 })
54081 #endif
54082
54083 #ifdef __LITTLE_ENDIAN__
54084 #define vld1q_u64_x3(__p0) __extension__ ({ \
54085 uint64x2x3_t __ret; \
54086 __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
54087 __ret; \
54088 })
54089 #else
54090 #define vld1q_u64_x3(__p0) __extension__ ({ \
54091 uint64x2x3_t __ret; \
54092 __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
54093 \
54094 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54095 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54096 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54097 __ret; \
54098 })
54099 #endif
54100
54101 #ifdef __LITTLE_ENDIAN__
54102 #define vld1q_u16_x3(__p0) __extension__ ({ \
54103 uint16x8x3_t __ret; \
54104 __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
54105 __ret; \
54106 })
54107 #else
54108 #define vld1q_u16_x3(__p0) __extension__ ({ \
54109 uint16x8x3_t __ret; \
54110 __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
54111 \
54112 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54113 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54114 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54115 __ret; \
54116 })
54117 #endif
54118
54119 #ifdef __LITTLE_ENDIAN__
54120 #define vld1q_s8_x3(__p0) __extension__ ({ \
54121 int8x16x3_t __ret; \
54122 __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
54123 __ret; \
54124 })
54125 #else
54126 #define vld1q_s8_x3(__p0) __extension__ ({ \
54127 int8x16x3_t __ret; \
54128 __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
54129 \
54130 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54131 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54132 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54133 __ret; \
54134 })
54135 #endif
54136
54137 #ifdef __LITTLE_ENDIAN__
54138 #define vld1q_f64_x3(__p0) __extension__ ({ \
54139 float64x2x3_t __ret; \
54140 __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
54141 __ret; \
54142 })
54143 #else
54144 #define vld1q_f64_x3(__p0) __extension__ ({ \
54145 float64x2x3_t __ret; \
54146 __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
54147 \
54148 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54149 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54150 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54151 __ret; \
54152 })
54153 #endif
54154
54155 #ifdef __LITTLE_ENDIAN__
54156 #define vld1q_f32_x3(__p0) __extension__ ({ \
54157 float32x4x3_t __ret; \
54158 __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
54159 __ret; \
54160 })
54161 #else
54162 #define vld1q_f32_x3(__p0) __extension__ ({ \
54163 float32x4x3_t __ret; \
54164 __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
54165 \
54166 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54167 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54168 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54169 __ret; \
54170 })
54171 #endif
54172
54173 #ifdef __LITTLE_ENDIAN__
54174 #define vld1q_f16_x3(__p0) __extension__ ({ \
54175 float16x8x3_t __ret; \
54176 __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
54177 __ret; \
54178 })
54179 #else
54180 #define vld1q_f16_x3(__p0) __extension__ ({ \
54181 float16x8x3_t __ret; \
54182 __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
54183 \
54184 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54185 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54186 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54187 __ret; \
54188 })
54189 #endif
54190
54191 #ifdef __LITTLE_ENDIAN__
54192 #define vld1q_s32_x3(__p0) __extension__ ({ \
54193 int32x4x3_t __ret; \
54194 __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
54195 __ret; \
54196 })
54197 #else
54198 #define vld1q_s32_x3(__p0) __extension__ ({ \
54199 int32x4x3_t __ret; \
54200 __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
54201 \
54202 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54203 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54204 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54205 __ret; \
54206 })
54207 #endif
54208
54209 #ifdef __LITTLE_ENDIAN__
54210 #define vld1q_s64_x3(__p0) __extension__ ({ \
54211 int64x2x3_t __ret; \
54212 __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
54213 __ret; \
54214 })
54215 #else
54216 #define vld1q_s64_x3(__p0) __extension__ ({ \
54217 int64x2x3_t __ret; \
54218 __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
54219 \
54220 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54221 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54222 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54223 __ret; \
54224 })
54225 #endif
54226
54227 #ifdef __LITTLE_ENDIAN__
54228 #define vld1q_s16_x3(__p0) __extension__ ({ \
54229 int16x8x3_t __ret; \
54230 __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
54231 __ret; \
54232 })
54233 #else
54234 #define vld1q_s16_x3(__p0) __extension__ ({ \
54235 int16x8x3_t __ret; \
54236 __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
54237 \
54238 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54239 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54240 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54241 __ret; \
54242 })
54243 #endif
54244
54245 #ifdef __LITTLE_ENDIAN__
54246 #define vld1_u8_x3(__p0) __extension__ ({ \
54247 uint8x8x3_t __ret; \
54248 __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
54249 __ret; \
54250 })
54251 #else
54252 #define vld1_u8_x3(__p0) __extension__ ({ \
54253 uint8x8x3_t __ret; \
54254 __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
54255 \
54256 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54257 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54258 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54259 __ret; \
54260 })
54261 #endif
54262
54263 #ifdef __LITTLE_ENDIAN__
54264 #define vld1_u32_x3(__p0) __extension__ ({ \
54265 uint32x2x3_t __ret; \
54266 __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
54267 __ret; \
54268 })
54269 #else
54270 #define vld1_u32_x3(__p0) __extension__ ({ \
54271 uint32x2x3_t __ret; \
54272 __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
54273 \
54274 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54275 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54276 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54277 __ret; \
54278 })
54279 #endif
54280
54281 #ifdef __LITTLE_ENDIAN__
54282 #define vld1_u64_x3(__p0) __extension__ ({ \
54283 uint64x1x3_t __ret; \
54284 __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
54285 __ret; \
54286 })
54287 #else
54288 #define vld1_u64_x3(__p0) __extension__ ({ \
54289 uint64x1x3_t __ret; \
54290 __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
54291 __ret; \
54292 })
54293 #endif
54294
54295 #ifdef __LITTLE_ENDIAN__
54296 #define vld1_u16_x3(__p0) __extension__ ({ \
54297 uint16x4x3_t __ret; \
54298 __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
54299 __ret; \
54300 })
54301 #else
54302 #define vld1_u16_x3(__p0) __extension__ ({ \
54303 uint16x4x3_t __ret; \
54304 __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
54305 \
54306 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54307 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54308 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54309 __ret; \
54310 })
54311 #endif
54312
54313 #ifdef __LITTLE_ENDIAN__
54314 #define vld1_s8_x3(__p0) __extension__ ({ \
54315 int8x8x3_t __ret; \
54316 __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
54317 __ret; \
54318 })
54319 #else
54320 #define vld1_s8_x3(__p0) __extension__ ({ \
54321 int8x8x3_t __ret; \
54322 __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
54323 \
54324 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54325 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54326 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54327 __ret; \
54328 })
54329 #endif
54330
54331 #ifdef __LITTLE_ENDIAN__
54332 #define vld1_f64_x3(__p0) __extension__ ({ \
54333 float64x1x3_t __ret; \
54334 __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
54335 __ret; \
54336 })
54337 #else
54338 #define vld1_f64_x3(__p0) __extension__ ({ \
54339 float64x1x3_t __ret; \
54340 __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
54341 __ret; \
54342 })
54343 #endif
54344
54345 #ifdef __LITTLE_ENDIAN__
54346 #define vld1_f32_x3(__p0) __extension__ ({ \
54347 float32x2x3_t __ret; \
54348 __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
54349 __ret; \
54350 })
54351 #else
54352 #define vld1_f32_x3(__p0) __extension__ ({ \
54353 float32x2x3_t __ret; \
54354 __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
54355 \
54356 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54357 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54358 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54359 __ret; \
54360 })
54361 #endif
54362
54363 #ifdef __LITTLE_ENDIAN__
54364 #define vld1_f16_x3(__p0) __extension__ ({ \
54365 float16x4x3_t __ret; \
54366 __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
54367 __ret; \
54368 })
54369 #else
54370 #define vld1_f16_x3(__p0) __extension__ ({ \
54371 float16x4x3_t __ret; \
54372 __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
54373 \
54374 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54375 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54376 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54377 __ret; \
54378 })
54379 #endif
54380
54381 #ifdef __LITTLE_ENDIAN__
54382 #define vld1_s32_x3(__p0) __extension__ ({ \
54383 int32x2x3_t __ret; \
54384 __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
54385 __ret; \
54386 })
54387 #else
54388 #define vld1_s32_x3(__p0) __extension__ ({ \
54389 int32x2x3_t __ret; \
54390 __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
54391 \
54392 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54393 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54394 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54395 __ret; \
54396 })
54397 #endif
54398
54399 #ifdef __LITTLE_ENDIAN__
54400 #define vld1_s64_x3(__p0) __extension__ ({ \
54401 int64x1x3_t __ret; \
54402 __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
54403 __ret; \
54404 })
54405 #else
54406 #define vld1_s64_x3(__p0) __extension__ ({ \
54407 int64x1x3_t __ret; \
54408 __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
54409 __ret; \
54410 })
54411 #endif
54412
54413 #ifdef __LITTLE_ENDIAN__
54414 #define vld1_s16_x3(__p0) __extension__ ({ \
54415 int16x4x3_t __ret; \
54416 __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
54417 __ret; \
54418 })
54419 #else
54420 #define vld1_s16_x3(__p0) __extension__ ({ \
54421 int16x4x3_t __ret; \
54422 __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
54423 \
54424 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54425 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54426 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54427 __ret; \
54428 })
54429 #endif
54430
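/*
 * The vld1_<type>_x4 macros below extend the same pattern to four
 * consecutive vectors: __builtin_neon_vld1_x4_v fills a <type>x4_t
 * aggregate from contiguous memory, and the big-endian branches reverse
 * the lanes of each of the four sub-vectors.
 */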
54431 #ifdef __LITTLE_ENDIAN__
54432 #define vld1_p8_x4(__p0) __extension__ ({ \
54433 poly8x8x4_t __ret; \
54434 __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
54435 __ret; \
54436 })
54437 #else
54438 #define vld1_p8_x4(__p0) __extension__ ({ \
54439 poly8x8x4_t __ret; \
54440 __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
54441 \
54442 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54443 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54444 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54445 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
54446 __ret; \
54447 })
54448 #endif
54449
54450 #ifdef __LITTLE_ENDIAN__
54451 #define vld1_p64_x4(__p0) __extension__ ({ \
54452 poly64x1x4_t __ret; \
54453 __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
54454 __ret; \
54455 })
54456 #else
54457 #define vld1_p64_x4(__p0) __extension__ ({ \
54458 poly64x1x4_t __ret; \
54459 __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
54460 __ret; \
54461 })
54462 #endif
54463
54464 #ifdef __LITTLE_ENDIAN__
54465 #define vld1_p16_x4(__p0) __extension__ ({ \
54466 poly16x4x4_t __ret; \
54467 __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
54468 __ret; \
54469 })
54470 #else
54471 #define vld1_p16_x4(__p0) __extension__ ({ \
54472 poly16x4x4_t __ret; \
54473 __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
54474 \
54475 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54476 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54477 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54478 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
54479 __ret; \
54480 })
54481 #endif
54482
54483 #ifdef __LITTLE_ENDIAN__
54484 #define vld1q_p8_x4(__p0) __extension__ ({ \
54485 poly8x16x4_t __ret; \
54486 __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
54487 __ret; \
54488 })
54489 #else
54490 #define vld1q_p8_x4(__p0) __extension__ ({ \
54491 poly8x16x4_t __ret; \
54492 __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
54493 \
54494 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54495 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54496 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54497 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54498 __ret; \
54499 })
54500 #endif
54501
54502 #ifdef __LITTLE_ENDIAN__
54503 #define vld1q_p64_x4(__p0) __extension__ ({ \
54504 poly64x2x4_t __ret; \
54505 __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
54506 __ret; \
54507 })
54508 #else
54509 #define vld1q_p64_x4(__p0) __extension__ ({ \
54510 poly64x2x4_t __ret; \
54511 __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
54512 \
54513 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54514 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54515 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54516 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
54517 __ret; \
54518 })
54519 #endif
54520
54521 #ifdef __LITTLE_ENDIAN__
54522 #define vld1q_p16_x4(__p0) __extension__ ({ \
54523 poly16x8x4_t __ret; \
54524 __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
54525 __ret; \
54526 })
54527 #else
54528 #define vld1q_p16_x4(__p0) __extension__ ({ \
54529 poly16x8x4_t __ret; \
54530 __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
54531 \
54532 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54533 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54534 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54535 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
54536 __ret; \
54537 })
54538 #endif
54539
54540 #ifdef __LITTLE_ENDIAN__
54541 #define vld1q_u8_x4(__p0) __extension__ ({ \
54542 uint8x16x4_t __ret; \
54543 __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
54544 __ret; \
54545 })
54546 #else
54547 #define vld1q_u8_x4(__p0) __extension__ ({ \
54548 uint8x16x4_t __ret; \
54549 __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
54550 \
54551 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54552 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54553 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54554 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54555 __ret; \
54556 })
54557 #endif
54558
54559 #ifdef __LITTLE_ENDIAN__
54560 #define vld1q_u32_x4(__p0) __extension__ ({ \
54561 uint32x4x4_t __ret; \
54562 __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
54563 __ret; \
54564 })
54565 #else
54566 #define vld1q_u32_x4(__p0) __extension__ ({ \
54567 uint32x4x4_t __ret; \
54568 __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
54569 \
54570 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54571 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54572 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54573 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
54574 __ret; \
54575 })
54576 #endif
54577
54578 #ifdef __LITTLE_ENDIAN__
54579 #define vld1q_u64_x4(__p0) __extension__ ({ \
54580 uint64x2x4_t __ret; \
54581 __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
54582 __ret; \
54583 })
54584 #else
54585 #define vld1q_u64_x4(__p0) __extension__ ({ \
54586 uint64x2x4_t __ret; \
54587 __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
54588 \
54589 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54590 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54591 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54592 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
54593 __ret; \
54594 })
54595 #endif
54596
54597 #ifdef __LITTLE_ENDIAN__
54598 #define vld1q_u16_x4(__p0) __extension__ ({ \
54599 uint16x8x4_t __ret; \
54600 __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
54601 __ret; \
54602 })
54603 #else
54604 #define vld1q_u16_x4(__p0) __extension__ ({ \
54605 uint16x8x4_t __ret; \
54606 __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
54607 \
54608 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54609 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54610 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54611 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
54612 __ret; \
54613 })
54614 #endif
54615
54616 #ifdef __LITTLE_ENDIAN__
54617 #define vld1q_s8_x4(__p0) __extension__ ({ \
54618 int8x16x4_t __ret; \
54619 __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
54620 __ret; \
54621 })
54622 #else
54623 #define vld1q_s8_x4(__p0) __extension__ ({ \
54624 int8x16x4_t __ret; \
54625 __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
54626 \
54627 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54628 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54629 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54630 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
54631 __ret; \
54632 })
54633 #endif
54634
54635 #ifdef __LITTLE_ENDIAN__
54636 #define vld1q_f64_x4(__p0) __extension__ ({ \
54637 float64x2x4_t __ret; \
54638 __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
54639 __ret; \
54640 })
54641 #else
54642 #define vld1q_f64_x4(__p0) __extension__ ({ \
54643 float64x2x4_t __ret; \
54644 __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
54645 \
54646 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54647 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54648 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54649 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
54650 __ret; \
54651 })
54652 #endif
54653
54654 #ifdef __LITTLE_ENDIAN__
54655 #define vld1q_f32_x4(__p0) __extension__ ({ \
54656 float32x4x4_t __ret; \
54657 __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
54658 __ret; \
54659 })
54660 #else
54661 #define vld1q_f32_x4(__p0) __extension__ ({ \
54662 float32x4x4_t __ret; \
54663 __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
54664 \
54665 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54666 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54667 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54668 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
54669 __ret; \
54670 })
54671 #endif
54672
54673 #ifdef __LITTLE_ENDIAN__
54674 #define vld1q_f16_x4(__p0) __extension__ ({ \
54675 float16x8x4_t __ret; \
54676 __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
54677 __ret; \
54678 })
54679 #else
54680 #define vld1q_f16_x4(__p0) __extension__ ({ \
54681 float16x8x4_t __ret; \
54682 __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
54683 \
54684 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54685 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54686 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54687 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
54688 __ret; \
54689 })
54690 #endif
54691
54692 #ifdef __LITTLE_ENDIAN__
54693 #define vld1q_s32_x4(__p0) __extension__ ({ \
54694 int32x4x4_t __ret; \
54695 __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
54696 __ret; \
54697 })
54698 #else
54699 #define vld1q_s32_x4(__p0) __extension__ ({ \
54700 int32x4x4_t __ret; \
54701 __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
54702 \
54703 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54704 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54705 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54706 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
54707 __ret; \
54708 })
54709 #endif
54710
54711 #ifdef __LITTLE_ENDIAN__
54712 #define vld1q_s64_x4(__p0) __extension__ ({ \
54713 int64x2x4_t __ret; \
54714 __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
54715 __ret; \
54716 })
54717 #else
54718 #define vld1q_s64_x4(__p0) __extension__ ({ \
54719 int64x2x4_t __ret; \
54720 __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
54721 \
54722 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54723 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54724 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54725 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
54726 __ret; \
54727 })
54728 #endif
54729
54730 #ifdef __LITTLE_ENDIAN__
54731 #define vld1q_s16_x4(__p0) __extension__ ({ \
54732 int16x8x4_t __ret; \
54733 __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
54734 __ret; \
54735 })
54736 #else
54737 #define vld1q_s16_x4(__p0) __extension__ ({ \
54738 int16x8x4_t __ret; \
54739 __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
54740 \
54741 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54742 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54743 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54744 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
54745 __ret; \
54746 })
54747 #endif
54748
54749 #ifdef __LITTLE_ENDIAN__
54750 #define vld1_u8_x4(__p0) __extension__ ({ \
54751 uint8x8x4_t __ret; \
54752 __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
54753 __ret; \
54754 })
54755 #else
54756 #define vld1_u8_x4(__p0) __extension__ ({ \
54757 uint8x8x4_t __ret; \
54758 __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
54759 \
54760 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54761 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54762 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54763 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
54764 __ret; \
54765 })
54766 #endif
54767
54768 #ifdef __LITTLE_ENDIAN__
54769 #define vld1_u32_x4(__p0) __extension__ ({ \
54770 uint32x2x4_t __ret; \
54771 __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
54772 __ret; \
54773 })
54774 #else
54775 #define vld1_u32_x4(__p0) __extension__ ({ \
54776 uint32x2x4_t __ret; \
54777 __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
54778 \
54779 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54780 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54781 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54782 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
54783 __ret; \
54784 })
54785 #endif
54786
54787 #ifdef __LITTLE_ENDIAN__
54788 #define vld1_u64_x4(__p0) __extension__ ({ \
54789 uint64x1x4_t __ret; \
54790 __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
54791 __ret; \
54792 })
54793 #else
54794 #define vld1_u64_x4(__p0) __extension__ ({ \
54795 uint64x1x4_t __ret; \
54796 __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
54797 __ret; \
54798 })
54799 #endif
54800
54801 #ifdef __LITTLE_ENDIAN__
54802 #define vld1_u16_x4(__p0) __extension__ ({ \
54803 uint16x4x4_t __ret; \
54804 __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
54805 __ret; \
54806 })
54807 #else
54808 #define vld1_u16_x4(__p0) __extension__ ({ \
54809 uint16x4x4_t __ret; \
54810 __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
54811 \
54812 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54813 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54814 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54815 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
54816 __ret; \
54817 })
54818 #endif
54819
54820 #ifdef __LITTLE_ENDIAN__
54821 #define vld1_s8_x4(__p0) __extension__ ({ \
54822 int8x8x4_t __ret; \
54823 __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
54824 __ret; \
54825 })
54826 #else
54827 #define vld1_s8_x4(__p0) __extension__ ({ \
54828 int8x8x4_t __ret; \
54829 __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
54830 \
54831 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
54832 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
54833 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
54834 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
54835 __ret; \
54836 })
54837 #endif
54838
54839 #ifdef __LITTLE_ENDIAN__
54840 #define vld1_f64_x4(__p0) __extension__ ({ \
54841 float64x1x4_t __ret; \
54842 __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
54843 __ret; \
54844 })
54845 #else
54846 #define vld1_f64_x4(__p0) __extension__ ({ \
54847 float64x1x4_t __ret; \
54848 __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
54849 __ret; \
54850 })
54851 #endif
54852
54853 #ifdef __LITTLE_ENDIAN__
54854 #define vld1_f32_x4(__p0) __extension__ ({ \
54855 float32x2x4_t __ret; \
54856 __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
54857 __ret; \
54858 })
54859 #else
54860 #define vld1_f32_x4(__p0) __extension__ ({ \
54861 float32x2x4_t __ret; \
54862 __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
54863 \
54864 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54865 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54866 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54867 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
54868 __ret; \
54869 })
54870 #endif
54871
54872 #ifdef __LITTLE_ENDIAN__
54873 #define vld1_f16_x4(__p0) __extension__ ({ \
54874 float16x4x4_t __ret; \
54875 __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
54876 __ret; \
54877 })
54878 #else
54879 #define vld1_f16_x4(__p0) __extension__ ({ \
54880 float16x4x4_t __ret; \
54881 __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
54882 \
54883 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54884 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54885 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54886 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
54887 __ret; \
54888 })
54889 #endif
54890
54891 #ifdef __LITTLE_ENDIAN__
54892 #define vld1_s32_x4(__p0) __extension__ ({ \
54893 int32x2x4_t __ret; \
54894 __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
54895 __ret; \
54896 })
54897 #else
54898 #define vld1_s32_x4(__p0) __extension__ ({ \
54899 int32x2x4_t __ret; \
54900 __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
54901 \
54902 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54903 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54904 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
54905 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
54906 __ret; \
54907 })
54908 #endif
54909
54910 #ifdef __LITTLE_ENDIAN__
54911 #define vld1_s64_x4(__p0) __extension__ ({ \
54912 int64x1x4_t __ret; \
54913 __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
54914 __ret; \
54915 })
54916 #else
54917 #define vld1_s64_x4(__p0) __extension__ ({ \
54918 int64x1x4_t __ret; \
54919 __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
54920 __ret; \
54921 })
54922 #endif
54923
54924 #ifdef __LITTLE_ENDIAN__
54925 #define vld1_s16_x4(__p0) __extension__ ({ \
54926 int16x4x4_t __ret; \
54927 __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
54928 __ret; \
54929 })
54930 #else
54931 #define vld1_s16_x4(__p0) __extension__ ({ \
54932 int16x4x4_t __ret; \
54933 __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
54934 \
54935 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
54936 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
54937 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
54938 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
54939 __ret; \
54940 })
54941 #endif
54942
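/*
 * The vld2 family below differs from the vld1_x2 forms above: vld2/vld2q
 * perform a de-interleaving structure load, so memory laid out as
 * {a0, b0, a1, b1, ...} is split with the a elements going to val[0] and
 * the b elements to val[1], rather than being kept in plain memory order.
 *
 * Illustrative sketch (hypothetical identifiers, little-endian target):
 *
 *   const float32_t *xy = ...;               // interleaved x,y pairs
 *   float32x4x2_t v = vld2q_f32(xy);         // v.val[0] = x0..x3,
 *                                            // v.val[1] = y0..y3
 */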
54943 #ifdef __LITTLE_ENDIAN__
54944 #define vld2_p64(__p0) __extension__ ({ \
54945 poly64x1x2_t __ret; \
54946 __builtin_neon_vld2_v(&__ret, __p0, 6); \
54947 __ret; \
54948 })
54949 #else
54950 #define vld2_p64(__p0) __extension__ ({ \
54951 poly64x1x2_t __ret; \
54952 __builtin_neon_vld2_v(&__ret, __p0, 6); \
54953 __ret; \
54954 })
54955 #endif
54956
54957 #ifdef __LITTLE_ENDIAN__
54958 #define vld2q_p64(__p0) __extension__ ({ \
54959 poly64x2x2_t __ret; \
54960 __builtin_neon_vld2q_v(&__ret, __p0, 38); \
54961 __ret; \
54962 })
54963 #else
54964 #define vld2q_p64(__p0) __extension__ ({ \
54965 poly64x2x2_t __ret; \
54966 __builtin_neon_vld2q_v(&__ret, __p0, 38); \
54967 \
54968 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54969 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54970 __ret; \
54971 })
54972 #endif
54973
54974 #ifdef __LITTLE_ENDIAN__
54975 #define vld2q_u64(__p0) __extension__ ({ \
54976 uint64x2x2_t __ret; \
54977 __builtin_neon_vld2q_v(&__ret, __p0, 51); \
54978 __ret; \
54979 })
54980 #else
54981 #define vld2q_u64(__p0) __extension__ ({ \
54982 uint64x2x2_t __ret; \
54983 __builtin_neon_vld2q_v(&__ret, __p0, 51); \
54984 \
54985 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
54986 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
54987 __ret; \
54988 })
54989 #endif
54990
54991 #ifdef __LITTLE_ENDIAN__
54992 #define vld2q_f64(__p0) __extension__ ({ \
54993 float64x2x2_t __ret; \
54994 __builtin_neon_vld2q_v(&__ret, __p0, 42); \
54995 __ret; \
54996 })
54997 #else
54998 #define vld2q_f64(__p0) __extension__ ({ \
54999 float64x2x2_t __ret; \
55000 __builtin_neon_vld2q_v(&__ret, __p0, 42); \
55001 \
55002 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55003 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55004 __ret; \
55005 })
55006 #endif
55007
55008 #ifdef __LITTLE_ENDIAN__
55009 #define vld2q_s64(__p0) __extension__ ({ \
55010 int64x2x2_t __ret; \
55011 __builtin_neon_vld2q_v(&__ret, __p0, 35); \
55012 __ret; \
55013 })
55014 #else
55015 #define vld2q_s64(__p0) __extension__ ({ \
55016 int64x2x2_t __ret; \
55017 __builtin_neon_vld2q_v(&__ret, __p0, 35); \
55018 \
55019 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55020 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55021 __ret; \
55022 })
55023 #endif
55024
55025 #ifdef __LITTLE_ENDIAN__
55026 #define vld2_f64(__p0) __extension__ ({ \
55027 float64x1x2_t __ret; \
55028 __builtin_neon_vld2_v(&__ret, __p0, 10); \
55029 __ret; \
55030 })
55031 #else
55032 #define vld2_f64(__p0) __extension__ ({ \
55033 float64x1x2_t __ret; \
55034 __builtin_neon_vld2_v(&__ret, __p0, 10); \
55035 __ret; \
55036 })
55037 #endif
55038
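/*
 * The vld2_dup_<type> and vld2q_dup_<type> macros below load a single
 * two-element structure and replicate it to every lane of both result
 * vectors (a "load and duplicate"), again with lane reversal in the
 * big-endian branches where the vectors have more than one element.
 */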
55039 #ifdef __LITTLE_ENDIAN__
55040 #define vld2_dup_p64(__p0) __extension__ ({ \
55041 poly64x1x2_t __ret; \
55042 __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
55043 __ret; \
55044 })
55045 #else
55046 #define vld2_dup_p64(__p0) __extension__ ({ \
55047 poly64x1x2_t __ret; \
55048 __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
55049 __ret; \
55050 })
55051 #endif
55052
55053 #ifdef __LITTLE_ENDIAN__
55054 #define vld2q_dup_p8(__p0) __extension__ ({ \
55055 poly8x16x2_t __ret; \
55056 __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
55057 __ret; \
55058 })
55059 #else
55060 #define vld2q_dup_p8(__p0) __extension__ ({ \
55061 poly8x16x2_t __ret; \
55062 __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
55063 \
55064 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55065 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55066 __ret; \
55067 })
55068 #endif
55069
55070 #ifdef __LITTLE_ENDIAN__
55071 #define vld2q_dup_p64(__p0) __extension__ ({ \
55072 poly64x2x2_t __ret; \
55073 __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
55074 __ret; \
55075 })
55076 #else
55077 #define vld2q_dup_p64(__p0) __extension__ ({ \
55078 poly64x2x2_t __ret; \
55079 __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
55080 \
55081 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55082 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55083 __ret; \
55084 })
55085 #endif
55086
55087 #ifdef __LITTLE_ENDIAN__
55088 #define vld2q_dup_p16(__p0) __extension__ ({ \
55089 poly16x8x2_t __ret; \
55090 __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
55091 __ret; \
55092 })
55093 #else
55094 #define vld2q_dup_p16(__p0) __extension__ ({ \
55095 poly16x8x2_t __ret; \
55096 __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
55097 \
55098 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55099 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55100 __ret; \
55101 })
55102 #endif
55103
55104 #ifdef __LITTLE_ENDIAN__
55105 #define vld2q_dup_u8(__p0) __extension__ ({ \
55106 uint8x16x2_t __ret; \
55107 __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
55108 __ret; \
55109 })
55110 #else
55111 #define vld2q_dup_u8(__p0) __extension__ ({ \
55112 uint8x16x2_t __ret; \
55113 __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
55114 \
55115 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55116 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55117 __ret; \
55118 })
55119 #endif
55120
55121 #ifdef __LITTLE_ENDIAN__
55122 #define vld2q_dup_u32(__p0) __extension__ ({ \
55123 uint32x4x2_t __ret; \
55124 __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
55125 __ret; \
55126 })
55127 #else
55128 #define vld2q_dup_u32(__p0) __extension__ ({ \
55129 uint32x4x2_t __ret; \
55130 __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
55131 \
55132 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
55133 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
55134 __ret; \
55135 })
55136 #endif
55137
55138 #ifdef __LITTLE_ENDIAN__
55139 #define vld2q_dup_u64(__p0) __extension__ ({ \
55140 uint64x2x2_t __ret; \
55141 __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
55142 __ret; \
55143 })
55144 #else
55145 #define vld2q_dup_u64(__p0) __extension__ ({ \
55146 uint64x2x2_t __ret; \
55147 __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
55148 \
55149 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55150 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55151 __ret; \
55152 })
55153 #endif
55154
55155 #ifdef __LITTLE_ENDIAN__
55156 #define vld2q_dup_u16(__p0) __extension__ ({ \
55157 uint16x8x2_t __ret; \
55158 __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
55159 __ret; \
55160 })
55161 #else
55162 #define vld2q_dup_u16(__p0) __extension__ ({ \
55163 uint16x8x2_t __ret; \
55164 __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
55165 \
55166 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55167 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55168 __ret; \
55169 })
55170 #endif
55171
55172 #ifdef __LITTLE_ENDIAN__
55173 #define vld2q_dup_s8(__p0) __extension__ ({ \
55174 int8x16x2_t __ret; \
55175 __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
55176 __ret; \
55177 })
55178 #else
55179 #define vld2q_dup_s8(__p0) __extension__ ({ \
55180 int8x16x2_t __ret; \
55181 __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
55182 \
55183 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55184 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55185 __ret; \
55186 })
55187 #endif
55188
55189 #ifdef __LITTLE_ENDIAN__
55190 #define vld2q_dup_f64(__p0) __extension__ ({ \
55191 float64x2x2_t __ret; \
55192 __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
55193 __ret; \
55194 })
55195 #else
55196 #define vld2q_dup_f64(__p0) __extension__ ({ \
55197 float64x2x2_t __ret; \
55198 __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
55199 \
55200 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55201 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55202 __ret; \
55203 })
55204 #endif
55205
55206 #ifdef __LITTLE_ENDIAN__
55207 #define vld2q_dup_f32(__p0) __extension__ ({ \
55208 float32x4x2_t __ret; \
55209 __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
55210 __ret; \
55211 })
55212 #else
55213 #define vld2q_dup_f32(__p0) __extension__ ({ \
55214 float32x4x2_t __ret; \
55215 __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
55216 \
55217 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
55218 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
55219 __ret; \
55220 })
55221 #endif
55222
55223 #ifdef __LITTLE_ENDIAN__
55224 #define vld2q_dup_f16(__p0) __extension__ ({ \
55225 float16x8x2_t __ret; \
55226 __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
55227 __ret; \
55228 })
55229 #else
55230 #define vld2q_dup_f16(__p0) __extension__ ({ \
55231 float16x8x2_t __ret; \
55232 __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
55233 \
55234 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55235 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55236 __ret; \
55237 })
55238 #endif
55239
55240 #ifdef __LITTLE_ENDIAN__
55241 #define vld2q_dup_s32(__p0) __extension__ ({ \
55242 int32x4x2_t __ret; \
55243 __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
55244 __ret; \
55245 })
55246 #else
55247 #define vld2q_dup_s32(__p0) __extension__ ({ \
55248 int32x4x2_t __ret; \
55249 __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
55250 \
55251 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
55252 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
55253 __ret; \
55254 })
55255 #endif
55256
55257 #ifdef __LITTLE_ENDIAN__
55258 #define vld2q_dup_s64(__p0) __extension__ ({ \
55259 int64x2x2_t __ret; \
55260 __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
55261 __ret; \
55262 })
55263 #else
55264 #define vld2q_dup_s64(__p0) __extension__ ({ \
55265 int64x2x2_t __ret; \
55266 __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
55267 \
55268 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55269 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55270 __ret; \
55271 })
55272 #endif
55273
55274 #ifdef __LITTLE_ENDIAN__
55275 #define vld2q_dup_s16(__p0) __extension__ ({ \
55276 int16x8x2_t __ret; \
55277 __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
55278 __ret; \
55279 })
55280 #else
55281 #define vld2q_dup_s16(__p0) __extension__ ({ \
55282 int16x8x2_t __ret; \
55283 __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
55284 \
55285 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55286 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55287 __ret; \
55288 })
55289 #endif
55290
55291 #ifdef __LITTLE_ENDIAN__
55292 #define vld2_dup_f64(__p0) __extension__ ({ \
55293 float64x1x2_t __ret; \
55294 __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
55295 __ret; \
55296 })
55297 #else
55298 #define vld2_dup_f64(__p0) __extension__ ({ \
55299 float64x1x2_t __ret; \
55300 __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
55301 __ret; \
55302 })
55303 #endif
55304
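/*
 * Editorial sketch, not part of the generated header: the vld2*_dup macros
 * above read one two-element structure from memory and broadcast element 0
 * into every lane of val[0] and element 1 into every lane of val[1].  A
 * minimal usage example, assuming `src` points at two readable uint32_t
 * values (the variable names here are illustrative only):
 *
 *   uint32x4x2_t pair = vld2q_dup_u32(src);
 *   // pair.val[0] == { src[0], src[0], src[0], src[0] }
 *   // pair.val[1] == { src[1], src[1], src[1], src[1] }
 *
 * The #else (big-endian) branches apply __builtin_shufflevector to each
 * result vector so that lane numbering matches the little-endian convention.
 */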
55305 #ifdef __LITTLE_ENDIAN__
55306 #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55307 poly64x1x2_t __s1 = __p1; \
55308 poly64x1x2_t __ret; \
55309 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
55310 __ret; \
55311 })
55312 #else
55313 #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55314 poly64x1x2_t __s1 = __p1; \
55315 poly64x1x2_t __ret; \
55316 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
55317 __ret; \
55318 })
55319 #endif
55320
55321 #ifdef __LITTLE_ENDIAN__
55322 #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
55323 poly8x16x2_t __s1 = __p1; \
55324 poly8x16x2_t __ret; \
55325 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
55326 __ret; \
55327 })
55328 #else
55329 #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
55330 poly8x16x2_t __s1 = __p1; \
55331 poly8x16x2_t __rev1; \
55332 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55333 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55334 poly8x16x2_t __ret; \
55335 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
55336 \
55337 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55338 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55339 __ret; \
55340 })
55341 #endif
55342
55343 #ifdef __LITTLE_ENDIAN__
55344 #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55345 poly64x2x2_t __s1 = __p1; \
55346 poly64x2x2_t __ret; \
55347 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
55348 __ret; \
55349 })
55350 #else
55351 #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55352 poly64x2x2_t __s1 = __p1; \
55353 poly64x2x2_t __rev1; \
55354 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
55355 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
55356 poly64x2x2_t __ret; \
55357 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
55358 \
55359 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55360 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55361 __ret; \
55362 })
55363 #endif
55364
55365 #ifdef __LITTLE_ENDIAN__
55366 #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
55367 uint8x16x2_t __s1 = __p1; \
55368 uint8x16x2_t __ret; \
55369 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
55370 __ret; \
55371 })
55372 #else
55373 #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
55374 uint8x16x2_t __s1 = __p1; \
55375 uint8x16x2_t __rev1; \
55376 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55377 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55378 uint8x16x2_t __ret; \
55379 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
55380 \
55381 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55382 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55383 __ret; \
55384 })
55385 #endif
55386
55387 #ifdef __LITTLE_ENDIAN__
55388 #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
55389 uint64x2x2_t __s1 = __p1; \
55390 uint64x2x2_t __ret; \
55391 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
55392 __ret; \
55393 })
55394 #else
55395 #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
55396 uint64x2x2_t __s1 = __p1; \
55397 uint64x2x2_t __rev1; \
55398 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
55399 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
55400 uint64x2x2_t __ret; \
55401 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
55402 \
55403 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55404 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55405 __ret; \
55406 })
55407 #endif
55408
55409 #ifdef __LITTLE_ENDIAN__
55410 #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
55411 int8x16x2_t __s1 = __p1; \
55412 int8x16x2_t __ret; \
55413 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
55414 __ret; \
55415 })
55416 #else
55417 #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
55418 int8x16x2_t __s1 = __p1; \
55419 int8x16x2_t __rev1; \
55420 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55421 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55422 int8x16x2_t __ret; \
55423 __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
55424 \
55425 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55426 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55427 __ret; \
55428 })
55429 #endif
55430
55431 #ifdef __LITTLE_ENDIAN__
55432 #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
55433 float64x2x2_t __s1 = __p1; \
55434 float64x2x2_t __ret; \
55435 __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
55436 __ret; \
55437 })
55438 #else
55439 #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
55440 float64x2x2_t __s1 = __p1; \
55441 float64x2x2_t __rev1; \
55442 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
55443 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
55444 float64x2x2_t __ret; \
55445 __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
55446 \
55447 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55448 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55449 __ret; \
55450 })
55451 #endif
55452
55453 #ifdef __LITTLE_ENDIAN__
55454 #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
55455 int64x2x2_t __s1 = __p1; \
55456 int64x2x2_t __ret; \
55457 __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
55458 __ret; \
55459 })
55460 #else
55461 #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
55462 int64x2x2_t __s1 = __p1; \
55463 int64x2x2_t __rev1; \
55464 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
55465 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
55466 int64x2x2_t __ret; \
55467 __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
55468 \
55469 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55470 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55471 __ret; \
55472 })
55473 #endif
55474
55475 #ifdef __LITTLE_ENDIAN__
55476 #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
55477 uint64x1x2_t __s1 = __p1; \
55478 uint64x1x2_t __ret; \
55479 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
55480 __ret; \
55481 })
55482 #else
55483 #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
55484 uint64x1x2_t __s1 = __p1; \
55485 uint64x1x2_t __ret; \
55486 __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
55487 __ret; \
55488 })
55489 #endif
55490
55491 #ifdef __LITTLE_ENDIAN__
55492 #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
55493 float64x1x2_t __s1 = __p1; \
55494 float64x1x2_t __ret; \
55495 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
55496 __ret; \
55497 })
55498 #else
55499 #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
55500 float64x1x2_t __s1 = __p1; \
55501 float64x1x2_t __ret; \
55502 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
55503 __ret; \
55504 })
55505 #endif
55506
55507 #ifdef __LITTLE_ENDIAN__
55508 #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
55509 int64x1x2_t __s1 = __p1; \
55510 int64x1x2_t __ret; \
55511 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
55512 __ret; \
55513 })
55514 #else
55515 #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
55516 int64x1x2_t __s1 = __p1; \
55517 int64x1x2_t __ret; \
55518 __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
55519 __ret; \
55520 })
55521 #endif
55522
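/*
 * Editorial sketch, not part of the generated header: the vld2*_lane macros
 * take an existing pair of vectors (__p1), overwrite only lane __p2 of each
 * with the two values read from __p0, and return the updated pair.  Assuming
 * `buf` and `src` point at valid int64_t data (illustrative names only):
 *
 *   int64x2x2_t acc = vld2q_s64(buf);     // prior contents of both vectors
 *   acc = vld2q_lane_s64(src, acc, 1);    // reload only lane 1 of val[0]/val[1]
 *
 * In the big-endian branches the inputs are lane-reversed into __rev1 before
 * the builtin is called and the results are reversed back afterwards, so the
 * user-visible lane index stays endian-neutral.
 */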
55523 #ifdef __LITTLE_ENDIAN__
55524 #define vld3_p64(__p0) __extension__ ({ \
55525 poly64x1x3_t __ret; \
55526 __builtin_neon_vld3_v(&__ret, __p0, 6); \
55527 __ret; \
55528 })
55529 #else
55530 #define vld3_p64(__p0) __extension__ ({ \
55531 poly64x1x3_t __ret; \
55532 __builtin_neon_vld3_v(&__ret, __p0, 6); \
55533 __ret; \
55534 })
55535 #endif
55536
55537 #ifdef __LITTLE_ENDIAN__
55538 #define vld3q_p64(__p0) __extension__ ({ \
55539 poly64x2x3_t __ret; \
55540 __builtin_neon_vld3q_v(&__ret, __p0, 38); \
55541 __ret; \
55542 })
55543 #else
55544 #define vld3q_p64(__p0) __extension__ ({ \
55545 poly64x2x3_t __ret; \
55546 __builtin_neon_vld3q_v(&__ret, __p0, 38); \
55547 \
55548 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55549 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55550 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55551 __ret; \
55552 })
55553 #endif
55554
55555 #ifdef __LITTLE_ENDIAN__
55556 #define vld3q_u64(__p0) __extension__ ({ \
55557 uint64x2x3_t __ret; \
55558 __builtin_neon_vld3q_v(&__ret, __p0, 51); \
55559 __ret; \
55560 })
55561 #else
55562 #define vld3q_u64(__p0) __extension__ ({ \
55563 uint64x2x3_t __ret; \
55564 __builtin_neon_vld3q_v(&__ret, __p0, 51); \
55565 \
55566 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55567 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55568 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55569 __ret; \
55570 })
55571 #endif
55572
55573 #ifdef __LITTLE_ENDIAN__
55574 #define vld3q_f64(__p0) __extension__ ({ \
55575 float64x2x3_t __ret; \
55576 __builtin_neon_vld3q_v(&__ret, __p0, 42); \
55577 __ret; \
55578 })
55579 #else
55580 #define vld3q_f64(__p0) __extension__ ({ \
55581 float64x2x3_t __ret; \
55582 __builtin_neon_vld3q_v(&__ret, __p0, 42); \
55583 \
55584 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55585 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55586 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55587 __ret; \
55588 })
55589 #endif
55590
55591 #ifdef __LITTLE_ENDIAN__
55592 #define vld3q_s64(__p0) __extension__ ({ \
55593 int64x2x3_t __ret; \
55594 __builtin_neon_vld3q_v(&__ret, __p0, 35); \
55595 __ret; \
55596 })
55597 #else
55598 #define vld3q_s64(__p0) __extension__ ({ \
55599 int64x2x3_t __ret; \
55600 __builtin_neon_vld3q_v(&__ret, __p0, 35); \
55601 \
55602 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55603 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55604 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55605 __ret; \
55606 })
55607 #endif
55608
55609 #ifdef __LITTLE_ENDIAN__
55610 #define vld3_f64(__p0) __extension__ ({ \
55611 float64x1x3_t __ret; \
55612 __builtin_neon_vld3_v(&__ret, __p0, 10); \
55613 __ret; \
55614 })
55615 #else
55616 #define vld3_f64(__p0) __extension__ ({ \
55617 float64x1x3_t __ret; \
55618 __builtin_neon_vld3_v(&__ret, __p0, 10); \
55619 __ret; \
55620 })
55621 #endif
55622
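/*
 * Editorial sketch, not part of the generated header: vld3/vld3q load and
 * de-interleave three-element structures.  Assuming `src` points at six
 * uint64_t values laid out in memory as { a0, b0, c0, a1, b1, c1 }:
 *
 *   uint64x2x3_t v = vld3q_u64(src);
 *   // v.val[0] == { a0, a1 }, v.val[1] == { b0, b1 }, v.val[2] == { c0, c1 }
 */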
55623 #ifdef __LITTLE_ENDIAN__
55624 #define vld3_dup_p64(__p0) __extension__ ({ \
55625 poly64x1x3_t __ret; \
55626 __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
55627 __ret; \
55628 })
55629 #else
55630 #define vld3_dup_p64(__p0) __extension__ ({ \
55631 poly64x1x3_t __ret; \
55632 __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
55633 __ret; \
55634 })
55635 #endif
55636
55637 #ifdef __LITTLE_ENDIAN__
55638 #define vld3q_dup_p8(__p0) __extension__ ({ \
55639 poly8x16x3_t __ret; \
55640 __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
55641 __ret; \
55642 })
55643 #else
55644 #define vld3q_dup_p8(__p0) __extension__ ({ \
55645 poly8x16x3_t __ret; \
55646 __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
55647 \
55648 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55649 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55650 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55651 __ret; \
55652 })
55653 #endif
55654
55655 #ifdef __LITTLE_ENDIAN__
55656 #define vld3q_dup_p64(__p0) __extension__ ({ \
55657 poly64x2x3_t __ret; \
55658 __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
55659 __ret; \
55660 })
55661 #else
55662 #define vld3q_dup_p64(__p0) __extension__ ({ \
55663 poly64x2x3_t __ret; \
55664 __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
55665 \
55666 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55667 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55668 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55669 __ret; \
55670 })
55671 #endif
55672
55673 #ifdef __LITTLE_ENDIAN__
55674 #define vld3q_dup_p16(__p0) __extension__ ({ \
55675 poly16x8x3_t __ret; \
55676 __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
55677 __ret; \
55678 })
55679 #else
55680 #define vld3q_dup_p16(__p0) __extension__ ({ \
55681 poly16x8x3_t __ret; \
55682 __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
55683 \
55684 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55685 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55686 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
55687 __ret; \
55688 })
55689 #endif
55690
55691 #ifdef __LITTLE_ENDIAN__
55692 #define vld3q_dup_u8(__p0) __extension__ ({ \
55693 uint8x16x3_t __ret; \
55694 __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
55695 __ret; \
55696 })
55697 #else
55698 #define vld3q_dup_u8(__p0) __extension__ ({ \
55699 uint8x16x3_t __ret; \
55700 __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
55701 \
55702 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55703 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55704 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55705 __ret; \
55706 })
55707 #endif
55708
55709 #ifdef __LITTLE_ENDIAN__
55710 #define vld3q_dup_u32(__p0) __extension__ ({ \
55711 uint32x4x3_t __ret; \
55712 __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
55713 __ret; \
55714 })
55715 #else
55716 #define vld3q_dup_u32(__p0) __extension__ ({ \
55717 uint32x4x3_t __ret; \
55718 __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
55719 \
55720 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
55721 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
55722 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
55723 __ret; \
55724 })
55725 #endif
55726
55727 #ifdef __LITTLE_ENDIAN__
55728 #define vld3q_dup_u64(__p0) __extension__ ({ \
55729 uint64x2x3_t __ret; \
55730 __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
55731 __ret; \
55732 })
55733 #else
55734 #define vld3q_dup_u64(__p0) __extension__ ({ \
55735 uint64x2x3_t __ret; \
55736 __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
55737 \
55738 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55739 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55740 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55741 __ret; \
55742 })
55743 #endif
55744
55745 #ifdef __LITTLE_ENDIAN__
55746 #define vld3q_dup_u16(__p0) __extension__ ({ \
55747 uint16x8x3_t __ret; \
55748 __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
55749 __ret; \
55750 })
55751 #else
55752 #define vld3q_dup_u16(__p0) __extension__ ({ \
55753 uint16x8x3_t __ret; \
55754 __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
55755 \
55756 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55757 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55758 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
55759 __ret; \
55760 })
55761 #endif
55762
55763 #ifdef __LITTLE_ENDIAN__
55764 #define vld3q_dup_s8(__p0) __extension__ ({ \
55765 int8x16x3_t __ret; \
55766 __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
55767 __ret; \
55768 })
55769 #else
55770 #define vld3q_dup_s8(__p0) __extension__ ({ \
55771 int8x16x3_t __ret; \
55772 __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
55773 \
55774 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55775 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55776 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55777 __ret; \
55778 })
55779 #endif
55780
55781 #ifdef __LITTLE_ENDIAN__
55782 #define vld3q_dup_f64(__p0) __extension__ ({ \
55783 float64x2x3_t __ret; \
55784 __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
55785 __ret; \
55786 })
55787 #else
55788 #define vld3q_dup_f64(__p0) __extension__ ({ \
55789 float64x2x3_t __ret; \
55790 __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
55791 \
55792 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55793 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55794 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55795 __ret; \
55796 })
55797 #endif
55798
55799 #ifdef __LITTLE_ENDIAN__
55800 #define vld3q_dup_f32(__p0) __extension__ ({ \
55801 float32x4x3_t __ret; \
55802 __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
55803 __ret; \
55804 })
55805 #else
55806 #define vld3q_dup_f32(__p0) __extension__ ({ \
55807 float32x4x3_t __ret; \
55808 __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
55809 \
55810 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
55811 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
55812 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
55813 __ret; \
55814 })
55815 #endif
55816
55817 #ifdef __LITTLE_ENDIAN__
55818 #define vld3q_dup_f16(__p0) __extension__ ({ \
55819 float16x8x3_t __ret; \
55820 __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
55821 __ret; \
55822 })
55823 #else
55824 #define vld3q_dup_f16(__p0) __extension__ ({ \
55825 float16x8x3_t __ret; \
55826 __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
55827 \
55828 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55829 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55830 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
55831 __ret; \
55832 })
55833 #endif
55834
55835 #ifdef __LITTLE_ENDIAN__
55836 #define vld3q_dup_s32(__p0) __extension__ ({ \
55837 int32x4x3_t __ret; \
55838 __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
55839 __ret; \
55840 })
55841 #else
55842 #define vld3q_dup_s32(__p0) __extension__ ({ \
55843 int32x4x3_t __ret; \
55844 __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
55845 \
55846 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
55847 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
55848 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
55849 __ret; \
55850 })
55851 #endif
55852
55853 #ifdef __LITTLE_ENDIAN__
55854 #define vld3q_dup_s64(__p0) __extension__ ({ \
55855 int64x2x3_t __ret; \
55856 __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
55857 __ret; \
55858 })
55859 #else
55860 #define vld3q_dup_s64(__p0) __extension__ ({ \
55861 int64x2x3_t __ret; \
55862 __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
55863 \
55864 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55865 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55866 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55867 __ret; \
55868 })
55869 #endif
55870
55871 #ifdef __LITTLE_ENDIAN__
55872 #define vld3q_dup_s16(__p0) __extension__ ({ \
55873 int16x8x3_t __ret; \
55874 __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
55875 __ret; \
55876 })
55877 #else
55878 #define vld3q_dup_s16(__p0) __extension__ ({ \
55879 int16x8x3_t __ret; \
55880 __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
55881 \
55882 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
55883 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
55884 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
55885 __ret; \
55886 })
55887 #endif
55888
55889 #ifdef __LITTLE_ENDIAN__
55890 #define vld3_dup_f64(__p0) __extension__ ({ \
55891 float64x1x3_t __ret; \
55892 __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
55893 __ret; \
55894 })
55895 #else
55896 #define vld3_dup_f64(__p0) __extension__ ({ \
55897 float64x1x3_t __ret; \
55898 __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
55899 __ret; \
55900 })
55901 #endif
55902
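/*
 * Editorial sketch, not part of the generated header: the vld3*_dup macros
 * read a single three-element structure and broadcast each element across one
 * result vector.  Assuming `src` points at three readable float values:
 *
 *   float32x4x3_t rgb = vld3q_dup_f32(src);
 *   // rgb.val[0] holds src[0] in all four lanes, val[1] src[1], val[2] src[2]
 *
 * Single-lane variants such as vld3_dup_p64 and vld3_dup_f64 expand
 * identically under both endiannesses, since a one-element vector has nothing
 * to reverse.
 */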
55903 #ifdef __LITTLE_ENDIAN__
55904 #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55905 poly64x1x3_t __s1 = __p1; \
55906 poly64x1x3_t __ret; \
55907 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
55908 __ret; \
55909 })
55910 #else
55911 #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55912 poly64x1x3_t __s1 = __p1; \
55913 poly64x1x3_t __ret; \
55914 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
55915 __ret; \
55916 })
55917 #endif
55918
55919 #ifdef __LITTLE_ENDIAN__
55920 #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
55921 poly8x16x3_t __s1 = __p1; \
55922 poly8x16x3_t __ret; \
55923 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
55924 __ret; \
55925 })
55926 #else
55927 #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
55928 poly8x16x3_t __s1 = __p1; \
55929 poly8x16x3_t __rev1; \
55930 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55931 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55932 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55933 poly8x16x3_t __ret; \
55934 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
55935 \
55936 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55937 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55938 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55939 __ret; \
55940 })
55941 #endif
55942
55943 #ifdef __LITTLE_ENDIAN__
55944 #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55945 poly64x2x3_t __s1 = __p1; \
55946 poly64x2x3_t __ret; \
55947 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
55948 __ret; \
55949 })
55950 #else
55951 #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
55952 poly64x2x3_t __s1 = __p1; \
55953 poly64x2x3_t __rev1; \
55954 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
55955 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
55956 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
55957 poly64x2x3_t __ret; \
55958 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
55959 \
55960 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
55961 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
55962 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
55963 __ret; \
55964 })
55965 #endif
55966
55967 #ifdef __LITTLE_ENDIAN__
55968 #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
55969 uint8x16x3_t __s1 = __p1; \
55970 uint8x16x3_t __ret; \
55971 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
55972 __ret; \
55973 })
55974 #else
55975 #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
55976 uint8x16x3_t __s1 = __p1; \
55977 uint8x16x3_t __rev1; \
55978 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55979 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55980 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55981 uint8x16x3_t __ret; \
55982 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
55983 \
55984 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55985 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55986 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
55987 __ret; \
55988 })
55989 #endif
55990
55991 #ifdef __LITTLE_ENDIAN__
55992 #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
55993 uint64x2x3_t __s1 = __p1; \
55994 uint64x2x3_t __ret; \
55995 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
55996 __ret; \
55997 })
55998 #else
55999 #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
56000 uint64x2x3_t __s1 = __p1; \
56001 uint64x2x3_t __rev1; \
56002 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
56003 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
56004 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
56005 uint64x2x3_t __ret; \
56006 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
56007 \
56008 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56009 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56010 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56011 __ret; \
56012 })
56013 #endif
56014
56015 #ifdef __LITTLE_ENDIAN__
56016 #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
56017 int8x16x3_t __s1 = __p1; \
56018 int8x16x3_t __ret; \
56019 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
56020 __ret; \
56021 })
56022 #else
56023 #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
56024 int8x16x3_t __s1 = __p1; \
56025 int8x16x3_t __rev1; \
56026 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56027 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56028 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56029 int8x16x3_t __ret; \
56030 __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
56031 \
56032 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56033 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56034 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56035 __ret; \
56036 })
56037 #endif
56038
56039 #ifdef __LITTLE_ENDIAN__
56040 #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56041 float64x2x3_t __s1 = __p1; \
56042 float64x2x3_t __ret; \
56043 __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
56044 __ret; \
56045 })
56046 #else
56047 #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56048 float64x2x3_t __s1 = __p1; \
56049 float64x2x3_t __rev1; \
56050 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
56051 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
56052 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
56053 float64x2x3_t __ret; \
56054 __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
56055 \
56056 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56057 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56058 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56059 __ret; \
56060 })
56061 #endif
56062
56063 #ifdef __LITTLE_ENDIAN__
56064 #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56065 int64x2x3_t __s1 = __p1; \
56066 int64x2x3_t __ret; \
56067 __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
56068 __ret; \
56069 })
56070 #else
56071 #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56072 int64x2x3_t __s1 = __p1; \
56073 int64x2x3_t __rev1; \
56074 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
56075 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
56076 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
56077 int64x2x3_t __ret; \
56078 __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
56079 \
56080 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56081 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56082 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56083 __ret; \
56084 })
56085 #endif
56086
56087 #ifdef __LITTLE_ENDIAN__
56088 #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
56089 uint64x1x3_t __s1 = __p1; \
56090 uint64x1x3_t __ret; \
56091 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
56092 __ret; \
56093 })
56094 #else
56095 #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
56096 uint64x1x3_t __s1 = __p1; \
56097 uint64x1x3_t __ret; \
56098 __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
56099 __ret; \
56100 })
56101 #endif
56102
56103 #ifdef __LITTLE_ENDIAN__
56104 #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56105 float64x1x3_t __s1 = __p1; \
56106 float64x1x3_t __ret; \
56107 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
56108 __ret; \
56109 })
56110 #else
56111 #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56112 float64x1x3_t __s1 = __p1; \
56113 float64x1x3_t __ret; \
56114 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
56115 __ret; \
56116 })
56117 #endif
56118
56119 #ifdef __LITTLE_ENDIAN__
56120 #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56121 int64x1x3_t __s1 = __p1; \
56122 int64x1x3_t __ret; \
56123 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
56124 __ret; \
56125 })
56126 #else
56127 #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56128 int64x1x3_t __s1 = __p1; \
56129 int64x1x3_t __ret; \
56130 __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
56131 __ret; \
56132 })
56133 #endif
56134
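/*
 * Editorial sketch, not part of the generated header: vld3*_lane mirrors the
 * vld2*_lane pattern with a third vector.  Assuming `base` and `src` point at
 * valid uint8_t data (illustrative names only):
 *
 *   uint8x16x3_t tbl = vld3q_u8(base);
 *   tbl = vld3q_lane_u8(src, tbl, 15);   // refresh lane 15 of all three vectors
 */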
56135 #ifdef __LITTLE_ENDIAN__
56136 #define vld4_p64(__p0) __extension__ ({ \
56137 poly64x1x4_t __ret; \
56138 __builtin_neon_vld4_v(&__ret, __p0, 6); \
56139 __ret; \
56140 })
56141 #else
56142 #define vld4_p64(__p0) __extension__ ({ \
56143 poly64x1x4_t __ret; \
56144 __builtin_neon_vld4_v(&__ret, __p0, 6); \
56145 __ret; \
56146 })
56147 #endif
56148
56149 #ifdef __LITTLE_ENDIAN__
56150 #define vld4q_p64(__p0) __extension__ ({ \
56151 poly64x2x4_t __ret; \
56152 __builtin_neon_vld4q_v(&__ret, __p0, 38); \
56153 __ret; \
56154 })
56155 #else
56156 #define vld4q_p64(__p0) __extension__ ({ \
56157 poly64x2x4_t __ret; \
56158 __builtin_neon_vld4q_v(&__ret, __p0, 38); \
56159 \
56160 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56161 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56162 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56163 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56164 __ret; \
56165 })
56166 #endif
56167
56168 #ifdef __LITTLE_ENDIAN__
56169 #define vld4q_u64(__p0) __extension__ ({ \
56170 uint64x2x4_t __ret; \
56171 __builtin_neon_vld4q_v(&__ret, __p0, 51); \
56172 __ret; \
56173 })
56174 #else
56175 #define vld4q_u64(__p0) __extension__ ({ \
56176 uint64x2x4_t __ret; \
56177 __builtin_neon_vld4q_v(&__ret, __p0, 51); \
56178 \
56179 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56180 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56181 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56182 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56183 __ret; \
56184 })
56185 #endif
56186
56187 #ifdef __LITTLE_ENDIAN__
56188 #define vld4q_f64(__p0) __extension__ ({ \
56189 float64x2x4_t __ret; \
56190 __builtin_neon_vld4q_v(&__ret, __p0, 42); \
56191 __ret; \
56192 })
56193 #else
56194 #define vld4q_f64(__p0) __extension__ ({ \
56195 float64x2x4_t __ret; \
56196 __builtin_neon_vld4q_v(&__ret, __p0, 42); \
56197 \
56198 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56199 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56200 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56201 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56202 __ret; \
56203 })
56204 #endif
56205
56206 #ifdef __LITTLE_ENDIAN__
56207 #define vld4q_s64(__p0) __extension__ ({ \
56208 int64x2x4_t __ret; \
56209 __builtin_neon_vld4q_v(&__ret, __p0, 35); \
56210 __ret; \
56211 })
56212 #else
56213 #define vld4q_s64(__p0) __extension__ ({ \
56214 int64x2x4_t __ret; \
56215 __builtin_neon_vld4q_v(&__ret, __p0, 35); \
56216 \
56217 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56218 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56219 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56220 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56221 __ret; \
56222 })
56223 #endif
56224
56225 #ifdef __LITTLE_ENDIAN__
56226 #define vld4_f64(__p0) __extension__ ({ \
56227 float64x1x4_t __ret; \
56228 __builtin_neon_vld4_v(&__ret, __p0, 10); \
56229 __ret; \
56230 })
56231 #else
56232 #define vld4_f64(__p0) __extension__ ({ \
56233 float64x1x4_t __ret; \
56234 __builtin_neon_vld4_v(&__ret, __p0, 10); \
56235 __ret; \
56236 })
56237 #endif
56238
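/*
 * Editorial sketch, not part of the generated header: vld4/vld4q extend the
 * same de-interleaving load to four-element structures.  Assuming `src`
 * points at eight float64_t values { a0, b0, c0, d0, a1, b1, c1, d1 }:
 *
 *   float64x2x4_t q = vld4q_f64(src);
 *   // q.val[0] == { a0, a1 }, ..., q.val[3] == { d0, d1 }
 */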
56239 #ifdef __LITTLE_ENDIAN__
56240 #define vld4_dup_p64(__p0) __extension__ ({ \
56241 poly64x1x4_t __ret; \
56242 __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
56243 __ret; \
56244 })
56245 #else
56246 #define vld4_dup_p64(__p0) __extension__ ({ \
56247 poly64x1x4_t __ret; \
56248 __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
56249 __ret; \
56250 })
56251 #endif
56252
56253 #ifdef __LITTLE_ENDIAN__
56254 #define vld4q_dup_p8(__p0) __extension__ ({ \
56255 poly8x16x4_t __ret; \
56256 __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
56257 __ret; \
56258 })
56259 #else
56260 #define vld4q_dup_p8(__p0) __extension__ ({ \
56261 poly8x16x4_t __ret; \
56262 __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
56263 \
56264 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56265 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56266 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56267 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56268 __ret; \
56269 })
56270 #endif
56271
56272 #ifdef __LITTLE_ENDIAN__
56273 #define vld4q_dup_p64(__p0) __extension__ ({ \
56274 poly64x2x4_t __ret; \
56275 __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
56276 __ret; \
56277 })
56278 #else
56279 #define vld4q_dup_p64(__p0) __extension__ ({ \
56280 poly64x2x4_t __ret; \
56281 __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
56282 \
56283 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56284 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56285 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56286 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56287 __ret; \
56288 })
56289 #endif
56290
56291 #ifdef __LITTLE_ENDIAN__
56292 #define vld4q_dup_p16(__p0) __extension__ ({ \
56293 poly16x8x4_t __ret; \
56294 __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
56295 __ret; \
56296 })
56297 #else
56298 #define vld4q_dup_p16(__p0) __extension__ ({ \
56299 poly16x8x4_t __ret; \
56300 __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
56301 \
56302 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
56303 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
56304 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
56305 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
56306 __ret; \
56307 })
56308 #endif
56309
56310 #ifdef __LITTLE_ENDIAN__
56311 #define vld4q_dup_u8(__p0) __extension__ ({ \
56312 uint8x16x4_t __ret; \
56313 __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
56314 __ret; \
56315 })
56316 #else
56317 #define vld4q_dup_u8(__p0) __extension__ ({ \
56318 uint8x16x4_t __ret; \
56319 __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
56320 \
56321 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56322 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56323 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56324 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56325 __ret; \
56326 })
56327 #endif
56328
56329 #ifdef __LITTLE_ENDIAN__
56330 #define vld4q_dup_u32(__p0) __extension__ ({ \
56331 uint32x4x4_t __ret; \
56332 __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
56333 __ret; \
56334 })
56335 #else
56336 #define vld4q_dup_u32(__p0) __extension__ ({ \
56337 uint32x4x4_t __ret; \
56338 __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
56339 \
56340 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
56341 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
56342 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
56343 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
56344 __ret; \
56345 })
56346 #endif
56347
56348 #ifdef __LITTLE_ENDIAN__
56349 #define vld4q_dup_u64(__p0) __extension__ ({ \
56350 uint64x2x4_t __ret; \
56351 __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
56352 __ret; \
56353 })
56354 #else
56355 #define vld4q_dup_u64(__p0) __extension__ ({ \
56356 uint64x2x4_t __ret; \
56357 __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
56358 \
56359 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56360 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56361 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56362 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56363 __ret; \
56364 })
56365 #endif
56366
56367 #ifdef __LITTLE_ENDIAN__
56368 #define vld4q_dup_u16(__p0) __extension__ ({ \
56369 uint16x8x4_t __ret; \
56370 __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
56371 __ret; \
56372 })
56373 #else
56374 #define vld4q_dup_u16(__p0) __extension__ ({ \
56375 uint16x8x4_t __ret; \
56376 __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
56377 \
56378 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
56379 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
56380 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
56381 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
56382 __ret; \
56383 })
56384 #endif
56385
56386 #ifdef __LITTLE_ENDIAN__
56387 #define vld4q_dup_s8(__p0) __extension__ ({ \
56388 int8x16x4_t __ret; \
56389 __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
56390 __ret; \
56391 })
56392 #else
56393 #define vld4q_dup_s8(__p0) __extension__ ({ \
56394 int8x16x4_t __ret; \
56395 __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
56396 \
56397 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56398 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56399 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56400 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56401 __ret; \
56402 })
56403 #endif
56404
56405 #ifdef __LITTLE_ENDIAN__
56406 #define vld4q_dup_f64(__p0) __extension__ ({ \
56407 float64x2x4_t __ret; \
56408 __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
56409 __ret; \
56410 })
56411 #else
56412 #define vld4q_dup_f64(__p0) __extension__ ({ \
56413 float64x2x4_t __ret; \
56414 __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
56415 \
56416 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56417 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56418 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56419 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56420 __ret; \
56421 })
56422 #endif
56423
56424 #ifdef __LITTLE_ENDIAN__
56425 #define vld4q_dup_f32(__p0) __extension__ ({ \
56426 float32x4x4_t __ret; \
56427 __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
56428 __ret; \
56429 })
56430 #else
56431 #define vld4q_dup_f32(__p0) __extension__ ({ \
56432 float32x4x4_t __ret; \
56433 __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
56434 \
56435 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
56436 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
56437 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
56438 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
56439 __ret; \
56440 })
56441 #endif
56442
56443 #ifdef __LITTLE_ENDIAN__
56444 #define vld4q_dup_f16(__p0) __extension__ ({ \
56445 float16x8x4_t __ret; \
56446 __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
56447 __ret; \
56448 })
56449 #else
56450 #define vld4q_dup_f16(__p0) __extension__ ({ \
56451 float16x8x4_t __ret; \
56452 __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
56453 \
56454 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
56455 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
56456 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
56457 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
56458 __ret; \
56459 })
56460 #endif
56461
56462 #ifdef __LITTLE_ENDIAN__
56463 #define vld4q_dup_s32(__p0) __extension__ ({ \
56464 int32x4x4_t __ret; \
56465 __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
56466 __ret; \
56467 })
56468 #else
56469 #define vld4q_dup_s32(__p0) __extension__ ({ \
56470 int32x4x4_t __ret; \
56471 __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
56472 \
56473 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
56474 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
56475 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
56476 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
56477 __ret; \
56478 })
56479 #endif
56480
56481 #ifdef __LITTLE_ENDIAN__
56482 #define vld4q_dup_s64(__p0) __extension__ ({ \
56483 int64x2x4_t __ret; \
56484 __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
56485 __ret; \
56486 })
56487 #else
56488 #define vld4q_dup_s64(__p0) __extension__ ({ \
56489 int64x2x4_t __ret; \
56490 __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
56491 \
56492 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56493 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56494 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56495 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56496 __ret; \
56497 })
56498 #endif
56499
56500 #ifdef __LITTLE_ENDIAN__
56501 #define vld4q_dup_s16(__p0) __extension__ ({ \
56502 int16x8x4_t __ret; \
56503 __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
56504 __ret; \
56505 })
56506 #else
56507 #define vld4q_dup_s16(__p0) __extension__ ({ \
56508 int16x8x4_t __ret; \
56509 __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
56510 \
56511 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
56512 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
56513 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
56514 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
56515 __ret; \
56516 })
56517 #endif
56518
56519 #ifdef __LITTLE_ENDIAN__
56520 #define vld4_dup_f64(__p0) __extension__ ({ \
56521 float64x1x4_t __ret; \
56522 __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
56523 __ret; \
56524 })
56525 #else
56526 #define vld4_dup_f64(__p0) __extension__ ({ \
56527 float64x1x4_t __ret; \
56528 __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
56529 __ret; \
56530 })
56531 #endif
56532
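/*
 * Editorial sketch, not part of the generated header: the vld4*_dup macros
 * broadcast one four-element structure, one element per result vector, e.g.
 * assuming `src` points at four readable int16_t values:
 *
 *   int16x8x4_t s = vld4q_dup_s16(src);
 *   // s.val[n] holds src[n] replicated in all eight lanes, for n = 0..3
 */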
56533 #ifdef __LITTLE_ENDIAN__
56534 #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
56535 poly64x1x4_t __s1 = __p1; \
56536 poly64x1x4_t __ret; \
56537 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
56538 __ret; \
56539 })
56540 #else
56541 #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
56542 poly64x1x4_t __s1 = __p1; \
56543 poly64x1x4_t __ret; \
56544 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
56545 __ret; \
56546 })
56547 #endif
56548
56549 #ifdef __LITTLE_ENDIAN__
56550 #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
56551 poly8x16x4_t __s1 = __p1; \
56552 poly8x16x4_t __ret; \
56553 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
56554 __ret; \
56555 })
56556 #else
56557 #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
56558 poly8x16x4_t __s1 = __p1; \
56559 poly8x16x4_t __rev1; \
56560 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56561 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56562 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56563 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56564 poly8x16x4_t __ret; \
56565 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
56566 \
56567 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56568 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56569 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56570 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56571 __ret; \
56572 })
56573 #endif
56574
56575 #ifdef __LITTLE_ENDIAN__
56576 #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
56577 poly64x2x4_t __s1 = __p1; \
56578 poly64x2x4_t __ret; \
56579 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
56580 __ret; \
56581 })
56582 #else
56583 #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
56584 poly64x2x4_t __s1 = __p1; \
56585 poly64x2x4_t __rev1; \
56586 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
56587 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
56588 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
56589 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
56590 poly64x2x4_t __ret; \
56591 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
56592 \
56593 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56594 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56595 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56596 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56597 __ret; \
56598 })
56599 #endif
56600
56601 #ifdef __LITTLE_ENDIAN__
56602 #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
56603 uint8x16x4_t __s1 = __p1; \
56604 uint8x16x4_t __ret; \
56605 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
56606 __ret; \
56607 })
56608 #else
56609 #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
56610 uint8x16x4_t __s1 = __p1; \
56611 uint8x16x4_t __rev1; \
56612 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56613 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56614 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56615 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56616 uint8x16x4_t __ret; \
56617 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
56618 \
56619 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56620 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56621 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56622 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56623 __ret; \
56624 })
56625 #endif
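/* Illustrative usage sketch (not part of the generated header): vld4q_lane_u8
 * reads four consecutive uint8_t values from memory and inserts them into the
 * requested lane of each of the four vectors in a uint8x16x4_t, leaving the
 * other lanes untouched. The buffer and variable names below are placeholders.
 *
 *   uint8_t buf[4] = {1, 2, 3, 4};
 *   uint8x16x4_t q;
 *   q.val[0] = q.val[1] = q.val[2] = q.val[3] = vdupq_n_u8(0);
 *   q = vld4q_lane_u8(buf, q, 5);   // lane 5 of q.val[i] now holds buf[i]
 */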
56626
56627 #ifdef __LITTLE_ENDIAN__
56628 #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
56629 uint64x2x4_t __s1 = __p1; \
56630 uint64x2x4_t __ret; \
56631 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
56632 __ret; \
56633 })
56634 #else
56635 #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
56636 uint64x2x4_t __s1 = __p1; \
56637 uint64x2x4_t __rev1; \
56638 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
56639 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
56640 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
56641 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
56642 uint64x2x4_t __ret; \
56643 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
56644 \
56645 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56646 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56647 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56648 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56649 __ret; \
56650 })
56651 #endif
56652
56653 #ifdef __LITTLE_ENDIAN__
56654 #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
56655 int8x16x4_t __s1 = __p1; \
56656 int8x16x4_t __ret; \
56657 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
56658 __ret; \
56659 })
56660 #else
56661 #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
56662 int8x16x4_t __s1 = __p1; \
56663 int8x16x4_t __rev1; \
56664 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56665 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56666 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56667 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56668 int8x16x4_t __ret; \
56669 __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
56670 \
56671 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56672 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56673 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56674 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
56675 __ret; \
56676 })
56677 #endif
56678
56679 #ifdef __LITTLE_ENDIAN__
56680 #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56681 float64x2x4_t __s1 = __p1; \
56682 float64x2x4_t __ret; \
56683 __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
56684 __ret; \
56685 })
56686 #else
56687 #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56688 float64x2x4_t __s1 = __p1; \
56689 float64x2x4_t __rev1; \
56690 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
56691 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
56692 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
56693 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
56694 float64x2x4_t __ret; \
56695 __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
56696 \
56697 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56698 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56699 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56700 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56701 __ret; \
56702 })
56703 #endif
56704
56705 #ifdef __LITTLE_ENDIAN__
56706 #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56707 int64x2x4_t __s1 = __p1; \
56708 int64x2x4_t __ret; \
56709 __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
56710 __ret; \
56711 })
56712 #else
56713 #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56714 int64x2x4_t __s1 = __p1; \
56715 int64x2x4_t __rev1; \
56716 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
56717 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
56718 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
56719 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
56720 int64x2x4_t __ret; \
56721 __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
56722 \
56723 __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
56724 __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
56725 __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
56726 __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
56727 __ret; \
56728 })
56729 #endif
56730
56731 #ifdef __LITTLE_ENDIAN__
56732 #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
56733 uint64x1x4_t __s1 = __p1; \
56734 uint64x1x4_t __ret; \
56735 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
56736 __ret; \
56737 })
56738 #else
56739 #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
56740 uint64x1x4_t __s1 = __p1; \
56741 uint64x1x4_t __ret; \
56742 __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
56743 __ret; \
56744 })
56745 #endif
56746
56747 #ifdef __LITTLE_ENDIAN__
56748 #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56749 float64x1x4_t __s1 = __p1; \
56750 float64x1x4_t __ret; \
56751 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
56752 __ret; \
56753 })
56754 #else
56755 #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
56756 float64x1x4_t __s1 = __p1; \
56757 float64x1x4_t __ret; \
56758 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
56759 __ret; \
56760 })
56761 #endif
56762
56763 #ifdef __LITTLE_ENDIAN__
56764 #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56765 int64x1x4_t __s1 = __p1; \
56766 int64x1x4_t __ret; \
56767 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
56768 __ret; \
56769 })
56770 #else
56771 #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
56772 int64x1x4_t __s1 = __p1; \
56773 int64x1x4_t __ret; \
56774 __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
56775 __ret; \
56776 })
56777 #endif
56778
56779 #ifdef __LITTLE_ENDIAN__
56780 #define vldrq_p128(__p0) __extension__ ({ \
56781 poly128_t __ret; \
56782 __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
56783 __ret; \
56784 })
56785 #else
56786 #define vldrq_p128(__p0) __extension__ ({ \
56787 poly128_t __ret; \
56788 __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
56789 __ret; \
56790 })
56791 #endif
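/* Illustrative usage sketch (not part of the generated header): vldrq_p128
 * loads a single 128-bit polynomial value from memory. Assumes an AArch64
 * target that provides poly128_t; the names below are placeholders.
 *
 *   poly128_t storage = (poly128_t)0;
 *   poly128_t p = vldrq_p128(&storage);   // p == 0
 */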
56792
56793 #ifdef __LITTLE_ENDIAN__
56794 __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
56795 float64x2_t __ret;
56796 __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
56797 return __ret;
56798 }
56799 #else
56800 __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
56801 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56802 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
56803 float64x2_t __ret;
56804 __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
56805 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
56806 return __ret;
56807 }
56808 #endif
56809
56810 #ifdef __LITTLE_ENDIAN__
56811 __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
56812 float64x1_t __ret;
56813 __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
56814 return __ret;
56815 }
56816 #else
56817 __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
56818 float64x1_t __ret;
56819 __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
56820 return __ret;
56821 }
56822 #endif
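/* Illustrative usage sketch (not part of the generated header): vmaxq_f64
 * takes the lane-wise maximum of two float64x2_t vectors, and vmax_f64 does
 * the same for the single-lane variant. The values below are placeholders.
 *
 *   float64x2_t a = vdupq_n_f64(1.0);
 *   float64x2_t b = (float64x2_t){2.0, -3.0};
 *   float64x2_t m = vmaxq_f64(a, b);   // {2.0, 1.0}
 */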
56823
56824 #ifdef __LITTLE_ENDIAN__
56825 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
56826 float64_t __ret;
56827 __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
56828 return __ret;
56829 }
56830 #else
56831 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
56832 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56833 float64_t __ret;
56834 __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
56835 return __ret;
56836 }
56837 #endif
56838
56839 #ifdef __LITTLE_ENDIAN__
56840 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
56841 float32_t __ret;
56842 __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
56843 return __ret;
56844 }
56845 #else
56846 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
56847 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
56848 float32_t __ret;
56849 __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
56850 return __ret;
56851 }
56852 #endif
56853
56854 #ifdef __LITTLE_ENDIAN__
56855 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
56856 float32_t __ret;
56857 __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
56858 return __ret;
56859 }
56860 #else
56861 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
56862 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56863 float32_t __ret;
56864 __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
56865 return __ret;
56866 }
56867 #endif
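/* Illustrative usage sketch (not part of the generated header): the vmaxnmv
 * family reduces a vector to its maximum lane using the IEEE 754 maxNum rule,
 * so a quiet NaN lane loses to a numeric lane. The values are placeholders.
 *
 *   float32x4_t v = (float32x4_t){1.0f, __builtin_nanf(""), 5.0f, 2.0f};
 *   float32_t m = vmaxnmvq_f32(v);   // 5.0f
 */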
56868
56869 #ifdef __LITTLE_ENDIAN__
56870 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
56871 uint8_t __ret;
56872 __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
56873 return __ret;
56874 }
56875 #else
56876 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
56877 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
56878 uint8_t __ret;
56879 __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
56880 return __ret;
56881 }
56882 #endif
56883
56884 #ifdef __LITTLE_ENDIAN__
56885 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
56886 uint32_t __ret;
56887 __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
56888 return __ret;
56889 }
56890 #else
56891 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
56892 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
56893 uint32_t __ret;
56894 __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
56895 return __ret;
56896 }
56897 #endif
56898
56899 #ifdef __LITTLE_ENDIAN__
56900 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
56901 uint16_t __ret;
56902 __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
56903 return __ret;
56904 }
56905 #else
56906 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
56907 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
56908 uint16_t __ret;
56909 __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
56910 return __ret;
56911 }
56912 #endif
56913
56914 #ifdef __LITTLE_ENDIAN__
56915 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
56916 int8_t __ret;
56917 __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
56918 return __ret;
56919 }
56920 #else
56921 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
56922 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
56923 int8_t __ret;
56924 __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
56925 return __ret;
56926 }
56927 #endif
56928
56929 #ifdef __LITTLE_ENDIAN__
56930 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
56931 float64_t __ret;
56932 __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
56933 return __ret;
56934 }
56935 #else
56936 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
56937 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
56938 float64_t __ret;
56939 __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
56940 return __ret;
56941 }
56942 #endif
56943
56944 #ifdef __LITTLE_ENDIAN__
56945 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
56946 float32_t __ret;
56947 __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
56948 return __ret;
56949 }
56950 #else
56951 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
56952 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
56953 float32_t __ret;
56954 __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
56955 return __ret;
56956 }
56957 #endif
56958
56959 #ifdef __LITTLE_ENDIAN__
56960 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
56961 int32_t __ret;
56962 __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
56963 return __ret;
56964 }
56965 #else
56966 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
56967 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
56968 int32_t __ret;
56969 __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
56970 return __ret;
56971 }
56972 #endif
56973
56974 #ifdef __LITTLE_ENDIAN__
56975 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
56976 int16_t __ret;
56977 __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
56978 return __ret;
56979 }
56980 #else
56981 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
56982 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
56983 int16_t __ret;
56984 __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
56985 return __ret;
56986 }
56987 #endif
56988
56989 #ifdef __LITTLE_ENDIAN__
56990 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
56991 uint8_t __ret;
56992 __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
56993 return __ret;
56994 }
56995 #else
56996 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
56997 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
56998 uint8_t __ret;
56999 __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
57000 return __ret;
57001 }
57002 #endif
57003
57004 #ifdef __LITTLE_ENDIAN__
57005 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
57006 uint32_t __ret;
57007 __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
57008 return __ret;
57009 }
57010 #else
57011 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
57012 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57013 uint32_t __ret;
57014 __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
57015 return __ret;
57016 }
57017 #endif
57018
57019 #ifdef __LITTLE_ENDIAN__
57020 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
57021 uint16_t __ret;
57022 __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
57023 return __ret;
57024 }
57025 #else
57026 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
57027 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57028 uint16_t __ret;
57029 __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
57030 return __ret;
57031 }
57032 #endif
57033
57034 #ifdef __LITTLE_ENDIAN__
57035 __ai int8_t vmaxv_s8(int8x8_t __p0) {
57036 int8_t __ret;
57037 __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
57038 return __ret;
57039 }
57040 #else
57041 __ai int8_t vmaxv_s8(int8x8_t __p0) {
57042 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
57043 int8_t __ret;
57044 __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
57045 return __ret;
57046 }
57047 #endif
57048
57049 #ifdef __LITTLE_ENDIAN__
57050 __ai float32_t vmaxv_f32(float32x2_t __p0) {
57051 float32_t __ret;
57052 __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
57053 return __ret;
57054 }
57055 #else
57056 __ai float32_t vmaxv_f32(float32x2_t __p0) {
57057 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57058 float32_t __ret;
57059 __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
57060 return __ret;
57061 }
57062 #endif
57063
57064 #ifdef __LITTLE_ENDIAN__
57065 __ai int32_t vmaxv_s32(int32x2_t __p0) {
57066 int32_t __ret;
57067 __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
57068 return __ret;
57069 }
57070 #else
57071 __ai int32_t vmaxv_s32(int32x2_t __p0) {
57072 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57073 int32_t __ret;
57074 __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
57075 return __ret;
57076 }
57077 #endif
57078
57079 #ifdef __LITTLE_ENDIAN__
57080 __ai int16_t vmaxv_s16(int16x4_t __p0) {
57081 int16_t __ret;
57082 __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
57083 return __ret;
57084 }
57085 #else
57086 __ai int16_t vmaxv_s16(int16x4_t __p0) {
57087 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57088 int16_t __ret;
57089 __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
57090 return __ret;
57091 }
57092 #endif
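/* Illustrative usage sketch (not part of the generated header): the vmaxv
 * family reduces all lanes of a vector to a single scalar maximum. The values
 * below are placeholders.
 *
 *   uint8x16_t v = vdupq_n_u8(9);
 *   v = vsetq_lane_u8(200, v, 3);
 *   uint8_t hi = vmaxvq_u8(v);   // 200
 */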
57093
57094 #ifdef __LITTLE_ENDIAN__
57095 __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
57096 float64x2_t __ret;
57097 __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
57098 return __ret;
57099 }
57100 #else
57101 __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
57102 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57103 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
57104 float64x2_t __ret;
57105 __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
57106 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
57107 return __ret;
57108 }
57109 #endif
57110
57111 #ifdef __LITTLE_ENDIAN__
57112 __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
57113 float64x1_t __ret;
57114 __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
57115 return __ret;
57116 }
57117 #else
57118 __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
57119 float64x1_t __ret;
57120 __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
57121 return __ret;
57122 }
57123 #endif
57124
57125 #ifdef __LITTLE_ENDIAN__
57126 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
57127 float64_t __ret;
57128 __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
57129 return __ret;
57130 }
57131 #else
57132 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
57133 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57134 float64_t __ret;
57135 __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
57136 return __ret;
57137 }
57138 #endif
57139
57140 #ifdef __LITTLE_ENDIAN__
57141 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
57142 float32_t __ret;
57143 __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
57144 return __ret;
57145 }
57146 #else
57147 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
57148 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57149 float32_t __ret;
57150 __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
57151 return __ret;
57152 }
57153 #endif
57154
57155 #ifdef __LITTLE_ENDIAN__
57156 __ai float32_t vminnmv_f32(float32x2_t __p0) {
57157 float32_t __ret;
57158 __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
57159 return __ret;
57160 }
57161 #else
57162 __ai float32_t vminnmv_f32(float32x2_t __p0) {
57163 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57164 float32_t __ret;
57165 __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
57166 return __ret;
57167 }
57168 #endif
57169
57170 #ifdef __LITTLE_ENDIAN__
57171 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
57172 uint8_t __ret;
57173 __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
57174 return __ret;
57175 }
57176 #else
57177 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
57178 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57179 uint8_t __ret;
57180 __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
57181 return __ret;
57182 }
57183 #endif
57184
57185 #ifdef __LITTLE_ENDIAN__
57186 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
57187 uint32_t __ret;
57188 __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
57189 return __ret;
57190 }
57191 #else
57192 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
57193 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57194 uint32_t __ret;
57195 __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
57196 return __ret;
57197 }
57198 #endif
57199
57200 #ifdef __LITTLE_ENDIAN__
57201 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
57202 uint16_t __ret;
57203 __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
57204 return __ret;
57205 }
57206 #else
57207 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
57208 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
57209 uint16_t __ret;
57210 __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
57211 return __ret;
57212 }
57213 #endif
57214
57215 #ifdef __LITTLE_ENDIAN__
57216 __ai int8_t vminvq_s8(int8x16_t __p0) {
57217 int8_t __ret;
57218 __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
57219 return __ret;
57220 }
57221 #else
57222 __ai int8_t vminvq_s8(int8x16_t __p0) {
57223 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
57224 int8_t __ret;
57225 __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
57226 return __ret;
57227 }
57228 #endif
57229
57230 #ifdef __LITTLE_ENDIAN__
57231 __ai float64_t vminvq_f64(float64x2_t __p0) {
57232 float64_t __ret;
57233 __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
57234 return __ret;
57235 }
57236 #else
57237 __ai float64_t vminvq_f64(float64x2_t __p0) {
57238 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57239 float64_t __ret;
57240 __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
57241 return __ret;
57242 }
57243 #endif
57244
57245 #ifdef __LITTLE_ENDIAN__
57246 __ai float32_t vminvq_f32(float32x4_t __p0) {
57247 float32_t __ret;
57248 __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
57249 return __ret;
57250 }
57251 #else
57252 __ai float32_t vminvq_f32(float32x4_t __p0) {
57253 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57254 float32_t __ret;
57255 __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
57256 return __ret;
57257 }
57258 #endif
57259
57260 #ifdef __LITTLE_ENDIAN__
57261 __ai int32_t vminvq_s32(int32x4_t __p0) {
57262 int32_t __ret;
57263 __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
57264 return __ret;
57265 }
57266 #else
57267 __ai int32_t vminvq_s32(int32x4_t __p0) {
57268 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57269 int32_t __ret;
57270 __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
57271 return __ret;
57272 }
57273 #endif
57274
57275 #ifdef __LITTLE_ENDIAN__
57276 __ai int16_t vminvq_s16(int16x8_t __p0) {
57277 int16_t __ret;
57278 __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
57279 return __ret;
57280 }
57281 #else
57282 __ai int16_t vminvq_s16(int16x8_t __p0) {
57283 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
57284 int16_t __ret;
57285 __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
57286 return __ret;
57287 }
57288 #endif
57289
57290 #ifdef __LITTLE_ENDIAN__
57291 __ai uint8_t vminv_u8(uint8x8_t __p0) {
57292 uint8_t __ret;
57293 __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
57294 return __ret;
57295 }
57296 #else
57297 __ai uint8_t vminv_u8(uint8x8_t __p0) {
57298 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
57299 uint8_t __ret;
57300 __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
57301 return __ret;
57302 }
57303 #endif
57304
57305 #ifdef __LITTLE_ENDIAN__
57306 __ai uint32_t vminv_u32(uint32x2_t __p0) {
57307 uint32_t __ret;
57308 __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
57309 return __ret;
57310 }
57311 #else
57312 __ai uint32_t vminv_u32(uint32x2_t __p0) {
57313 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57314 uint32_t __ret;
57315 __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
57316 return __ret;
57317 }
57318 #endif
57319
57320 #ifdef __LITTLE_ENDIAN__
57321 __ai uint16_t vminv_u16(uint16x4_t __p0) {
57322 uint16_t __ret;
57323 __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
57324 return __ret;
57325 }
57326 #else
57327 __ai uint16_t vminv_u16(uint16x4_t __p0) {
57328 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57329 uint16_t __ret;
57330 __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
57331 return __ret;
57332 }
57333 #endif
57334
57335 #ifdef __LITTLE_ENDIAN__
57336 __ai int8_t vminv_s8(int8x8_t __p0) {
57337 int8_t __ret;
57338 __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
57339 return __ret;
57340 }
57341 #else
57342 __ai int8_t vminv_s8(int8x8_t __p0) {
57343 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
57344 int8_t __ret;
57345 __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
57346 return __ret;
57347 }
57348 #endif
57349
57350 #ifdef __LITTLE_ENDIAN__
57351 __ai float32_t vminv_f32(float32x2_t __p0) {
57352 float32_t __ret;
57353 __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
57354 return __ret;
57355 }
57356 #else
57357 __ai float32_t vminv_f32(float32x2_t __p0) {
57358 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57359 float32_t __ret;
57360 __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
57361 return __ret;
57362 }
57363 #endif
57364
57365 #ifdef __LITTLE_ENDIAN__
57366 __ai int32_t vminv_s32(int32x2_t __p0) {
57367 int32_t __ret;
57368 __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
57369 return __ret;
57370 }
57371 #else
57372 __ai int32_t vminv_s32(int32x2_t __p0) {
57373 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57374 int32_t __ret;
57375 __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
57376 return __ret;
57377 }
57378 #endif
57379
57380 #ifdef __LITTLE_ENDIAN__
57381 __ai int16_t vminv_s16(int16x4_t __p0) {
57382 int16_t __ret;
57383 __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
57384 return __ret;
57385 }
57386 #else
57387 __ai int16_t vminv_s16(int16x4_t __p0) {
57388 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
57389 int16_t __ret;
57390 __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
57391 return __ret;
57392 }
57393 #endif
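/* Illustrative usage sketch (not part of the generated header): the vminv
 * family is the minimum-reduction counterpart of the vmaxv family above. The
 * values below are placeholders.
 *
 *   int32x4_t v = (int32x4_t){4, -1, 7, 2};
 *   int32_t lo = vminvq_s32(v);   // -1
 */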
57394
57395 #ifdef __LITTLE_ENDIAN__
57396 __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
57397 float64x2_t __ret;
57398 __ret = __p0 + __p1 * __p2;
57399 return __ret;
57400 }
57401 #else
57402 __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
57403 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57404 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
57405 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
57406 float64x2_t __ret;
57407 __ret = __rev0 + __rev1 * __rev2;
57408 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
57409 return __ret;
57410 }
57411 #endif
57412
57413 #ifdef __LITTLE_ENDIAN__
57414 __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
57415 float64x1_t __ret;
57416 __ret = __p0 + __p1 * __p2;
57417 return __ret;
57418 }
57419 #else
57420 __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
57421 float64x1_t __ret;
57422 __ret = __p0 + __p1 * __p2;
57423 return __ret;
57424 }
57425 #endif
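/* Illustrative usage sketch (not part of the generated header): vmlaq_f64
 * computes acc + a * b lane-wise; as defined above it expands to a separate
 * multiply and add rather than a fused multiply-add. Values are placeholders.
 *
 *   float64x2_t acc = vdupq_n_f64(1.0);
 *   float64x2_t a   = vdupq_n_f64(2.0);
 *   float64x2_t b   = vdupq_n_f64(3.0);
 *   float64x2_t r   = vmlaq_f64(acc, a, b);   // {7.0, 7.0}
 */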
57426
57427 #ifdef __LITTLE_ENDIAN__
57428 #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57429 uint32x4_t __s0 = __p0; \
57430 uint32x4_t __s1 = __p1; \
57431 uint32x4_t __s2 = __p2; \
57432 uint32x4_t __ret; \
57433 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
57434 __ret; \
57435 })
57436 #else
57437 #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57438 uint32x4_t __s0 = __p0; \
57439 uint32x4_t __s1 = __p1; \
57440 uint32x4_t __s2 = __p2; \
57441 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57442 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57443 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57444 uint32x4_t __ret; \
57445 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
57446 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57447 __ret; \
57448 })
57449 #endif
57450
57451 #ifdef __LITTLE_ENDIAN__
57452 #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57453 uint16x8_t __s0 = __p0; \
57454 uint16x8_t __s1 = __p1; \
57455 uint16x8_t __s2 = __p2; \
57456 uint16x8_t __ret; \
57457 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
57458 __ret; \
57459 })
57460 #else
57461 #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57462 uint16x8_t __s0 = __p0; \
57463 uint16x8_t __s1 = __p1; \
57464 uint16x8_t __s2 = __p2; \
57465 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
57466 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
57467 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57468 uint16x8_t __ret; \
57469 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
57470 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
57471 __ret; \
57472 })
57473 #endif
57474
57475 #ifdef __LITTLE_ENDIAN__
57476 #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
57477 float32x4_t __s0 = __p0; \
57478 float32x4_t __s1 = __p1; \
57479 float32x4_t __s2 = __p2; \
57480 float32x4_t __ret; \
57481 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
57482 __ret; \
57483 })
57484 #else
57485 #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
57486 float32x4_t __s0 = __p0; \
57487 float32x4_t __s1 = __p1; \
57488 float32x4_t __s2 = __p2; \
57489 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57490 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57491 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57492 float32x4_t __ret; \
57493 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
57494 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57495 __ret; \
57496 })
57497 #endif
57498
57499 #ifdef __LITTLE_ENDIAN__
57500 #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57501 int32x4_t __s0 = __p0; \
57502 int32x4_t __s1 = __p1; \
57503 int32x4_t __s2 = __p2; \
57504 int32x4_t __ret; \
57505 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
57506 __ret; \
57507 })
57508 #else
57509 #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57510 int32x4_t __s0 = __p0; \
57511 int32x4_t __s1 = __p1; \
57512 int32x4_t __s2 = __p2; \
57513 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57514 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57515 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57516 int32x4_t __ret; \
57517 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
57518 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57519 __ret; \
57520 })
57521 #endif
57522
57523 #ifdef __LITTLE_ENDIAN__
57524 #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57525 int16x8_t __s0 = __p0; \
57526 int16x8_t __s1 = __p1; \
57527 int16x8_t __s2 = __p2; \
57528 int16x8_t __ret; \
57529 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
57530 __ret; \
57531 })
57532 #else
57533 #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57534 int16x8_t __s0 = __p0; \
57535 int16x8_t __s1 = __p1; \
57536 int16x8_t __s2 = __p2; \
57537 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
57538 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
57539 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57540 int16x8_t __ret; \
57541 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
57542 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
57543 __ret; \
57544 })
57545 #endif
57546
57547 #ifdef __LITTLE_ENDIAN__
57548 #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57549 uint32x2_t __s0 = __p0; \
57550 uint32x2_t __s1 = __p1; \
57551 uint32x4_t __s2 = __p2; \
57552 uint32x2_t __ret; \
57553 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
57554 __ret; \
57555 })
57556 #else
57557 #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57558 uint32x2_t __s0 = __p0; \
57559 uint32x2_t __s1 = __p1; \
57560 uint32x4_t __s2 = __p2; \
57561 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57562 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
57563 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57564 uint32x2_t __ret; \
57565 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
57566 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57567 __ret; \
57568 })
57569 #endif
57570
57571 #ifdef __LITTLE_ENDIAN__
57572 #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57573 uint16x4_t __s0 = __p0; \
57574 uint16x4_t __s1 = __p1; \
57575 uint16x8_t __s2 = __p2; \
57576 uint16x4_t __ret; \
57577 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
57578 __ret; \
57579 })
57580 #else
57581 #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57582 uint16x4_t __s0 = __p0; \
57583 uint16x4_t __s1 = __p1; \
57584 uint16x8_t __s2 = __p2; \
57585 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57586 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57587 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57588 uint16x4_t __ret; \
57589 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
57590 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57591 __ret; \
57592 })
57593 #endif
57594
57595 #ifdef __LITTLE_ENDIAN__
57596 #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
57597 float32x2_t __s0 = __p0; \
57598 float32x2_t __s1 = __p1; \
57599 float32x4_t __s2 = __p2; \
57600 float32x2_t __ret; \
57601 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
57602 __ret; \
57603 })
57604 #else
57605 #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
57606 float32x2_t __s0 = __p0; \
57607 float32x2_t __s1 = __p1; \
57608 float32x4_t __s2 = __p2; \
57609 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57610 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
57611 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57612 float32x2_t __ret; \
57613 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
57614 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57615 __ret; \
57616 })
57617 #endif
57618
57619 #ifdef __LITTLE_ENDIAN__
57620 #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57621 int32x2_t __s0 = __p0; \
57622 int32x2_t __s1 = __p1; \
57623 int32x4_t __s2 = __p2; \
57624 int32x2_t __ret; \
57625 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
57626 __ret; \
57627 })
57628 #else
57629 #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57630 int32x2_t __s0 = __p0; \
57631 int32x2_t __s1 = __p1; \
57632 int32x4_t __s2 = __p2; \
57633 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57634 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
57635 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57636 int32x2_t __ret; \
57637 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
57638 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57639 __ret; \
57640 })
57641 #endif
57642
57643 #ifdef __LITTLE_ENDIAN__
57644 #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57645 int16x4_t __s0 = __p0; \
57646 int16x4_t __s1 = __p1; \
57647 int16x8_t __s2 = __p2; \
57648 int16x4_t __ret; \
57649 __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
57650 __ret; \
57651 })
57652 #else
57653 #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57654 int16x4_t __s0 = __p0; \
57655 int16x4_t __s1 = __p1; \
57656 int16x8_t __s2 = __p2; \
57657 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57658 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57659 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57660 int16x4_t __ret; \
57661 __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
57662 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57663 __ret; \
57664 })
57665 #endif
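/* Illustrative usage sketch (not part of the generated header): the laneq
 * forms of vmla multiply by a single lane selected from a 128-bit vector
 * before accumulating. The values below are placeholders.
 *
 *   int32x4_t acc = vdupq_n_s32(10);
 *   int32x4_t a   = (int32x4_t){1, 2, 3, 4};
 *   int32x4_t b   = (int32x4_t){5, 6, 7, 8};
 *   int32x4_t r   = vmlaq_laneq_s32(acc, a, b, 3);   // {18, 26, 34, 42}
 */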
57666
57667 #ifdef __LITTLE_ENDIAN__
57668 __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
57669 float64x2_t __ret;
57670 __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
57671 return __ret;
57672 }
57673 #else
57674 __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
57675 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57676 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
57677 float64x2_t __ret;
57678 __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
57679 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
57680 return __ret;
57681 }
57682 #endif
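/* Illustrative usage sketch (not part of the generated header): vmlaq_n_f64
 * accumulates a vector scaled by a scalar that is broadcast to both lanes.
 * The values below are placeholders.
 *
 *   float64x2_t acc = vdupq_n_f64(1.0);
 *   float64x2_t a   = (float64x2_t){2.0, 4.0};
 *   float64x2_t r   = vmlaq_n_f64(acc, a, 0.5);   // {2.0, 3.0}
 */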
57683
57684 #ifdef __LITTLE_ENDIAN__
57685 #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57686 uint64x2_t __s0 = __p0; \
57687 uint32x4_t __s1 = __p1; \
57688 uint32x2_t __s2 = __p2; \
57689 uint64x2_t __ret; \
57690 __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
57691 __ret; \
57692 })
57693 #else
57694 #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57695 uint64x2_t __s0 = __p0; \
57696 uint32x4_t __s1 = __p1; \
57697 uint32x2_t __s2 = __p2; \
57698 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57699 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57700 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
57701 uint64x2_t __ret; \
57702 __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
57703 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57704 __ret; \
57705 })
57706 #endif
57707
57708 #ifdef __LITTLE_ENDIAN__
57709 #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57710 uint32x4_t __s0 = __p0; \
57711 uint16x8_t __s1 = __p1; \
57712 uint16x4_t __s2 = __p2; \
57713 uint32x4_t __ret; \
57714 __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
57715 __ret; \
57716 })
57717 #else
57718 #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57719 uint32x4_t __s0 = __p0; \
57720 uint16x8_t __s1 = __p1; \
57721 uint16x4_t __s2 = __p2; \
57722 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57723 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
57724 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57725 uint32x4_t __ret; \
57726 __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
57727 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57728 __ret; \
57729 })
57730 #endif
57731
57732 #ifdef __LITTLE_ENDIAN__
57733 #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57734 int64x2_t __s0 = __p0; \
57735 int32x4_t __s1 = __p1; \
57736 int32x2_t __s2 = __p2; \
57737 int64x2_t __ret; \
57738 __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
57739 __ret; \
57740 })
57741 #else
57742 #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57743 int64x2_t __s0 = __p0; \
57744 int32x4_t __s1 = __p1; \
57745 int32x2_t __s2 = __p2; \
57746 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57747 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57748 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
57749 int64x2_t __ret; \
57750 __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
57751 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57752 __ret; \
57753 })
57754 #endif
57755
57756 #ifdef __LITTLE_ENDIAN__
57757 #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57758 int32x4_t __s0 = __p0; \
57759 int16x8_t __s1 = __p1; \
57760 int16x4_t __s2 = __p2; \
57761 int32x4_t __ret; \
57762 __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
57763 __ret; \
57764 })
57765 #else
57766 #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57767 int32x4_t __s0 = __p0; \
57768 int16x8_t __s1 = __p1; \
57769 int16x4_t __s2 = __p2; \
57770 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57771 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
57772 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57773 int32x4_t __ret; \
57774 __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
57775 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57776 __ret; \
57777 })
57778 #endif
57779
57780 #ifdef __LITTLE_ENDIAN__
57781 #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57782 uint64x2_t __s0 = __p0; \
57783 uint32x4_t __s1 = __p1; \
57784 uint32x4_t __s2 = __p2; \
57785 uint64x2_t __ret; \
57786 __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
57787 __ret; \
57788 })
57789 #else
57790 #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57791 uint64x2_t __s0 = __p0; \
57792 uint32x4_t __s1 = __p1; \
57793 uint32x4_t __s2 = __p2; \
57794 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57795 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57796 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57797 uint64x2_t __ret; \
57798 __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
57799 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57800 __ret; \
57801 })
57802 #endif
57803
57804 #ifdef __LITTLE_ENDIAN__
57805 #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57806 uint32x4_t __s0 = __p0; \
57807 uint16x8_t __s1 = __p1; \
57808 uint16x8_t __s2 = __p2; \
57809 uint32x4_t __ret; \
57810 __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
57811 __ret; \
57812 })
57813 #else
57814 #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57815 uint32x4_t __s0 = __p0; \
57816 uint16x8_t __s1 = __p1; \
57817 uint16x8_t __s2 = __p2; \
57818 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57819 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
57820 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57821 uint32x4_t __ret; \
57822 __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
57823 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57824 __ret; \
57825 })
57826 #endif
57827
57828 #ifdef __LITTLE_ENDIAN__
57829 #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57830 int64x2_t __s0 = __p0; \
57831 int32x4_t __s1 = __p1; \
57832 int32x4_t __s2 = __p2; \
57833 int64x2_t __ret; \
57834 __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
57835 __ret; \
57836 })
57837 #else
57838 #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57839 int64x2_t __s0 = __p0; \
57840 int32x4_t __s1 = __p1; \
57841 int32x4_t __s2 = __p2; \
57842 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57843 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57844 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57845 int64x2_t __ret; \
57846 __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
57847 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57848 __ret; \
57849 })
57850 #endif
57851
57852 #ifdef __LITTLE_ENDIAN__
57853 #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57854 int32x4_t __s0 = __p0; \
57855 int16x8_t __s1 = __p1; \
57856 int16x8_t __s2 = __p2; \
57857 int32x4_t __ret; \
57858 __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
57859 __ret; \
57860 })
57861 #else
57862 #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57863 int32x4_t __s0 = __p0; \
57864 int16x8_t __s1 = __p1; \
57865 int16x8_t __s2 = __p2; \
57866 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57867 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
57868 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57869 int32x4_t __ret; \
57870 __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
57871 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57872 __ret; \
57873 })
57874 #endif
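/* Illustrative usage sketch (not part of the generated header): the
 * vmlal_high_lane and vmlal_high_laneq forms widen the high half of the
 * second operand, multiply it by one lane of the third operand, and add the
 * product to the accumulator. The values below are placeholders.
 *
 *   int64x2_t acc = vdupq_n_s64(0);
 *   int32x4_t a   = (int32x4_t){1, 2, 3, 4};
 *   int32x2_t b   = (int32x2_t){10, 100};
 *   int64x2_t r   = vmlal_high_lane_s32(acc, a, b, 1);   // {300, 400}
 */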
57875
57876 #ifdef __LITTLE_ENDIAN__
57877 #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57878 uint64x2_t __s0 = __p0; \
57879 uint32x2_t __s1 = __p1; \
57880 uint32x4_t __s2 = __p2; \
57881 uint64x2_t __ret; \
57882 __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
57883 __ret; \
57884 })
57885 #else
57886 #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
57887 uint64x2_t __s0 = __p0; \
57888 uint32x2_t __s1 = __p1; \
57889 uint32x4_t __s2 = __p2; \
57890 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57891 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
57892 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57893 uint64x2_t __ret; \
57894 __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
57895 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57896 __ret; \
57897 })
57898 #endif
57899
57900 #ifdef __LITTLE_ENDIAN__
57901 #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57902 uint32x4_t __s0 = __p0; \
57903 uint16x4_t __s1 = __p1; \
57904 uint16x8_t __s2 = __p2; \
57905 uint32x4_t __ret; \
57906 __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
57907 __ret; \
57908 })
57909 #else
57910 #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
57911 uint32x4_t __s0 = __p0; \
57912 uint16x4_t __s1 = __p1; \
57913 uint16x8_t __s2 = __p2; \
57914 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57915 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57916 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57917 uint32x4_t __ret; \
57918 __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
57919 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57920 __ret; \
57921 })
57922 #endif
57923
57924 #ifdef __LITTLE_ENDIAN__
57925 #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57926 int64x2_t __s0 = __p0; \
57927 int32x2_t __s1 = __p1; \
57928 int32x4_t __s2 = __p2; \
57929 int64x2_t __ret; \
57930 __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
57931 __ret; \
57932 })
57933 #else
57934 #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
57935 int64x2_t __s0 = __p0; \
57936 int32x2_t __s1 = __p1; \
57937 int32x4_t __s2 = __p2; \
57938 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
57939 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
57940 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
57941 int64x2_t __ret; \
57942 __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
57943 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
57944 __ret; \
57945 })
57946 #endif
57947
57948 #ifdef __LITTLE_ENDIAN__
57949 #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57950 int32x4_t __s0 = __p0; \
57951 int16x4_t __s1 = __p1; \
57952 int16x8_t __s2 = __p2; \
57953 int32x4_t __ret; \
57954 __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
57955 __ret; \
57956 })
57957 #else
57958 #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
57959 int32x4_t __s0 = __p0; \
57960 int16x4_t __s1 = __p1; \
57961 int16x8_t __s2 = __p2; \
57962 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
57963 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
57964 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
57965 int32x4_t __ret; \
57966 __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
57967 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
57968 __ret; \
57969 })
57970 #endif
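/* Illustrative usage sketch (not part of the generated header): the
 * vmlal_laneq forms widen the full 64-bit second operand, multiply it by a
 * lane chosen from a 128-bit third operand, and accumulate. The values below
 * are placeholders.
 *
 *   int32x4_t acc = vdupq_n_s32(0);
 *   int16x4_t a   = (int16x4_t){1, 2, 3, 4};
 *   int16x8_t b   = vdupq_n_s16(3);
 *   int32x4_t r   = vmlal_laneq_s16(acc, a, b, 7);   // {3, 6, 9, 12}
 */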
57971
57972 #ifdef __LITTLE_ENDIAN__
57973 __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
57974 float64x2_t __ret;
57975 __ret = __p0 - __p1 * __p2;
57976 return __ret;
57977 }
57978 #else
57979 __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
57980 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
57981 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
57982 float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
57983 float64x2_t __ret;
57984 __ret = __rev0 - __rev1 * __rev2;
57985 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
57986 return __ret;
57987 }
57988 #endif
57989
57990 #ifdef __LITTLE_ENDIAN__
57991 __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
57992 float64x1_t __ret;
57993 __ret = __p0 - __p1 * __p2;
57994 return __ret;
57995 }
57996 #else
57997 __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
57998 float64x1_t __ret;
57999 __ret = __p0 - __p1 * __p2;
58000 return __ret;
58001 }
58002 #endif
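/* Illustrative usage sketch (not part of the generated header): vmlsq_f64 is
 * the multiply-subtract counterpart of vmlaq_f64, computing acc - a * b per
 * lane. The values below are placeholders.
 *
 *   float64x2_t acc = vdupq_n_f64(10.0);
 *   float64x2_t a   = vdupq_n_f64(2.0);
 *   float64x2_t b   = vdupq_n_f64(3.0);
 *   float64x2_t r   = vmlsq_f64(acc, a, b);   // {4.0, 4.0}
 */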
58003
58004 #ifdef __LITTLE_ENDIAN__
58005 #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58006 uint32x4_t __s0 = __p0; \
58007 uint32x4_t __s1 = __p1; \
58008 uint32x4_t __s2 = __p2; \
58009 uint32x4_t __ret; \
58010 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
58011 __ret; \
58012 })
58013 #else
58014 #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58015 uint32x4_t __s0 = __p0; \
58016 uint32x4_t __s1 = __p1; \
58017 uint32x4_t __s2 = __p2; \
58018 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58019 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58020 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58021 uint32x4_t __ret; \
58022 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
58023 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58024 __ret; \
58025 })
58026 #endif
58027
58028 #ifdef __LITTLE_ENDIAN__
58029 #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58030 uint16x8_t __s0 = __p0; \
58031 uint16x8_t __s1 = __p1; \
58032 uint16x8_t __s2 = __p2; \
58033 uint16x8_t __ret; \
58034 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
58035 __ret; \
58036 })
58037 #else
58038 #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58039 uint16x8_t __s0 = __p0; \
58040 uint16x8_t __s1 = __p1; \
58041 uint16x8_t __s2 = __p2; \
58042 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
58043 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
58044 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58045 uint16x8_t __ret; \
58046 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
58047 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
58048 __ret; \
58049 })
58050 #endif
58051
58052 #ifdef __LITTLE_ENDIAN__
58053 #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
58054 float32x4_t __s0 = __p0; \
58055 float32x4_t __s1 = __p1; \
58056 float32x4_t __s2 = __p2; \
58057 float32x4_t __ret; \
58058 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
58059 __ret; \
58060 })
58061 #else
58062 #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
58063 float32x4_t __s0 = __p0; \
58064 float32x4_t __s1 = __p1; \
58065 float32x4_t __s2 = __p2; \
58066 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58067 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58068 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58069 float32x4_t __ret; \
58070 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
58071 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58072 __ret; \
58073 })
58074 #endif
58075
58076 #ifdef __LITTLE_ENDIAN__
58077 #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58078 int32x4_t __s0 = __p0; \
58079 int32x4_t __s1 = __p1; \
58080 int32x4_t __s2 = __p2; \
58081 int32x4_t __ret; \
58082 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
58083 __ret; \
58084 })
58085 #else
58086 #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58087 int32x4_t __s0 = __p0; \
58088 int32x4_t __s1 = __p1; \
58089 int32x4_t __s2 = __p2; \
58090 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58091 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58092 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58093 int32x4_t __ret; \
58094 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
58095 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58096 __ret; \
58097 })
58098 #endif
58099
58100 #ifdef __LITTLE_ENDIAN__
58101 #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58102 int16x8_t __s0 = __p0; \
58103 int16x8_t __s1 = __p1; \
58104 int16x8_t __s2 = __p2; \
58105 int16x8_t __ret; \
58106 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
58107 __ret; \
58108 })
58109 #else
58110 #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58111 int16x8_t __s0 = __p0; \
58112 int16x8_t __s1 = __p1; \
58113 int16x8_t __s2 = __p2; \
58114 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
58115 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
58116 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58117 int16x8_t __ret; \
58118 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
58119 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
58120 __ret; \
58121 })
58122 #endif
58123
58124 #ifdef __LITTLE_ENDIAN__
58125 #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58126 uint32x2_t __s0 = __p0; \
58127 uint32x2_t __s1 = __p1; \
58128 uint32x4_t __s2 = __p2; \
58129 uint32x2_t __ret; \
58130 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
58131 __ret; \
58132 })
58133 #else
58134 #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58135 uint32x2_t __s0 = __p0; \
58136 uint32x2_t __s1 = __p1; \
58137 uint32x4_t __s2 = __p2; \
58138 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58139 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
58140 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58141 uint32x2_t __ret; \
58142 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
58143 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58144 __ret; \
58145 })
58146 #endif
58147
58148 #ifdef __LITTLE_ENDIAN__
58149 #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58150 uint16x4_t __s0 = __p0; \
58151 uint16x4_t __s1 = __p1; \
58152 uint16x8_t __s2 = __p2; \
58153 uint16x4_t __ret; \
58154 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
58155 __ret; \
58156 })
58157 #else
58158 #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58159 uint16x4_t __s0 = __p0; \
58160 uint16x4_t __s1 = __p1; \
58161 uint16x8_t __s2 = __p2; \
58162 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58163 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58164 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58165 uint16x4_t __ret; \
58166 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
58167 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58168 __ret; \
58169 })
58170 #endif
58171
58172 #ifdef __LITTLE_ENDIAN__
58173 #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
58174 float32x2_t __s0 = __p0; \
58175 float32x2_t __s1 = __p1; \
58176 float32x4_t __s2 = __p2; \
58177 float32x2_t __ret; \
58178 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
58179 __ret; \
58180 })
58181 #else
58182 #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
58183 float32x2_t __s0 = __p0; \
58184 float32x2_t __s1 = __p1; \
58185 float32x4_t __s2 = __p2; \
58186 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58187 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
58188 float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58189 float32x2_t __ret; \
58190 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
58191 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58192 __ret; \
58193 })
58194 #endif
58195
58196 #ifdef __LITTLE_ENDIAN__
58197 #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58198 int32x2_t __s0 = __p0; \
58199 int32x2_t __s1 = __p1; \
58200 int32x4_t __s2 = __p2; \
58201 int32x2_t __ret; \
58202 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
58203 __ret; \
58204 })
58205 #else
58206 #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58207 int32x2_t __s0 = __p0; \
58208 int32x2_t __s1 = __p1; \
58209 int32x4_t __s2 = __p2; \
58210 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58211 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
58212 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58213 int32x2_t __ret; \
58214 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
58215 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58216 __ret; \
58217 })
58218 #endif
58219
58220 #ifdef __LITTLE_ENDIAN__
58221 #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58222 int16x4_t __s0 = __p0; \
58223 int16x4_t __s1 = __p1; \
58224 int16x8_t __s2 = __p2; \
58225 int16x4_t __ret; \
58226 __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
58227 __ret; \
58228 })
58229 #else
58230 #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58231 int16x4_t __s0 = __p0; \
58232 int16x4_t __s1 = __p1; \
58233 int16x8_t __s2 = __p2; \
58234 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58235 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58236 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58237 int16x4_t __ret; \
58238 __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
58239 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58240 __ret; \
58241 })
58242 #endif
58243
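/*
 * Editorial note (not part of the generated header): the vmls(q)_laneq_* macros
 * above broadcast one lane of a 128-bit vector, selected by the constant lane
 * index, and subtract the product from the accumulator.  A minimal sketch with
 * hypothetical names:
 *
 *   float32x4_t acc   = vmovq_n_f32(1.0f);
 *   float32x4_t a     = vmovq_n_f32(2.0f);
 *   float32x4_t coeff = {0.5f, 1.5f, 2.5f, 3.5f};
 *   float32x4_t r     = vmlsq_laneq_f32(acc, a, coeff, 2);  // acc - a * coeff[2]
 */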
58244 #ifdef __LITTLE_ENDIAN__
58245 __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
58246 float64x2_t __ret;
58247 __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
58248 return __ret;
58249 }
58250 #else
58251 __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
58252 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
58253 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
58254 float64x2_t __ret;
58255 __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
58256 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
58257 return __ret;
58258 }
58259 #endif
58260
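/*
 * Editorial note (not part of the generated header): vmlsq_n_f64 above
 * multiplies every lane of the second vector by a scalar and subtracts the
 * product from the accumulator.  Sketch with hypothetical names:
 *
 *   float64x2_t acc = vmovq_n_f64(5.0);
 *   float64x2_t a   = vmovq_n_f64(2.0);
 *   float64x2_t r   = vmlsq_n_f64(acc, a, 1.5);  // each lane: 5.0 - 2.0 * 1.5 = 2.0
 */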
58261 #ifdef __LITTLE_ENDIAN__
58262 #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58263 uint64x2_t __s0 = __p0; \
58264 uint32x4_t __s1 = __p1; \
58265 uint32x2_t __s2 = __p2; \
58266 uint64x2_t __ret; \
58267 __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
58268 __ret; \
58269 })
58270 #else
58271 #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58272 uint64x2_t __s0 = __p0; \
58273 uint32x4_t __s1 = __p1; \
58274 uint32x2_t __s2 = __p2; \
58275 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58276 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58277 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
58278 uint64x2_t __ret; \
58279 __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
58280 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58281 __ret; \
58282 })
58283 #endif
58284
58285 #ifdef __LITTLE_ENDIAN__
58286 #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58287 uint32x4_t __s0 = __p0; \
58288 uint16x8_t __s1 = __p1; \
58289 uint16x4_t __s2 = __p2; \
58290 uint32x4_t __ret; \
58291 __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
58292 __ret; \
58293 })
58294 #else
58295 #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58296 uint32x4_t __s0 = __p0; \
58297 uint16x8_t __s1 = __p1; \
58298 uint16x4_t __s2 = __p2; \
58299 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58300 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
58301 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58302 uint32x4_t __ret; \
58303 __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
58304 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58305 __ret; \
58306 })
58307 #endif
58308
58309 #ifdef __LITTLE_ENDIAN__
58310 #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58311 int64x2_t __s0 = __p0; \
58312 int32x4_t __s1 = __p1; \
58313 int32x2_t __s2 = __p2; \
58314 int64x2_t __ret; \
58315 __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
58316 __ret; \
58317 })
58318 #else
58319 #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58320 int64x2_t __s0 = __p0; \
58321 int32x4_t __s1 = __p1; \
58322 int32x2_t __s2 = __p2; \
58323 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58324 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58325 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
58326 int64x2_t __ret; \
58327 __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
58328 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58329 __ret; \
58330 })
58331 #endif
58332
58333 #ifdef __LITTLE_ENDIAN__
58334 #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58335 int32x4_t __s0 = __p0; \
58336 int16x8_t __s1 = __p1; \
58337 int16x4_t __s2 = __p2; \
58338 int32x4_t __ret; \
58339 __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
58340 __ret; \
58341 })
58342 #else
58343 #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58344 int32x4_t __s0 = __p0; \
58345 int16x8_t __s1 = __p1; \
58346 int16x4_t __s2 = __p2; \
58347 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58348 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
58349 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58350 int32x4_t __ret; \
58351 __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
58352 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58353 __ret; \
58354 })
58355 #endif
58356
58357 #ifdef __LITTLE_ENDIAN__
58358 #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58359 uint64x2_t __s0 = __p0; \
58360 uint32x4_t __s1 = __p1; \
58361 uint32x4_t __s2 = __p2; \
58362 uint64x2_t __ret; \
58363 __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
58364 __ret; \
58365 })
58366 #else
58367 #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58368 uint64x2_t __s0 = __p0; \
58369 uint32x4_t __s1 = __p1; \
58370 uint32x4_t __s2 = __p2; \
58371 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58372 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58373 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58374 uint64x2_t __ret; \
58375 __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
58376 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58377 __ret; \
58378 })
58379 #endif
58380
58381 #ifdef __LITTLE_ENDIAN__
58382 #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58383 uint32x4_t __s0 = __p0; \
58384 uint16x8_t __s1 = __p1; \
58385 uint16x8_t __s2 = __p2; \
58386 uint32x4_t __ret; \
58387 __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
58388 __ret; \
58389 })
58390 #else
58391 #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58392 uint32x4_t __s0 = __p0; \
58393 uint16x8_t __s1 = __p1; \
58394 uint16x8_t __s2 = __p2; \
58395 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58396 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
58397 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58398 uint32x4_t __ret; \
58399 __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
58400 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58401 __ret; \
58402 })
58403 #endif
58404
58405 #ifdef __LITTLE_ENDIAN__
58406 #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58407 int64x2_t __s0 = __p0; \
58408 int32x4_t __s1 = __p1; \
58409 int32x4_t __s2 = __p2; \
58410 int64x2_t __ret; \
58411 __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
58412 __ret; \
58413 })
58414 #else
58415 #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58416 int64x2_t __s0 = __p0; \
58417 int32x4_t __s1 = __p1; \
58418 int32x4_t __s2 = __p2; \
58419 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58420 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58421 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58422 int64x2_t __ret; \
58423 __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
58424 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58425 __ret; \
58426 })
58427 #endif
58428
58429 #ifdef __LITTLE_ENDIAN__
58430 #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58431 int32x4_t __s0 = __p0; \
58432 int16x8_t __s1 = __p1; \
58433 int16x8_t __s2 = __p2; \
58434 int32x4_t __ret; \
58435 __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
58436 __ret; \
58437 })
58438 #else
58439 #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58440 int32x4_t __s0 = __p0; \
58441 int16x8_t __s1 = __p1; \
58442 int16x8_t __s2 = __p2; \
58443 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58444 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
58445 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58446 int32x4_t __ret; \
58447 __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
58448 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58449 __ret; \
58450 })
58451 #endif
58452
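/*
 * Editorial note (not part of the generated header): the vmlsl_high_lane(q)_*
 * macros above take the high half of the second operand, widen it, multiply it
 * by a broadcast lane of the third operand, and subtract the product from the
 * wide accumulator.  Sketch with hypothetical names:
 *
 *   uint64x2_t acc  = vmovq_n_u64(1000);
 *   uint32x4_t a    = vmovq_n_u32(7);
 *   uint32x2_t lane = vmov_n_u32(3);
 *   uint64x2_t r    = vmlsl_high_lane_u32(acc, a, lane, 0);
 *   // each lane: 1000 - (uint64_t)7 * 3 = 979, using only the high half of a
 */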
58453 #ifdef __LITTLE_ENDIAN__
58454 #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58455 uint64x2_t __s0 = __p0; \
58456 uint32x2_t __s1 = __p1; \
58457 uint32x4_t __s2 = __p2; \
58458 uint64x2_t __ret; \
58459 __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
58460 __ret; \
58461 })
58462 #else
58463 #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
58464 uint64x2_t __s0 = __p0; \
58465 uint32x2_t __s1 = __p1; \
58466 uint32x4_t __s2 = __p2; \
58467 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58468 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
58469 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58470 uint64x2_t __ret; \
58471 __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
58472 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58473 __ret; \
58474 })
58475 #endif
58476
58477 #ifdef __LITTLE_ENDIAN__
58478 #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58479 uint32x4_t __s0 = __p0; \
58480 uint16x4_t __s1 = __p1; \
58481 uint16x8_t __s2 = __p2; \
58482 uint32x4_t __ret; \
58483 __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
58484 __ret; \
58485 })
58486 #else
58487 #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
58488 uint32x4_t __s0 = __p0; \
58489 uint16x4_t __s1 = __p1; \
58490 uint16x8_t __s2 = __p2; \
58491 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58492 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58493 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58494 uint32x4_t __ret; \
58495 __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
58496 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58497 __ret; \
58498 })
58499 #endif
58500
58501 #ifdef __LITTLE_ENDIAN__
58502 #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58503 int64x2_t __s0 = __p0; \
58504 int32x2_t __s1 = __p1; \
58505 int32x4_t __s2 = __p2; \
58506 int64x2_t __ret; \
58507 __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
58508 __ret; \
58509 })
58510 #else
58511 #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
58512 int64x2_t __s0 = __p0; \
58513 int32x2_t __s1 = __p1; \
58514 int32x4_t __s2 = __p2; \
58515 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58516 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
58517 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
58518 int64x2_t __ret; \
58519 __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
58520 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58521 __ret; \
58522 })
58523 #endif
58524
58525 #ifdef __LITTLE_ENDIAN__
58526 #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58527 int32x4_t __s0 = __p0; \
58528 int16x4_t __s1 = __p1; \
58529 int16x8_t __s2 = __p2; \
58530 int32x4_t __ret; \
58531 __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
58532 __ret; \
58533 })
58534 #else
58535 #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
58536 int32x4_t __s0 = __p0; \
58537 int16x4_t __s1 = __p1; \
58538 int16x8_t __s2 = __p2; \
58539 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
58540 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
58541 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
58542 int32x4_t __ret; \
58543 __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
58544 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
58545 __ret; \
58546 })
58547 #endif
58548
58549 #ifdef __LITTLE_ENDIAN__
58550 __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
58551 poly64x1_t __ret;
58552 __ret = (poly64x1_t) {__p0};
58553 return __ret;
58554 }
58555 #else
58556 __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
58557 poly64x1_t __ret;
58558 __ret = (poly64x1_t) {__p0};
58559 return __ret;
58560 }
58561 #endif
58562
58563 #ifdef __LITTLE_ENDIAN__
58564 __ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
58565 poly64x2_t __ret;
58566 __ret = (poly64x2_t) {__p0, __p0};
58567 return __ret;
58568 }
58569 #else
58570 __ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
58571 poly64x2_t __ret;
58572 __ret = (poly64x2_t) {__p0, __p0};
58573 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
58574 return __ret;
58575 }
58576 #endif
58577
58578 #ifdef __LITTLE_ENDIAN__
58579 __ai float64x2_t vmovq_n_f64(float64_t __p0) {
58580 float64x2_t __ret;
58581 __ret = (float64x2_t) {__p0, __p0};
58582 return __ret;
58583 }
58584 #else
58585 __ai float64x2_t vmovq_n_f64(float64_t __p0) {
58586 float64x2_t __ret;
58587 __ret = (float64x2_t) {__p0, __p0};
58588 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
58589 return __ret;
58590 }
58591 #endif
58592
58593 #ifdef __LITTLE_ENDIAN__
58594 __ai float64x1_t vmov_n_f64(float64_t __p0) {
58595 float64x1_t __ret;
58596 __ret = (float64x1_t) {__p0};
58597 return __ret;
58598 }
58599 #else
58600 __ai float64x1_t vmov_n_f64(float64_t __p0) {
58601 float64x1_t __ret;
58602 __ret = (float64x1_t) {__p0};
58603 return __ret;
58604 }
58605 #endif
58606
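/*
 * Editorial note (not part of the generated header): the vmov(q)_n_* functions
 * above broadcast a scalar into every lane, equivalent to the vdup(q)_n_*
 * forms.  Sketch with hypothetical names:
 *
 *   float64x2_t twos = vmovq_n_f64(2.0);             // {2.0, 2.0}
 *   float64x1_t one  = vmov_n_f64(1.0);              // {1.0}
 *   poly64x2_t  mask = vmovq_n_p64((poly64_t)0x80);  // both lanes set to 0x80
 */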
58607 #ifdef __LITTLE_ENDIAN__
58608 __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_128) {
58609 uint16x8_t __ret_128;
58610 uint8x8_t __a1_128 = vget_high_u8(__p0_128);
58611 __ret_128 = (uint16x8_t)(vshll_n_u8(__a1_128, 0));
58612 return __ret_128;
58613 }
58614 #else
58615 __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_129) {
58616 uint8x16_t __rev0_129; __rev0_129 = __builtin_shufflevector(__p0_129, __p0_129, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58617 uint16x8_t __ret_129;
58618 uint8x8_t __a1_129 = __noswap_vget_high_u8(__rev0_129);
58619 __ret_129 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_129, 0));
58620 __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0);
58621 return __ret_129;
58622 }
58623 __ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_130) {
58624 uint16x8_t __ret_130;
58625 uint8x8_t __a1_130 = __noswap_vget_high_u8(__p0_130);
58626 __ret_130 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_130, 0));
58627 return __ret_130;
58628 }
58629 #endif
58630
58631 #ifdef __LITTLE_ENDIAN__
58632 __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_131) {
58633 uint64x2_t __ret_131;
58634 uint32x2_t __a1_131 = vget_high_u32(__p0_131);
58635 __ret_131 = (uint64x2_t)(vshll_n_u32(__a1_131, 0));
58636 return __ret_131;
58637 }
58638 #else
58639 __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_132) {
58640 uint32x4_t __rev0_132; __rev0_132 = __builtin_shufflevector(__p0_132, __p0_132, 3, 2, 1, 0);
58641 uint64x2_t __ret_132;
58642 uint32x2_t __a1_132 = __noswap_vget_high_u32(__rev0_132);
58643 __ret_132 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_132, 0));
58644 __ret_132 = __builtin_shufflevector(__ret_132, __ret_132, 1, 0);
58645 return __ret_132;
58646 }
58647 __ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_133) {
58648 uint64x2_t __ret_133;
58649 uint32x2_t __a1_133 = __noswap_vget_high_u32(__p0_133);
58650 __ret_133 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_133, 0));
58651 return __ret_133;
58652 }
58653 #endif
58654
58655 #ifdef __LITTLE_ENDIAN__
58656 __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_134) {
58657 uint32x4_t __ret_134;
58658 uint16x4_t __a1_134 = vget_high_u16(__p0_134);
58659 __ret_134 = (uint32x4_t)(vshll_n_u16(__a1_134, 0));
58660 return __ret_134;
58661 }
58662 #else
58663 __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_135) {
58664 uint16x8_t __rev0_135; __rev0_135 = __builtin_shufflevector(__p0_135, __p0_135, 7, 6, 5, 4, 3, 2, 1, 0);
58665 uint32x4_t __ret_135;
58666 uint16x4_t __a1_135 = __noswap_vget_high_u16(__rev0_135);
58667 __ret_135 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_135, 0));
58668 __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0);
58669 return __ret_135;
58670 }
58671 __ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_136) {
58672 uint32x4_t __ret_136;
58673 uint16x4_t __a1_136 = __noswap_vget_high_u16(__p0_136);
58674 __ret_136 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_136, 0));
58675 return __ret_136;
58676 }
58677 #endif
58678
58679 #ifdef __LITTLE_ENDIAN__
58680 __ai int16x8_t vmovl_high_s8(int8x16_t __p0_137) {
58681 int16x8_t __ret_137;
58682 int8x8_t __a1_137 = vget_high_s8(__p0_137);
58683 __ret_137 = (int16x8_t)(vshll_n_s8(__a1_137, 0));
58684 return __ret_137;
58685 }
58686 #else
58687 __ai int16x8_t vmovl_high_s8(int8x16_t __p0_138) {
58688 int8x16_t __rev0_138; __rev0_138 = __builtin_shufflevector(__p0_138, __p0_138, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58689 int16x8_t __ret_138;
58690 int8x8_t __a1_138 = __noswap_vget_high_s8(__rev0_138);
58691 __ret_138 = (int16x8_t)(__noswap_vshll_n_s8(__a1_138, 0));
58692 __ret_138 = __builtin_shufflevector(__ret_138, __ret_138, 7, 6, 5, 4, 3, 2, 1, 0);
58693 return __ret_138;
58694 }
58695 __ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_139) {
58696 int16x8_t __ret_139;
58697 int8x8_t __a1_139 = __noswap_vget_high_s8(__p0_139);
58698 __ret_139 = (int16x8_t)(__noswap_vshll_n_s8(__a1_139, 0));
58699 return __ret_139;
58700 }
58701 #endif
58702
58703 #ifdef __LITTLE_ENDIAN__
58704 __ai int64x2_t vmovl_high_s32(int32x4_t __p0_140) {
58705 int64x2_t __ret_140;
58706 int32x2_t __a1_140 = vget_high_s32(__p0_140);
58707 __ret_140 = (int64x2_t)(vshll_n_s32(__a1_140, 0));
58708 return __ret_140;
58709 }
58710 #else
58711 __ai int64x2_t vmovl_high_s32(int32x4_t __p0_141) {
58712 int32x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__p0_141, __p0_141, 3, 2, 1, 0);
58713 int64x2_t __ret_141;
58714 int32x2_t __a1_141 = __noswap_vget_high_s32(__rev0_141);
58715 __ret_141 = (int64x2_t)(__noswap_vshll_n_s32(__a1_141, 0));
58716 __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 1, 0);
58717 return __ret_141;
58718 }
58719 __ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_142) {
58720 int64x2_t __ret_142;
58721 int32x2_t __a1_142 = __noswap_vget_high_s32(__p0_142);
58722 __ret_142 = (int64x2_t)(__noswap_vshll_n_s32(__a1_142, 0));
58723 return __ret_142;
58724 }
58725 #endif
58726
58727 #ifdef __LITTLE_ENDIAN__
58728 __ai int32x4_t vmovl_high_s16(int16x8_t __p0_143) {
58729 int32x4_t __ret_143;
58730 int16x4_t __a1_143 = vget_high_s16(__p0_143);
58731 __ret_143 = (int32x4_t)(vshll_n_s16(__a1_143, 0));
58732 return __ret_143;
58733 }
58734 #else
58735 __ai int32x4_t vmovl_high_s16(int16x8_t __p0_144) {
58736 int16x8_t __rev0_144; __rev0_144 = __builtin_shufflevector(__p0_144, __p0_144, 7, 6, 5, 4, 3, 2, 1, 0);
58737 int32x4_t __ret_144;
58738 int16x4_t __a1_144 = __noswap_vget_high_s16(__rev0_144);
58739 __ret_144 = (int32x4_t)(__noswap_vshll_n_s16(__a1_144, 0));
58740 __ret_144 = __builtin_shufflevector(__ret_144, __ret_144, 3, 2, 1, 0);
58741 return __ret_144;
58742 }
58743 __ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_145) {
58744 int32x4_t __ret_145;
58745 int16x4_t __a1_145 = __noswap_vget_high_s16(__p0_145);
58746 __ret_145 = (int32x4_t)(__noswap_vshll_n_s16(__a1_145, 0));
58747 return __ret_145;
58748 }
58749 #endif
58750
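/*
 * Editorial note (not part of the generated header): the vmovl_high_* functions
 * above widen the upper half of a 128-bit vector (zero-extending unsigned
 * types, sign-extending signed types); the generated code does this with a
 * shift-left-long by 0.  Sketch with hypothetical names:
 *
 *   uint8x16_t bytes = vmovq_n_u8(200);
 *   uint16x8_t upper = vmovl_high_u8(bytes);  // lanes 8..15 widened to 16 bits
 */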
58751 #ifdef __LITTLE_ENDIAN__
58752 __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
58753 uint16x8_t __ret;
58754 __ret = vcombine_u16(__p0, vmovn_u32(__p1));
58755 return __ret;
58756 }
58757 #else
58758 __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
58759 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
58760 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
58761 uint16x8_t __ret;
58762 __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
58763 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58764 return __ret;
58765 }
58766 #endif
58767
58768 #ifdef __LITTLE_ENDIAN__
58769 __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
58770 uint32x4_t __ret;
58771 __ret = vcombine_u32(__p0, vmovn_u64(__p1));
58772 return __ret;
58773 }
58774 #else
58775 __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
58776 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
58777 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
58778 uint32x4_t __ret;
58779 __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
58780 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
58781 return __ret;
58782 }
58783 #endif
58784
58785 #ifdef __LITTLE_ENDIAN__
58786 __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
58787 uint8x16_t __ret;
58788 __ret = vcombine_u8(__p0, vmovn_u16(__p1));
58789 return __ret;
58790 }
58791 #else
58792 __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
58793 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58794 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58795 uint8x16_t __ret;
58796 __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
58797 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58798 return __ret;
58799 }
58800 #endif
58801
58802 #ifdef __LITTLE_ENDIAN__
58803 __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
58804 int16x8_t __ret;
58805 __ret = vcombine_s16(__p0, vmovn_s32(__p1));
58806 return __ret;
58807 }
58808 #else
58809 __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
58810 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
58811 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
58812 int16x8_t __ret;
58813 __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
58814 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
58815 return __ret;
58816 }
58817 #endif
58818
58819 #ifdef __LITTLE_ENDIAN__
58820 __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
58821 int32x4_t __ret;
58822 __ret = vcombine_s32(__p0, vmovn_s64(__p1));
58823 return __ret;
58824 }
58825 #else
58826 __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
58827 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
58828 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
58829 int32x4_t __ret;
58830 __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
58831 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
58832 return __ret;
58833 }
58834 #endif
58835
58836 #ifdef __LITTLE_ENDIAN__
58837 __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
58838 int8x16_t __ret;
58839 __ret = vcombine_s8(__p0, vmovn_s16(__p1));
58840 return __ret;
58841 }
58842 #else
58843 __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
58844 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
58845 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
58846 int8x16_t __ret;
58847 __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
58848 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
58849 return __ret;
58850 }
58851 #endif
58852
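/*
 * Editorial note (not part of the generated header): the vmovn_high_* functions
 * above narrow a 128-bit vector and pack the result into the upper half of the
 * output, keeping the first argument as the lower half (vcombine of the old
 * low part with vmovn of the wide part).  Sketch with hypothetical names:
 *
 *   uint16x4_t low  = vmov_n_u16(1);
 *   uint32x4_t wide = vmovq_n_u32(2);
 *   uint16x8_t r    = vmovn_high_u32(low, wide);  // {1,1,1,1, 2,2,2,2}
 */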
58853 #ifdef __LITTLE_ENDIAN__
58854 __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
58855 float64x2_t __ret;
58856 __ret = __p0 * __p1;
58857 return __ret;
58858 }
58859 #else
58860 __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
58861 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
58862 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
58863 float64x2_t __ret;
58864 __ret = __rev0 * __rev1;
58865 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
58866 return __ret;
58867 }
58868 #endif
58869
58870 #ifdef __LITTLE_ENDIAN__
58871 __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
58872 float64x1_t __ret;
58873 __ret = __p0 * __p1;
58874 return __ret;
58875 }
58876 #else
58877 __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
58878 float64x1_t __ret;
58879 __ret = __p0 * __p1;
58880 return __ret;
58881 }
58882 #endif
58883
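/*
 * Editorial note (not part of the generated header): vmulq_f64/vmul_f64 above
 * are plain lane-wise multiplies on double-precision vectors.  Sketch with
 * hypothetical names:
 *
 *   float64x2_t a = vmovq_n_f64(3.0);
 *   float64x2_t b = vmovq_n_f64(0.5);
 *   float64x2_t r = vmulq_f64(a, b);         // {1.5, 1.5}
 *   float64_t   x = vgetq_lane_f64(r, 0);    // 1.5
 */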
58884 #ifdef __LITTLE_ENDIAN__
58885 #define vmuld_lane_f64(__p0_146, __p1_146, __p2_146) __extension__ ({ \
58886 float64_t __s0_146 = __p0_146; \
58887 float64x1_t __s1_146 = __p1_146; \
58888 float64_t __ret_146; \
58889 __ret_146 = __s0_146 * vget_lane_f64(__s1_146, __p2_146); \
58890 __ret_146; \
58891 })
58892 #else
58893 #define vmuld_lane_f64(__p0_147, __p1_147, __p2_147) __extension__ ({ \
58894 float64_t __s0_147 = __p0_147; \
58895 float64x1_t __s1_147 = __p1_147; \
58896 float64_t __ret_147; \
58897 __ret_147 = __s0_147 * __noswap_vget_lane_f64(__s1_147, __p2_147); \
58898 __ret_147; \
58899 })
58900 #endif
58901
58902 #ifdef __LITTLE_ENDIAN__
58903 #define vmuls_lane_f32(__p0_148, __p1_148, __p2_148) __extension__ ({ \
58904 float32_t __s0_148 = __p0_148; \
58905 float32x2_t __s1_148 = __p1_148; \
58906 float32_t __ret_148; \
58907 __ret_148 = __s0_148 * vget_lane_f32(__s1_148, __p2_148); \
58908 __ret_148; \
58909 })
58910 #else
58911 #define vmuls_lane_f32(__p0_149, __p1_149, __p2_149) __extension__ ({ \
58912 float32_t __s0_149 = __p0_149; \
58913 float32x2_t __s1_149 = __p1_149; \
58914 float32x2_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 1, 0); \
58915 float32_t __ret_149; \
58916 __ret_149 = __s0_149 * __noswap_vget_lane_f32(__rev1_149, __p2_149); \
58917 __ret_149; \
58918 })
58919 #endif
58920
58921 #ifdef __LITTLE_ENDIAN__
58922 #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
58923 float64x1_t __s0 = __p0; \
58924 float64x1_t __s1 = __p1; \
58925 float64x1_t __ret; \
58926 __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
58927 __ret; \
58928 })
58929 #else
58930 #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
58931 float64x1_t __s0 = __p0; \
58932 float64x1_t __s1 = __p1; \
58933 float64x1_t __ret; \
58934 __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
58935 __ret; \
58936 })
58937 #endif
58938
58939 #ifdef __LITTLE_ENDIAN__
58940 #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
58941 float64x2_t __s0 = __p0; \
58942 float64x1_t __s1 = __p1; \
58943 float64x2_t __ret; \
58944 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
58945 __ret; \
58946 })
58947 #else
58948 #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
58949 float64x2_t __s0 = __p0; \
58950 float64x1_t __s1 = __p1; \
58951 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
58952 float64x2_t __ret; \
58953 __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
58954 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
58955 __ret; \
58956 })
58957 #endif
58958
58959 #ifdef __LITTLE_ENDIAN__
58960 #define vmuld_laneq_f64(__p0_150, __p1_150, __p2_150) __extension__ ({ \
58961 float64_t __s0_150 = __p0_150; \
58962 float64x2_t __s1_150 = __p1_150; \
58963 float64_t __ret_150; \
58964 __ret_150 = __s0_150 * vgetq_lane_f64(__s1_150, __p2_150); \
58965 __ret_150; \
58966 })
58967 #else
58968 #define vmuld_laneq_f64(__p0_151, __p1_151, __p2_151) __extension__ ({ \
58969 float64_t __s0_151 = __p0_151; \
58970 float64x2_t __s1_151 = __p1_151; \
58971 float64x2_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 1, 0); \
58972 float64_t __ret_151; \
58973 __ret_151 = __s0_151 * __noswap_vgetq_lane_f64(__rev1_151, __p2_151); \
58974 __ret_151; \
58975 })
58976 #endif
58977
58978 #ifdef __LITTLE_ENDIAN__
58979 #define vmuls_laneq_f32(__p0_152, __p1_152, __p2_152) __extension__ ({ \
58980 float32_t __s0_152 = __p0_152; \
58981 float32x4_t __s1_152 = __p1_152; \
58982 float32_t __ret_152; \
58983 __ret_152 = __s0_152 * vgetq_lane_f32(__s1_152, __p2_152); \
58984 __ret_152; \
58985 })
58986 #else
58987 #define vmuls_laneq_f32(__p0_153, __p1_153, __p2_153) __extension__ ({ \
58988 float32_t __s0_153 = __p0_153; \
58989 float32x4_t __s1_153 = __p1_153; \
58990 float32x4_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 3, 2, 1, 0); \
58991 float32_t __ret_153; \
58992 __ret_153 = __s0_153 * __noswap_vgetq_lane_f32(__rev1_153, __p2_153); \
58993 __ret_153; \
58994 })
58995 #endif
58996
58997 #ifdef __LITTLE_ENDIAN__
58998 #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
58999 float64x1_t __s0 = __p0; \
59000 float64x2_t __s1 = __p1; \
59001 float64x1_t __ret; \
59002 __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
59003 __ret; \
59004 })
59005 #else
59006 #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
59007 float64x1_t __s0 = __p0; \
59008 float64x2_t __s1 = __p1; \
59009 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
59010 float64x1_t __ret; \
59011 __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
59012 __ret; \
59013 })
59014 #endif
59015
59016 #ifdef __LITTLE_ENDIAN__
59017 #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59018 uint32x4_t __s0 = __p0; \
59019 uint32x4_t __s1 = __p1; \
59020 uint32x4_t __ret; \
59021 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
59022 __ret; \
59023 })
59024 #else
59025 #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59026 uint32x4_t __s0 = __p0; \
59027 uint32x4_t __s1 = __p1; \
59028 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59029 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59030 uint32x4_t __ret; \
59031 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
59032 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59033 __ret; \
59034 })
59035 #endif
59036
59037 #ifdef __LITTLE_ENDIAN__
59038 #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59039 uint16x8_t __s0 = __p0; \
59040 uint16x8_t __s1 = __p1; \
59041 uint16x8_t __ret; \
59042 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
59043 __ret; \
59044 })
59045 #else
59046 #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59047 uint16x8_t __s0 = __p0; \
59048 uint16x8_t __s1 = __p1; \
59049 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
59050 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59051 uint16x8_t __ret; \
59052 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
59053 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
59054 __ret; \
59055 })
59056 #endif
59057
59058 #ifdef __LITTLE_ENDIAN__
59059 #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
59060 float64x2_t __s0 = __p0; \
59061 float64x2_t __s1 = __p1; \
59062 float64x2_t __ret; \
59063 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
59064 __ret; \
59065 })
59066 #else
59067 #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
59068 float64x2_t __s0 = __p0; \
59069 float64x2_t __s1 = __p1; \
59070 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59071 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
59072 float64x2_t __ret; \
59073 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
59074 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59075 __ret; \
59076 })
59077 #endif
59078
59079 #ifdef __LITTLE_ENDIAN__
59080 #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
59081 float32x4_t __s0 = __p0; \
59082 float32x4_t __s1 = __p1; \
59083 float32x4_t __ret; \
59084 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
59085 __ret; \
59086 })
59087 #else
59088 #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
59089 float32x4_t __s0 = __p0; \
59090 float32x4_t __s1 = __p1; \
59091 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59092 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59093 float32x4_t __ret; \
59094 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
59095 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59096 __ret; \
59097 })
59098 #endif
59099
59100 #ifdef __LITTLE_ENDIAN__
59101 #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59102 int32x4_t __s0 = __p0; \
59103 int32x4_t __s1 = __p1; \
59104 int32x4_t __ret; \
59105 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
59106 __ret; \
59107 })
59108 #else
59109 #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59110 int32x4_t __s0 = __p0; \
59111 int32x4_t __s1 = __p1; \
59112 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59113 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59114 int32x4_t __ret; \
59115 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
59116 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59117 __ret; \
59118 })
59119 #endif
59120
59121 #ifdef __LITTLE_ENDIAN__
59122 #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59123 int16x8_t __s0 = __p0; \
59124 int16x8_t __s1 = __p1; \
59125 int16x8_t __ret; \
59126 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
59127 __ret; \
59128 })
59129 #else
59130 #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59131 int16x8_t __s0 = __p0; \
59132 int16x8_t __s1 = __p1; \
59133 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
59134 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59135 int16x8_t __ret; \
59136 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
59137 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
59138 __ret; \
59139 })
59140 #endif
59141
59142 #ifdef __LITTLE_ENDIAN__
59143 #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59144 uint32x2_t __s0 = __p0; \
59145 uint32x4_t __s1 = __p1; \
59146 uint32x2_t __ret; \
59147 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
59148 __ret; \
59149 })
59150 #else
59151 #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59152 uint32x2_t __s0 = __p0; \
59153 uint32x4_t __s1 = __p1; \
59154 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59155 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59156 uint32x2_t __ret; \
59157 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
59158 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59159 __ret; \
59160 })
59161 #endif
59162
59163 #ifdef __LITTLE_ENDIAN__
59164 #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59165 uint16x4_t __s0 = __p0; \
59166 uint16x8_t __s1 = __p1; \
59167 uint16x4_t __ret; \
59168 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
59169 __ret; \
59170 })
59171 #else
59172 #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59173 uint16x4_t __s0 = __p0; \
59174 uint16x8_t __s1 = __p1; \
59175 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59176 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59177 uint16x4_t __ret; \
59178 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
59179 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59180 __ret; \
59181 })
59182 #endif
59183
59184 #ifdef __LITTLE_ENDIAN__
59185 #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
59186 float32x2_t __s0 = __p0; \
59187 float32x4_t __s1 = __p1; \
59188 float32x2_t __ret; \
59189 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
59190 __ret; \
59191 })
59192 #else
59193 #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
59194 float32x2_t __s0 = __p0; \
59195 float32x4_t __s1 = __p1; \
59196 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59197 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59198 float32x2_t __ret; \
59199 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
59200 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59201 __ret; \
59202 })
59203 #endif
59204
59205 #ifdef __LITTLE_ENDIAN__
59206 #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59207 int32x2_t __s0 = __p0; \
59208 int32x4_t __s1 = __p1; \
59209 int32x2_t __ret; \
59210 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
59211 __ret; \
59212 })
59213 #else
59214 #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59215 int32x2_t __s0 = __p0; \
59216 int32x4_t __s1 = __p1; \
59217 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59218 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59219 int32x2_t __ret; \
59220 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
59221 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59222 __ret; \
59223 })
59224 #endif
59225
59226 #ifdef __LITTLE_ENDIAN__
59227 #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59228 int16x4_t __s0 = __p0; \
59229 int16x8_t __s1 = __p1; \
59230 int16x4_t __ret; \
59231 __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
59232 __ret; \
59233 })
59234 #else
59235 #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59236 int16x4_t __s0 = __p0; \
59237 int16x8_t __s1 = __p1; \
59238 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59239 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59240 int16x4_t __ret; \
59241 __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
59242 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59243 __ret; \
59244 })
59245 #endif
59246
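/*
 * Editorial note (not part of the generated header): the vmul(q)_lane(q)_*
 * forms above multiply a whole vector by a single broadcast lane of another
 * vector, the usual building block for small matrix and dot-product kernels.
 * Sketch with hypothetical names:
 *
 *   float32x4_t row    = vmovq_n_f32(2.0f);
 *   float32x4_t coeffs = {1.0f, 10.0f, 100.0f, 1000.0f};
 *   float32x4_t r      = vmulq_laneq_f32(row, coeffs, 1);  // each lane: 2.0f * 10.0f
 */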
59247 #ifdef __LITTLE_ENDIAN__
59248 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
59249 float64x1_t __ret;
59250 __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
59251 return __ret;
59252 }
59253 #else
59254 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
59255 float64x1_t __ret;
59256 __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
59257 return __ret;
59258 }
59259 #endif
59260
59261 #ifdef __LITTLE_ENDIAN__
59262 __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
59263 float64x2_t __ret;
59264 __ret = __p0 * (float64x2_t) {__p1, __p1};
59265 return __ret;
59266 }
59267 #else
59268 __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
59269 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59270 float64x2_t __ret;
59271 __ret = __rev0 * (float64x2_t) {__p1, __p1};
59272 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59273 return __ret;
59274 }
59275 #endif
59276
59277 #ifdef __LITTLE_ENDIAN__
59278 __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
59279 poly128_t __ret;
59280 __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
59281 return __ret;
59282 }
59283 #else
59284 __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
59285 poly128_t __ret;
59286 __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
59287 return __ret;
59288 }
59289 __ai poly128_t __noswap_vmull_p64(poly64_t __p0, poly64_t __p1) {
59290 poly128_t __ret;
59291 __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
59292 return __ret;
59293 }
59294 #endif
59295
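/*
 * Editorial note (not part of the generated header): vmull_p64 above is the
 * carry-less (polynomial) 64x64 -> 128 bit multiply; it is typically available
 * only on targets that also provide the cryptographic extension, and is the
 * usual primitive for GHASH/CRC style code.  Sketch with hypothetical names:
 *
 *   poly64_t  a = (poly64_t)0x87;
 *   poly64_t  b = (poly64_t)0x1b;
 *   poly128_t r = vmull_p64(a, b);   // XOR-accumulating multiply, no carries
 */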
59296 #ifdef __LITTLE_ENDIAN__
59297 __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
59298 poly16x8_t __ret;
59299 __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
59300 return __ret;
59301 }
59302 #else
59303 __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
59304 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59305 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59306 poly16x8_t __ret;
59307 __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
59308 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
59309 return __ret;
59310 }
59311 #endif
59312
59313 #ifdef __LITTLE_ENDIAN__
59314 __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
59315 uint16x8_t __ret;
59316 __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
59317 return __ret;
59318 }
59319 #else
59320 __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
59321 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59322 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59323 uint16x8_t __ret;
59324 __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
59325 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
59326 return __ret;
59327 }
59328 #endif
59329
59330 #ifdef __LITTLE_ENDIAN__
59331 __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
59332 uint64x2_t __ret;
59333 __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
59334 return __ret;
59335 }
59336 #else
59337 __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
59338 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59339 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
59340 uint64x2_t __ret;
59341 __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
59342 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59343 return __ret;
59344 }
59345 #endif
59346
59347 #ifdef __LITTLE_ENDIAN__
59348 __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
59349 uint32x4_t __ret;
59350 __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
59351 return __ret;
59352 }
59353 #else
59354 __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
59355 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
59356 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
59357 uint32x4_t __ret;
59358 __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
59359 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59360 return __ret;
59361 }
59362 #endif
59363
59364 #ifdef __LITTLE_ENDIAN__
59365 __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
59366 int16x8_t __ret;
59367 __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
59368 return __ret;
59369 }
59370 #else
59371 __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
59372 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59373 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
59374 int16x8_t __ret;
59375 __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
59376 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
59377 return __ret;
59378 }
59379 #endif
59380
59381 #ifdef __LITTLE_ENDIAN__
59382 __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
59383 int64x2_t __ret;
59384 __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
59385 return __ret;
59386 }
59387 #else
59388 __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
59389 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59390 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
59391 int64x2_t __ret;
59392 __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
59393 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59394 return __ret;
59395 }
59396 #endif
59397
59398 #ifdef __LITTLE_ENDIAN__
59399 __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
59400 int32x4_t __ret;
59401 __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
59402 return __ret;
59403 }
59404 #else
59405 __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
59406 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
59407 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
59408 int32x4_t __ret;
59409 __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
59410 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59411 return __ret;
59412 }
59413 #endif
59414
59415 #ifdef __LITTLE_ENDIAN__
59416 __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
59417 poly128_t __ret;
59418 __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
59419 return __ret;
59420 }
59421 #else
59422 __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
59423 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59424 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
59425 poly128_t __ret;
59426 __ret = __noswap_vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
59427 return __ret;
59428 }
59429 #endif
59430
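/*
 * Editorial note (not part of the generated header): the vmull_high_* functions
 * above are widening multiplies on the upper halves of two 128-bit inputs, so
 * a full vector can be widened with one vmull_* call for the low half and one
 * vmull_high_* call for the high half.  Sketch with hypothetical names:
 *
 *   uint16x8_t a  = vmovq_n_u16(300);
 *   uint16x8_t b  = vmovq_n_u16(400);
 *   uint32x4_t lo = vmull_u16(vget_low_u16(a), vget_low_u16(b));
 *   uint32x4_t hi = vmull_high_u16(a, b);   // each lane: 300 * 400 = 120000
 */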
59431 #ifdef __LITTLE_ENDIAN__
59432 #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
59433 uint32x4_t __s0 = __p0; \
59434 uint32x2_t __s1 = __p1; \
59435 uint64x2_t __ret; \
59436 __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59437 __ret; \
59438 })
59439 #else
59440 #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
59441 uint32x4_t __s0 = __p0; \
59442 uint32x2_t __s1 = __p1; \
59443 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59444 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
59445 uint64x2_t __ret; \
59446 __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
59447 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59448 __ret; \
59449 })
59450 #endif
59451
59452 #ifdef __LITTLE_ENDIAN__
59453 #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
59454 uint16x8_t __s0 = __p0; \
59455 uint16x4_t __s1 = __p1; \
59456 uint32x4_t __ret; \
59457 __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
59458 __ret; \
59459 })
59460 #else
59461 #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
59462 uint16x8_t __s0 = __p0; \
59463 uint16x4_t __s1 = __p1; \
59464 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
59465 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59466 uint32x4_t __ret; \
59467 __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
59468 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59469 __ret; \
59470 })
59471 #endif
59472
59473 #ifdef __LITTLE_ENDIAN__
59474 #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
59475 int32x4_t __s0 = __p0; \
59476 int32x2_t __s1 = __p1; \
59477 int64x2_t __ret; \
59478 __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59479 __ret; \
59480 })
59481 #else
59482 #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
59483 int32x4_t __s0 = __p0; \
59484 int32x2_t __s1 = __p1; \
59485 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59486 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
59487 int64x2_t __ret; \
59488 __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
59489 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59490 __ret; \
59491 })
59492 #endif
59493
59494 #ifdef __LITTLE_ENDIAN__
59495 #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
59496 int16x8_t __s0 = __p0; \
59497 int16x4_t __s1 = __p1; \
59498 int32x4_t __ret; \
59499 __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
59500 __ret; \
59501 })
59502 #else
59503 #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
59504 int16x8_t __s0 = __p0; \
59505 int16x4_t __s1 = __p1; \
59506 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
59507 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59508 int32x4_t __ret; \
59509 __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
59510 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59511 __ret; \
59512 })
59513 #endif
59514
59515 #ifdef __LITTLE_ENDIAN__
59516 #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59517 uint32x4_t __s0 = __p0; \
59518 uint32x4_t __s1 = __p1; \
59519 uint64x2_t __ret; \
59520 __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59521 __ret; \
59522 })
59523 #else
59524 #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59525 uint32x4_t __s0 = __p0; \
59526 uint32x4_t __s1 = __p1; \
59527 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59528 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59529 uint64x2_t __ret; \
59530 __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
59531 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59532 __ret; \
59533 })
59534 #endif
59535
59536 #ifdef __LITTLE_ENDIAN__
59537 #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59538 uint16x8_t __s0 = __p0; \
59539 uint16x8_t __s1 = __p1; \
59540 uint32x4_t __ret; \
59541 __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
59542 __ret; \
59543 })
59544 #else
59545 #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59546 uint16x8_t __s0 = __p0; \
59547 uint16x8_t __s1 = __p1; \
59548 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
59549 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59550 uint32x4_t __ret; \
59551 __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
59552 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59553 __ret; \
59554 })
59555 #endif
59556
59557 #ifdef __LITTLE_ENDIAN__
59558 #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59559 int32x4_t __s0 = __p0; \
59560 int32x4_t __s1 = __p1; \
59561 int64x2_t __ret; \
59562 __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59563 __ret; \
59564 })
59565 #else
59566 #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59567 int32x4_t __s0 = __p0; \
59568 int32x4_t __s1 = __p1; \
59569 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59570 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59571 int64x2_t __ret; \
59572 __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
59573 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59574 __ret; \
59575 })
59576 #endif
59577
59578 #ifdef __LITTLE_ENDIAN__
59579 #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59580 int16x8_t __s0 = __p0; \
59581 int16x8_t __s1 = __p1; \
59582 int32x4_t __ret; \
59583 __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
59584 __ret; \
59585 })
59586 #else
59587 #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59588 int16x8_t __s0 = __p0; \
59589 int16x8_t __s1 = __p1; \
59590 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
59591 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59592 int32x4_t __ret; \
59593 __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
59594 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59595 __ret; \
59596 })
59597 #endif
59598
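/* vmull_high_n_*: widening multiply of the high half of __p0 by the scalar
 * __p1, which vmull_n_* broadcasts to every lane. Illustrative use only, not
 * part of the generated header:
 *   int32x4_t r = vmull_high_n_s16(v, 3);  // r[i] = (int32_t)vgetq_lane_s16(v, i + 4) * 3
 */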
59599 #ifdef __LITTLE_ENDIAN__
59600 __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
59601 uint64x2_t __ret;
59602 __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
59603 return __ret;
59604 }
59605 #else
59606 __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
59607 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59608 uint64x2_t __ret;
59609 __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
59610 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59611 return __ret;
59612 }
59613 #endif
59614
59615 #ifdef __LITTLE_ENDIAN__
59616 __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
59617 uint32x4_t __ret;
59618 __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
59619 return __ret;
59620 }
59621 #else
59622 __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
59623 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
59624 uint32x4_t __ret;
59625 __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
59626 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59627 return __ret;
59628 }
59629 #endif
59630
59631 #ifdef __LITTLE_ENDIAN__
59632 __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
59633 int64x2_t __ret;
59634 __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
59635 return __ret;
59636 }
59637 #else
59638 __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
59639 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59640 int64x2_t __ret;
59641 __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
59642 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59643 return __ret;
59644 }
59645 #endif
59646
59647 #ifdef __LITTLE_ENDIAN__
59648 __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
59649 int32x4_t __ret;
59650 __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
59651 return __ret;
59652 }
59653 #else
59654 __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
59655 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
59656 int32x4_t __ret;
59657 __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
59658 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59659 return __ret;
59660 }
59661 #endif
59662
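/* vmull_laneq_*: widening multiply of a 64-bit vector by lane __p2 selected
 * from a 128-bit vector; the "q" in laneq refers to the 128-bit lane source,
 * not to the width of the result. */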
59663 #ifdef __LITTLE_ENDIAN__
59664 #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59665 uint32x2_t __s0 = __p0; \
59666 uint32x4_t __s1 = __p1; \
59667 uint64x2_t __ret; \
59668 __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59669 __ret; \
59670 })
59671 #else
59672 #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
59673 uint32x2_t __s0 = __p0; \
59674 uint32x4_t __s1 = __p1; \
59675 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59676 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59677 uint64x2_t __ret; \
59678 __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
59679 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59680 __ret; \
59681 })
59682 #endif
59683
59684 #ifdef __LITTLE_ENDIAN__
59685 #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59686 uint16x4_t __s0 = __p0; \
59687 uint16x8_t __s1 = __p1; \
59688 uint32x4_t __ret; \
59689 __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
59690 __ret; \
59691 })
59692 #else
59693 #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
59694 uint16x4_t __s0 = __p0; \
59695 uint16x8_t __s1 = __p1; \
59696 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59697 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59698 uint32x4_t __ret; \
59699 __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
59700 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59701 __ret; \
59702 })
59703 #endif
59704
59705 #ifdef __LITTLE_ENDIAN__
59706 #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59707 int32x2_t __s0 = __p0; \
59708 int32x4_t __s1 = __p1; \
59709 int64x2_t __ret; \
59710 __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59711 __ret; \
59712 })
59713 #else
59714 #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
59715 int32x2_t __s0 = __p0; \
59716 int32x4_t __s1 = __p1; \
59717 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59718 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
59719 int64x2_t __ret; \
59720 __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
59721 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59722 __ret; \
59723 })
59724 #endif
59725
59726 #ifdef __LITTLE_ENDIAN__
59727 #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59728 int16x4_t __s0 = __p0; \
59729 int16x8_t __s1 = __p1; \
59730 int32x4_t __ret; \
59731 __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
59732 __ret; \
59733 })
59734 #else
59735 #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
59736 int16x4_t __s0 = __p0; \
59737 int16x8_t __s1 = __p1; \
59738 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59739 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
59740 int32x4_t __ret; \
59741 __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
59742 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59743 __ret; \
59744 })
59745 #endif
59746
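/* vmulx* (FMULX): floating-point multiply-extended. Behaves like an ordinary
 * multiply except that (+/-0) * (+/-infinity) returns +/-2.0 (sign taken as
 * the exclusive-or of the operand signs) instead of the default NaN, which
 * keeps reciprocal and reciprocal-square-root step sequences from propagating
 * NaNs. Hypothetical usage sketch, not part of this header:
 *   float32x2_t x = vmulx_f32(a, b);  // per-lane FMULX of a and b
 */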
59747 #ifdef __LITTLE_ENDIAN__
59748 __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
59749 float64x2_t __ret;
59750 __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
59751 return __ret;
59752 }
59753 #else
59754 __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
59755 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59756 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
59757 float64x2_t __ret;
59758 __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
59759 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59760 return __ret;
59761 }
59762 __ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
59763 float64x2_t __ret;
59764 __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
59765 return __ret;
59766 }
59767 #endif
59768
59769 #ifdef __LITTLE_ENDIAN__
59770 __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
59771 float32x4_t __ret;
59772 __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
59773 return __ret;
59774 }
59775 #else
59776 __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
59777 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
59778 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
59779 float32x4_t __ret;
59780 __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
59781 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
59782 return __ret;
59783 }
59784 __ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
59785 float32x4_t __ret;
59786 __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
59787 return __ret;
59788 }
59789 #endif
59790
59791 #ifdef __LITTLE_ENDIAN__
59792 __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
59793 float64x1_t __ret;
59794 __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
59795 return __ret;
59796 }
59797 #else
59798 __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
59799 float64x1_t __ret;
59800 __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
59801 return __ret;
59802 }
59803 #endif
59804
59805 #ifdef __LITTLE_ENDIAN__
59806 __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
59807 float32x2_t __ret;
59808 __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
59809 return __ret;
59810 }
59811 #else
59812 __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
59813 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
59814 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
59815 float32x2_t __ret;
59816 __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
59817 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
59818 return __ret;
59819 }
59820 __ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
59821 float32x2_t __ret;
59822 __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
59823 return __ret;
59824 }
59825 #endif
59826
59827 #ifdef __LITTLE_ENDIAN__
59828 __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
59829 float64_t __ret;
59830 __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
59831 return __ret;
59832 }
59833 #else
59834 __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
59835 float64_t __ret;
59836 __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
59837 return __ret;
59838 }
59839 __ai float64_t __noswap_vmulxd_f64(float64_t __p0, float64_t __p1) {
59840 float64_t __ret;
59841 __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
59842 return __ret;
59843 }
59844 #endif
59845
59846 #ifdef __LITTLE_ENDIAN__
59847 __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
59848 float32_t __ret;
59849 __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
59850 return __ret;
59851 }
59852 #else
59853 __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
59854 float32_t __ret;
59855 __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
59856 return __ret;
59857 }
59858 __ai float32_t __noswap_vmulxs_f32(float32_t __p0, float32_t __p1) {
59859 float32_t __ret;
59860 __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
59861 return __ret;
59862 }
59863 #endif
59864
59865 #ifdef __LITTLE_ENDIAN__
59866 #define vmulxd_lane_f64(__p0_154, __p1_154, __p2_154) __extension__ ({ \
59867 float64_t __s0_154 = __p0_154; \
59868 float64x1_t __s1_154 = __p1_154; \
59869 float64_t __ret_154; \
59870 __ret_154 = vmulxd_f64(__s0_154, vget_lane_f64(__s1_154, __p2_154)); \
59871 __ret_154; \
59872 })
59873 #else
59874 #define vmulxd_lane_f64(__p0_155, __p1_155, __p2_155) __extension__ ({ \
59875 float64_t __s0_155 = __p0_155; \
59876 float64x1_t __s1_155 = __p1_155; \
59877 float64_t __ret_155; \
59878 __ret_155 = __noswap_vmulxd_f64(__s0_155, __noswap_vget_lane_f64(__s1_155, __p2_155)); \
59879 __ret_155; \
59880 })
59881 #endif
59882
59883 #ifdef __LITTLE_ENDIAN__
59884 #define vmulxs_lane_f32(__p0_156, __p1_156, __p2_156) __extension__ ({ \
59885 float32_t __s0_156 = __p0_156; \
59886 float32x2_t __s1_156 = __p1_156; \
59887 float32_t __ret_156; \
59888 __ret_156 = vmulxs_f32(__s0_156, vget_lane_f32(__s1_156, __p2_156)); \
59889 __ret_156; \
59890 })
59891 #else
59892 #define vmulxs_lane_f32(__p0_157, __p1_157, __p2_157) __extension__ ({ \
59893 float32_t __s0_157 = __p0_157; \
59894 float32x2_t __s1_157 = __p1_157; \
59895 float32x2_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 1, 0); \
59896 float32_t __ret_157; \
59897 __ret_157 = __noswap_vmulxs_f32(__s0_157, __noswap_vget_lane_f32(__rev1_157, __p2_157)); \
59898 __ret_157; \
59899 })
59900 #endif
59901
59902 #ifdef __LITTLE_ENDIAN__
59903 #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
59904 float64x2_t __s0 = __p0; \
59905 float64x1_t __s1 = __p1; \
59906 float64x2_t __ret; \
59907 __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59908 __ret; \
59909 })
59910 #else
59911 #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
59912 float64x2_t __s0 = __p0; \
59913 float64x1_t __s1 = __p1; \
59914 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59915 float64x2_t __ret; \
59916 __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59917 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59918 __ret; \
59919 })
59920 #endif
59921
59922 #ifdef __LITTLE_ENDIAN__
59923 #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
59924 float32x4_t __s0 = __p0; \
59925 float32x2_t __s1 = __p1; \
59926 float32x4_t __ret; \
59927 __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
59928 __ret; \
59929 })
59930 #else
59931 #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
59932 float32x4_t __s0 = __p0; \
59933 float32x2_t __s1 = __p1; \
59934 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
59935 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
59936 float32x4_t __ret; \
59937 __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
59938 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
59939 __ret; \
59940 })
59941 #endif
59942
59943 #ifdef __LITTLE_ENDIAN__
59944 #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
59945 float32x2_t __s0 = __p0; \
59946 float32x2_t __s1 = __p1; \
59947 float32x2_t __ret; \
59948 __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
59949 __ret; \
59950 })
59951 #else
59952 #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
59953 float32x2_t __s0 = __p0; \
59954 float32x2_t __s1 = __p1; \
59955 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
59956 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
59957 float32x2_t __ret; \
59958 __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
59959 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
59960 __ret; \
59961 })
59962 #endif
59963
59964 #ifdef __LITTLE_ENDIAN__
59965 #define vmulxd_laneq_f64(__p0_158, __p1_158, __p2_158) __extension__ ({ \
59966 float64_t __s0_158 = __p0_158; \
59967 float64x2_t __s1_158 = __p1_158; \
59968 float64_t __ret_158; \
59969 __ret_158 = vmulxd_f64(__s0_158, vgetq_lane_f64(__s1_158, __p2_158)); \
59970 __ret_158; \
59971 })
59972 #else
59973 #define vmulxd_laneq_f64(__p0_159, __p1_159, __p2_159) __extension__ ({ \
59974 float64_t __s0_159 = __p0_159; \
59975 float64x2_t __s1_159 = __p1_159; \
59976 float64x2_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 1, 0); \
59977 float64_t __ret_159; \
59978 __ret_159 = __noswap_vmulxd_f64(__s0_159, __noswap_vgetq_lane_f64(__rev1_159, __p2_159)); \
59979 __ret_159; \
59980 })
59981 #endif
59982
59983 #ifdef __LITTLE_ENDIAN__
59984 #define vmulxs_laneq_f32(__p0_160, __p1_160, __p2_160) __extension__ ({ \
59985 float32_t __s0_160 = __p0_160; \
59986 float32x4_t __s1_160 = __p1_160; \
59987 float32_t __ret_160; \
59988 __ret_160 = vmulxs_f32(__s0_160, vgetq_lane_f32(__s1_160, __p2_160)); \
59989 __ret_160; \
59990 })
59991 #else
59992 #define vmulxs_laneq_f32(__p0_161, __p1_161, __p2_161) __extension__ ({ \
59993 float32_t __s0_161 = __p0_161; \
59994 float32x4_t __s1_161 = __p1_161; \
59995 float32x4_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 3, 2, 1, 0); \
59996 float32_t __ret_161; \
59997 __ret_161 = __noswap_vmulxs_f32(__s0_161, __noswap_vgetq_lane_f32(__rev1_161, __p2_161)); \
59998 __ret_161; \
59999 })
60000 #endif
60001
60002 #ifdef __LITTLE_ENDIAN__
60003 #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
60004 float64x2_t __s0 = __p0; \
60005 float64x2_t __s1 = __p1; \
60006 float64x2_t __ret; \
60007 __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
60008 __ret; \
60009 })
60010 #else
60011 #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
60012 float64x2_t __s0 = __p0; \
60013 float64x2_t __s1 = __p1; \
60014 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
60015 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
60016 float64x2_t __ret; \
60017 __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
60018 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
60019 __ret; \
60020 })
60021 #endif
60022
60023 #ifdef __LITTLE_ENDIAN__
60024 #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
60025 float32x4_t __s0 = __p0; \
60026 float32x4_t __s1 = __p1; \
60027 float32x4_t __ret; \
60028 __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
60029 __ret; \
60030 })
60031 #else
60032 #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
60033 float32x4_t __s0 = __p0; \
60034 float32x4_t __s1 = __p1; \
60035 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
60036 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
60037 float32x4_t __ret; \
60038 __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
60039 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
60040 __ret; \
60041 })
60042 #endif
60043
60044 #ifdef __LITTLE_ENDIAN__
60045 #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
60046 float32x2_t __s0 = __p0; \
60047 float32x4_t __s1 = __p1; \
60048 float32x2_t __ret; \
60049 __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
60050 __ret; \
60051 })
60052 #else
60053 #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
60054 float32x2_t __s0 = __p0; \
60055 float32x4_t __s1 = __p1; \
60056 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
60057 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
60058 float32x2_t __ret; \
60059 __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
60060 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
60061 __ret; \
60062 })
60063 #endif
60064
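/* vneg*: element-wise negation, implemented directly with the C unary minus on
 * the vector type. The big-endian variants still follow the reverse/operate/
 * reverse pattern even though a per-lane negation does not depend on lane
 * order; this keeps the generated code uniform. */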
60065 #ifdef __LITTLE_ENDIAN__
60066 __ai float64x2_t vnegq_f64(float64x2_t __p0) {
60067 float64x2_t __ret;
60068 __ret = -__p0;
60069 return __ret;
60070 }
60071 #else
60072 __ai float64x2_t vnegq_f64(float64x2_t __p0) {
60073 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60074 float64x2_t __ret;
60075 __ret = -__rev0;
60076 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60077 return __ret;
60078 }
60079 #endif
60080
60081 #ifdef __LITTLE_ENDIAN__
60082 __ai int64x2_t vnegq_s64(int64x2_t __p0) {
60083 int64x2_t __ret;
60084 __ret = -__p0;
60085 return __ret;
60086 }
60087 #else
60088 __ai int64x2_t vnegq_s64(int64x2_t __p0) {
60089 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60090 int64x2_t __ret;
60091 __ret = -__rev0;
60092 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60093 return __ret;
60094 }
60095 #endif
60096
60097 #ifdef __LITTLE_ENDIAN__
60098 __ai float64x1_t vneg_f64(float64x1_t __p0) {
60099 float64x1_t __ret;
60100 __ret = -__p0;
60101 return __ret;
60102 }
60103 #else
60104 __ai float64x1_t vneg_f64(float64x1_t __p0) {
60105 float64x1_t __ret;
60106 __ret = -__p0;
60107 return __ret;
60108 }
60109 #endif
60110
60111 #ifdef __LITTLE_ENDIAN__
60112 __ai int64x1_t vneg_s64(int64x1_t __p0) {
60113 int64x1_t __ret;
60114 __ret = -__p0;
60115 return __ret;
60116 }
60117 #else
60118 __ai int64x1_t vneg_s64(int64x1_t __p0) {
60119 int64x1_t __ret;
60120 __ret = -__p0;
60121 return __ret;
60122 }
60123 #endif
60124
60125 #ifdef __LITTLE_ENDIAN__
60126 __ai int64_t vnegd_s64(int64_t __p0) {
60127 int64_t __ret;
60128 __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
60129 return __ret;
60130 }
60131 #else
60132 __ai int64_t vnegd_s64(int64_t __p0) {
60133 int64_t __ret;
60134 __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
60135 return __ret;
60136 }
60137 #endif
60138
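/* vpaddq_* (ADDP): pairwise addition across the concatenation of the two
 * operands; adjacent element pairs are summed, so vpaddq_f32(a, b) yields
 * { a0+a1, a2+a3, b0+b1, b2+b3 }. The scalar forms vpaddd_u64, vpaddd_f64,
 * vpaddd_s64 and vpadds_f32 add the two lanes of a single vector and return
 * the sum as a scalar. Illustrative horizontal sum, not part of the generated
 * header:
 *   float32x4_t t = vpaddq_f32(v, v);                     // { v0+v1, v2+v3, v0+v1, v2+v3 }
 *   float32_t sum = vgetq_lane_f32(vpaddq_f32(t, t), 0);  // v0+v1+v2+v3
 */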
60139 #ifdef __LITTLE_ENDIAN__
60140 __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60141 uint8x16_t __ret;
60142 __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
60143 return __ret;
60144 }
60145 #else
60146 __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60147 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60148 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60149 uint8x16_t __ret;
60150 __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
60151 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60152 return __ret;
60153 }
60154 #endif
60155
60156 #ifdef __LITTLE_ENDIAN__
60157 __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60158 uint32x4_t __ret;
60159 __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
60160 return __ret;
60161 }
60162 #else
60163 __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60164 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60165 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60166 uint32x4_t __ret;
60167 __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
60168 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60169 return __ret;
60170 }
60171 #endif
60172
60173 #ifdef __LITTLE_ENDIAN__
60174 __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
60175 uint64x2_t __ret;
60176 __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
60177 return __ret;
60178 }
60179 #else
60180 __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
60181 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60182 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60183 uint64x2_t __ret;
60184 __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
60185 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60186 return __ret;
60187 }
60188 #endif
60189
60190 #ifdef __LITTLE_ENDIAN__
60191 __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60192 uint16x8_t __ret;
60193 __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
60194 return __ret;
60195 }
60196 #else
60197 __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60198 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60199 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60200 uint16x8_t __ret;
60201 __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
60202 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60203 return __ret;
60204 }
60205 #endif
60206
60207 #ifdef __LITTLE_ENDIAN__
60208 __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
60209 int8x16_t __ret;
60210 __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
60211 return __ret;
60212 }
60213 #else
60214 __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
60215 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60216 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60217 int8x16_t __ret;
60218 __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
60219 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60220 return __ret;
60221 }
60222 #endif
60223
60224 #ifdef __LITTLE_ENDIAN__
60225 __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
60226 float64x2_t __ret;
60227 __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
60228 return __ret;
60229 }
60230 #else
60231 __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
60232 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60233 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60234 float64x2_t __ret;
60235 __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
60236 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60237 return __ret;
60238 }
60239 #endif
60240
60241 #ifdef __LITTLE_ENDIAN__
60242 __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
60243 float32x4_t __ret;
60244 __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
60245 return __ret;
60246 }
60247 #else
60248 __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
60249 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60250 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60251 float32x4_t __ret;
60252 __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
60253 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60254 return __ret;
60255 }
60256 #endif
60257
60258 #ifdef __LITTLE_ENDIAN__
60259 __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
60260 int32x4_t __ret;
60261 __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
60262 return __ret;
60263 }
60264 #else
60265 __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
60266 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60267 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60268 int32x4_t __ret;
60269 __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
60270 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60271 return __ret;
60272 }
60273 #endif
60274
60275 #ifdef __LITTLE_ENDIAN__
60276 __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
60277 int64x2_t __ret;
60278 __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
60279 return __ret;
60280 }
60281 #else
60282 __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
60283 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60284 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60285 int64x2_t __ret;
60286 __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
60287 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60288 return __ret;
60289 }
60290 #endif
60291
60292 #ifdef __LITTLE_ENDIAN__
60293 __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
60294 int16x8_t __ret;
60295 __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
60296 return __ret;
60297 }
60298 #else
60299 __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
60300 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60301 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60302 int16x8_t __ret;
60303 __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
60304 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60305 return __ret;
60306 }
60307 #endif
60308
60309 #ifdef __LITTLE_ENDIAN__
60310 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
60311 uint64_t __ret;
60312 __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
60313 return __ret;
60314 }
60315 #else
60316 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
60317 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60318 uint64_t __ret;
60319 __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
60320 return __ret;
60321 }
60322 #endif
60323
60324 #ifdef __LITTLE_ENDIAN__
60325 __ai float64_t vpaddd_f64(float64x2_t __p0) {
60326 float64_t __ret;
60327 __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
60328 return __ret;
60329 }
60330 #else
60331 __ai float64_t vpaddd_f64(float64x2_t __p0) {
60332 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60333 float64_t __ret;
60334 __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
60335 return __ret;
60336 }
60337 #endif
60338
60339 #ifdef __LITTLE_ENDIAN__
60340 __ai int64_t vpaddd_s64(int64x2_t __p0) {
60341 int64_t __ret;
60342 __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
60343 return __ret;
60344 }
60345 #else
60346 __ai int64_t vpaddd_s64(int64x2_t __p0) {
60347 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60348 int64_t __ret;
60349 __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
60350 return __ret;
60351 }
60352 #endif
60353
60354 #ifdef __LITTLE_ENDIAN__
60355 __ai float32_t vpadds_f32(float32x2_t __p0) {
60356 float32_t __ret;
60357 __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
60358 return __ret;
60359 }
60360 #else
60361 __ai float32_t vpadds_f32(float32x2_t __p0) {
60362 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60363 float32_t __ret;
60364 __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
60365 return __ret;
60366 }
60367 #endif
60368
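/* vpmaxq_* (UMAXP/SMAXP/FMAXP): pairwise maximum; each result element is the
 * larger of an adjacent pair drawn from the concatenation of the two operands.
 * vpmaxqd_f64 and vpmaxs_f32 reduce the two lanes of a single vector to their
 * scalar maximum. */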
60369 #ifdef __LITTLE_ENDIAN__
60370 __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60371 uint8x16_t __ret;
60372 __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
60373 return __ret;
60374 }
60375 #else
60376 __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60377 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60378 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60379 uint8x16_t __ret;
60380 __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
60381 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60382 return __ret;
60383 }
60384 #endif
60385
60386 #ifdef __LITTLE_ENDIAN__
60387 __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60388 uint32x4_t __ret;
60389 __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
60390 return __ret;
60391 }
60392 #else
60393 __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60394 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60395 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60396 uint32x4_t __ret;
60397 __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
60398 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60399 return __ret;
60400 }
60401 #endif
60402
60403 #ifdef __LITTLE_ENDIAN__
60404 __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60405 uint16x8_t __ret;
60406 __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
60407 return __ret;
60408 }
60409 #else
60410 __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60411 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60412 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60413 uint16x8_t __ret;
60414 __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
60415 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60416 return __ret;
60417 }
60418 #endif
60419
60420 #ifdef __LITTLE_ENDIAN__
60421 __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
60422 int8x16_t __ret;
60423 __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
60424 return __ret;
60425 }
60426 #else
60427 __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
60428 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60429 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60430 int8x16_t __ret;
60431 __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
60432 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60433 return __ret;
60434 }
60435 #endif
60436
60437 #ifdef __LITTLE_ENDIAN__
60438 __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
60439 float64x2_t __ret;
60440 __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
60441 return __ret;
60442 }
60443 #else
60444 __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
60445 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60446 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60447 float64x2_t __ret;
60448 __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
60449 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60450 return __ret;
60451 }
60452 #endif
60453
60454 #ifdef __LITTLE_ENDIAN__
60455 __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
60456 float32x4_t __ret;
60457 __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
60458 return __ret;
60459 }
60460 #else
60461 __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
60462 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60463 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60464 float32x4_t __ret;
60465 __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
60466 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60467 return __ret;
60468 }
60469 #endif
60470
60471 #ifdef __LITTLE_ENDIAN__
60472 __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
60473 int32x4_t __ret;
60474 __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
60475 return __ret;
60476 }
60477 #else
60478 __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
60479 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60480 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60481 int32x4_t __ret;
60482 __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
60483 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60484 return __ret;
60485 }
60486 #endif
60487
60488 #ifdef __LITTLE_ENDIAN__
60489 __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
60490 int16x8_t __ret;
60491 __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
60492 return __ret;
60493 }
60494 #else
60495 __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
60496 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60497 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60498 int16x8_t __ret;
60499 __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
60500 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60501 return __ret;
60502 }
60503 #endif
60504
60505 #ifdef __LITTLE_ENDIAN__
60506 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
60507 float64_t __ret;
60508 __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
60509 return __ret;
60510 }
60511 #else
60512 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
60513 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60514 float64_t __ret;
60515 __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
60516 return __ret;
60517 }
60518 #endif
60519
60520 #ifdef __LITTLE_ENDIAN__
60521 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
60522 float32_t __ret;
60523 __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
60524 return __ret;
60525 }
60526 #else
60527 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
60528 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60529 float32_t __ret;
60530 __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
60531 return __ret;
60532 }
60533 #endif
60534
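/* vpmaxnmq_* / vpmaxnm_f32 (FMAXNMP): pairwise maximum with IEEE 754-2008
 * maxNum semantics, so a pair containing exactly one quiet NaN yields the
 * numeric element rather than the NaN. vpmaxnmqd_f64 and vpmaxnms_f32 are the
 * corresponding two-lane-to-scalar reductions. */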
60535 #ifdef __LITTLE_ENDIAN__
60536 __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
60537 float64x2_t __ret;
60538 __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
60539 return __ret;
60540 }
60541 #else
60542 __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
60543 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60544 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60545 float64x2_t __ret;
60546 __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
60547 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60548 return __ret;
60549 }
60550 #endif
60551
60552 #ifdef __LITTLE_ENDIAN__
60553 __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
60554 float32x4_t __ret;
60555 __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
60556 return __ret;
60557 }
60558 #else
60559 __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
60560 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60561 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60562 float32x4_t __ret;
60563 __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
60564 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60565 return __ret;
60566 }
60567 #endif
60568
60569 #ifdef __LITTLE_ENDIAN__
60570 __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
60571 float32x2_t __ret;
60572 __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
60573 return __ret;
60574 }
60575 #else
60576 __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
60577 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60578 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60579 float32x2_t __ret;
60580 __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
60581 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60582 return __ret;
60583 }
60584 #endif
60585
60586 #ifdef __LITTLE_ENDIAN__
60587 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
60588 float64_t __ret;
60589 __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
60590 return __ret;
60591 }
60592 #else
60593 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
60594 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60595 float64_t __ret;
60596 __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
60597 return __ret;
60598 }
60599 #endif
60600
60601 #ifdef __LITTLE_ENDIAN__
60602 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
60603 float32_t __ret;
60604 __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
60605 return __ret;
60606 }
60607 #else
60608 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
60609 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60610 float32_t __ret;
60611 __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
60612 return __ret;
60613 }
60614 #endif
60615
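/* vpminq_* and the vpminqd_f64 / vpmins_f32 reductions mirror the vpmax group
 * above, taking the smaller element of each adjacent pair. */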
60616 #ifdef __LITTLE_ENDIAN__
60617 __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60618 uint8x16_t __ret;
60619 __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
60620 return __ret;
60621 }
60622 #else
60623 __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
60624 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60625 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60626 uint8x16_t __ret;
60627 __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
60628 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60629 return __ret;
60630 }
60631 #endif
60632
60633 #ifdef __LITTLE_ENDIAN__
60634 __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60635 uint32x4_t __ret;
60636 __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
60637 return __ret;
60638 }
60639 #else
60640 __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
60641 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60642 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60643 uint32x4_t __ret;
60644 __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
60645 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60646 return __ret;
60647 }
60648 #endif
60649
60650 #ifdef __LITTLE_ENDIAN__
60651 __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60652 uint16x8_t __ret;
60653 __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
60654 return __ret;
60655 }
60656 #else
60657 __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
60658 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60659 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60660 uint16x8_t __ret;
60661 __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
60662 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60663 return __ret;
60664 }
60665 #endif
60666
60667 #ifdef __LITTLE_ENDIAN__
60668 __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
60669 int8x16_t __ret;
60670 __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
60671 return __ret;
60672 }
60673 #else
60674 __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
60675 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60676 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60677 int8x16_t __ret;
60678 __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
60679 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
60680 return __ret;
60681 }
60682 #endif
60683
60684 #ifdef __LITTLE_ENDIAN__
60685 __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
60686 float64x2_t __ret;
60687 __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
60688 return __ret;
60689 }
60690 #else
60691 __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
60692 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60693 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60694 float64x2_t __ret;
60695 __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
60696 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60697 return __ret;
60698 }
60699 #endif
60700
60701 #ifdef __LITTLE_ENDIAN__
60702 __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
60703 float32x4_t __ret;
60704 __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
60705 return __ret;
60706 }
60707 #else
60708 __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
60709 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60710 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60711 float32x4_t __ret;
60712 __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
60713 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60714 return __ret;
60715 }
60716 #endif
60717
60718 #ifdef __LITTLE_ENDIAN__
60719 __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
60720 int32x4_t __ret;
60721 __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
60722 return __ret;
60723 }
60724 #else
60725 __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
60726 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60727 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60728 int32x4_t __ret;
60729 __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
60730 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60731 return __ret;
60732 }
60733 #endif
60734
60735 #ifdef __LITTLE_ENDIAN__
60736 __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
60737 int16x8_t __ret;
60738 __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
60739 return __ret;
60740 }
60741 #else
60742 __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
60743 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
60744 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
60745 int16x8_t __ret;
60746 __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
60747 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
60748 return __ret;
60749 }
60750 #endif
60751
60752 #ifdef __LITTLE_ENDIAN__
60753 __ai float64_t vpminqd_f64(float64x2_t __p0) {
60754 float64_t __ret;
60755 __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
60756 return __ret;
60757 }
60758 #else
60759 __ai float64_t vpminqd_f64(float64x2_t __p0) {
60760 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60761 float64_t __ret;
60762 __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
60763 return __ret;
60764 }
60765 #endif
60766
60767 #ifdef __LITTLE_ENDIAN__
60768 __ai float32_t vpmins_f32(float32x2_t __p0) {
60769 float32_t __ret;
60770 __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
60771 return __ret;
60772 }
60773 #else
60774 __ai float32_t vpmins_f32(float32x2_t __p0) {
60775 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60776 float32_t __ret;
60777 __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
60778 return __ret;
60779 }
60780 #endif
60781
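/* vpminnmq_* / vpminnm_f32 (FMINNMP) mirror the vpmaxnm group with minNum
 * semantics: a pair containing exactly one quiet NaN yields the numeric
 * element. */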
60782 #ifdef __LITTLE_ENDIAN__
60783 __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
60784 float64x2_t __ret;
60785 __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
60786 return __ret;
60787 }
60788 #else
60789 __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
60790 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60791 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60792 float64x2_t __ret;
60793 __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
60794 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60795 return __ret;
60796 }
60797 #endif
60798
60799 #ifdef __LITTLE_ENDIAN__
60800 __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
60801 float32x4_t __ret;
60802 __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
60803 return __ret;
60804 }
60805 #else
60806 __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
60807 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
60808 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
60809 float32x4_t __ret;
60810 __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
60811 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
60812 return __ret;
60813 }
60814 #endif
60815
60816 #ifdef __LITTLE_ENDIAN__
60817 __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
60818 float32x2_t __ret;
60819 __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
60820 return __ret;
60821 }
60822 #else
60823 __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
60824 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60825 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
60826 float32x2_t __ret;
60827 __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
60828 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60829 return __ret;
60830 }
60831 #endif
60832
60833 #ifdef __LITTLE_ENDIAN__
60834 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
60835 float64_t __ret;
60836 __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
60837 return __ret;
60838 }
60839 #else
60840 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
60841 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60842 float64_t __ret;
60843 __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
60844 return __ret;
60845 }
60846 #endif
60847
60848 #ifdef __LITTLE_ENDIAN__
60849 __ai float32_t vpminnms_f32(float32x2_t __p0) {
60850 float32_t __ret;
60851 __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
60852 return __ret;
60853 }
60854 #else
60855 __ai float32_t vpminnms_f32(float32x2_t __p0) {
60856 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60857 float32_t __ret;
60858 __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
60859 return __ret;
60860 }
60861 #endif
60862
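/* vqabs* (SQABS): saturating absolute value. Unlike a plain absolute value,
 * the most negative representable input saturates to the most positive value
 * instead of overflowing, e.g. vqabsb_s8(-128) == 127. The vqabsb/vqabss/
 * vqabsd/vqabsh forms operate on single scalar lanes. */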
60863 #ifdef __LITTLE_ENDIAN__
60864 __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
60865 int64x2_t __ret;
60866 __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
60867 return __ret;
60868 }
60869 #else
60870 __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
60871 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
60872 int64x2_t __ret;
60873 __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
60874 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
60875 return __ret;
60876 }
60877 #endif
60878
60879 #ifdef __LITTLE_ENDIAN__
60880 __ai int64x1_t vqabs_s64(int64x1_t __p0) {
60881 int64x1_t __ret;
60882 __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
60883 return __ret;
60884 }
60885 #else
60886 __ai int64x1_t vqabs_s64(int64x1_t __p0) {
60887 int64x1_t __ret;
60888 __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
60889 return __ret;
60890 }
60891 #endif
60892
60893 #ifdef __LITTLE_ENDIAN__
60894 __ai int8_t vqabsb_s8(int8_t __p0) {
60895 int8_t __ret;
60896 __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
60897 return __ret;
60898 }
60899 #else
60900 __ai int8_t vqabsb_s8(int8_t __p0) {
60901 int8_t __ret;
60902 __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
60903 return __ret;
60904 }
60905 #endif
60906
60907 #ifdef __LITTLE_ENDIAN__
60908 __ai int32_t vqabss_s32(int32_t __p0) {
60909 int32_t __ret;
60910 __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
60911 return __ret;
60912 }
60913 #else
60914 __ai int32_t vqabss_s32(int32_t __p0) {
60915 int32_t __ret;
60916 __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
60917 return __ret;
60918 }
60919 #endif
60920
60921 #ifdef __LITTLE_ENDIAN__
60922 __ai int64_t vqabsd_s64(int64_t __p0) {
60923 int64_t __ret;
60924 __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
60925 return __ret;
60926 }
60927 #else
60928 __ai int64_t vqabsd_s64(int64_t __p0) {
60929 int64_t __ret;
60930 __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
60931 return __ret;
60932 }
60933 #endif
60934
60935 #ifdef __LITTLE_ENDIAN__
60936 __ai int16_t vqabsh_s16(int16_t __p0) {
60937 int16_t __ret;
60938 __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
60939 return __ret;
60940 }
60941 #else
60942 __ai int16_t vqabsh_s16(int16_t __p0) {
60943 int16_t __ret;
60944 __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
60945 return __ret;
60946 }
60947 #endif
60948
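/*
 * vqadd{b,h,s,d}_*: scalar saturating add. The sum is clamped to the range
 * of the element type rather than wrapping. Illustrative values:
 *
 *   int8_t  a = vqaddb_s8(120, 100);   // 127, not -36
 *   uint8_t b = vqaddb_u8(200, 100);   // 255, not 44
 */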
60949 #ifdef __LITTLE_ENDIAN__
60950 __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
60951 uint8_t __ret;
60952 __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
60953 return __ret;
60954 }
60955 #else
60956 __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
60957 uint8_t __ret;
60958 __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
60959 return __ret;
60960 }
60961 #endif
60962
60963 #ifdef __LITTLE_ENDIAN__
60964 __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
60965 uint32_t __ret;
60966 __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
60967 return __ret;
60968 }
60969 #else
60970 __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
60971 uint32_t __ret;
60972 __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
60973 return __ret;
60974 }
60975 #endif
60976
60977 #ifdef __LITTLE_ENDIAN__
60978 __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
60979 uint64_t __ret;
60980 __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
60981 return __ret;
60982 }
60983 #else
60984 __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
60985 uint64_t __ret;
60986 __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
60987 return __ret;
60988 }
60989 #endif
60990
60991 #ifdef __LITTLE_ENDIAN__
60992 __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
60993 uint16_t __ret;
60994 __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
60995 return __ret;
60996 }
60997 #else
60998 __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
60999 uint16_t __ret;
61000 __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
61001 return __ret;
61002 }
61003 #endif
61004
61005 #ifdef __LITTLE_ENDIAN__
61006 __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
61007 int8_t __ret;
61008 __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
61009 return __ret;
61010 }
61011 #else
61012 __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
61013 int8_t __ret;
61014 __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
61015 return __ret;
61016 }
61017 #endif
61018
61019 #ifdef __LITTLE_ENDIAN__
61020 __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
61021 int32_t __ret;
61022 __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
61023 return __ret;
61024 }
61025 #else
61026 __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
61027 int32_t __ret;
61028 __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
61029 return __ret;
61030 }
61031 __ai int32_t __noswap_vqadds_s32(int32_t __p0, int32_t __p1) {
61032 int32_t __ret;
61033 __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
61034 return __ret;
61035 }
61036 #endif
61037
61038 #ifdef __LITTLE_ENDIAN__
61039 __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
61040 int64_t __ret;
61041 __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
61042 return __ret;
61043 }
61044 #else
61045 __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
61046 int64_t __ret;
61047 __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
61048 return __ret;
61049 }
61050 #endif
61051
61052 #ifdef __LITTLE_ENDIAN__
61053 __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
61054 int16_t __ret;
61055 __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
61056 return __ret;
61057 }
61058 #else
61059 __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
61060 int16_t __ret;
61061 __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
61062 return __ret;
61063 }
61064 __ai int16_t __noswap_vqaddh_s16(int16_t __p0, int16_t __p1) {
61065 int16_t __ret;
61066 __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
61067 return __ret;
61068 }
61069 #endif
61070
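/*
 * vqdmlals_s32 / vqdmlalh_s16: scalar signed saturating doubling
 * multiply-accumulate long, roughly sat(__p0 + sat(2 * __p1 * __p2)) widened
 * to the accumulator type (int64_t for the _s32 form, int32_t for _s16).
 */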
61071 #ifdef __LITTLE_ENDIAN__
61072 __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
61073 int64_t __ret;
61074 __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
61075 return __ret;
61076 }
61077 #else
61078 __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
61079 int64_t __ret;
61080 __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
61081 return __ret;
61082 }
61083 #endif
61084
61085 #ifdef __LITTLE_ENDIAN__
61086 __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
61087 int32_t __ret;
61088 __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
61089 return __ret;
61090 }
61091 #else
61092 __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
61093 int32_t __ret;
61094 __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
61095 return __ret;
61096 }
61097 #endif
61098
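/*
 * The *_high_* variants below are convenience wrappers that apply the base
 * operation to the upper halves of their 128-bit inputs (via vget_high_*),
 * so a full vector can be processed as two long operations without an
 * explicit extract.
 */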
61099 #ifdef __LITTLE_ENDIAN__
61100 __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
61101 int64x2_t __ret;
61102 __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
61103 return __ret;
61104 }
61105 #else
61106 __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
61107 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
61108 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
61109 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
61110 int64x2_t __ret;
61111 __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
61112 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
61113 return __ret;
61114 }
61115 #endif
61116
61117 #ifdef __LITTLE_ENDIAN__
61118 __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
61119 int32x4_t __ret;
61120 __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
61121 return __ret;
61122 }
61123 #else
61124 __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
61125 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
61126 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
61127 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
61128 int32x4_t __ret;
61129 __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
61130 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
61131 return __ret;
61132 }
61133 #endif
61134
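/*
 * The _lane_ / _laneq_ forms multiply by a single element selected from a
 * 64-bit (_lane_) or 128-bit (_laneq_) vector. They are macros rather than
 * functions because the lane index must be a compile-time constant.
 */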
61135 #ifdef __LITTLE_ENDIAN__
61136 #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61137 int64x2_t __s0 = __p0; \
61138 int32x4_t __s1 = __p1; \
61139 int32x2_t __s2 = __p2; \
61140 int64x2_t __ret; \
61141 __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
61142 __ret; \
61143 })
61144 #else
61145 #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61146 int64x2_t __s0 = __p0; \
61147 int32x4_t __s1 = __p1; \
61148 int32x2_t __s2 = __p2; \
61149 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
61150 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61151 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
61152 int64x2_t __ret; \
61153 __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
61154 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
61155 __ret; \
61156 })
61157 #endif
61158
61159 #ifdef __LITTLE_ENDIAN__
61160 #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61161 int32x4_t __s0 = __p0; \
61162 int16x8_t __s1 = __p1; \
61163 int16x4_t __s2 = __p2; \
61164 int32x4_t __ret; \
61165 __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
61166 __ret; \
61167 })
61168 #else
61169 #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61170 int32x4_t __s0 = __p0; \
61171 int16x8_t __s1 = __p1; \
61172 int16x4_t __s2 = __p2; \
61173 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61174 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
61175 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61176 int32x4_t __ret; \
61177 __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
61178 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61179 __ret; \
61180 })
61181 #endif
61182
61183 #ifdef __LITTLE_ENDIAN__
61184 #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61185 int64x2_t __s0 = __p0; \
61186 int32x4_t __s1 = __p1; \
61187 int32x4_t __s2 = __p2; \
61188 int64x2_t __ret; \
61189 __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
61190 __ret; \
61191 })
61192 #else
61193 #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61194 int64x2_t __s0 = __p0; \
61195 int32x4_t __s1 = __p1; \
61196 int32x4_t __s2 = __p2; \
61197 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
61198 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61199 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61200 int64x2_t __ret; \
61201 __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
61202 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
61203 __ret; \
61204 })
61205 #endif
61206
61207 #ifdef __LITTLE_ENDIAN__
61208 #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61209 int32x4_t __s0 = __p0; \
61210 int16x8_t __s1 = __p1; \
61211 int16x8_t __s2 = __p2; \
61212 int32x4_t __ret; \
61213 __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
61214 __ret; \
61215 })
61216 #else
61217 #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61218 int32x4_t __s0 = __p0; \
61219 int16x8_t __s1 = __p1; \
61220 int16x8_t __s2 = __p2; \
61221 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61222 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
61223 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
61224 int32x4_t __ret; \
61225 __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
61226 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61227 __ret; \
61228 })
61229 #endif
61230
61231 #ifdef __LITTLE_ENDIAN__
61232 __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
61233 int64x2_t __ret;
61234 __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
61235 return __ret;
61236 }
61237 #else
61238 __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
61239 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
61240 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
61241 int64x2_t __ret;
61242 __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
61243 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
61244 return __ret;
61245 }
61246 #endif
61247
61248 #ifdef __LITTLE_ENDIAN__
61249 __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
61250 int32x4_t __ret;
61251 __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
61252 return __ret;
61253 }
61254 #else
61255 __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
61256 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
61257 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
61258 int32x4_t __ret;
61259 __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
61260 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
61261 return __ret;
61262 }
61263 #endif
61264
61265 #ifdef __LITTLE_ENDIAN__
61266 #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61267 int64_t __s0 = __p0; \
61268 int32_t __s1 = __p1; \
61269 int32x2_t __s2 = __p2; \
61270 int64_t __ret; \
61271 __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
61272 __ret; \
61273 })
61274 #else
61275 #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61276 int64_t __s0 = __p0; \
61277 int32_t __s1 = __p1; \
61278 int32x2_t __s2 = __p2; \
61279 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
61280 int64_t __ret; \
61281 __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
61282 __ret; \
61283 })
61284 #endif
61285
61286 #ifdef __LITTLE_ENDIAN__
61287 #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61288 int32_t __s0 = __p0; \
61289 int16_t __s1 = __p1; \
61290 int16x4_t __s2 = __p2; \
61291 int32_t __ret; \
61292 __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
61293 __ret; \
61294 })
61295 #else
61296 #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61297 int32_t __s0 = __p0; \
61298 int16_t __s1 = __p1; \
61299 int16x4_t __s2 = __p2; \
61300 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61301 int32_t __ret; \
61302 __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
61303 __ret; \
61304 })
61305 #endif
61306
61307 #ifdef __LITTLE_ENDIAN__
61308 #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61309 int64_t __s0 = __p0; \
61310 int32_t __s1 = __p1; \
61311 int32x4_t __s2 = __p2; \
61312 int64_t __ret; \
61313 __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
61314 __ret; \
61315 })
61316 #else
61317 #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61318 int64_t __s0 = __p0; \
61319 int32_t __s1 = __p1; \
61320 int32x4_t __s2 = __p2; \
61321 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61322 int64_t __ret; \
61323 __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
61324 __ret; \
61325 })
61326 #endif
61327
61328 #ifdef __LITTLE_ENDIAN__
61329 #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61330 int32_t __s0 = __p0; \
61331 int16_t __s1 = __p1; \
61332 int16x8_t __s2 = __p2; \
61333 int32_t __ret; \
61334 __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
61335 __ret; \
61336 })
61337 #else
61338 #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61339 int32_t __s0 = __p0; \
61340 int16_t __s1 = __p1; \
61341 int16x8_t __s2 = __p2; \
61342 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
61343 int32_t __ret; \
61344 __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
61345 __ret; \
61346 })
61347 #endif
61348
61349 #ifdef __LITTLE_ENDIAN__
61350 #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61351 int64x2_t __s0 = __p0; \
61352 int32x2_t __s1 = __p1; \
61353 int32x4_t __s2 = __p2; \
61354 int64x2_t __ret; \
61355 __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
61356 __ret; \
61357 })
61358 #else
61359 #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61360 int64x2_t __s0 = __p0; \
61361 int32x2_t __s1 = __p1; \
61362 int32x4_t __s2 = __p2; \
61363 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
61364 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
61365 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61366 int64x2_t __ret; \
61367 __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
61368 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
61369 __ret; \
61370 })
61371 #endif
61372
61373 #ifdef __LITTLE_ENDIAN__
61374 #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61375 int32x4_t __s0 = __p0; \
61376 int16x4_t __s1 = __p1; \
61377 int16x8_t __s2 = __p2; \
61378 int32x4_t __ret; \
61379 __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
61380 __ret; \
61381 })
61382 #else
61383 #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61384 int32x4_t __s0 = __p0; \
61385 int16x4_t __s1 = __p1; \
61386 int16x8_t __s2 = __p2; \
61387 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61388 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61389 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
61390 int32x4_t __ret; \
61391 __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
61392 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61393 __ret; \
61394 })
61395 #endif
61396
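/*
 * vqdmlsl*: the multiply-subtract counterparts of vqdmlal* above, roughly
 * sat(__p0 - sat(2 * __p1 * __p2)), with the same scalar, _high, _lane and
 * _laneq spellings.
 */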
61397 #ifdef __LITTLE_ENDIAN__
61398 __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
61399 int64_t __ret;
61400 __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
61401 return __ret;
61402 }
61403 #else
61404 __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
61405 int64_t __ret;
61406 __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
61407 return __ret;
61408 }
61409 #endif
61410
61411 #ifdef __LITTLE_ENDIAN__
61412 __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
61413 int32_t __ret;
61414 __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
61415 return __ret;
61416 }
61417 #else
61418 __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
61419 int32_t __ret;
61420 __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
61421 return __ret;
61422 }
61423 #endif
61424
61425 #ifdef __LITTLE_ENDIAN__
61426 __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
61427 int64x2_t __ret;
61428 __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
61429 return __ret;
61430 }
61431 #else
61432 __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
61433 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
61434 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
61435 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
61436 int64x2_t __ret;
61437 __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
61438 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
61439 return __ret;
61440 }
61441 #endif
61442
61443 #ifdef __LITTLE_ENDIAN__
61444 __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
61445 int32x4_t __ret;
61446 __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
61447 return __ret;
61448 }
61449 #else
61450 __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
61451 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
61452 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
61453 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
61454 int32x4_t __ret;
61455 __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
61456 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
61457 return __ret;
61458 }
61459 #endif
61460
61461 #ifdef __LITTLE_ENDIAN__
61462 #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61463 int64x2_t __s0 = __p0; \
61464 int32x4_t __s1 = __p1; \
61465 int32x2_t __s2 = __p2; \
61466 int64x2_t __ret; \
61467 __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
61468 __ret; \
61469 })
61470 #else
61471 #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61472 int64x2_t __s0 = __p0; \
61473 int32x4_t __s1 = __p1; \
61474 int32x2_t __s2 = __p2; \
61475 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
61476 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61477 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
61478 int64x2_t __ret; \
61479 __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
61480 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
61481 __ret; \
61482 })
61483 #endif
61484
61485 #ifdef __LITTLE_ENDIAN__
61486 #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61487 int32x4_t __s0 = __p0; \
61488 int16x8_t __s1 = __p1; \
61489 int16x4_t __s2 = __p2; \
61490 int32x4_t __ret; \
61491 __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
61492 __ret; \
61493 })
61494 #else
61495 #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61496 int32x4_t __s0 = __p0; \
61497 int16x8_t __s1 = __p1; \
61498 int16x4_t __s2 = __p2; \
61499 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61500 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
61501 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61502 int32x4_t __ret; \
61503 __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
61504 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61505 __ret; \
61506 })
61507 #endif
61508
61509 #ifdef __LITTLE_ENDIAN__
61510 #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61511 int64x2_t __s0 = __p0; \
61512 int32x4_t __s1 = __p1; \
61513 int32x4_t __s2 = __p2; \
61514 int64x2_t __ret; \
61515 __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
61516 __ret; \
61517 })
61518 #else
61519 #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61520 int64x2_t __s0 = __p0; \
61521 int32x4_t __s1 = __p1; \
61522 int32x4_t __s2 = __p2; \
61523 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
61524 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61525 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61526 int64x2_t __ret; \
61527 __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
61528 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
61529 __ret; \
61530 })
61531 #endif
61532
61533 #ifdef __LITTLE_ENDIAN__
61534 #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61535 int32x4_t __s0 = __p0; \
61536 int16x8_t __s1 = __p1; \
61537 int16x8_t __s2 = __p2; \
61538 int32x4_t __ret; \
61539 __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
61540 __ret; \
61541 })
61542 #else
61543 #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61544 int32x4_t __s0 = __p0; \
61545 int16x8_t __s1 = __p1; \
61546 int16x8_t __s2 = __p2; \
61547 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61548 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
61549 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
61550 int32x4_t __ret; \
61551 __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
61552 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61553 __ret; \
61554 })
61555 #endif
61556
61557 #ifdef __LITTLE_ENDIAN__
61558 __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
61559 int64x2_t __ret;
61560 __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
61561 return __ret;
61562 }
61563 #else
61564 __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
61565 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
61566 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
61567 int64x2_t __ret;
61568 __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
61569 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
61570 return __ret;
61571 }
61572 #endif
61573
61574 #ifdef __LITTLE_ENDIAN__
61575 __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
61576 int32x4_t __ret;
61577 __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
61578 return __ret;
61579 }
61580 #else
61581 __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
61582 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
61583 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
61584 int32x4_t __ret;
61585 __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
61586 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
61587 return __ret;
61588 }
61589 #endif
61590
61591 #ifdef __LITTLE_ENDIAN__
61592 #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61593 int64_t __s0 = __p0; \
61594 int32_t __s1 = __p1; \
61595 int32x2_t __s2 = __p2; \
61596 int64_t __ret; \
61597 __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
61598 __ret; \
61599 })
61600 #else
61601 #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61602 int64_t __s0 = __p0; \
61603 int32_t __s1 = __p1; \
61604 int32x2_t __s2 = __p2; \
61605 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
61606 int64_t __ret; \
61607 __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
61608 __ret; \
61609 })
61610 #endif
61611
61612 #ifdef __LITTLE_ENDIAN__
61613 #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61614 int32_t __s0 = __p0; \
61615 int16_t __s1 = __p1; \
61616 int16x4_t __s2 = __p2; \
61617 int32_t __ret; \
61618 __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
61619 __ret; \
61620 })
61621 #else
61622 #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61623 int32_t __s0 = __p0; \
61624 int16_t __s1 = __p1; \
61625 int16x4_t __s2 = __p2; \
61626 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61627 int32_t __ret; \
61628 __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
61629 __ret; \
61630 })
61631 #endif
61632
61633 #ifdef __LITTLE_ENDIAN__
61634 #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61635 int64_t __s0 = __p0; \
61636 int32_t __s1 = __p1; \
61637 int32x4_t __s2 = __p2; \
61638 int64_t __ret; \
61639 __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
61640 __ret; \
61641 })
61642 #else
61643 #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61644 int64_t __s0 = __p0; \
61645 int32_t __s1 = __p1; \
61646 int32x4_t __s2 = __p2; \
61647 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61648 int64_t __ret; \
61649 __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
61650 __ret; \
61651 })
61652 #endif
61653
61654 #ifdef __LITTLE_ENDIAN__
61655 #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61656 int32_t __s0 = __p0; \
61657 int16_t __s1 = __p1; \
61658 int16x8_t __s2 = __p2; \
61659 int32_t __ret; \
61660 __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
61661 __ret; \
61662 })
61663 #else
61664 #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61665 int32_t __s0 = __p0; \
61666 int16_t __s1 = __p1; \
61667 int16x8_t __s2 = __p2; \
61668 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
61669 int32_t __ret; \
61670 __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
61671 __ret; \
61672 })
61673 #endif
61674
61675 #ifdef __LITTLE_ENDIAN__
61676 #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61677 int64x2_t __s0 = __p0; \
61678 int32x2_t __s1 = __p1; \
61679 int32x4_t __s2 = __p2; \
61680 int64x2_t __ret; \
61681 __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
61682 __ret; \
61683 })
61684 #else
61685 #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
61686 int64x2_t __s0 = __p0; \
61687 int32x2_t __s1 = __p1; \
61688 int32x4_t __s2 = __p2; \
61689 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
61690 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
61691 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
61692 int64x2_t __ret; \
61693 __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
61694 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
61695 __ret; \
61696 })
61697 #endif
61698
61699 #ifdef __LITTLE_ENDIAN__
61700 #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61701 int32x4_t __s0 = __p0; \
61702 int16x4_t __s1 = __p1; \
61703 int16x8_t __s2 = __p2; \
61704 int32x4_t __ret; \
61705 __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
61706 __ret; \
61707 })
61708 #else
61709 #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
61710 int32x4_t __s0 = __p0; \
61711 int16x4_t __s1 = __p1; \
61712 int16x8_t __s2 = __p2; \
61713 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61714 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61715 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
61716 int32x4_t __ret; \
61717 __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
61718 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61719 __ret; \
61720 })
61721 #endif
61722
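/*
 * vqdmulh*: saturating doubling multiply returning the high half, the usual
 * Q15/Q31 fixed-point multiply. Illustrative value:
 * vqdmulhh_s16(0x4000, 0x4000) == 0x2000, i.e. 0.5 * 0.5 == 0.25 in Q15.
 */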
61723 #ifdef __LITTLE_ENDIAN__
61724 __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
61725 int32_t __ret;
61726 __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
61727 return __ret;
61728 }
61729 #else
61730 __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
61731 int32_t __ret;
61732 __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
61733 return __ret;
61734 }
61735 __ai int32_t __noswap_vqdmulhs_s32(int32_t __p0, int32_t __p1) {
61736 int32_t __ret;
61737 __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
61738 return __ret;
61739 }
61740 #endif
61741
61742 #ifdef __LITTLE_ENDIAN__
61743 __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
61744 int16_t __ret;
61745 __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
61746 return __ret;
61747 }
61748 #else
61749 __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
61750 int16_t __ret;
61751 __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
61752 return __ret;
61753 }
61754 __ai int16_t __noswap_vqdmulhh_s16(int16_t __p0, int16_t __p1) {
61755 int16_t __ret;
61756 __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
61757 return __ret;
61758 }
61759 #endif
61760
61761 #ifdef __LITTLE_ENDIAN__
61762 #define vqdmulhs_lane_s32(__p0_162, __p1_162, __p2_162) __extension__ ({ \
61763 int32_t __s0_162 = __p0_162; \
61764 int32x2_t __s1_162 = __p1_162; \
61765 int32_t __ret_162; \
61766 __ret_162 = vqdmulhs_s32(__s0_162, vget_lane_s32(__s1_162, __p2_162)); \
61767 __ret_162; \
61768 })
61769 #else
61770 #define vqdmulhs_lane_s32(__p0_163, __p1_163, __p2_163) __extension__ ({ \
61771 int32_t __s0_163 = __p0_163; \
61772 int32x2_t __s1_163 = __p1_163; \
61773 int32x2_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 1, 0); \
61774 int32_t __ret_163; \
61775 __ret_163 = __noswap_vqdmulhs_s32(__s0_163, __noswap_vget_lane_s32(__rev1_163, __p2_163)); \
61776 __ret_163; \
61777 })
61778 #endif
61779
61780 #ifdef __LITTLE_ENDIAN__
61781 #define vqdmulhh_lane_s16(__p0_164, __p1_164, __p2_164) __extension__ ({ \
61782 int16_t __s0_164 = __p0_164; \
61783 int16x4_t __s1_164 = __p1_164; \
61784 int16_t __ret_164; \
61785 __ret_164 = vqdmulhh_s16(__s0_164, vget_lane_s16(__s1_164, __p2_164)); \
61786 __ret_164; \
61787 })
61788 #else
61789 #define vqdmulhh_lane_s16(__p0_165, __p1_165, __p2_165) __extension__ ({ \
61790 int16_t __s0_165 = __p0_165; \
61791 int16x4_t __s1_165 = __p1_165; \
61792 int16x4_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \
61793 int16_t __ret_165; \
61794 __ret_165 = __noswap_vqdmulhh_s16(__s0_165, __noswap_vget_lane_s16(__rev1_165, __p2_165)); \
61795 __ret_165; \
61796 })
61797 #endif
61798
61799 #ifdef __LITTLE_ENDIAN__
61800 #define vqdmulhs_laneq_s32(__p0_166, __p1_166, __p2_166) __extension__ ({ \
61801 int32_t __s0_166 = __p0_166; \
61802 int32x4_t __s1_166 = __p1_166; \
61803 int32_t __ret_166; \
61804 __ret_166 = vqdmulhs_s32(__s0_166, vgetq_lane_s32(__s1_166, __p2_166)); \
61805 __ret_166; \
61806 })
61807 #else
61808 #define vqdmulhs_laneq_s32(__p0_167, __p1_167, __p2_167) __extension__ ({ \
61809 int32_t __s0_167 = __p0_167; \
61810 int32x4_t __s1_167 = __p1_167; \
61811 int32x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 3, 2, 1, 0); \
61812 int32_t __ret_167; \
61813 __ret_167 = __noswap_vqdmulhs_s32(__s0_167, __noswap_vgetq_lane_s32(__rev1_167, __p2_167)); \
61814 __ret_167; \
61815 })
61816 #endif
61817
61818 #ifdef __LITTLE_ENDIAN__
61819 #define vqdmulhh_laneq_s16(__p0_168, __p1_168, __p2_168) __extension__ ({ \
61820 int16_t __s0_168 = __p0_168; \
61821 int16x8_t __s1_168 = __p1_168; \
61822 int16_t __ret_168; \
61823 __ret_168 = vqdmulhh_s16(__s0_168, vgetq_lane_s16(__s1_168, __p2_168)); \
61824 __ret_168; \
61825 })
61826 #else
61827 #define vqdmulhh_laneq_s16(__p0_169, __p1_169, __p2_169) __extension__ ({ \
61828 int16_t __s0_169 = __p0_169; \
61829 int16x8_t __s1_169 = __p1_169; \
61830 int16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 7, 6, 5, 4, 3, 2, 1, 0); \
61831 int16_t __ret_169; \
61832 __ret_169 = __noswap_vqdmulhh_s16(__s0_169, __noswap_vgetq_lane_s16(__rev1_169, __p2_169)); \
61833 __ret_169; \
61834 })
61835 #endif
61836
61837 #ifdef __LITTLE_ENDIAN__
61838 #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
61839 int32x4_t __s0 = __p0; \
61840 int32x4_t __s1 = __p1; \
61841 int32x4_t __ret; \
61842 __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
61843 __ret; \
61844 })
61845 #else
61846 #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
61847 int32x4_t __s0 = __p0; \
61848 int32x4_t __s1 = __p1; \
61849 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61850 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61851 int32x4_t __ret; \
61852 __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
61853 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61854 __ret; \
61855 })
61856 #endif
61857
61858 #ifdef __LITTLE_ENDIAN__
61859 #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
61860 int16x8_t __s0 = __p0; \
61861 int16x8_t __s1 = __p1; \
61862 int16x8_t __ret; \
61863 __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
61864 __ret; \
61865 })
61866 #else
61867 #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
61868 int16x8_t __s0 = __p0; \
61869 int16x8_t __s1 = __p1; \
61870 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
61871 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
61872 int16x8_t __ret; \
61873 __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
61874 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
61875 __ret; \
61876 })
61877 #endif
61878
61879 #ifdef __LITTLE_ENDIAN__
61880 #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
61881 int32x2_t __s0 = __p0; \
61882 int32x4_t __s1 = __p1; \
61883 int32x2_t __ret; \
61884 __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
61885 __ret; \
61886 })
61887 #else
61888 #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
61889 int32x2_t __s0 = __p0; \
61890 int32x4_t __s1 = __p1; \
61891 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
61892 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
61893 int32x2_t __ret; \
61894 __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
61895 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
61896 __ret; \
61897 })
61898 #endif
61899
61900 #ifdef __LITTLE_ENDIAN__
61901 #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
61902 int16x4_t __s0 = __p0; \
61903 int16x8_t __s1 = __p1; \
61904 int16x4_t __ret; \
61905 __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
61906 __ret; \
61907 })
61908 #else
61909 #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
61910 int16x4_t __s0 = __p0; \
61911 int16x8_t __s1 = __p1; \
61912 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
61913 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
61914 int16x4_t __ret; \
61915 __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
61916 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
61917 __ret; \
61918 })
61919 #endif
61920
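/*
 * vqdmull*: saturating doubling multiply long; the doubled product is
 * returned in an element twice as wide, e.g. vqdmullh_s16(0x4000, 0x4000)
 * == 0x20000000 as an int32_t.
 */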
61921 #ifdef __LITTLE_ENDIAN__
61922 __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
61923 int64_t __ret;
61924 __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
61925 return __ret;
61926 }
61927 #else
61928 __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
61929 int64_t __ret;
61930 __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
61931 return __ret;
61932 }
61933 __ai int64_t __noswap_vqdmulls_s32(int32_t __p0, int32_t __p1) {
61934 int64_t __ret;
61935 __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
61936 return __ret;
61937 }
61938 #endif
61939
61940 #ifdef __LITTLE_ENDIAN__
61941 __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
61942 int32_t __ret;
61943 __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
61944 return __ret;
61945 }
61946 #else
61947 __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
61948 int32_t __ret;
61949 __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
61950 return __ret;
61951 }
61952 __ai int32_t __noswap_vqdmullh_s16(int16_t __p0, int16_t __p1) {
61953 int32_t __ret;
61954 __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
61955 return __ret;
61956 }
61957 #endif
61958
61959 #ifdef __LITTLE_ENDIAN__
61960 __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
61961 int64x2_t __ret;
61962 __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
61963 return __ret;
61964 }
61965 #else
61966 __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
61967 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
61968 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
61969 int64x2_t __ret;
61970 __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
61971 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
61972 return __ret;
61973 }
61974 #endif
61975
61976 #ifdef __LITTLE_ENDIAN__
61977 __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
61978 int32x4_t __ret;
61979 __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
61980 return __ret;
61981 }
61982 #else
61983 __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
61984 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
61985 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
61986 int32x4_t __ret;
61987 __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
61988 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
61989 return __ret;
61990 }
61991 #endif
61992
61993 #ifdef __LITTLE_ENDIAN__
61994 #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
61995 int32x4_t __s0 = __p0; \
61996 int32x2_t __s1 = __p1; \
61997 int64x2_t __ret; \
61998 __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
61999 __ret; \
62000 })
62001 #else
62002 #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
62003 int32x4_t __s0 = __p0; \
62004 int32x2_t __s1 = __p1; \
62005 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
62006 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
62007 int64x2_t __ret; \
62008 __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
62009 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
62010 __ret; \
62011 })
62012 #endif
62013
62014 #ifdef __LITTLE_ENDIAN__
62015 #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
62016 int16x8_t __s0 = __p0; \
62017 int16x4_t __s1 = __p1; \
62018 int32x4_t __ret; \
62019 __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
62020 __ret; \
62021 })
62022 #else
62023 #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
62024 int16x8_t __s0 = __p0; \
62025 int16x4_t __s1 = __p1; \
62026 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
62027 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
62028 int32x4_t __ret; \
62029 __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
62030 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
62031 __ret; \
62032 })
62033 #endif
62034
62035 #ifdef __LITTLE_ENDIAN__
62036 #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62037 int32x4_t __s0 = __p0; \
62038 int32x4_t __s1 = __p1; \
62039 int64x2_t __ret; \
62040 __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
62041 __ret; \
62042 })
62043 #else
62044 #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62045 int32x4_t __s0 = __p0; \
62046 int32x4_t __s1 = __p1; \
62047 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
62048 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
62049 int64x2_t __ret; \
62050 __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
62051 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
62052 __ret; \
62053 })
62054 #endif
62055
62056 #ifdef __LITTLE_ENDIAN__
62057 #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62058 int16x8_t __s0 = __p0; \
62059 int16x8_t __s1 = __p1; \
62060 int32x4_t __ret; \
62061 __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
62062 __ret; \
62063 })
62064 #else
62065 #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62066 int16x8_t __s0 = __p0; \
62067 int16x8_t __s1 = __p1; \
62068 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
62069 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
62070 int32x4_t __ret; \
62071 __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
62072 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
62073 __ret; \
62074 })
62075 #endif
62076
62077 #ifdef __LITTLE_ENDIAN__
62078 __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
62079 int64x2_t __ret;
62080 __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
62081 return __ret;
62082 }
62083 #else
62084 __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
62085 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
62086 int64x2_t __ret;
62087 __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
62088 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
62089 return __ret;
62090 }
62091 #endif
62092
62093 #ifdef __LITTLE_ENDIAN__
62094 __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
62095 int32x4_t __ret;
62096 __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
62097 return __ret;
62098 }
62099 #else
62100 __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
62101 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
62102 int32x4_t __ret;
62103 __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
62104 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
62105 return __ret;
62106 }
62107 #endif
62108
62109 #ifdef __LITTLE_ENDIAN__
62110 #define vqdmulls_lane_s32(__p0_170, __p1_170, __p2_170) __extension__ ({ \
62111 int32_t __s0_170 = __p0_170; \
62112 int32x2_t __s1_170 = __p1_170; \
62113 int64_t __ret_170; \
62114 __ret_170 = vqdmulls_s32(__s0_170, vget_lane_s32(__s1_170, __p2_170)); \
62115 __ret_170; \
62116 })
62117 #else
62118 #define vqdmulls_lane_s32(__p0_171, __p1_171, __p2_171) __extension__ ({ \
62119 int32_t __s0_171 = __p0_171; \
62120 int32x2_t __s1_171 = __p1_171; \
62121 int32x2_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 1, 0); \
62122 int64_t __ret_171; \
62123 __ret_171 = __noswap_vqdmulls_s32(__s0_171, __noswap_vget_lane_s32(__rev1_171, __p2_171)); \
62124 __ret_171; \
62125 })
62126 #endif
62127
62128 #ifdef __LITTLE_ENDIAN__
62129 #define vqdmullh_lane_s16(__p0_172, __p1_172, __p2_172) __extension__ ({ \
62130 int16_t __s0_172 = __p0_172; \
62131 int16x4_t __s1_172 = __p1_172; \
62132 int32_t __ret_172; \
62133 __ret_172 = vqdmullh_s16(__s0_172, vget_lane_s16(__s1_172, __p2_172)); \
62134 __ret_172; \
62135 })
62136 #else
62137 #define vqdmullh_lane_s16(__p0_173, __p1_173, __p2_173) __extension__ ({ \
62138 int16_t __s0_173 = __p0_173; \
62139 int16x4_t __s1_173 = __p1_173; \
62140 int16x4_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 3, 2, 1, 0); \
62141 int32_t __ret_173; \
62142 __ret_173 = __noswap_vqdmullh_s16(__s0_173, __noswap_vget_lane_s16(__rev1_173, __p2_173)); \
62143 __ret_173; \
62144 })
62145 #endif
62146
62147 #ifdef __LITTLE_ENDIAN__
62148 #define vqdmulls_laneq_s32(__p0_174, __p1_174, __p2_174) __extension__ ({ \
62149 int32_t __s0_174 = __p0_174; \
62150 int32x4_t __s1_174 = __p1_174; \
62151 int64_t __ret_174; \
62152 __ret_174 = vqdmulls_s32(__s0_174, vgetq_lane_s32(__s1_174, __p2_174)); \
62153 __ret_174; \
62154 })
62155 #else
62156 #define vqdmulls_laneq_s32(__p0_175, __p1_175, __p2_175) __extension__ ({ \
62157 int32_t __s0_175 = __p0_175; \
62158 int32x4_t __s1_175 = __p1_175; \
62159 int32x4_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 3, 2, 1, 0); \
62160 int64_t __ret_175; \
62161 __ret_175 = __noswap_vqdmulls_s32(__s0_175, __noswap_vgetq_lane_s32(__rev1_175, __p2_175)); \
62162 __ret_175; \
62163 })
62164 #endif
62165
62166 #ifdef __LITTLE_ENDIAN__
62167 #define vqdmullh_laneq_s16(__p0_176, __p1_176, __p2_176) __extension__ ({ \
62168 int16_t __s0_176 = __p0_176; \
62169 int16x8_t __s1_176 = __p1_176; \
62170 int32_t __ret_176; \
62171 __ret_176 = vqdmullh_s16(__s0_176, vgetq_lane_s16(__s1_176, __p2_176)); \
62172 __ret_176; \
62173 })
62174 #else
62175 #define vqdmullh_laneq_s16(__p0_177, __p1_177, __p2_177) __extension__ ({ \
62176 int16_t __s0_177 = __p0_177; \
62177 int16x8_t __s1_177 = __p1_177; \
62178 int16x8_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 7, 6, 5, 4, 3, 2, 1, 0); \
62179 int32_t __ret_177; \
62180 __ret_177 = __noswap_vqdmullh_s16(__s0_177, __noswap_vgetq_lane_s16(__rev1_177, __p2_177)); \
62181 __ret_177; \
62182 })
62183 #endif
62184
62185 #ifdef __LITTLE_ENDIAN__
62186 #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62187 int32x2_t __s0 = __p0; \
62188 int32x4_t __s1 = __p1; \
62189 int64x2_t __ret; \
62190 __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
62191 __ret; \
62192 })
62193 #else
62194 #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62195 int32x2_t __s0 = __p0; \
62196 int32x4_t __s1 = __p1; \
62197 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
62198 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
62199 int64x2_t __ret; \
62200 __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
62201 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
62202 __ret; \
62203 })
62204 #endif
62205
62206 #ifdef __LITTLE_ENDIAN__
62207 #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62208 int16x4_t __s0 = __p0; \
62209 int16x8_t __s1 = __p1; \
62210 int32x4_t __ret; \
62211 __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
62212 __ret; \
62213 })
62214 #else
62215 #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62216 int16x4_t __s0 = __p0; \
62217 int16x8_t __s1 = __p1; \
62218 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
62219 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
62220 int32x4_t __ret; \
62221 __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
62222 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
62223 __ret; \
62224 })
62225 #endif
62226
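/*
 * vqmovn{s,d,h}_*: scalar saturating narrow to half the element width,
 * e.g. vqmovns_s32(100000) == 32767 and vqmovns_u32(100000) == 65535.
 */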
62227 #ifdef __LITTLE_ENDIAN__
62228 __ai int16_t vqmovns_s32(int32_t __p0) {
62229 int16_t __ret;
62230 __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
62231 return __ret;
62232 }
62233 #else
62234 __ai int16_t vqmovns_s32(int32_t __p0) {
62235 int16_t __ret;
62236 __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
62237 return __ret;
62238 }
62239 #endif
62240
62241 #ifdef __LITTLE_ENDIAN__
62242 __ai int32_t vqmovnd_s64(int64_t __p0) {
62243 int32_t __ret;
62244 __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
62245 return __ret;
62246 }
62247 #else
62248 __ai int32_t vqmovnd_s64(int64_t __p0) {
62249 int32_t __ret;
62250 __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
62251 return __ret;
62252 }
62253 #endif
62254
62255 #ifdef __LITTLE_ENDIAN__
62256 __ai int8_t vqmovnh_s16(int16_t __p0) {
62257 int8_t __ret;
62258 __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
62259 return __ret;
62260 }
62261 #else
62262 __ai int8_t vqmovnh_s16(int16_t __p0) {
62263 int8_t __ret;
62264 __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
62265 return __ret;
62266 }
62267 #endif
62268
62269 #ifdef __LITTLE_ENDIAN__
62270 __ai uint16_t vqmovns_u32(uint32_t __p0) {
62271 uint16_t __ret;
62272 __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
62273 return __ret;
62274 }
62275 #else
62276 __ai uint16_t vqmovns_u32(uint32_t __p0) {
62277 uint16_t __ret;
62278 __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
62279 return __ret;
62280 }
62281 #endif
62282
62283 #ifdef __LITTLE_ENDIAN__
62284 __ai uint32_t vqmovnd_u64(uint64_t __p0) {
62285 uint32_t __ret;
62286 __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
62287 return __ret;
62288 }
62289 #else
62290 __ai uint32_t vqmovnd_u64(uint64_t __p0) {
62291 uint32_t __ret;
62292 __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
62293 return __ret;
62294 }
62295 #endif
62296
62297 #ifdef __LITTLE_ENDIAN__
62298 __ai uint8_t vqmovnh_u16(uint16_t __p0) {
62299 uint8_t __ret;
62300 __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
62301 return __ret;
62302 }
62303 #else
62304 __ai uint8_t vqmovnh_u16(uint16_t __p0) {
62305 uint8_t __ret;
62306 __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
62307 return __ret;
62308 }
62309 #endif
62310
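/*
 * vqmovn_high_*: narrow the 128-bit second operand with saturation and pack
 * it into the upper half of the result; the first operand supplies the lower
 * half unchanged (implemented as vcombine_* of the two).
 */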
62311 #ifdef __LITTLE_ENDIAN__
62312 __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
62313 uint16x8_t __ret;
62314 __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
62315 return __ret;
62316 }
62317 #else
62318 __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
62319 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
62320 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
62321 uint16x8_t __ret;
62322 __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
62323 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
62324 return __ret;
62325 }
62326 #endif
62327
62328 #ifdef __LITTLE_ENDIAN__
62329 __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
62330 uint32x4_t __ret;
62331 __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
62332 return __ret;
62333 }
62334 #else
62335 __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
62336 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
62337 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
62338 uint32x4_t __ret;
62339 __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
62340 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
62341 return __ret;
62342 }
62343 #endif
62344
62345 #ifdef __LITTLE_ENDIAN__
62346 __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
62347 uint8x16_t __ret;
62348 __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
62349 return __ret;
62350 }
62351 #else
62352 __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
62353 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
62354 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
62355 uint8x16_t __ret;
62356 __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
62357 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
62358 return __ret;
62359 }
62360 #endif
62361
62362 #ifdef __LITTLE_ENDIAN__
62363 __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
62364 int16x8_t __ret;
62365 __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
62366 return __ret;
62367 }
62368 #else
62369 __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
62370 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
62371 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
62372 int16x8_t __ret;
62373 __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
62374 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
62375 return __ret;
62376 }
62377 #endif
62378
62379 #ifdef __LITTLE_ENDIAN__
62380 __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
62381 int32x4_t __ret;
62382 __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
62383 return __ret;
62384 }
62385 #else
62386 __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
62387 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
62388 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
62389 int32x4_t __ret;
62390 __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
62391 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
62392 return __ret;
62393 }
62394 #endif
62395
62396 #ifdef __LITTLE_ENDIAN__
62397 __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
62398 int8x16_t __ret;
62399 __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
62400 return __ret;
62401 }
62402 #else
62403 __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
62404 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
62405 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
62406 int8x16_t __ret;
62407 __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
62408 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
62409 return __ret;
62410 }
62411 #endif
62412
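/*
 * Scalar saturating extract-unsigned-narrow (SQXTUN): vqmovuns_s32,
 * vqmovund_s64 and vqmovunh_s16 clamp a signed input to the unsigned range
 * of the next narrower type (an int32_t is clamped to 0..65535 before being
 * returned as a 16-bit value), so negative inputs saturate to 0.
 */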
62413 #ifdef __LITTLE_ENDIAN__
62414 __ai int16_t vqmovuns_s32(int32_t __p0) {
62415 int16_t __ret;
62416 __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
62417 return __ret;
62418 }
62419 #else
62420 __ai int16_t vqmovuns_s32(int32_t __p0) {
62421 int16_t __ret;
62422 __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
62423 return __ret;
62424 }
62425 #endif
62426
62427 #ifdef __LITTLE_ENDIAN__
62428 __ai int32_t vqmovund_s64(int64_t __p0) {
62429 int32_t __ret;
62430 __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
62431 return __ret;
62432 }
62433 #else
62434 __ai int32_t vqmovund_s64(int64_t __p0) {
62435 int32_t __ret;
62436 __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
62437 return __ret;
62438 }
62439 #endif
62440
62441 #ifdef __LITTLE_ENDIAN__
62442 __ai int8_t vqmovunh_s16(int16_t __p0) {
62443 int8_t __ret;
62444 __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
62445 return __ret;
62446 }
62447 #else
62448 __ai int8_t vqmovunh_s16(int16_t __p0) {
62449 int8_t __ret;
62450 __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
62451 return __ret;
62452 }
62453 #endif
62454
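/*
 * vqmovun_high_s32/s64/s16 are the "second part" vector forms (SQXTUN2):
 * the already-narrow low half __p0 is kept as-is and the wide signed input
 * __p1 is saturated into the unsigned narrower type to fill the high half.
 *
 * Illustrative sketch (hypothetical variable names):
 *   uint16x4_t lo  = vqmovun_s32(acc_lo);           // narrow first 4 lanes
 *   uint16x8_t all = vqmovun_high_s32(lo, acc_hi);  // append next 4 lanes
 */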
62455 #ifdef __LITTLE_ENDIAN__
62456 __ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
62457 uint16x8_t __ret;
62458 __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
62459 return __ret;
62460 }
62461 #else
62462 __ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
62463 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
62464 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
62465 uint16x8_t __ret;
62466 __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
62467 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
62468 return __ret;
62469 }
62470 #endif
62471
62472 #ifdef __LITTLE_ENDIAN__
62473 __ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
62474 uint32x4_t __ret;
62475 __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
62476 return __ret;
62477 }
62478 #else
62479 __ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
62480 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
62481 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
62482 uint32x4_t __ret;
62483 __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
62484 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
62485 return __ret;
62486 }
62487 #endif
62488
62489 #ifdef __LITTLE_ENDIAN__
62490 __ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
62491 uint8x16_t __ret;
62492 __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
62493 return __ret;
62494 }
62495 #else
62496 __ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
62497 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
62498 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
62499 uint8x16_t __ret;
62500 __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
62501 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
62502 return __ret;
62503 }
62504 #endif
62505
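/*
 * Saturating negate (SQNEG) for the 64-bit vector forms and the scalar
 * lanes: unlike plain negation, the most negative value saturates to the
 * most positive one (vqnegd_s64(INT64_MIN) yields INT64_MAX) instead of
 * wrapping back to itself.
 */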
62506 #ifdef __LITTLE_ENDIAN__
62507 __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
62508 int64x2_t __ret;
62509 __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
62510 return __ret;
62511 }
62512 #else
62513 __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
62514 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
62515 int64x2_t __ret;
62516 __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
62517 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
62518 return __ret;
62519 }
62520 #endif
62521
62522 #ifdef __LITTLE_ENDIAN__
62523 __ai int64x1_t vqneg_s64(int64x1_t __p0) {
62524 int64x1_t __ret;
62525 __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
62526 return __ret;
62527 }
62528 #else
62529 __ai int64x1_t vqneg_s64(int64x1_t __p0) {
62530 int64x1_t __ret;
62531 __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
62532 return __ret;
62533 }
62534 #endif
62535
62536 #ifdef __LITTLE_ENDIAN__
62537 __ai int8_t vqnegb_s8(int8_t __p0) {
62538 int8_t __ret;
62539 __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
62540 return __ret;
62541 }
62542 #else
62543 __ai int8_t vqnegb_s8(int8_t __p0) {
62544 int8_t __ret;
62545 __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
62546 return __ret;
62547 }
62548 #endif
62549
62550 #ifdef __LITTLE_ENDIAN__
62551 __ai int32_t vqnegs_s32(int32_t __p0) {
62552 int32_t __ret;
62553 __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
62554 return __ret;
62555 }
62556 #else
62557 __ai int32_t vqnegs_s32(int32_t __p0) {
62558 int32_t __ret;
62559 __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
62560 return __ret;
62561 }
62562 #endif
62563
62564 #ifdef __LITTLE_ENDIAN__
62565 __ai int64_t vqnegd_s64(int64_t __p0) {
62566 int64_t __ret;
62567 __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
62568 return __ret;
62569 }
62570 #else
62571 __ai int64_t vqnegd_s64(int64_t __p0) {
62572 int64_t __ret;
62573 __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
62574 return __ret;
62575 }
62576 #endif
62577
62578 #ifdef __LITTLE_ENDIAN__
62579 __ai int16_t vqnegh_s16(int16_t __p0) {
62580 int16_t __ret;
62581 __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
62582 return __ret;
62583 }
62584 #else
62585 __ai int16_t vqnegh_s16(int16_t __p0) {
62586 int16_t __ret;
62587 __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
62588 return __ret;
62589 }
62590 #endif
62591
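/*
 * Scalar saturating rounding doubling multiply-high (SQRDMULH) and its
 * lane/laneq selectors. For 32-bit operands the result is roughly
 * saturate((2*a*b + (1 << 30)) >> 31); the _lane_/_laneq_ macros below just
 * pick one element of the second operand with vget(q)_lane before doing the
 * same scalar operation.
 */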
62592 #ifdef __LITTLE_ENDIAN__
62593 __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
62594 int32_t __ret;
62595 __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
62596 return __ret;
62597 }
62598 #else
62599 __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
62600 int32_t __ret;
62601 __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
62602 return __ret;
62603 }
62604 __ai int32_t __noswap_vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
62605 int32_t __ret;
62606 __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
62607 return __ret;
62608 }
62609 #endif
62610
62611 #ifdef __LITTLE_ENDIAN__
62612 __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
62613 int16_t __ret;
62614 __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
62615 return __ret;
62616 }
62617 #else
62618 __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
62619 int16_t __ret;
62620 __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
62621 return __ret;
62622 }
62623 __ai int16_t __noswap_vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
62624 int16_t __ret;
62625 __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
62626 return __ret;
62627 }
62628 #endif
62629
62630 #ifdef __LITTLE_ENDIAN__
62631 #define vqrdmulhs_lane_s32(__p0_178, __p1_178, __p2_178) __extension__ ({ \
62632 int32_t __s0_178 = __p0_178; \
62633 int32x2_t __s1_178 = __p1_178; \
62634 int32_t __ret_178; \
62635 __ret_178 = vqrdmulhs_s32(__s0_178, vget_lane_s32(__s1_178, __p2_178)); \
62636 __ret_178; \
62637 })
62638 #else
62639 #define vqrdmulhs_lane_s32(__p0_179, __p1_179, __p2_179) __extension__ ({ \
62640 int32_t __s0_179 = __p0_179; \
62641 int32x2_t __s1_179 = __p1_179; \
62642 int32x2_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \
62643 int32_t __ret_179; \
62644 __ret_179 = __noswap_vqrdmulhs_s32(__s0_179, __noswap_vget_lane_s32(__rev1_179, __p2_179)); \
62645 __ret_179; \
62646 })
62647 #endif
62648
62649 #ifdef __LITTLE_ENDIAN__
62650 #define vqrdmulhh_lane_s16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
62651 int16_t __s0_180 = __p0_180; \
62652 int16x4_t __s1_180 = __p1_180; \
62653 int16_t __ret_180; \
62654 __ret_180 = vqrdmulhh_s16(__s0_180, vget_lane_s16(__s1_180, __p2_180)); \
62655 __ret_180; \
62656 })
62657 #else
62658 #define vqrdmulhh_lane_s16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
62659 int16_t __s0_181 = __p0_181; \
62660 int16x4_t __s1_181 = __p1_181; \
62661 int16x4_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 3, 2, 1, 0); \
62662 int16_t __ret_181; \
62663 __ret_181 = __noswap_vqrdmulhh_s16(__s0_181, __noswap_vget_lane_s16(__rev1_181, __p2_181)); \
62664 __ret_181; \
62665 })
62666 #endif
62667
62668 #ifdef __LITTLE_ENDIAN__
62669 #define vqrdmulhs_laneq_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
62670 int32_t __s0_182 = __p0_182; \
62671 int32x4_t __s1_182 = __p1_182; \
62672 int32_t __ret_182; \
62673 __ret_182 = vqrdmulhs_s32(__s0_182, vgetq_lane_s32(__s1_182, __p2_182)); \
62674 __ret_182; \
62675 })
62676 #else
62677 #define vqrdmulhs_laneq_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
62678 int32_t __s0_183 = __p0_183; \
62679 int32x4_t __s1_183 = __p1_183; \
62680 int32x4_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
62681 int32_t __ret_183; \
62682 __ret_183 = __noswap_vqrdmulhs_s32(__s0_183, __noswap_vgetq_lane_s32(__rev1_183, __p2_183)); \
62683 __ret_183; \
62684 })
62685 #endif
62686
62687 #ifdef __LITTLE_ENDIAN__
62688 #define vqrdmulhh_laneq_s16(__p0_184, __p1_184, __p2_184) __extension__ ({ \
62689 int16_t __s0_184 = __p0_184; \
62690 int16x8_t __s1_184 = __p1_184; \
62691 int16_t __ret_184; \
62692 __ret_184 = vqrdmulhh_s16(__s0_184, vgetq_lane_s16(__s1_184, __p2_184)); \
62693 __ret_184; \
62694 })
62695 #else
62696 #define vqrdmulhh_laneq_s16(__p0_185, __p1_185, __p2_185) __extension__ ({ \
62697 int16_t __s0_185 = __p0_185; \
62698 int16x8_t __s1_185 = __p1_185; \
62699 int16x8_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 7, 6, 5, 4, 3, 2, 1, 0); \
62700 int16_t __ret_185; \
62701 __ret_185 = __noswap_vqrdmulhh_s16(__s0_185, __noswap_vgetq_lane_s16(__rev1_185, __p2_185)); \
62702 __ret_185; \
62703 })
62704 #endif
62705
62706 #ifdef __LITTLE_ENDIAN__
62707 #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62708 int32x4_t __s0 = __p0; \
62709 int32x4_t __s1 = __p1; \
62710 int32x4_t __ret; \
62711 __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
62712 __ret; \
62713 })
62714 #else
62715 #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62716 int32x4_t __s0 = __p0; \
62717 int32x4_t __s1 = __p1; \
62718 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
62719 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
62720 int32x4_t __ret; \
62721 __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
62722 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
62723 __ret; \
62724 })
62725 #endif
62726
62727 #ifdef __LITTLE_ENDIAN__
62728 #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62729 int16x8_t __s0 = __p0; \
62730 int16x8_t __s1 = __p1; \
62731 int16x8_t __ret; \
62732 __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
62733 __ret; \
62734 })
62735 #else
62736 #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62737 int16x8_t __s0 = __p0; \
62738 int16x8_t __s1 = __p1; \
62739 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
62740 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
62741 int16x8_t __ret; \
62742 __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
62743 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
62744 __ret; \
62745 })
62746 #endif
62747
62748 #ifdef __LITTLE_ENDIAN__
62749 #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62750 int32x2_t __s0 = __p0; \
62751 int32x4_t __s1 = __p1; \
62752 int32x2_t __ret; \
62753 __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
62754 __ret; \
62755 })
62756 #else
62757 #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
62758 int32x2_t __s0 = __p0; \
62759 int32x4_t __s1 = __p1; \
62760 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
62761 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
62762 int32x2_t __ret; \
62763 __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
62764 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
62765 __ret; \
62766 })
62767 #endif
62768
62769 #ifdef __LITTLE_ENDIAN__
62770 #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62771 int16x4_t __s0 = __p0; \
62772 int16x8_t __s1 = __p1; \
62773 int16x4_t __ret; \
62774 __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
62775 __ret; \
62776 })
62777 #else
62778 #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
62779 int16x4_t __s0 = __p0; \
62780 int16x8_t __s1 = __p1; \
62781 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
62782 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
62783 int16x4_t __ret; \
62784 __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
62785 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
62786 __ret; \
62787 })
62788 #endif
62789
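/*
 * Scalar saturating rounding shift left (SQRSHL/UQRSHL): the underlying
 * instruction treats the shift count in the second operand as signed, so a
 * negative count performs a rounding shift right, and results that overflow
 * the element type saturate.
 */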
62790 #ifdef __LITTLE_ENDIAN__
62791 __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
62792 uint8_t __ret;
62793 __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
62794 return __ret;
62795 }
62796 #else
62797 __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
62798 uint8_t __ret;
62799 __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
62800 return __ret;
62801 }
62802 #endif
62803
62804 #ifdef __LITTLE_ENDIAN__
62805 __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
62806 uint32_t __ret;
62807 __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
62808 return __ret;
62809 }
62810 #else
62811 __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
62812 uint32_t __ret;
62813 __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
62814 return __ret;
62815 }
62816 #endif
62817
62818 #ifdef __LITTLE_ENDIAN__
62819 __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
62820 uint64_t __ret;
62821 __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
62822 return __ret;
62823 }
62824 #else
62825 __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
62826 uint64_t __ret;
62827 __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
62828 return __ret;
62829 }
62830 #endif
62831
62832 #ifdef __LITTLE_ENDIAN__
62833 __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
62834 uint16_t __ret;
62835 __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
62836 return __ret;
62837 }
62838 #else
62839 __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
62840 uint16_t __ret;
62841 __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
62842 return __ret;
62843 }
62844 #endif
62845
62846 #ifdef __LITTLE_ENDIAN__
62847 __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
62848 int8_t __ret;
62849 __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
62850 return __ret;
62851 }
62852 #else
62853 __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
62854 int8_t __ret;
62855 __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
62856 return __ret;
62857 }
62858 #endif
62859
62860 #ifdef __LITTLE_ENDIAN__
62861 __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
62862 int32_t __ret;
62863 __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
62864 return __ret;
62865 }
62866 #else
62867 __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
62868 int32_t __ret;
62869 __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
62870 return __ret;
62871 }
62872 #endif
62873
62874 #ifdef __LITTLE_ENDIAN__
62875 __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
62876 int64_t __ret;
62877 __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
62878 return __ret;
62879 }
62880 #else
62881 __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
62882 int64_t __ret;
62883 __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
62884 return __ret;
62885 }
62886 #endif
62887
62888 #ifdef __LITTLE_ENDIAN__
62889 __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
62890 int16_t __ret;
62891 __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
62892 return __ret;
62893 }
62894 #else
62895 __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
62896 int16_t __ret;
62897 __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
62898 return __ret;
62899 }
62900 #endif
62901
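/*
 * vqrshrn_high_n_* (SQRSHRN2/UQRSHRN2): each wide element of __p1 is shifted
 * right by the immediate __p2 with rounding, saturated to the narrower type,
 * and packed into the high half of the result, while the already-narrow
 * __p0 supplies the low half.
 *
 * Illustrative sketch (hypothetical variable names):
 *   uint16x4_t lo  = vqrshrn_n_u32(sum_lo, 8);
 *   uint16x8_t out = vqrshrn_high_n_u32(lo, sum_hi, 8);
 */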
62902 #ifdef __LITTLE_ENDIAN__
62903 #define vqrshrn_high_n_u32(__p0_186, __p1_186, __p2_186) __extension__ ({ \
62904 uint16x4_t __s0_186 = __p0_186; \
62905 uint32x4_t __s1_186 = __p1_186; \
62906 uint16x8_t __ret_186; \
62907 __ret_186 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_186), (uint16x4_t)(vqrshrn_n_u32(__s1_186, __p2_186)))); \
62908 __ret_186; \
62909 })
62910 #else
62911 #define vqrshrn_high_n_u32(__p0_187, __p1_187, __p2_187) __extension__ ({ \
62912 uint16x4_t __s0_187 = __p0_187; \
62913 uint32x4_t __s1_187 = __p1_187; \
62914 uint16x4_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 3, 2, 1, 0); \
62915 uint32x4_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 3, 2, 1, 0); \
62916 uint16x8_t __ret_187; \
62917 __ret_187 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_187), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_187, __p2_187)))); \
62918 __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 7, 6, 5, 4, 3, 2, 1, 0); \
62919 __ret_187; \
62920 })
62921 #endif
62922
62923 #ifdef __LITTLE_ENDIAN__
62924 #define vqrshrn_high_n_u64(__p0_188, __p1_188, __p2_188) __extension__ ({ \
62925 uint32x2_t __s0_188 = __p0_188; \
62926 uint64x2_t __s1_188 = __p1_188; \
62927 uint32x4_t __ret_188; \
62928 __ret_188 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_188), (uint32x2_t)(vqrshrn_n_u64(__s1_188, __p2_188)))); \
62929 __ret_188; \
62930 })
62931 #else
62932 #define vqrshrn_high_n_u64(__p0_189, __p1_189, __p2_189) __extension__ ({ \
62933 uint32x2_t __s0_189 = __p0_189; \
62934 uint64x2_t __s1_189 = __p1_189; \
62935 uint32x2_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 1, 0); \
62936 uint64x2_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 1, 0); \
62937 uint32x4_t __ret_189; \
62938 __ret_189 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_189), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_189, __p2_189)))); \
62939 __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 3, 2, 1, 0); \
62940 __ret_189; \
62941 })
62942 #endif
62943
62944 #ifdef __LITTLE_ENDIAN__
62945 #define vqrshrn_high_n_u16(__p0_190, __p1_190, __p2_190) __extension__ ({ \
62946 uint8x8_t __s0_190 = __p0_190; \
62947 uint16x8_t __s1_190 = __p1_190; \
62948 uint8x16_t __ret_190; \
62949 __ret_190 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_190), (uint8x8_t)(vqrshrn_n_u16(__s1_190, __p2_190)))); \
62950 __ret_190; \
62951 })
62952 #else
62953 #define vqrshrn_high_n_u16(__p0_191, __p1_191, __p2_191) __extension__ ({ \
62954 uint8x8_t __s0_191 = __p0_191; \
62955 uint16x8_t __s1_191 = __p1_191; \
62956 uint8x8_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 7, 6, 5, 4, 3, 2, 1, 0); \
62957 uint16x8_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 7, 6, 5, 4, 3, 2, 1, 0); \
62958 uint8x16_t __ret_191; \
62959 __ret_191 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_191), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_191, __p2_191)))); \
62960 __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
62961 __ret_191; \
62962 })
62963 #endif
62964
62965 #ifdef __LITTLE_ENDIAN__
62966 #define vqrshrn_high_n_s32(__p0_192, __p1_192, __p2_192) __extension__ ({ \
62967 int16x4_t __s0_192 = __p0_192; \
62968 int32x4_t __s1_192 = __p1_192; \
62969 int16x8_t __ret_192; \
62970 __ret_192 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_192), (int16x4_t)(vqrshrn_n_s32(__s1_192, __p2_192)))); \
62971 __ret_192; \
62972 })
62973 #else
62974 #define vqrshrn_high_n_s32(__p0_193, __p1_193, __p2_193) __extension__ ({ \
62975 int16x4_t __s0_193 = __p0_193; \
62976 int32x4_t __s1_193 = __p1_193; \
62977 int16x4_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 3, 2, 1, 0); \
62978 int32x4_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 3, 2, 1, 0); \
62979 int16x8_t __ret_193; \
62980 __ret_193 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_193), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_193, __p2_193)))); \
62981 __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 7, 6, 5, 4, 3, 2, 1, 0); \
62982 __ret_193; \
62983 })
62984 #endif
62985
62986 #ifdef __LITTLE_ENDIAN__
62987 #define vqrshrn_high_n_s64(__p0_194, __p1_194, __p2_194) __extension__ ({ \
62988 int32x2_t __s0_194 = __p0_194; \
62989 int64x2_t __s1_194 = __p1_194; \
62990 int32x4_t __ret_194; \
62991 __ret_194 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_194), (int32x2_t)(vqrshrn_n_s64(__s1_194, __p2_194)))); \
62992 __ret_194; \
62993 })
62994 #else
62995 #define vqrshrn_high_n_s64(__p0_195, __p1_195, __p2_195) __extension__ ({ \
62996 int32x2_t __s0_195 = __p0_195; \
62997 int64x2_t __s1_195 = __p1_195; \
62998 int32x2_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 1, 0); \
62999 int64x2_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 1, 0); \
63000 int32x4_t __ret_195; \
63001 __ret_195 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_195), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_195, __p2_195)))); \
63002 __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 3, 2, 1, 0); \
63003 __ret_195; \
63004 })
63005 #endif
63006
63007 #ifdef __LITTLE_ENDIAN__
63008 #define vqrshrn_high_n_s16(__p0_196, __p1_196, __p2_196) __extension__ ({ \
63009 int8x8_t __s0_196 = __p0_196; \
63010 int16x8_t __s1_196 = __p1_196; \
63011 int8x16_t __ret_196; \
63012 __ret_196 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_196), (int8x8_t)(vqrshrn_n_s16(__s1_196, __p2_196)))); \
63013 __ret_196; \
63014 })
63015 #else
63016 #define vqrshrn_high_n_s16(__p0_197, __p1_197, __p2_197) __extension__ ({ \
63017 int8x8_t __s0_197 = __p0_197; \
63018 int16x8_t __s1_197 = __p1_197; \
63019 int8x8_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 7, 6, 5, 4, 3, 2, 1, 0); \
63020 int16x8_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 7, 6, 5, 4, 3, 2, 1, 0); \
63021 int8x16_t __ret_197; \
63022 __ret_197 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_197), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_197, __p2_197)))); \
63023 __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63024 __ret_197; \
63025 })
63026 #endif
63027
63028 #ifdef __LITTLE_ENDIAN__
63029 #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
63030 uint32_t __s0 = __p0; \
63031 uint16_t __ret; \
63032 __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
63033 __ret; \
63034 })
63035 #else
63036 #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
63037 uint32_t __s0 = __p0; \
63038 uint16_t __ret; \
63039 __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
63040 __ret; \
63041 })
63042 #endif
63043
63044 #ifdef __LITTLE_ENDIAN__
63045 #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
63046 uint64_t __s0 = __p0; \
63047 uint32_t __ret; \
63048 __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
63049 __ret; \
63050 })
63051 #else
63052 #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
63053 uint64_t __s0 = __p0; \
63054 uint32_t __ret; \
63055 __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
63056 __ret; \
63057 })
63058 #endif
63059
63060 #ifdef __LITTLE_ENDIAN__
63061 #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
63062 uint16_t __s0 = __p0; \
63063 uint8_t __ret; \
63064 __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
63065 __ret; \
63066 })
63067 #else
63068 #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
63069 uint16_t __s0 = __p0; \
63070 uint8_t __ret; \
63071 __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
63072 __ret; \
63073 })
63074 #endif
63075
63076 #ifdef __LITTLE_ENDIAN__
63077 #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
63078 int32_t __s0 = __p0; \
63079 int16_t __ret; \
63080 __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
63081 __ret; \
63082 })
63083 #else
63084 #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
63085 int32_t __s0 = __p0; \
63086 int16_t __ret; \
63087 __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
63088 __ret; \
63089 })
63090 #endif
63091
63092 #ifdef __LITTLE_ENDIAN__
63093 #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
63094 int64_t __s0 = __p0; \
63095 int32_t __ret; \
63096 __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
63097 __ret; \
63098 })
63099 #else
63100 #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
63101 int64_t __s0 = __p0; \
63102 int32_t __ret; \
63103 __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
63104 __ret; \
63105 })
63106 #endif
63107
63108 #ifdef __LITTLE_ENDIAN__
63109 #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
63110 int16_t __s0 = __p0; \
63111 int8_t __ret; \
63112 __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
63113 __ret; \
63114 })
63115 #else
63116 #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
63117 int16_t __s0 = __p0; \
63118 int8_t __ret; \
63119 __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
63120 __ret; \
63121 })
63122 #endif
63123
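/*
 * vqrshrun_high_n_* (SQRSHRUN2): like vqrshrn_high_n_* but with signed
 * inputs and an unsigned saturated result, so negative values clamp to 0.
 * The generated wrappers keep the signed vector types and cast at the
 * vcombine boundary.
 */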
63124 #ifdef __LITTLE_ENDIAN__
63125 #define vqrshrun_high_n_s32(__p0_198, __p1_198, __p2_198) __extension__ ({ \
63126 int16x4_t __s0_198 = __p0_198; \
63127 int32x4_t __s1_198 = __p1_198; \
63128 int16x8_t __ret_198; \
63129 __ret_198 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_198), (int16x4_t)(vqrshrun_n_s32(__s1_198, __p2_198)))); \
63130 __ret_198; \
63131 })
63132 #else
63133 #define vqrshrun_high_n_s32(__p0_199, __p1_199, __p2_199) __extension__ ({ \
63134 int16x4_t __s0_199 = __p0_199; \
63135 int32x4_t __s1_199 = __p1_199; \
63136 int16x4_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 3, 2, 1, 0); \
63137 int32x4_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 3, 2, 1, 0); \
63138 int16x8_t __ret_199; \
63139 __ret_199 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_199), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_199, __p2_199)))); \
63140 __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 7, 6, 5, 4, 3, 2, 1, 0); \
63141 __ret_199; \
63142 })
63143 #endif
63144
63145 #ifdef __LITTLE_ENDIAN__
63146 #define vqrshrun_high_n_s64(__p0_200, __p1_200, __p2_200) __extension__ ({ \
63147 int32x2_t __s0_200 = __p0_200; \
63148 int64x2_t __s1_200 = __p1_200; \
63149 int32x4_t __ret_200; \
63150 __ret_200 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_200), (int32x2_t)(vqrshrun_n_s64(__s1_200, __p2_200)))); \
63151 __ret_200; \
63152 })
63153 #else
63154 #define vqrshrun_high_n_s64(__p0_201, __p1_201, __p2_201) __extension__ ({ \
63155 int32x2_t __s0_201 = __p0_201; \
63156 int64x2_t __s1_201 = __p1_201; \
63157 int32x2_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 1, 0); \
63158 int64x2_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 1, 0); \
63159 int32x4_t __ret_201; \
63160 __ret_201 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_201), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_201, __p2_201)))); \
63161 __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 3, 2, 1, 0); \
63162 __ret_201; \
63163 })
63164 #endif
63165
63166 #ifdef __LITTLE_ENDIAN__
63167 #define vqrshrun_high_n_s16(__p0_202, __p1_202, __p2_202) __extension__ ({ \
63168 int8x8_t __s0_202 = __p0_202; \
63169 int16x8_t __s1_202 = __p1_202; \
63170 int8x16_t __ret_202; \
63171 __ret_202 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_202), (int8x8_t)(vqrshrun_n_s16(__s1_202, __p2_202)))); \
63172 __ret_202; \
63173 })
63174 #else
63175 #define vqrshrun_high_n_s16(__p0_203, __p1_203, __p2_203) __extension__ ({ \
63176 int8x8_t __s0_203 = __p0_203; \
63177 int16x8_t __s1_203 = __p1_203; \
63178 int8x8_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 7, 6, 5, 4, 3, 2, 1, 0); \
63179 int16x8_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 7, 6, 5, 4, 3, 2, 1, 0); \
63180 int8x16_t __ret_203; \
63181 __ret_203 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_203), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_203, __p2_203)))); \
63182 __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63183 __ret_203; \
63184 })
63185 #endif
63186
63187 #ifdef __LITTLE_ENDIAN__
63188 #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
63189 int32_t __s0 = __p0; \
63190 int16_t __ret; \
63191 __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
63192 __ret; \
63193 })
63194 #else
63195 #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
63196 int32_t __s0 = __p0; \
63197 int16_t __ret; \
63198 __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
63199 __ret; \
63200 })
63201 #endif
63202
63203 #ifdef __LITTLE_ENDIAN__
63204 #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
63205 int64_t __s0 = __p0; \
63206 int32_t __ret; \
63207 __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
63208 __ret; \
63209 })
63210 #else
63211 #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
63212 int64_t __s0 = __p0; \
63213 int32_t __ret; \
63214 __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
63215 __ret; \
63216 })
63217 #endif
63218
63219 #ifdef __LITTLE_ENDIAN__
63220 #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
63221 int16_t __s0 = __p0; \
63222 int8_t __ret; \
63223 __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
63224 __ret; \
63225 })
63226 #else
63227 #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
63228 int16_t __s0 = __p0; \
63229 int8_t __ret; \
63230 __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
63231 __ret; \
63232 })
63233 #endif
63234
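/*
 * Scalar saturating shift left by register (SQSHL/UQSHL): same shape as the
 * vqrshl* helpers above, except that a negative (signed) count performs a
 * truncating rather than rounding shift right.
 */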
63235 #ifdef __LITTLE_ENDIAN__
63236 __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
63237 uint8_t __ret;
63238 __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
63239 return __ret;
63240 }
63241 #else
63242 __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
63243 uint8_t __ret;
63244 __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
63245 return __ret;
63246 }
63247 #endif
63248
63249 #ifdef __LITTLE_ENDIAN__
63250 __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
63251 uint32_t __ret;
63252 __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
63253 return __ret;
63254 }
63255 #else
63256 __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
63257 uint32_t __ret;
63258 __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
63259 return __ret;
63260 }
63261 #endif
63262
63263 #ifdef __LITTLE_ENDIAN__
63264 __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
63265 uint64_t __ret;
63266 __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
63267 return __ret;
63268 }
63269 #else
63270 __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
63271 uint64_t __ret;
63272 __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
63273 return __ret;
63274 }
63275 #endif
63276
63277 #ifdef __LITTLE_ENDIAN__
63278 __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
63279 uint16_t __ret;
63280 __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
63281 return __ret;
63282 }
63283 #else
63284 __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
63285 uint16_t __ret;
63286 __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
63287 return __ret;
63288 }
63289 #endif
63290
63291 #ifdef __LITTLE_ENDIAN__
63292 __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
63293 int8_t __ret;
63294 __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
63295 return __ret;
63296 }
63297 #else
63298 __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
63299 int8_t __ret;
63300 __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
63301 return __ret;
63302 }
63303 #endif
63304
63305 #ifdef __LITTLE_ENDIAN__
63306 __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
63307 int32_t __ret;
63308 __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
63309 return __ret;
63310 }
63311 #else
63312 __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
63313 int32_t __ret;
63314 __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
63315 return __ret;
63316 }
63317 #endif
63318
63319 #ifdef __LITTLE_ENDIAN__
63320 __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
63321 int64_t __ret;
63322 __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
63323 return __ret;
63324 }
63325 #else
63326 __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
63327 int64_t __ret;
63328 __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
63329 return __ret;
63330 }
63331 #endif
63332
63333 #ifdef __LITTLE_ENDIAN__
63334 __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
63335 int16_t __ret;
63336 __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
63337 return __ret;
63338 }
63339 #else
63340 __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
63341 int16_t __ret;
63342 __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
63343 return __ret;
63344 }
63345 #endif
63346
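/*
 * Scalar saturating shift left by immediate (vqshlb_n_u8 .. vqshlh_n_s16):
 * __p1 must be a compile-time constant, which is why these are macros that
 * expand to the corresponding __builtin_neon_* call rather than inline
 * functions.
 */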
63347 #ifdef __LITTLE_ENDIAN__
63348 #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
63349 uint8_t __s0 = __p0; \
63350 uint8_t __ret; \
63351 __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
63352 __ret; \
63353 })
63354 #else
63355 #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
63356 uint8_t __s0 = __p0; \
63357 uint8_t __ret; \
63358 __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
63359 __ret; \
63360 })
63361 #endif
63362
63363 #ifdef __LITTLE_ENDIAN__
63364 #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
63365 uint32_t __s0 = __p0; \
63366 uint32_t __ret; \
63367 __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
63368 __ret; \
63369 })
63370 #else
63371 #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
63372 uint32_t __s0 = __p0; \
63373 uint32_t __ret; \
63374 __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
63375 __ret; \
63376 })
63377 #endif
63378
63379 #ifdef __LITTLE_ENDIAN__
63380 #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
63381 uint64_t __s0 = __p0; \
63382 uint64_t __ret; \
63383 __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
63384 __ret; \
63385 })
63386 #else
63387 #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
63388 uint64_t __s0 = __p0; \
63389 uint64_t __ret; \
63390 __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
63391 __ret; \
63392 })
63393 #endif
63394
63395 #ifdef __LITTLE_ENDIAN__
63396 #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
63397 uint16_t __s0 = __p0; \
63398 uint16_t __ret; \
63399 __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
63400 __ret; \
63401 })
63402 #else
63403 #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
63404 uint16_t __s0 = __p0; \
63405 uint16_t __ret; \
63406 __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
63407 __ret; \
63408 })
63409 #endif
63410
63411 #ifdef __LITTLE_ENDIAN__
63412 #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
63413 int8_t __s0 = __p0; \
63414 int8_t __ret; \
63415 __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
63416 __ret; \
63417 })
63418 #else
63419 #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
63420 int8_t __s0 = __p0; \
63421 int8_t __ret; \
63422 __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
63423 __ret; \
63424 })
63425 #endif
63426
63427 #ifdef __LITTLE_ENDIAN__
63428 #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
63429 int32_t __s0 = __p0; \
63430 int32_t __ret; \
63431 __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
63432 __ret; \
63433 })
63434 #else
63435 #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
63436 int32_t __s0 = __p0; \
63437 int32_t __ret; \
63438 __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
63439 __ret; \
63440 })
63441 #endif
63442
63443 #ifdef __LITTLE_ENDIAN__
63444 #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
63445 int64_t __s0 = __p0; \
63446 int64_t __ret; \
63447 __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
63448 __ret; \
63449 })
63450 #else
63451 #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
63452 int64_t __s0 = __p0; \
63453 int64_t __ret; \
63454 __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
63455 __ret; \
63456 })
63457 #endif
63458
63459 #ifdef __LITTLE_ENDIAN__
63460 #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
63461 int16_t __s0 = __p0; \
63462 int16_t __ret; \
63463 __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
63464 __ret; \
63465 })
63466 #else
63467 #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
63468 int16_t __s0 = __p0; \
63469 int16_t __ret; \
63470 __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
63471 __ret; \
63472 })
63473 #endif
63474
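/*
 * vqshlub_n_s8 / vqshlus_n_s32 / vqshlud_n_s64 / vqshluh_n_s16 (SQSHLU):
 * a signed input shifted left by an immediate and saturated to the unsigned
 * range of the same width; negative inputs saturate to 0.
 */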
63475 #ifdef __LITTLE_ENDIAN__
63476 #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
63477 int8_t __s0 = __p0; \
63478 int8_t __ret; \
63479 __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
63480 __ret; \
63481 })
63482 #else
63483 #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
63484 int8_t __s0 = __p0; \
63485 int8_t __ret; \
63486 __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
63487 __ret; \
63488 })
63489 #endif
63490
63491 #ifdef __LITTLE_ENDIAN__
63492 #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
63493 int32_t __s0 = __p0; \
63494 int32_t __ret; \
63495 __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
63496 __ret; \
63497 })
63498 #else
63499 #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
63500 int32_t __s0 = __p0; \
63501 int32_t __ret; \
63502 __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
63503 __ret; \
63504 })
63505 #endif
63506
63507 #ifdef __LITTLE_ENDIAN__
63508 #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
63509 int64_t __s0 = __p0; \
63510 int64_t __ret; \
63511 __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
63512 __ret; \
63513 })
63514 #else
63515 #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
63516 int64_t __s0 = __p0; \
63517 int64_t __ret; \
63518 __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
63519 __ret; \
63520 })
63521 #endif
63522
63523 #ifdef __LITTLE_ENDIAN__
63524 #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
63525 int16_t __s0 = __p0; \
63526 int16_t __ret; \
63527 __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
63528 __ret; \
63529 })
63530 #else
63531 #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
63532 int16_t __s0 = __p0; \
63533 int16_t __ret; \
63534 __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
63535 __ret; \
63536 })
63537 #endif
63538
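/*
 * vqshrn_high_n_* (SQSHRN2/UQSHRN2): the truncating counterpart of the
 * vqrshrn_high_n_* macros above - the wide elements of __p1 are shifted
 * right by the immediate without rounding, saturated, and packed into the
 * high half on top of the existing low half __p0.
 */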
63539 #ifdef __LITTLE_ENDIAN__
63540 #define vqshrn_high_n_u32(__p0_204, __p1_204, __p2_204) __extension__ ({ \
63541 uint16x4_t __s0_204 = __p0_204; \
63542 uint32x4_t __s1_204 = __p1_204; \
63543 uint16x8_t __ret_204; \
63544 __ret_204 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_204), (uint16x4_t)(vqshrn_n_u32(__s1_204, __p2_204)))); \
63545 __ret_204; \
63546 })
63547 #else
63548 #define vqshrn_high_n_u32(__p0_205, __p1_205, __p2_205) __extension__ ({ \
63549 uint16x4_t __s0_205 = __p0_205; \
63550 uint32x4_t __s1_205 = __p1_205; \
63551 uint16x4_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 3, 2, 1, 0); \
63552 uint32x4_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 3, 2, 1, 0); \
63553 uint16x8_t __ret_205; \
63554 __ret_205 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_205), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_205, __p2_205)))); \
63555 __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 7, 6, 5, 4, 3, 2, 1, 0); \
63556 __ret_205; \
63557 })
63558 #endif
63559
63560 #ifdef __LITTLE_ENDIAN__
63561 #define vqshrn_high_n_u64(__p0_206, __p1_206, __p2_206) __extension__ ({ \
63562 uint32x2_t __s0_206 = __p0_206; \
63563 uint64x2_t __s1_206 = __p1_206; \
63564 uint32x4_t __ret_206; \
63565 __ret_206 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_206), (uint32x2_t)(vqshrn_n_u64(__s1_206, __p2_206)))); \
63566 __ret_206; \
63567 })
63568 #else
63569 #define vqshrn_high_n_u64(__p0_207, __p1_207, __p2_207) __extension__ ({ \
63570 uint32x2_t __s0_207 = __p0_207; \
63571 uint64x2_t __s1_207 = __p1_207; \
63572 uint32x2_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 1, 0); \
63573 uint64x2_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 1, 0); \
63574 uint32x4_t __ret_207; \
63575 __ret_207 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_207), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_207, __p2_207)))); \
63576 __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 3, 2, 1, 0); \
63577 __ret_207; \
63578 })
63579 #endif
63580
63581 #ifdef __LITTLE_ENDIAN__
63582 #define vqshrn_high_n_u16(__p0_208, __p1_208, __p2_208) __extension__ ({ \
63583 uint8x8_t __s0_208 = __p0_208; \
63584 uint16x8_t __s1_208 = __p1_208; \
63585 uint8x16_t __ret_208; \
63586 __ret_208 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_208), (uint8x8_t)(vqshrn_n_u16(__s1_208, __p2_208)))); \
63587 __ret_208; \
63588 })
63589 #else
63590 #define vqshrn_high_n_u16(__p0_209, __p1_209, __p2_209) __extension__ ({ \
63591 uint8x8_t __s0_209 = __p0_209; \
63592 uint16x8_t __s1_209 = __p1_209; \
63593 uint8x8_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 7, 6, 5, 4, 3, 2, 1, 0); \
63594 uint16x8_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 7, 6, 5, 4, 3, 2, 1, 0); \
63595 uint8x16_t __ret_209; \
63596 __ret_209 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_209), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_209, __p2_209)))); \
63597 __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63598 __ret_209; \
63599 })
63600 #endif
63601
63602 #ifdef __LITTLE_ENDIAN__
63603 #define vqshrn_high_n_s32(__p0_210, __p1_210, __p2_210) __extension__ ({ \
63604 int16x4_t __s0_210 = __p0_210; \
63605 int32x4_t __s1_210 = __p1_210; \
63606 int16x8_t __ret_210; \
63607 __ret_210 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_210), (int16x4_t)(vqshrn_n_s32(__s1_210, __p2_210)))); \
63608 __ret_210; \
63609 })
63610 #else
63611 #define vqshrn_high_n_s32(__p0_211, __p1_211, __p2_211) __extension__ ({ \
63612 int16x4_t __s0_211 = __p0_211; \
63613 int32x4_t __s1_211 = __p1_211; \
63614 int16x4_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 3, 2, 1, 0); \
63615 int32x4_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 3, 2, 1, 0); \
63616 int16x8_t __ret_211; \
63617 __ret_211 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_211), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_211, __p2_211)))); \
63618 __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 7, 6, 5, 4, 3, 2, 1, 0); \
63619 __ret_211; \
63620 })
63621 #endif
63622
63623 #ifdef __LITTLE_ENDIAN__
63624 #define vqshrn_high_n_s64(__p0_212, __p1_212, __p2_212) __extension__ ({ \
63625 int32x2_t __s0_212 = __p0_212; \
63626 int64x2_t __s1_212 = __p1_212; \
63627 int32x4_t __ret_212; \
63628 __ret_212 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_212), (int32x2_t)(vqshrn_n_s64(__s1_212, __p2_212)))); \
63629 __ret_212; \
63630 })
63631 #else
63632 #define vqshrn_high_n_s64(__p0_213, __p1_213, __p2_213) __extension__ ({ \
63633 int32x2_t __s0_213 = __p0_213; \
63634 int64x2_t __s1_213 = __p1_213; \
63635 int32x2_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 1, 0); \
63636 int64x2_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 1, 0); \
63637 int32x4_t __ret_213; \
63638 __ret_213 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_213), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_213, __p2_213)))); \
63639 __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 3, 2, 1, 0); \
63640 __ret_213; \
63641 })
63642 #endif
63643
63644 #ifdef __LITTLE_ENDIAN__
63645 #define vqshrn_high_n_s16(__p0_214, __p1_214, __p2_214) __extension__ ({ \
63646 int8x8_t __s0_214 = __p0_214; \
63647 int16x8_t __s1_214 = __p1_214; \
63648 int8x16_t __ret_214; \
63649 __ret_214 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_214), (int8x8_t)(vqshrn_n_s16(__s1_214, __p2_214)))); \
63650 __ret_214; \
63651 })
63652 #else
63653 #define vqshrn_high_n_s16(__p0_215, __p1_215, __p2_215) __extension__ ({ \
63654 int8x8_t __s0_215 = __p0_215; \
63655 int16x8_t __s1_215 = __p1_215; \
63656 int8x8_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 7, 6, 5, 4, 3, 2, 1, 0); \
63657 int16x8_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 7, 6, 5, 4, 3, 2, 1, 0); \
63658 int8x16_t __ret_215; \
63659 __ret_215 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_215), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_215, __p2_215)))); \
63660 __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63661 __ret_215; \
63662 })
63663 #endif
63664
63665 #ifdef __LITTLE_ENDIAN__
63666 #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
63667 uint32_t __s0 = __p0; \
63668 uint16_t __ret; \
63669 __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
63670 __ret; \
63671 })
63672 #else
63673 #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
63674 uint32_t __s0 = __p0; \
63675 uint16_t __ret; \
63676 __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
63677 __ret; \
63678 })
63679 #endif
63680
63681 #ifdef __LITTLE_ENDIAN__
63682 #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
63683 uint64_t __s0 = __p0; \
63684 uint32_t __ret; \
63685 __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
63686 __ret; \
63687 })
63688 #else
63689 #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
63690 uint64_t __s0 = __p0; \
63691 uint32_t __ret; \
63692 __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
63693 __ret; \
63694 })
63695 #endif
63696
63697 #ifdef __LITTLE_ENDIAN__
63698 #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
63699 uint16_t __s0 = __p0; \
63700 uint8_t __ret; \
63701 __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
63702 __ret; \
63703 })
63704 #else
63705 #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
63706 uint16_t __s0 = __p0; \
63707 uint8_t __ret; \
63708 __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
63709 __ret; \
63710 })
63711 #endif
63712
63713 #ifdef __LITTLE_ENDIAN__
63714 #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
63715 int32_t __s0 = __p0; \
63716 int16_t __ret; \
63717 __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
63718 __ret; \
63719 })
63720 #else
63721 #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
63722 int32_t __s0 = __p0; \
63723 int16_t __ret; \
63724 __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
63725 __ret; \
63726 })
63727 #endif
63728
63729 #ifdef __LITTLE_ENDIAN__
63730 #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
63731 int64_t __s0 = __p0; \
63732 int32_t __ret; \
63733 __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
63734 __ret; \
63735 })
63736 #else
63737 #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
63738 int64_t __s0 = __p0; \
63739 int32_t __ret; \
63740 __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
63741 __ret; \
63742 })
63743 #endif
63744
63745 #ifdef __LITTLE_ENDIAN__
63746 #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
63747 int16_t __s0 = __p0; \
63748 int8_t __ret; \
63749 __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
63750 __ret; \
63751 })
63752 #else
63753 #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
63754 int16_t __s0 = __p0; \
63755 int8_t __ret; \
63756 __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
63757 __ret; \
63758 })
63759 #endif
63760
63761 #ifdef __LITTLE_ENDIAN__
63762 #define vqshrun_high_n_s32(__p0_216, __p1_216, __p2_216) __extension__ ({ \
63763 int16x4_t __s0_216 = __p0_216; \
63764 int32x4_t __s1_216 = __p1_216; \
63765 int16x8_t __ret_216; \
63766 __ret_216 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_216), (int16x4_t)(vqshrun_n_s32(__s1_216, __p2_216)))); \
63767 __ret_216; \
63768 })
63769 #else
63770 #define vqshrun_high_n_s32(__p0_217, __p1_217, __p2_217) __extension__ ({ \
63771 int16x4_t __s0_217 = __p0_217; \
63772 int32x4_t __s1_217 = __p1_217; \
63773 int16x4_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 3, 2, 1, 0); \
63774 int32x4_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 3, 2, 1, 0); \
63775 int16x8_t __ret_217; \
63776 __ret_217 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_217), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_217, __p2_217)))); \
63777 __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 7, 6, 5, 4, 3, 2, 1, 0); \
63778 __ret_217; \
63779 })
63780 #endif
63781
63782 #ifdef __LITTLE_ENDIAN__
63783 #define vqshrun_high_n_s64(__p0_218, __p1_218, __p2_218) __extension__ ({ \
63784 int32x2_t __s0_218 = __p0_218; \
63785 int64x2_t __s1_218 = __p1_218; \
63786 int32x4_t __ret_218; \
63787 __ret_218 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_218), (int32x2_t)(vqshrun_n_s64(__s1_218, __p2_218)))); \
63788 __ret_218; \
63789 })
63790 #else
63791 #define vqshrun_high_n_s64(__p0_219, __p1_219, __p2_219) __extension__ ({ \
63792 int32x2_t __s0_219 = __p0_219; \
63793 int64x2_t __s1_219 = __p1_219; \
63794 int32x2_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 1, 0); \
63795 int64x2_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 1, 0); \
63796 int32x4_t __ret_219; \
63797 __ret_219 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_219), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_219, __p2_219)))); \
63798 __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 3, 2, 1, 0); \
63799 __ret_219; \
63800 })
63801 #endif
63802
63803 #ifdef __LITTLE_ENDIAN__
63804 #define vqshrun_high_n_s16(__p0_220, __p1_220, __p2_220) __extension__ ({ \
63805 int8x8_t __s0_220 = __p0_220; \
63806 int16x8_t __s1_220 = __p1_220; \
63807 int8x16_t __ret_220; \
63808 __ret_220 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_220), (int8x8_t)(vqshrun_n_s16(__s1_220, __p2_220)))); \
63809 __ret_220; \
63810 })
63811 #else
63812 #define vqshrun_high_n_s16(__p0_221, __p1_221, __p2_221) __extension__ ({ \
63813 int8x8_t __s0_221 = __p0_221; \
63814 int16x8_t __s1_221 = __p1_221; \
63815 int8x8_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 7, 6, 5, 4, 3, 2, 1, 0); \
63816 int16x8_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 7, 6, 5, 4, 3, 2, 1, 0); \
63817 int8x16_t __ret_221; \
63818 __ret_221 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_221), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_221, __p2_221)))); \
63819 __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
63820 __ret_221; \
63821 })
63822 #endif
63823
63824 #ifdef __LITTLE_ENDIAN__
63825 #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
63826 int32_t __s0 = __p0; \
63827 int16_t __ret; \
63828 __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
63829 __ret; \
63830 })
63831 #else
63832 #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
63833 int32_t __s0 = __p0; \
63834 int16_t __ret; \
63835 __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
63836 __ret; \
63837 })
63838 #endif
63839
63840 #ifdef __LITTLE_ENDIAN__
63841 #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
63842 int64_t __s0 = __p0; \
63843 int32_t __ret; \
63844 __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
63845 __ret; \
63846 })
63847 #else
63848 #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
63849 int64_t __s0 = __p0; \
63850 int32_t __ret; \
63851 __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
63852 __ret; \
63853 })
63854 #endif
63855
63856 #ifdef __LITTLE_ENDIAN__
63857 #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
63858 int16_t __s0 = __p0; \
63859 int8_t __ret; \
63860 __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
63861 __ret; \
63862 })
63863 #else
63864 #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
63865 int16_t __s0 = __p0; \
63866 int8_t __ret; \
63867 __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
63868 __ret; \
63869 })
63870 #endif
63871
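/*
 * Scalar saturating subtract (SQSUB/UQSUB): vqsubb/vqsubs/vqsubd/vqsubh for
 * both signednesses. An unsigned 8-bit subtraction that would go below zero
 * returns 0, and a signed 16-bit subtraction that would overflow returns
 * INT16_MAX or INT16_MIN.
 */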
63872 #ifdef __LITTLE_ENDIAN__
63873 __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
63874 uint8_t __ret;
63875 __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
63876 return __ret;
63877 }
63878 #else
63879 __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
63880 uint8_t __ret;
63881 __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
63882 return __ret;
63883 }
63884 #endif
63885
63886 #ifdef __LITTLE_ENDIAN__
63887 __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
63888 uint32_t __ret;
63889 __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
63890 return __ret;
63891 }
63892 #else
63893 __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
63894 uint32_t __ret;
63895 __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
63896 return __ret;
63897 }
63898 #endif
63899
63900 #ifdef __LITTLE_ENDIAN__
63901 __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
63902 uint64_t __ret;
63903 __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
63904 return __ret;
63905 }
63906 #else
63907 __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
63908 uint64_t __ret;
63909 __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
63910 return __ret;
63911 }
63912 #endif
63913
63914 #ifdef __LITTLE_ENDIAN__
63915 __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
63916 uint16_t __ret;
63917 __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
63918 return __ret;
63919 }
63920 #else
63921 __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
63922 uint16_t __ret;
63923 __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
63924 return __ret;
63925 }
63926 #endif
63927
63928 #ifdef __LITTLE_ENDIAN__
63929 __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
63930 int8_t __ret;
63931 __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
63932 return __ret;
63933 }
63934 #else
63935 __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
63936 int8_t __ret;
63937 __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
63938 return __ret;
63939 }
63940 #endif
63941
63942 #ifdef __LITTLE_ENDIAN__
63943 __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
63944 int32_t __ret;
63945 __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
63946 return __ret;
63947 }
63948 #else
63949 __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
63950 int32_t __ret;
63951 __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
63952 return __ret;
63953 }
63954 __ai int32_t __noswap_vqsubs_s32(int32_t __p0, int32_t __p1) {
63955 int32_t __ret;
63956 __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
63957 return __ret;
63958 }
63959 #endif
63960
63961 #ifdef __LITTLE_ENDIAN__
63962 __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
63963 int64_t __ret;
63964 __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
63965 return __ret;
63966 }
63967 #else
63968 __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
63969 int64_t __ret;
63970 __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
63971 return __ret;
63972 }
63973 #endif
63974
63975 #ifdef __LITTLE_ENDIAN__
63976 __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
63977 int16_t __ret;
63978 __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
63979 return __ret;
63980 }
63981 #else
63982 __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
63983 int16_t __ret;
63984 __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
63985 return __ret;
63986 }
63987 __ai int16_t __noswap_vqsubh_s16(int16_t __p0, int16_t __p1) {
63988 int16_t __ret;
63989 __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
63990 return __ret;
63991 }
63992 #endif
63993
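/* vqtbl1: AArch64 single-register table lookup (TBL). Each index byte in the second
 * operand selects a byte from the 16-byte table; indices >= 16 produce 0. In the
 * big-endian variants the operands are lane-reversed with __builtin_shufflevector,
 * the builtin is applied, and the result is reversed back. */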
63994 #ifdef __LITTLE_ENDIAN__
63995 __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
63996 poly8x8_t __ret;
63997 __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
63998 return __ret;
63999 }
64000 #else
64001 __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
64002 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64003 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64004 poly8x8_t __ret;
64005 __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
64006 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64007 return __ret;
64008 }
64009 #endif
64010
64011 #ifdef __LITTLE_ENDIAN__
64012 __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
64013 poly8x16_t __ret;
64014 __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
64015 return __ret;
64016 }
64017 #else
64018 __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
64019 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64020 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64021 poly8x16_t __ret;
64022 __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
64023 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64024 return __ret;
64025 }
64026 #endif
64027
64028 #ifdef __LITTLE_ENDIAN__
64029 __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
64030 uint8x16_t __ret;
64031 __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
64032 return __ret;
64033 }
64034 #else
64035 __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
64036 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64037 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64038 uint8x16_t __ret;
64039 __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
64040 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64041 return __ret;
64042 }
64043 #endif
64044
64045 #ifdef __LITTLE_ENDIAN__
64046 __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
64047 int8x16_t __ret;
64048 __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
64049 return __ret;
64050 }
64051 #else
64052 __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
64053 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64054 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64055 int8x16_t __ret;
64056 __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
64057 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64058 return __ret;
64059 }
64060 #endif
64061
64062 #ifdef __LITTLE_ENDIAN__
64063 __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
64064 uint8x8_t __ret;
64065 __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
64066 return __ret;
64067 }
64068 #else
64069 __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
64070 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64071 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64072 uint8x8_t __ret;
64073 __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
64074 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64075 return __ret;
64076 }
64077 #endif
64078
64079 #ifdef __LITTLE_ENDIAN__
64080 __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
64081 int8x8_t __ret;
64082 __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
64083 return __ret;
64084 }
64085 #else
64086 __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
64087 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64088 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64089 int8x8_t __ret;
64090 __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
64091 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64092 return __ret;
64093 }
64094 #endif
64095
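/* vqtbl2: table lookup over a two-register (32-byte) table; indices >= 32 produce 0. */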
64096 #ifdef __LITTLE_ENDIAN__
64097 __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
64098 poly8x8_t __ret;
64099 __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
64100 return __ret;
64101 }
64102 #else
64103 __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
64104 poly8x16x2_t __rev0;
64105 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64106 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64107 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64108 poly8x8_t __ret;
64109 __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
64110 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64111 return __ret;
64112 }
64113 #endif
64114
64115 #ifdef __LITTLE_ENDIAN__
64116 __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
64117 poly8x16_t __ret;
64118 __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
64119 return __ret;
64120 }
64121 #else
64122 __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
64123 poly8x16x2_t __rev0;
64124 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64125 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64126 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64127 poly8x16_t __ret;
64128 __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
64129 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64130 return __ret;
64131 }
64132 #endif
64133
64134 #ifdef __LITTLE_ENDIAN__
64135 __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
64136 uint8x16_t __ret;
64137 __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
64138 return __ret;
64139 }
64140 #else
64141 __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
64142 uint8x16x2_t __rev0;
64143 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64144 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64145 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64146 uint8x16_t __ret;
64147 __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
64148 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64149 return __ret;
64150 }
64151 #endif
64152
64153 #ifdef __LITTLE_ENDIAN__
64154 __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
64155 int8x16_t __ret;
64156 __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
64157 return __ret;
64158 }
64159 #else
64160 __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
64161 int8x16x2_t __rev0;
64162 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64163 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64164 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64165 int8x16_t __ret;
64166 __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
64167 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64168 return __ret;
64169 }
64170 #endif
64171
64172 #ifdef __LITTLE_ENDIAN__
64173 __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
64174 uint8x8_t __ret;
64175 __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
64176 return __ret;
64177 }
64178 #else
64179 __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
64180 uint8x16x2_t __rev0;
64181 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64182 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64183 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64184 uint8x8_t __ret;
64185 __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
64186 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64187 return __ret;
64188 }
64189 #endif
64190
64191 #ifdef __LITTLE_ENDIAN__
64192 __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
64193 int8x8_t __ret;
64194 __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
64195 return __ret;
64196 }
64197 #else
64198 __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
64199 int8x16x2_t __rev0;
64200 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64201 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64202 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64203 int8x8_t __ret;
64204 __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
64205 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64206 return __ret;
64207 }
64208 #endif
64209
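/* vqtbl3: table lookup over a three-register (48-byte) table; indices >= 48 produce 0. */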
64210 #ifdef __LITTLE_ENDIAN__
64211 __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
64212 poly8x8_t __ret;
64213 __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
64214 return __ret;
64215 }
64216 #else
64217 __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
64218 poly8x16x3_t __rev0;
64219 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64220 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64221 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64222 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64223 poly8x8_t __ret;
64224 __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
64225 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64226 return __ret;
64227 }
64228 #endif
64229
64230 #ifdef __LITTLE_ENDIAN__
64231 __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
64232 poly8x16_t __ret;
64233 __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
64234 return __ret;
64235 }
64236 #else
64237 __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
64238 poly8x16x3_t __rev0;
64239 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64240 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64241 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64242 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64243 poly8x16_t __ret;
64244 __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
64245 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64246 return __ret;
64247 }
64248 #endif
64249
64250 #ifdef __LITTLE_ENDIAN__
64251 __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
64252 uint8x16_t __ret;
64253 __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
64254 return __ret;
64255 }
64256 #else
64257 __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
64258 uint8x16x3_t __rev0;
64259 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64260 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64261 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64262 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64263 uint8x16_t __ret;
64264 __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
64265 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64266 return __ret;
64267 }
64268 #endif
64269
64270 #ifdef __LITTLE_ENDIAN__
64271 __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
64272 int8x16_t __ret;
64273 __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
64274 return __ret;
64275 }
64276 #else
64277 __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
64278 int8x16x3_t __rev0;
64279 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64280 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64281 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64282 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64283 int8x16_t __ret;
64284 __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
64285 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64286 return __ret;
64287 }
64288 #endif
64289
64290 #ifdef __LITTLE_ENDIAN__
64291 __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
64292 uint8x8_t __ret;
64293 __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
64294 return __ret;
64295 }
64296 #else
64297 __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
64298 uint8x16x3_t __rev0;
64299 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64300 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64301 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64302 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64303 uint8x8_t __ret;
64304 __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
64305 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64306 return __ret;
64307 }
64308 #endif
64309
64310 #ifdef __LITTLE_ENDIAN__
64311 __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
64312 int8x8_t __ret;
64313 __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
64314 return __ret;
64315 }
64316 #else
64317 __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
64318 int8x16x3_t __rev0;
64319 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64320 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64321 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64322 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64323 int8x8_t __ret;
64324 __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
64325 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64326 return __ret;
64327 }
64328 #endif
64329
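/* vqtbl4: table lookup over a four-register (64-byte) table; indices >= 64 produce 0. */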
64330 #ifdef __LITTLE_ENDIAN__
64331 __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
64332 poly8x8_t __ret;
64333 __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
64334 return __ret;
64335 }
64336 #else
64337 __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
64338 poly8x16x4_t __rev0;
64339 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64340 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64341 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64342 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64343 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64344 poly8x8_t __ret;
64345 __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
64346 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64347 return __ret;
64348 }
64349 #endif
64350
64351 #ifdef __LITTLE_ENDIAN__
64352 __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
64353 poly8x16_t __ret;
64354 __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
64355 return __ret;
64356 }
64357 #else
64358 __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
64359 poly8x16x4_t __rev0;
64360 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64361 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64362 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64363 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64364 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64365 poly8x16_t __ret;
64366 __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
64367 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64368 return __ret;
64369 }
64370 #endif
64371
64372 #ifdef __LITTLE_ENDIAN__
64373 __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
64374 uint8x16_t __ret;
64375 __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
64376 return __ret;
64377 }
64378 #else
64379 __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
64380 uint8x16x4_t __rev0;
64381 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64382 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64383 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64384 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64385 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64386 uint8x16_t __ret;
64387 __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
64388 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64389 return __ret;
64390 }
64391 #endif
64392
64393 #ifdef __LITTLE_ENDIAN__
64394 __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
64395 int8x16_t __ret;
64396 __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
64397 return __ret;
64398 }
64399 #else
64400 __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
64401 int8x16x4_t __rev0;
64402 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64403 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64404 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64405 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64406 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64407 int8x16_t __ret;
64408 __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
64409 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64410 return __ret;
64411 }
64412 #endif
64413
64414 #ifdef __LITTLE_ENDIAN__
64415 __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
64416 uint8x8_t __ret;
64417 __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
64418 return __ret;
64419 }
64420 #else
64421 __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
64422 uint8x16x4_t __rev0;
64423 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64424 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64425 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64426 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64427 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64428 uint8x8_t __ret;
64429 __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
64430 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64431 return __ret;
64432 }
64433 #endif
64434
64435 #ifdef __LITTLE_ENDIAN__
64436 __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
64437 int8x8_t __ret;
64438 __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
64439 return __ret;
64440 }
64441 #else
64442 __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
64443 int8x16x4_t __rev0;
64444 __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64445 __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64446 __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64447 __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64448 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64449 int8x8_t __ret;
64450 __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
64451 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64452 return __ret;
64453 }
64454 #endif
64455
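/* vqtbx1: table lookup extension (TBX) with a single 16-byte table. Unlike TBL,
 * an out-of-range index leaves the corresponding byte of the first operand unchanged. */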
64456 #ifdef __LITTLE_ENDIAN__
64457 __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
64458 poly8x8_t __ret;
64459 __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
64460 return __ret;
64461 }
64462 #else
64463 __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
64464 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64465 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64466 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64467 poly8x8_t __ret;
64468 __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
64469 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64470 return __ret;
64471 }
64472 #endif
64473
64474 #ifdef __LITTLE_ENDIAN__
64475 __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
64476 poly8x16_t __ret;
64477 __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
64478 return __ret;
64479 }
64480 #else
64481 __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
64482 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64483 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64484 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64485 poly8x16_t __ret;
64486 __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
64487 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64488 return __ret;
64489 }
64490 #endif
64491
64492 #ifdef __LITTLE_ENDIAN__
64493 __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
64494 uint8x16_t __ret;
64495 __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
64496 return __ret;
64497 }
64498 #else
64499 __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
64500 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64501 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64502 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64503 uint8x16_t __ret;
64504 __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
64505 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64506 return __ret;
64507 }
64508 #endif
64509
64510 #ifdef __LITTLE_ENDIAN__
64511 __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
64512 int8x16_t __ret;
64513 __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
64514 return __ret;
64515 }
64516 #else
64517 __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
64518 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64519 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64520 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64521 int8x16_t __ret;
64522 __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
64523 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64524 return __ret;
64525 }
64526 #endif
64527
64528 #ifdef __LITTLE_ENDIAN__
64529 __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
64530 uint8x8_t __ret;
64531 __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
64532 return __ret;
64533 }
64534 #else
64535 __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
64536 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64537 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64538 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64539 uint8x8_t __ret;
64540 __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
64541 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64542 return __ret;
64543 }
64544 #endif
64545
64546 #ifdef __LITTLE_ENDIAN__
64547 __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
64548 int8x8_t __ret;
64549 __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
64550 return __ret;
64551 }
64552 #else
64553 __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
64554 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64555 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64556 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64557 int8x8_t __ret;
64558 __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
64559 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64560 return __ret;
64561 }
64562 #endif
64563
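/* vqtbx2: table lookup extension over a two-register (32-byte) table. */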
64564 #ifdef __LITTLE_ENDIAN__
64565 __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
64566 poly8x8_t __ret;
64567 __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
64568 return __ret;
64569 }
64570 #else
64571 __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
64572 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64573 poly8x16x2_t __rev1;
64574 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64575 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64576 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64577 poly8x8_t __ret;
64578 __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
64579 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64580 return __ret;
64581 }
64582 #endif
64583
64584 #ifdef __LITTLE_ENDIAN__
64585 __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
64586 poly8x16_t __ret;
64587 __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
64588 return __ret;
64589 }
64590 #else
64591 __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
64592 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64593 poly8x16x2_t __rev1;
64594 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64595 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64596 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64597 poly8x16_t __ret;
64598 __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
64599 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64600 return __ret;
64601 }
64602 #endif
64603
64604 #ifdef __LITTLE_ENDIAN__
64605 __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
64606 uint8x16_t __ret;
64607 __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
64608 return __ret;
64609 }
64610 #else
64611 __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
64612 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64613 uint8x16x2_t __rev1;
64614 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64615 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64616 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64617 uint8x16_t __ret;
64618 __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
64619 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64620 return __ret;
64621 }
64622 #endif
64623
64624 #ifdef __LITTLE_ENDIAN__
64625 __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
64626 int8x16_t __ret;
64627 __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
64628 return __ret;
64629 }
64630 #else
64631 __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
64632 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64633 int8x16x2_t __rev1;
64634 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64635 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64636 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64637 int8x16_t __ret;
64638 __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
64639 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64640 return __ret;
64641 }
64642 #endif
64643
64644 #ifdef __LITTLE_ENDIAN__
64645 __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
64646 uint8x8_t __ret;
64647 __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
64648 return __ret;
64649 }
64650 #else
64651 __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
64652 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64653 uint8x16x2_t __rev1;
64654 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64655 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64656 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64657 uint8x8_t __ret;
64658 __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
64659 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64660 return __ret;
64661 }
64662 #endif
64663
64664 #ifdef __LITTLE_ENDIAN__
64665 __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
64666 int8x8_t __ret;
64667 __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
64668 return __ret;
64669 }
64670 #else
64671 __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
64672 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64673 int8x16x2_t __rev1;
64674 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64675 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64676 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64677 int8x8_t __ret;
64678 __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
64679 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64680 return __ret;
64681 }
64682 #endif
64683
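/* vqtbx3: table lookup extension over a three-register (48-byte) table. */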
64684 #ifdef __LITTLE_ENDIAN__
64685 __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
64686 poly8x8_t __ret;
64687 __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
64688 return __ret;
64689 }
64690 #else
64691 __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
64692 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64693 poly8x16x3_t __rev1;
64694 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64695 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64696 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64697 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64698 poly8x8_t __ret;
64699 __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
64700 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64701 return __ret;
64702 }
64703 #endif
64704
64705 #ifdef __LITTLE_ENDIAN__
64706 __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
64707 poly8x16_t __ret;
64708 __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
64709 return __ret;
64710 }
64711 #else
64712 __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
64713 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64714 poly8x16x3_t __rev1;
64715 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64716 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64717 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64718 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64719 poly8x16_t __ret;
64720 __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
64721 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64722 return __ret;
64723 }
64724 #endif
64725
64726 #ifdef __LITTLE_ENDIAN__
64727 __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
64728 uint8x16_t __ret;
64729 __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
64730 return __ret;
64731 }
64732 #else
64733 __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
64734 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64735 uint8x16x3_t __rev1;
64736 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64737 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64738 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64739 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64740 uint8x16_t __ret;
64741 __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
64742 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64743 return __ret;
64744 }
64745 #endif
64746
64747 #ifdef __LITTLE_ENDIAN__
64748 __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
64749 int8x16_t __ret;
64750 __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
64751 return __ret;
64752 }
64753 #else
64754 __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
64755 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64756 int8x16x3_t __rev1;
64757 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64758 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64759 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64760 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64761 int8x16_t __ret;
64762 __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
64763 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64764 return __ret;
64765 }
64766 #endif
64767
64768 #ifdef __LITTLE_ENDIAN__
64769 __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
64770 uint8x8_t __ret;
64771 __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
64772 return __ret;
64773 }
64774 #else
64775 __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
64776 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64777 uint8x16x3_t __rev1;
64778 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64779 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64780 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64781 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64782 uint8x8_t __ret;
64783 __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
64784 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64785 return __ret;
64786 }
64787 #endif
64788
64789 #ifdef __LITTLE_ENDIAN__
64790 __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
64791 int8x8_t __ret;
64792 __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
64793 return __ret;
64794 }
64795 #else
64796 __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
64797 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64798 int8x16x3_t __rev1;
64799 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64800 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64801 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64802 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64803 int8x8_t __ret;
64804 __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
64805 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64806 return __ret;
64807 }
64808 #endif
64809
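/* vqtbx4: table lookup extension over a four-register (64-byte) table. */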
64810 #ifdef __LITTLE_ENDIAN__
64811 __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
64812 poly8x8_t __ret;
64813 __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
64814 return __ret;
64815 }
64816 #else
64817 __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
64818 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64819 poly8x16x4_t __rev1;
64820 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64821 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64822 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64823 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64824 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64825 poly8x8_t __ret;
64826 __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
64827 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64828 return __ret;
64829 }
64830 #endif
64831
64832 #ifdef __LITTLE_ENDIAN__
64833 __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
64834 poly8x16_t __ret;
64835 __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
64836 return __ret;
64837 }
64838 #else
64839 __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
64840 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64841 poly8x16x4_t __rev1;
64842 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64843 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64844 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64845 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64846 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64847 poly8x16_t __ret;
64848 __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
64849 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64850 return __ret;
64851 }
64852 #endif
64853
64854 #ifdef __LITTLE_ENDIAN__
64855 __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
64856 uint8x16_t __ret;
64857 __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
64858 return __ret;
64859 }
64860 #else
64861 __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
64862 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64863 uint8x16x4_t __rev1;
64864 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64865 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64866 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64867 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64868 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64869 uint8x16_t __ret;
64870 __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
64871 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64872 return __ret;
64873 }
64874 #endif
64875
64876 #ifdef __LITTLE_ENDIAN__
64877 __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
64878 int8x16_t __ret;
64879 __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
64880 return __ret;
64881 }
64882 #else
64883 __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
64884 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64885 int8x16x4_t __rev1;
64886 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64887 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64888 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64889 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64890 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64891 int8x16_t __ret;
64892 __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
64893 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64894 return __ret;
64895 }
64896 #endif
64897
64898 #ifdef __LITTLE_ENDIAN__
64899 __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
64900 uint8x8_t __ret;
64901 __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
64902 return __ret;
64903 }
64904 #else
64905 __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
64906 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64907 uint8x16x4_t __rev1;
64908 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64909 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64910 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64911 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64912 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64913 uint8x8_t __ret;
64914 __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
64915 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64916 return __ret;
64917 }
64918 #endif
64919
64920 #ifdef __LITTLE_ENDIAN__
64921 __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
64922 int8x8_t __ret;
64923 __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
64924 return __ret;
64925 }
64926 #else
64927 __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
64928 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64929 int8x16x4_t __rev1;
64930 __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64931 __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64932 __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64933 __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64934 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64935 int8x8_t __ret;
64936 __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
64937 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64938 return __ret;
64939 }
64940 #endif
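/* Usage sketch (illustrative only; the helper below is hypothetical caller code, not part
 * of this header): the vqtbx4 intrinsics above perform a 64-byte table lookup in which an
 * out-of-range index leaves the corresponding lane of the first argument unchanged, rather
 * than zeroing it as the vqtbl4 forms do.
 *
 *   uint8x16_t remap_with_fallback(uint8x16_t fallback, uint8x16x4_t table, uint8x16_t idx) {
 *     return vqtbx4q_u8(fallback, table, idx);   // lanes with idx >= 64 keep fallback
 *   }
 */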
64941
64942 #ifdef __LITTLE_ENDIAN__
64943 __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
64944 uint16x8_t __ret;
64945 __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
64946 return __ret;
64947 }
64948 #else
64949 __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
64950 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
64951 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
64952 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
64953 uint16x8_t __ret;
64954 __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
64955 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
64956 return __ret;
64957 }
64958 #endif
64959
64960 #ifdef __LITTLE_ENDIAN__
64961 __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
64962 uint32x4_t __ret;
64963 __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
64964 return __ret;
64965 }
64966 #else
64967 __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
64968 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
64969 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
64970 uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
64971 uint32x4_t __ret;
64972 __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
64973 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
64974 return __ret;
64975 }
64976 #endif
64977
64978 #ifdef __LITTLE_ENDIAN__
64979 __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
64980 uint8x16_t __ret;
64981 __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
64982 return __ret;
64983 }
64984 #else
64985 __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
64986 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
64987 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
64988 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
64989 uint8x16_t __ret;
64990 __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
64991 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
64992 return __ret;
64993 }
64994 #endif
64995
64996 #ifdef __LITTLE_ENDIAN__
64997 __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
64998 int16x8_t __ret;
64999 __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
65000 return __ret;
65001 }
65002 #else
65003 __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
65004 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65005 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65006 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
65007 int16x8_t __ret;
65008 __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
65009 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65010 return __ret;
65011 }
65012 #endif
65013
65014 #ifdef __LITTLE_ENDIAN__
65015 __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
65016 int32x4_t __ret;
65017 __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
65018 return __ret;
65019 }
65020 #else
65021 __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
65022 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65023 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65024 int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
65025 int32x4_t __ret;
65026 __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
65027 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65028 return __ret;
65029 }
65030 #endif
65031
65032 #ifdef __LITTLE_ENDIAN__
65033 __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
65034 int8x16_t __ret;
65035 __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
65036 return __ret;
65037 }
65038 #else
65039 __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
65040 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65041 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65042 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
65043 int8x16_t __ret;
65044 __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
65045 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65046 return __ret;
65047 }
65048 #endif
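/* Usage sketch (hypothetical caller code): vraddhn_high_* appends a rounded, narrowed sum
 * to an existing low half, so two wide additions can be packed into one narrow vector.
 *
 *   uint16x8_t sum_high_halves(uint32x4_t a0, uint32x4_t b0, uint32x4_t a1, uint32x4_t b1) {
 *     uint16x4_t lo = vraddhn_u32(a0, b0);     // (a0 + b0 + 0x8000) >> 16, per lane
 *     return vraddhn_high_u32(lo, a1, b1);     // same for a1 + b1, placed in lanes 4..7
 *   }
 */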
65049
65050 #ifdef __LITTLE_ENDIAN__
65051 __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
65052 poly8x8_t __ret;
65053 __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
65054 return __ret;
65055 }
65056 #else
65057 __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
65058 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65059 poly8x8_t __ret;
65060 __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
65061 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65062 return __ret;
65063 }
65064 #endif
65065
65066 #ifdef __LITTLE_ENDIAN__
65067 __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
65068 poly8x16_t __ret;
65069 __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
65070 return __ret;
65071 }
65072 #else
65073 __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
65074 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65075 poly8x16_t __ret;
65076 __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
65077 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65078 return __ret;
65079 }
65080 #endif
65081
65082 #ifdef __LITTLE_ENDIAN__
65083 __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
65084 uint8x16_t __ret;
65085 __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
65086 return __ret;
65087 }
65088 #else
65089 __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
65090 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65091 uint8x16_t __ret;
65092 __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
65093 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65094 return __ret;
65095 }
65096 #endif
65097
65098 #ifdef __LITTLE_ENDIAN__
65099 __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
65100 int8x16_t __ret;
65101 __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
65102 return __ret;
65103 }
65104 #else
65105 __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
65106 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65107 int8x16_t __ret;
65108 __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
65109 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65110 return __ret;
65111 }
65112 #endif
65113
65114 #ifdef __LITTLE_ENDIAN__
65115 __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
65116 uint8x8_t __ret;
65117 __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
65118 return __ret;
65119 }
65120 #else
65121 __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
65122 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65123 uint8x8_t __ret;
65124 __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
65125 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65126 return __ret;
65127 }
65128 #endif
65129
65130 #ifdef __LITTLE_ENDIAN__
65131 __ai int8x8_t vrbit_s8(int8x8_t __p0) {
65132 int8x8_t __ret;
65133 __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
65134 return __ret;
65135 }
65136 #else
65137 __ai int8x8_t vrbit_s8(int8x8_t __p0) {
65138 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65139 int8x8_t __ret;
65140 __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
65141 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65142 return __ret;
65143 }
65144 #endif
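/* Usage sketch (hypothetical values): vrbit and vrbitq reverse the bit order within each
 * byte lane, e.g. 0x01 becomes 0x80:
 *
 *   uint8x16_t v = vdupq_n_u8(0x01);
 *   uint8x16_t r = vrbitq_u8(v);        // every lane is now 0x80
 */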
65145
65146 #ifdef __LITTLE_ENDIAN__
65147 __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
65148 float64x2_t __ret;
65149 __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
65150 return __ret;
65151 }
65152 #else
65153 __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
65154 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65155 float64x2_t __ret;
65156 __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
65157 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65158 return __ret;
65159 }
65160 #endif
65161
65162 #ifdef __LITTLE_ENDIAN__
65163 __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
65164 float64x1_t __ret;
65165 __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
65166 return __ret;
65167 }
65168 #else
65169 __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
65170 float64x1_t __ret;
65171 __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
65172 return __ret;
65173 }
65174 #endif
65175
65176 #ifdef __LITTLE_ENDIAN__
65177 __ai float64_t vrecped_f64(float64_t __p0) {
65178 float64_t __ret;
65179 __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
65180 return __ret;
65181 }
65182 #else
65183 __ai float64_t vrecped_f64(float64_t __p0) {
65184 float64_t __ret;
65185 __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
65186 return __ret;
65187 }
65188 #endif
65189
65190 #ifdef __LITTLE_ENDIAN__
65191 __ai float32_t vrecpes_f32(float32_t __p0) {
65192 float32_t __ret;
65193 __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
65194 return __ret;
65195 }
65196 #else
65197 __ai float32_t vrecpes_f32(float32_t __p0) {
65198 float32_t __ret;
65199 __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
65200 return __ret;
65201 }
65202 #endif
65203
65204 #ifdef __LITTLE_ENDIAN__
65205 __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
65206 float64x2_t __ret;
65207 __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
65208 return __ret;
65209 }
65210 #else
65211 __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
65212 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65213 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65214 float64x2_t __ret;
65215 __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
65216 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65217 return __ret;
65218 }
65219 #endif
65220
65221 #ifdef __LITTLE_ENDIAN__
65222 __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
65223 float64x1_t __ret;
65224 __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
65225 return __ret;
65226 }
65227 #else
65228 __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
65229 float64x1_t __ret;
65230 __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
65231 return __ret;
65232 }
65233 #endif
65234
65235 #ifdef __LITTLE_ENDIAN__
65236 __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
65237 float64_t __ret;
65238 __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
65239 return __ret;
65240 }
65241 #else
65242 __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
65243 float64_t __ret;
65244 __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
65245 return __ret;
65246 }
65247 #endif
65248
65249 #ifdef __LITTLE_ENDIAN__
65250 __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
65251 float32_t __ret;
65252 __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
65253 return __ret;
65254 }
65255 #else
65256 __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
65257 float32_t __ret;
65258 __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
65259 return __ret;
65260 }
65261 #endif
65262
65263 #ifdef __LITTLE_ENDIAN__
65264 __ai float64_t vrecpxd_f64(float64_t __p0) {
65265 float64_t __ret;
65266 __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
65267 return __ret;
65268 }
65269 #else
65270 __ai float64_t vrecpxd_f64(float64_t __p0) {
65271 float64_t __ret;
65272 __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
65273 return __ret;
65274 }
65275 #endif
65276
65277 #ifdef __LITTLE_ENDIAN__
65278 __ai float32_t vrecpxs_f32(float32_t __p0) {
65279 float32_t __ret;
65280 __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
65281 return __ret;
65282 }
65283 #else
65284 __ai float32_t vrecpxs_f32(float32_t __p0) {
65285 float32_t __ret;
65286 __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
65287 return __ret;
65288 }
65289 #endif
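/* Usage sketch (a minimal reciprocal refinement; the helper is hypothetical): vrecpe gives
 * a coarse estimate of 1/x and vrecps(a, b) computes 2.0 - a*b, so each multiply by the
 * step result is one Newton-Raphson iteration. The scalar vrecpx forms map to the FRECPX
 * reciprocal-exponent instruction.
 *
 *   float64x2_t recip_f64(float64x2_t d) {
 *     float64x2_t x = vrecpeq_f64(d);            // initial estimate of 1/d
 *     x = vmulq_f64(x, vrecpsq_f64(d, x));       // x *= (2 - d*x)
 *     x = vmulq_f64(x, vrecpsq_f64(d, x));       // second refinement step
 *     return x;
 *   }
 */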
65290
65291 #ifdef __LITTLE_ENDIAN__
65292 __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
65293 uint64_t __ret;
65294 __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
65295 return __ret;
65296 }
65297 #else
65298 __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
65299 uint64_t __ret;
65300 __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
65301 return __ret;
65302 }
65303 #endif
65304
65305 #ifdef __LITTLE_ENDIAN__
65306 __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
65307 int64_t __ret;
65308 __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
65309 return __ret;
65310 }
65311 #else
65312 __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
65313 int64_t __ret;
65314 __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
65315 return __ret;
65316 }
65317 #endif
65318
65319 #ifdef __LITTLE_ENDIAN__
65320 #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
65321 uint64_t __s0 = __p0; \
65322 uint64_t __ret; \
65323 __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
65324 __ret; \
65325 })
65326 #else
65327 #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
65328 uint64_t __s0 = __p0; \
65329 uint64_t __ret; \
65330 __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
65331 __ret; \
65332 })
65333 #endif
65334
65335 #ifdef __LITTLE_ENDIAN__
65336 #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
65337 int64_t __s0 = __p0; \
65338 int64_t __ret; \
65339 __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
65340 __ret; \
65341 })
65342 #else
65343 #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
65344 int64_t __s0 = __p0; \
65345 int64_t __ret; \
65346 __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
65347 __ret; \
65348 })
65349 #endif
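/* Usage sketch (hypothetical values): these scalar forms operate on single 64-bit values.
 * vrshld shifts by a run-time amount (a negative count shifts right) and vrshrd_n shifts
 * right by an immediate, both rounding to nearest:
 *
 *   int64_t q = vrshrd_n_s64(sum, 8);     // (sum + 128) >> 8
 *   int64_t r = vrshld_s64(sum, -8);      // same rounding right shift, variable count
 */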
65350
65351 #ifdef __LITTLE_ENDIAN__
65352 #define vrshrn_high_n_u32(__p0_222, __p1_222, __p2_222) __extension__ ({ \
65353 uint16x4_t __s0_222 = __p0_222; \
65354 uint32x4_t __s1_222 = __p1_222; \
65355 uint16x8_t __ret_222; \
65356 __ret_222 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_222), (uint16x4_t)(vrshrn_n_u32(__s1_222, __p2_222)))); \
65357 __ret_222; \
65358 })
65359 #else
65360 #define vrshrn_high_n_u32(__p0_223, __p1_223, __p2_223) __extension__ ({ \
65361 uint16x4_t __s0_223 = __p0_223; \
65362 uint32x4_t __s1_223 = __p1_223; \
65363 uint16x4_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 3, 2, 1, 0); \
65364 uint32x4_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 3, 2, 1, 0); \
65365 uint16x8_t __ret_223; \
65366 __ret_223 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_223), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_223, __p2_223)))); \
65367 __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 7, 6, 5, 4, 3, 2, 1, 0); \
65368 __ret_223; \
65369 })
65370 #endif
65371
65372 #ifdef __LITTLE_ENDIAN__
65373 #define vrshrn_high_n_u64(__p0_224, __p1_224, __p2_224) __extension__ ({ \
65374 uint32x2_t __s0_224 = __p0_224; \
65375 uint64x2_t __s1_224 = __p1_224; \
65376 uint32x4_t __ret_224; \
65377 __ret_224 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_224), (uint32x2_t)(vrshrn_n_u64(__s1_224, __p2_224)))); \
65378 __ret_224; \
65379 })
65380 #else
65381 #define vrshrn_high_n_u64(__p0_225, __p1_225, __p2_225) __extension__ ({ \
65382 uint32x2_t __s0_225 = __p0_225; \
65383 uint64x2_t __s1_225 = __p1_225; \
65384 uint32x2_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 1, 0); \
65385 uint64x2_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 1, 0); \
65386 uint32x4_t __ret_225; \
65387 __ret_225 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_225), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_225, __p2_225)))); \
65388 __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 3, 2, 1, 0); \
65389 __ret_225; \
65390 })
65391 #endif
65392
65393 #ifdef __LITTLE_ENDIAN__
65394 #define vrshrn_high_n_u16(__p0_226, __p1_226, __p2_226) __extension__ ({ \
65395 uint8x8_t __s0_226 = __p0_226; \
65396 uint16x8_t __s1_226 = __p1_226; \
65397 uint8x16_t __ret_226; \
65398 __ret_226 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_226), (uint8x8_t)(vrshrn_n_u16(__s1_226, __p2_226)))); \
65399 __ret_226; \
65400 })
65401 #else
65402 #define vrshrn_high_n_u16(__p0_227, __p1_227, __p2_227) __extension__ ({ \
65403 uint8x8_t __s0_227 = __p0_227; \
65404 uint16x8_t __s1_227 = __p1_227; \
65405 uint8x8_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 7, 6, 5, 4, 3, 2, 1, 0); \
65406 uint16x8_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, 7, 6, 5, 4, 3, 2, 1, 0); \
65407 uint8x16_t __ret_227; \
65408 __ret_227 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_227), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_227, __p2_227)))); \
65409 __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
65410 __ret_227; \
65411 })
65412 #endif
65413
65414 #ifdef __LITTLE_ENDIAN__
65415 #define vrshrn_high_n_s32(__p0_228, __p1_228, __p2_228) __extension__ ({ \
65416 int16x4_t __s0_228 = __p0_228; \
65417 int32x4_t __s1_228 = __p1_228; \
65418 int16x8_t __ret_228; \
65419 __ret_228 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_228), (int16x4_t)(vrshrn_n_s32(__s1_228, __p2_228)))); \
65420 __ret_228; \
65421 })
65422 #else
65423 #define vrshrn_high_n_s32(__p0_229, __p1_229, __p2_229) __extension__ ({ \
65424 int16x4_t __s0_229 = __p0_229; \
65425 int32x4_t __s1_229 = __p1_229; \
65426 int16x4_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 3, 2, 1, 0); \
65427 int32x4_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, 3, 2, 1, 0); \
65428 int16x8_t __ret_229; \
65429 __ret_229 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_229), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_229, __p2_229)))); \
65430 __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 7, 6, 5, 4, 3, 2, 1, 0); \
65431 __ret_229; \
65432 })
65433 #endif
65434
65435 #ifdef __LITTLE_ENDIAN__
65436 #define vrshrn_high_n_s64(__p0_230, __p1_230, __p2_230) __extension__ ({ \
65437 int32x2_t __s0_230 = __p0_230; \
65438 int64x2_t __s1_230 = __p1_230; \
65439 int32x4_t __ret_230; \
65440 __ret_230 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_230), (int32x2_t)(vrshrn_n_s64(__s1_230, __p2_230)))); \
65441 __ret_230; \
65442 })
65443 #else
65444 #define vrshrn_high_n_s64(__p0_231, __p1_231, __p2_231) __extension__ ({ \
65445 int32x2_t __s0_231 = __p0_231; \
65446 int64x2_t __s1_231 = __p1_231; \
65447 int32x2_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 1, 0); \
65448 int64x2_t __rev1_231; __rev1_231 = __builtin_shufflevector(__s1_231, __s1_231, 1, 0); \
65449 int32x4_t __ret_231; \
65450 __ret_231 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_231), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_231, __p2_231)))); \
65451 __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 3, 2, 1, 0); \
65452 __ret_231; \
65453 })
65454 #endif
65455
65456 #ifdef __LITTLE_ENDIAN__
65457 #define vrshrn_high_n_s16(__p0_232, __p1_232, __p2_232) __extension__ ({ \
65458 int8x8_t __s0_232 = __p0_232; \
65459 int16x8_t __s1_232 = __p1_232; \
65460 int8x16_t __ret_232; \
65461 __ret_232 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_232), (int8x8_t)(vrshrn_n_s16(__s1_232, __p2_232)))); \
65462 __ret_232; \
65463 })
65464 #else
65465 #define vrshrn_high_n_s16(__p0_233, __p1_233, __p2_233) __extension__ ({ \
65466 int8x8_t __s0_233 = __p0_233; \
65467 int16x8_t __s1_233 = __p1_233; \
65468 int8x8_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 7, 6, 5, 4, 3, 2, 1, 0); \
65469 int16x8_t __rev1_233; __rev1_233 = __builtin_shufflevector(__s1_233, __s1_233, 7, 6, 5, 4, 3, 2, 1, 0); \
65470 int8x16_t __ret_233; \
65471 __ret_233 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_233), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_233, __p2_233)))); \
65472 __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
65473 __ret_233; \
65474 })
65475 #endif
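/* Usage sketch (hypothetical Q16 fixed-point data): vrshrn_high_n_* narrows with a rounding
 * right shift and writes the result into the upper half, so a pair of wide vectors can be
 * packed in two steps:
 *
 *   uint16x8_t pack_q16(uint32x4_t lo, uint32x4_t hi) {
 *     uint16x4_t l = vrshrn_n_u32(lo, 16);       // round and narrow the low half
 *     return vrshrn_high_n_u32(l, hi, 16);       // append the high half
 *   }
 */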
65476
65477 #ifdef __LITTLE_ENDIAN__
65478 __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
65479 float64x2_t __ret;
65480 __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
65481 return __ret;
65482 }
65483 #else
65484 __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
65485 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65486 float64x2_t __ret;
65487 __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
65488 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65489 return __ret;
65490 }
65491 #endif
65492
65493 #ifdef __LITTLE_ENDIAN__
65494 __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
65495 float64x1_t __ret;
65496 __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
65497 return __ret;
65498 }
65499 #else
65500 __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
65501 float64x1_t __ret;
65502 __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
65503 return __ret;
65504 }
65505 #endif
65506
65507 #ifdef __LITTLE_ENDIAN__
65508 __ai float64_t vrsqrted_f64(float64_t __p0) {
65509 float64_t __ret;
65510 __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
65511 return __ret;
65512 }
65513 #else
65514 __ai float64_t vrsqrted_f64(float64_t __p0) {
65515 float64_t __ret;
65516 __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
65517 return __ret;
65518 }
65519 #endif
65520
65521 #ifdef __LITTLE_ENDIAN__
65522 __ai float32_t vrsqrtes_f32(float32_t __p0) {
65523 float32_t __ret;
65524 __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
65525 return __ret;
65526 }
65527 #else
65528 __ai float32_t vrsqrtes_f32(float32_t __p0) {
65529 float32_t __ret;
65530 __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
65531 return __ret;
65532 }
65533 #endif
65534
65535 #ifdef __LITTLE_ENDIAN__
65536 __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
65537 float64x2_t __ret;
65538 __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
65539 return __ret;
65540 }
65541 #else
65542 __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
65543 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65544 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65545 float64x2_t __ret;
65546 __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
65547 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
65548 return __ret;
65549 }
65550 #endif
65551
65552 #ifdef __LITTLE_ENDIAN__
65553 __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
65554 float64x1_t __ret;
65555 __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
65556 return __ret;
65557 }
65558 #else
65559 __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
65560 float64x1_t __ret;
65561 __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
65562 return __ret;
65563 }
65564 #endif
65565
65566 #ifdef __LITTLE_ENDIAN__
65567 __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
65568 float64_t __ret;
65569 __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
65570 return __ret;
65571 }
65572 #else
65573 __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
65574 float64_t __ret;
65575 __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
65576 return __ret;
65577 }
65578 #endif
65579
65580 #ifdef __LITTLE_ENDIAN__
65581 __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
65582 float32_t __ret;
65583 __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
65584 return __ret;
65585 }
65586 #else
65587 __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
65588 float32_t __ret;
65589 __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
65590 return __ret;
65591 }
65592 #endif
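/* Usage sketch (a minimal reciprocal square-root refinement; the helper is hypothetical):
 * vrsqrte estimates 1/sqrt(x) and vrsqrts(a, b) computes (3 - a*b) / 2, so the usual
 * pattern is two multiply-and-step iterations:
 *
 *   float64x2_t rsqrt_f64(float64x2_t d) {
 *     float64x2_t x = vrsqrteq_f64(d);                         // initial estimate
 *     x = vmulq_f64(x, vrsqrtsq_f64(vmulq_f64(d, x), x));      // x *= (3 - d*x*x)/2
 *     x = vmulq_f64(x, vrsqrtsq_f64(vmulq_f64(d, x), x));      // second step
 *     return x;
 *   }
 */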
65593
65594 #ifdef __LITTLE_ENDIAN__
65595 #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
65596 uint64_t __s0 = __p0; \
65597 uint64_t __s1 = __p1; \
65598 uint64_t __ret; \
65599 __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
65600 __ret; \
65601 })
65602 #else
65603 #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
65604 uint64_t __s0 = __p0; \
65605 uint64_t __s1 = __p1; \
65606 uint64_t __ret; \
65607 __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
65608 __ret; \
65609 })
65610 #endif
65611
65612 #ifdef __LITTLE_ENDIAN__
65613 #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
65614 int64_t __s0 = __p0; \
65615 int64_t __s1 = __p1; \
65616 int64_t __ret; \
65617 __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
65618 __ret; \
65619 })
65620 #else
65621 #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
65622 int64_t __s0 = __p0; \
65623 int64_t __s1 = __p1; \
65624 int64_t __ret; \
65625 __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
65626 __ret; \
65627 })
65628 #endif
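/* Usage sketch (hypothetical accumulator): vrsrad_n_u64 and vrsrad_n_s64 add a rounded,
 * right-shifted value to an existing 64-bit accumulator:
 *
 *   uint64_t acc2 = vrsrad_n_u64(acc, x, 4);   // acc + ((x + 8) >> 4)
 */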
65629
65630 #ifdef __LITTLE_ENDIAN__
65631 __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
65632 uint16x8_t __ret;
65633 __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
65634 return __ret;
65635 }
65636 #else
65637 __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
65638 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65639 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65640 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
65641 uint16x8_t __ret;
65642 __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
65643 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65644 return __ret;
65645 }
65646 #endif
65647
65648 #ifdef __LITTLE_ENDIAN__
65649 __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
65650 uint32x4_t __ret;
65651 __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
65652 return __ret;
65653 }
65654 #else
65655 __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
65656 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65657 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65658 uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
65659 uint32x4_t __ret;
65660 __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
65661 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65662 return __ret;
65663 }
65664 #endif
65665
65666 #ifdef __LITTLE_ENDIAN__
65667 __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
65668 uint8x16_t __ret;
65669 __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
65670 return __ret;
65671 }
65672 #else
65673 __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
65674 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65675 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65676 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
65677 uint8x16_t __ret;
65678 __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
65679 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65680 return __ret;
65681 }
65682 #endif
65683
65684 #ifdef __LITTLE_ENDIAN__
65685 __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
65686 int16x8_t __ret;
65687 __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
65688 return __ret;
65689 }
65690 #else
65691 __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
65692 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
65693 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
65694 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
65695 int16x8_t __ret;
65696 __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
65697 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
65698 return __ret;
65699 }
65700 #endif
65701
65702 #ifdef __LITTLE_ENDIAN__
65703 __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
65704 int32x4_t __ret;
65705 __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
65706 return __ret;
65707 }
65708 #else
65709 __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
65710 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
65711 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
65712 int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
65713 int32x4_t __ret;
65714 __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
65715 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
65716 return __ret;
65717 }
65718 #endif
65719
65720 #ifdef __LITTLE_ENDIAN__
65721 __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
65722 int8x16_t __ret;
65723 __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
65724 return __ret;
65725 }
65726 #else
65727 __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
65728 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
65729 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
65730 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
65731 int8x16_t __ret;
65732 __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
65733 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
65734 return __ret;
65735 }
65736 #endif
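/* Usage sketch (hypothetical caller code): the vrsubhn_high_* forms mirror the
 * vraddhn_high_* ones, keeping the rounded upper half of each difference:
 *
 *   int16x8_t diff_hi(int32x4_t a0, int32x4_t b0, int32x4_t a1, int32x4_t b1) {
 *     return vrsubhn_high_s32(vrsubhn_s32(a0, b0), a1, b1);
 *   }
 */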
65737
65738 #ifdef __LITTLE_ENDIAN__
65739 #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
65740 poly64_t __s0 = __p0; \
65741 poly64x1_t __s1 = __p1; \
65742 poly64x1_t __ret; \
65743 __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
65744 __ret; \
65745 })
65746 #else
65747 #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
65748 poly64_t __s0 = __p0; \
65749 poly64x1_t __s1 = __p1; \
65750 poly64x1_t __ret; \
65751 __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
65752 __ret; \
65753 })
65754 #define __noswap_vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
65755 poly64_t __s0 = __p0; \
65756 poly64x1_t __s1 = __p1; \
65757 poly64x1_t __ret; \
65758 __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
65759 __ret; \
65760 })
65761 #endif
65762
65763 #ifdef __LITTLE_ENDIAN__
65764 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
65765 poly64_t __s0 = __p0; \
65766 poly64x2_t __s1 = __p1; \
65767 poly64x2_t __ret; \
65768 __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
65769 __ret; \
65770 })
65771 #else
65772 #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
65773 poly64_t __s0 = __p0; \
65774 poly64x2_t __s1 = __p1; \
65775 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
65776 poly64x2_t __ret; \
65777 __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
65778 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
65779 __ret; \
65780 })
65781 #define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
65782 poly64_t __s0 = __p0; \
65783 poly64x2_t __s1 = __p1; \
65784 poly64x2_t __ret; \
65785 __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
65786 __ret; \
65787 })
65788 #endif
65789
65790 #ifdef __LITTLE_ENDIAN__
65791 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
65792 float64_t __s0 = __p0; \
65793 float64x2_t __s1 = __p1; \
65794 float64x2_t __ret; \
65795 __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
65796 __ret; \
65797 })
65798 #else
65799 #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
65800 float64_t __s0 = __p0; \
65801 float64x2_t __s1 = __p1; \
65802 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
65803 float64x2_t __ret; \
65804 __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
65805 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
65806 __ret; \
65807 })
65808 #define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
65809 float64_t __s0 = __p0; \
65810 float64x2_t __s1 = __p1; \
65811 float64x2_t __ret; \
65812 __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
65813 __ret; \
65814 })
65815 #endif
65816
65817 #ifdef __LITTLE_ENDIAN__
65818 #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
65819 float64_t __s0 = __p0; \
65820 float64x1_t __s1 = __p1; \
65821 float64x1_t __ret; \
65822 __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
65823 __ret; \
65824 })
65825 #else
65826 #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
65827 float64_t __s0 = __p0; \
65828 float64x1_t __s1 = __p1; \
65829 float64x1_t __ret; \
65830 __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
65831 __ret; \
65832 })
65833 #define __noswap_vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
65834 float64_t __s0 = __p0; \
65835 float64x1_t __s1 = __p1; \
65836 float64x1_t __ret; \
65837 __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
65838 __ret; \
65839 })
65840 #endif
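/* Usage sketch (hypothetical values): vset_lane and vsetq_lane insert a scalar into one
 * lane and return the updated vector; the lane index must be a compile-time constant:
 *
 *   float64x2_t v = vdupq_n_f64(0.0);
 *   v = vsetq_lane_f64(3.25, v, 1);      // lane 1 becomes 3.25, lane 0 is unchanged
 */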
65841
65842 #ifdef __LITTLE_ENDIAN__
65843 __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
65844 uint64_t __ret;
65845 __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
65846 return __ret;
65847 }
65848 #else
65849 __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
65850 uint64_t __ret;
65851 __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
65852 return __ret;
65853 }
65854 #endif
65855
65856 #ifdef __LITTLE_ENDIAN__
65857 __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
65858 int64_t __ret;
65859 __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
65860 return __ret;
65861 }
65862 #else
65863 __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
65864 int64_t __ret;
65865 __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
65866 return __ret;
65867 }
65868 #endif
65869
65870 #ifdef __LITTLE_ENDIAN__
65871 #define vshld_n_u64(__p0, __p1) __extension__ ({ \
65872 uint64_t __s0 = __p0; \
65873 uint64_t __ret; \
65874 __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
65875 __ret; \
65876 })
65877 #else
65878 #define vshld_n_u64(__p0, __p1) __extension__ ({ \
65879 uint64_t __s0 = __p0; \
65880 uint64_t __ret; \
65881 __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
65882 __ret; \
65883 })
65884 #endif
65885
65886 #ifdef __LITTLE_ENDIAN__
65887 #define vshld_n_s64(__p0, __p1) __extension__ ({ \
65888 int64_t __s0 = __p0; \
65889 int64_t __ret; \
65890 __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
65891 __ret; \
65892 })
65893 #else
65894 #define vshld_n_s64(__p0, __p1) __extension__ ({ \
65895 int64_t __s0 = __p0; \
65896 int64_t __ret; \
65897 __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
65898 __ret; \
65899 })
65900 #endif
65901
65902 #ifdef __LITTLE_ENDIAN__
65903 #define vshll_high_n_u8(__p0_234, __p1_234) __extension__ ({ \
65904 uint8x16_t __s0_234 = __p0_234; \
65905 uint16x8_t __ret_234; \
65906 __ret_234 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_234), __p1_234)); \
65907 __ret_234; \
65908 })
65909 #else
65910 #define vshll_high_n_u8(__p0_235, __p1_235) __extension__ ({ \
65911 uint8x16_t __s0_235 = __p0_235; \
65912 uint8x16_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
65913 uint16x8_t __ret_235; \
65914 __ret_235 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_235), __p1_235)); \
65915 __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 7, 6, 5, 4, 3, 2, 1, 0); \
65916 __ret_235; \
65917 })
65918 #endif
65919
65920 #ifdef __LITTLE_ENDIAN__
65921 #define vshll_high_n_u32(__p0_236, __p1_236) __extension__ ({ \
65922 uint32x4_t __s0_236 = __p0_236; \
65923 uint64x2_t __ret_236; \
65924 __ret_236 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_236), __p1_236)); \
65925 __ret_236; \
65926 })
65927 #else
65928 #define vshll_high_n_u32(__p0_237, __p1_237) __extension__ ({ \
65929 uint32x4_t __s0_237 = __p0_237; \
65930 uint32x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \
65931 uint64x2_t __ret_237; \
65932 __ret_237 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_237), __p1_237)); \
65933 __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 1, 0); \
65934 __ret_237; \
65935 })
65936 #endif
65937
65938 #ifdef __LITTLE_ENDIAN__
65939 #define vshll_high_n_u16(__p0_238, __p1_238) __extension__ ({ \
65940 uint16x8_t __s0_238 = __p0_238; \
65941 uint32x4_t __ret_238; \
65942 __ret_238 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_238), __p1_238)); \
65943 __ret_238; \
65944 })
65945 #else
65946 #define vshll_high_n_u16(__p0_239, __p1_239) __extension__ ({ \
65947 uint16x8_t __s0_239 = __p0_239; \
65948 uint16x8_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 7, 6, 5, 4, 3, 2, 1, 0); \
65949 uint32x4_t __ret_239; \
65950 __ret_239 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_239), __p1_239)); \
65951 __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \
65952 __ret_239; \
65953 })
65954 #endif
65955
65956 #ifdef __LITTLE_ENDIAN__
65957 #define vshll_high_n_s8(__p0_240, __p1_240) __extension__ ({ \
65958 int8x16_t __s0_240 = __p0_240; \
65959 int16x8_t __ret_240; \
65960 __ret_240 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_240), __p1_240)); \
65961 __ret_240; \
65962 })
65963 #else
65964 #define vshll_high_n_s8(__p0_241, __p1_241) __extension__ ({ \
65965 int8x16_t __s0_241 = __p0_241; \
65966 int8x16_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
65967 int16x8_t __ret_241; \
65968 __ret_241 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_241), __p1_241)); \
65969 __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 7, 6, 5, 4, 3, 2, 1, 0); \
65970 __ret_241; \
65971 })
65972 #endif
65973
65974 #ifdef __LITTLE_ENDIAN__
65975 #define vshll_high_n_s32(__p0_242, __p1_242) __extension__ ({ \
65976 int32x4_t __s0_242 = __p0_242; \
65977 int64x2_t __ret_242; \
65978 __ret_242 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_242), __p1_242)); \
65979 __ret_242; \
65980 })
65981 #else
65982 #define vshll_high_n_s32(__p0_243, __p1_243) __extension__ ({ \
65983 int32x4_t __s0_243 = __p0_243; \
65984 int32x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
65985 int64x2_t __ret_243; \
65986 __ret_243 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_243), __p1_243)); \
65987 __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 1, 0); \
65988 __ret_243; \
65989 })
65990 #endif
65991
65992 #ifdef __LITTLE_ENDIAN__
65993 #define vshll_high_n_s16(__p0_244, __p1_244) __extension__ ({ \
65994 int16x8_t __s0_244 = __p0_244; \
65995 int32x4_t __ret_244; \
65996 __ret_244 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_244), __p1_244)); \
65997 __ret_244; \
65998 })
65999 #else
66000 #define vshll_high_n_s16(__p0_245, __p1_245) __extension__ ({ \
66001 int16x8_t __s0_245 = __p0_245; \
66002 int16x8_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 7, 6, 5, 4, 3, 2, 1, 0); \
66003 int32x4_t __ret_245; \
66004 __ret_245 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_245), __p1_245)); \
66005 __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \
66006 __ret_245; \
66007 })
66008 #endif
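/* Usage sketch (hypothetical data): vshll_high_n_* widens the upper half of the source and
 * shifts it left, avoiding a separate vget_high step:
 *
 *   uint16x8_t widened = vshll_high_n_u8(bytes, 4);   // upper 8 lanes -> 16 bits, << 4
 */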
66009
66010 #ifdef __LITTLE_ENDIAN__
66011 #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
66012 uint64_t __s0 = __p0; \
66013 uint64_t __ret; \
66014 __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
66015 __ret; \
66016 })
66017 #else
66018 #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
66019 uint64_t __s0 = __p0; \
66020 uint64_t __ret; \
66021 __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
66022 __ret; \
66023 })
66024 #endif
66025
66026 #ifdef __LITTLE_ENDIAN__
66027 #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
66028 int64_t __s0 = __p0; \
66029 int64_t __ret; \
66030 __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
66031 __ret; \
66032 })
66033 #else
66034 #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
66035 int64_t __s0 = __p0; \
66036 int64_t __ret; \
66037 __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
66038 __ret; \
66039 })
66040 #endif
66041
66042 #ifdef __LITTLE_ENDIAN__
66043 #define vshrn_high_n_u32(__p0_246, __p1_246, __p2_246) __extension__ ({ \
66044 uint16x4_t __s0_246 = __p0_246; \
66045 uint32x4_t __s1_246 = __p1_246; \
66046 uint16x8_t __ret_246; \
66047 __ret_246 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_246), (uint16x4_t)(vshrn_n_u32(__s1_246, __p2_246)))); \
66048 __ret_246; \
66049 })
66050 #else
66051 #define vshrn_high_n_u32(__p0_247, __p1_247, __p2_247) __extension__ ({ \
66052 uint16x4_t __s0_247 = __p0_247; \
66053 uint32x4_t __s1_247 = __p1_247; \
66054 uint16x4_t __rev0_247; __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 3, 2, 1, 0); \
66055 uint32x4_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 3, 2, 1, 0); \
66056 uint16x8_t __ret_247; \
66057 __ret_247 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_247), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_247, __p2_247)))); \
66058 __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 7, 6, 5, 4, 3, 2, 1, 0); \
66059 __ret_247; \
66060 })
66061 #endif
66062
66063 #ifdef __LITTLE_ENDIAN__
66064 #define vshrn_high_n_u64(__p0_248, __p1_248, __p2_248) __extension__ ({ \
66065 uint32x2_t __s0_248 = __p0_248; \
66066 uint64x2_t __s1_248 = __p1_248; \
66067 uint32x4_t __ret_248; \
66068 __ret_248 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_248), (uint32x2_t)(vshrn_n_u64(__s1_248, __p2_248)))); \
66069 __ret_248; \
66070 })
66071 #else
66072 #define vshrn_high_n_u64(__p0_249, __p1_249, __p2_249) __extension__ ({ \
66073 uint32x2_t __s0_249 = __p0_249; \
66074 uint64x2_t __s1_249 = __p1_249; \
66075 uint32x2_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \
66076 uint64x2_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \
66077 uint32x4_t __ret_249; \
66078 __ret_249 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_249), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_249, __p2_249)))); \
66079 __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 3, 2, 1, 0); \
66080 __ret_249; \
66081 })
66082 #endif
66083
66084 #ifdef __LITTLE_ENDIAN__
66085 #define vshrn_high_n_u16(__p0_250, __p1_250, __p2_250) __extension__ ({ \
66086 uint8x8_t __s0_250 = __p0_250; \
66087 uint16x8_t __s1_250 = __p1_250; \
66088 uint8x16_t __ret_250; \
66089 __ret_250 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_250), (uint8x8_t)(vshrn_n_u16(__s1_250, __p2_250)))); \
66090 __ret_250; \
66091 })
66092 #else
66093 #define vshrn_high_n_u16(__p0_251, __p1_251, __p2_251) __extension__ ({ \
66094 uint8x8_t __s0_251 = __p0_251; \
66095 uint16x8_t __s1_251 = __p1_251; \
66096 uint8x8_t __rev0_251; __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 7, 6, 5, 4, 3, 2, 1, 0); \
66097 uint16x8_t __rev1_251; __rev1_251 = __builtin_shufflevector(__s1_251, __s1_251, 7, 6, 5, 4, 3, 2, 1, 0); \
66098 uint8x16_t __ret_251; \
66099 __ret_251 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_251), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_251, __p2_251)))); \
66100 __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66101 __ret_251; \
66102 })
66103 #endif
66104
66105 #ifdef __LITTLE_ENDIAN__
66106 #define vshrn_high_n_s32(__p0_252, __p1_252, __p2_252) __extension__ ({ \
66107 int16x4_t __s0_252 = __p0_252; \
66108 int32x4_t __s1_252 = __p1_252; \
66109 int16x8_t __ret_252; \
66110 __ret_252 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_252), (int16x4_t)(vshrn_n_s32(__s1_252, __p2_252)))); \
66111 __ret_252; \
66112 })
66113 #else
66114 #define vshrn_high_n_s32(__p0_253, __p1_253, __p2_253) __extension__ ({ \
66115 int16x4_t __s0_253 = __p0_253; \
66116 int32x4_t __s1_253 = __p1_253; \
66117 int16x4_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 3, 2, 1, 0); \
66118 int32x4_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 3, 2, 1, 0); \
66119 int16x8_t __ret_253; \
66120 __ret_253 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_253), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_253, __p2_253)))); \
66121 __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 7, 6, 5, 4, 3, 2, 1, 0); \
66122 __ret_253; \
66123 })
66124 #endif
66125
66126 #ifdef __LITTLE_ENDIAN__
66127 #define vshrn_high_n_s64(__p0_254, __p1_254, __p2_254) __extension__ ({ \
66128 int32x2_t __s0_254 = __p0_254; \
66129 int64x2_t __s1_254 = __p1_254; \
66130 int32x4_t __ret_254; \
66131 __ret_254 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_254), (int32x2_t)(vshrn_n_s64(__s1_254, __p2_254)))); \
66132 __ret_254; \
66133 })
66134 #else
66135 #define vshrn_high_n_s64(__p0_255, __p1_255, __p2_255) __extension__ ({ \
66136 int32x2_t __s0_255 = __p0_255; \
66137 int64x2_t __s1_255 = __p1_255; \
66138 int32x2_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, 1, 0); \
66139 int64x2_t __rev1_255; __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 1, 0); \
66140 int32x4_t __ret_255; \
66141 __ret_255 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_255), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_255, __p2_255)))); \
66142 __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 3, 2, 1, 0); \
66143 __ret_255; \
66144 })
66145 #endif
66146
66147 #ifdef __LITTLE_ENDIAN__
66148 #define vshrn_high_n_s16(__p0_256, __p1_256, __p2_256) __extension__ ({ \
66149 int8x8_t __s0_256 = __p0_256; \
66150 int16x8_t __s1_256 = __p1_256; \
66151 int8x16_t __ret_256; \
66152 __ret_256 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_256), (int8x8_t)(vshrn_n_s16(__s1_256, __p2_256)))); \
66153 __ret_256; \
66154 })
66155 #else
66156 #define vshrn_high_n_s16(__p0_257, __p1_257, __p2_257) __extension__ ({ \
66157 int8x8_t __s0_257 = __p0_257; \
66158 int16x8_t __s1_257 = __p1_257; \
66159 int8x8_t __rev0_257; __rev0_257 = __builtin_shufflevector(__s0_257, __s0_257, 7, 6, 5, 4, 3, 2, 1, 0); \
66160 int16x8_t __rev1_257; __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 7, 6, 5, 4, 3, 2, 1, 0); \
66161 int8x16_t __ret_257; \
66162 __ret_257 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_257), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_257, __p2_257)))); \
66163 __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66164 __ret_257; \
66165 })
66166 #endif
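/* Usage sketch (hypothetical data): vshrn_high_n_* is the truncating counterpart of
 * vrshrn_high_n_*; the shifted-out bits are discarded rather than rounded:
 *
 *   int16x8_t pack_truncating(int32x4_t lo, int32x4_t hi) {
 *     return vshrn_high_n_s32(vshrn_n_s32(lo, 8), hi, 8);
 *   }
 */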
66167
66168 #ifdef __LITTLE_ENDIAN__
66169 #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
66170 uint64_t __s0 = __p0; \
66171 uint64_t __s1 = __p1; \
66172 uint64_t __ret; \
66173 __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
66174 __ret; \
66175 })
66176 #else
66177 #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
66178 uint64_t __s0 = __p0; \
66179 uint64_t __s1 = __p1; \
66180 uint64_t __ret; \
66181 __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
66182 __ret; \
66183 })
66184 #endif
66185
66186 #ifdef __LITTLE_ENDIAN__
66187 #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
66188 int64_t __s0 = __p0; \
66189 int64_t __s1 = __p1; \
66190 int64_t __ret; \
66191 __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
66192 __ret; \
66193 })
66194 #else
66195 #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
66196 int64_t __s0 = __p0; \
66197 int64_t __s1 = __p1; \
66198 int64_t __ret; \
66199 __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
66200 __ret; \
66201 })
66202 #endif
66203
66204 #ifdef __LITTLE_ENDIAN__
66205 #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
66206 poly64x1_t __s0 = __p0; \
66207 poly64x1_t __s1 = __p1; \
66208 poly64x1_t __ret; \
66209 __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
66210 __ret; \
66211 })
66212 #else
66213 #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
66214 poly64x1_t __s0 = __p0; \
66215 poly64x1_t __s1 = __p1; \
66216 poly64x1_t __ret; \
66217 __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
66218 __ret; \
66219 })
66220 #endif
66221
66222 #ifdef __LITTLE_ENDIAN__
66223 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
66224 poly64x2_t __s0 = __p0; \
66225 poly64x2_t __s1 = __p1; \
66226 poly64x2_t __ret; \
66227 __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
66228 __ret; \
66229 })
66230 #else
66231 #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
66232 poly64x2_t __s0 = __p0; \
66233 poly64x2_t __s1 = __p1; \
66234 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
66235 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66236 poly64x2_t __ret; \
66237 __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
66238 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
66239 __ret; \
66240 })
66241 #endif
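/* Usage sketch (hypothetical values): the shift-left-and-insert forms keep the low n bits
 * of the first operand and merge in the second operand shifted left by n:
 *
 *   uint64_t merged = vslid_n_u64(dst, src, 16);
 *   // merged == (dst & 0xffff) | (src << 16)
 */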
66242
66243 #ifdef __LITTLE_ENDIAN__
66244 __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
66245 uint8_t __ret;
66246 __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
66247 return __ret;
66248 }
66249 #else
66250 __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
66251 uint8_t __ret;
66252 __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
66253 return __ret;
66254 }
66255 #endif
66256
66257 #ifdef __LITTLE_ENDIAN__
66258 __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
66259 uint32_t __ret;
66260 __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
66261 return __ret;
66262 }
66263 #else
66264 __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
66265 uint32_t __ret;
66266 __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
66267 return __ret;
66268 }
66269 #endif
66270
66271 #ifdef __LITTLE_ENDIAN__
66272 __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
66273 uint64_t __ret;
66274 __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
66275 return __ret;
66276 }
66277 #else
66278 __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
66279 uint64_t __ret;
66280 __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
66281 return __ret;
66282 }
66283 #endif
66284
66285 #ifdef __LITTLE_ENDIAN__
66286 __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
66287 uint16_t __ret;
66288 __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
66289 return __ret;
66290 }
66291 #else
66292 __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
66293 uint16_t __ret;
66294 __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
66295 return __ret;
66296 }
66297 #endif
66298
66299 #ifdef __LITTLE_ENDIAN__
66300 __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
66301 uint8x16_t __ret;
66302 __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
66303 return __ret;
66304 }
66305 #else
66306 __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
66307 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66308 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66309 uint8x16_t __ret;
66310 __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
66311 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
66312 return __ret;
66313 }
66314 #endif
66315
66316 #ifdef __LITTLE_ENDIAN__
66317 __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
66318 uint32x4_t __ret;
66319 __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
66320 return __ret;
66321 }
66322 #else
66323 __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
66324 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66325 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66326 uint32x4_t __ret;
66327 __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
66328 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66329 return __ret;
66330 }
66331 #endif
66332
66333 #ifdef __LITTLE_ENDIAN__
66334 __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
66335 uint64x2_t __ret;
66336 __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
66337 return __ret;
66338 }
66339 #else
66340 __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
66341 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66342 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66343 uint64x2_t __ret;
66344 __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
66345 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66346 return __ret;
66347 }
66348 #endif
66349
66350 #ifdef __LITTLE_ENDIAN__
66351 __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
66352 uint16x8_t __ret;
66353 __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
66354 return __ret;
66355 }
66356 #else
66357 __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
66358 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66359 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66360 uint16x8_t __ret;
66361 __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
66362 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66363 return __ret;
66364 }
66365 #endif
66366
66367 #ifdef __LITTLE_ENDIAN__
66368 __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
66369 uint8x8_t __ret;
66370 __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
66371 return __ret;
66372 }
66373 #else
66374 __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
66375 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
66376 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
66377 uint8x8_t __ret;
66378 __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
66379 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
66380 return __ret;
66381 }
66382 #endif
66383
66384 #ifdef __LITTLE_ENDIAN__
66385 __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
66386 uint32x2_t __ret;
66387 __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
66388 return __ret;
66389 }
66390 #else
66391 __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
66392 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66393 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
66394 uint32x2_t __ret;
66395 __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
66396 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66397 return __ret;
66398 }
66399 #endif
66400
66401 #ifdef __LITTLE_ENDIAN__
66402 __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
66403 uint64x1_t __ret;
66404 __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
66405 return __ret;
66406 }
66407 #else
66408 __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
66409 uint64x1_t __ret;
66410 __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
66411 return __ret;
66412 }
66413 #endif
66414
66415 #ifdef __LITTLE_ENDIAN__
66416 __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
66417 uint16x4_t __ret;
66418 __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
66419 return __ret;
66420 }
66421 #else
66422 __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
66423 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66424 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
66425 uint16x4_t __ret;
66426 __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
66427 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66428 return __ret;
66429 }
66430 #endif
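/*
 * Illustrative sketch (not part of the generated intrinsics above): the vsqadd
 * family wraps the AArch64 USQADD operation, a saturating accumulate in which
 * the addend is treated as a signed value while the running total stays in the
 * unsigned range. The helper name below is hypothetical; it simply forwards
 * caller-supplied vectors to the wrapper defined above.
 */
static inline uint8x16_t example_usqadd_accumulate(uint8x16_t __acc, uint8x16_t __delta) {
  /* each lane: __acc + __delta (addend interpreted as signed), clamped to 0..255 */
  return vsqaddq_u8(__acc, __delta);
}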
66431
66432 #ifdef __LITTLE_ENDIAN__
66433 __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
66434 float64x2_t __ret;
66435 __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
66436 return __ret;
66437 }
66438 #else
66439 __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
66440 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66441 float64x2_t __ret;
66442 __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
66443 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66444 return __ret;
66445 }
66446 #endif
66447
66448 #ifdef __LITTLE_ENDIAN__
66449 __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
66450 float32x4_t __ret;
66451 __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
66452 return __ret;
66453 }
66454 #else
66455 __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
66456 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
66457 float32x4_t __ret;
66458 __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
66459 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
66460 return __ret;
66461 }
66462 #endif
66463
66464 #ifdef __LITTLE_ENDIAN__
66465 __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
66466 float64x1_t __ret;
66467 __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
66468 return __ret;
66469 }
66470 #else
66471 __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
66472 float64x1_t __ret;
66473 __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
66474 return __ret;
66475 }
66476 #endif
66477
66478 #ifdef __LITTLE_ENDIAN__
66479 __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
66480 float32x2_t __ret;
66481 __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
66482 return __ret;
66483 }
66484 #else
66485 __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
66486 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
66487 float32x2_t __ret;
66488 __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
66489 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
66490 return __ret;
66491 }
66492 #endif
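/*
 * Illustrative sketch (not part of the generated intrinsics above): the vsqrt
 * wrappers map to the per-lane floating-point square-root instruction. The
 * hypothetical helper below takes a caller-supplied vector and returns its
 * lane-wise square root.
 */
static inline float32x4_t example_lanewise_sqrt(float32x4_t __v) {
  return vsqrtq_f32(__v);  /* four independent single-precision square roots */
}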
66493
66494 #ifdef __LITTLE_ENDIAN__
66495 #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
66496 uint64_t __s0 = __p0; \
66497 uint64_t __s1 = __p1; \
66498 uint64_t __ret; \
66499 __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
66500 __ret; \
66501 })
66502 #else
66503 #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
66504 uint64_t __s0 = __p0; \
66505 uint64_t __s1 = __p1; \
66506 uint64_t __ret; \
66507 __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
66508 __ret; \
66509 })
66510 #endif
66511
66512 #ifdef __LITTLE_ENDIAN__
66513 #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
66514 int64_t __s0 = __p0; \
66515 int64_t __s1 = __p1; \
66516 int64_t __ret; \
66517 __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
66518 __ret; \
66519 })
66520 #else
66521 #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
66522 int64_t __s0 = __p0; \
66523 int64_t __s1 = __p1; \
66524 int64_t __ret; \
66525 __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
66526 __ret; \
66527 })
66528 #endif
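/*
 * Illustrative sketch (not part of the generated intrinsics above): vsrad_n_u64
 * and vsrad_n_s64 are scalar shift-right-and-accumulate macros; the shift amount
 * must be an integer constant expression. The helper name below is hypothetical.
 */
static inline uint64_t example_shift_right_accumulate(uint64_t __acc, uint64_t __x) {
  return vsrad_n_u64(__acc, __x, 4);  /* __acc + (__x >> 4), shift count fixed at 4 */
}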
66529
66530 #ifdef __LITTLE_ENDIAN__
66531 #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
66532 uint64_t __s0 = __p0; \
66533 uint64_t __s1 = __p1; \
66534 uint64_t __ret; \
66535 __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
66536 __ret; \
66537 })
66538 #else
66539 #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
66540 uint64_t __s0 = __p0; \
66541 uint64_t __s1 = __p1; \
66542 uint64_t __ret; \
66543 __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
66544 __ret; \
66545 })
66546 #endif
66547
66548 #ifdef __LITTLE_ENDIAN__
66549 #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
66550 int64_t __s0 = __p0; \
66551 int64_t __s1 = __p1; \
66552 int64_t __ret; \
66553 __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
66554 __ret; \
66555 })
66556 #else
66557 #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
66558 int64_t __s0 = __p0; \
66559 int64_t __s1 = __p1; \
66560 int64_t __ret; \
66561 __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
66562 __ret; \
66563 })
66564 #endif
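/*
 * Illustrative sketch (not part of the generated intrinsics above): vsrid_n_u64
 * and vsrid_n_s64 wrap the scalar shift-right-and-insert operation, in which the
 * shifted source is inserted into the destination and the destination's top bits
 * are preserved. Hypothetical helper; the shift count must again be a constant.
 */
static inline uint64_t example_shift_right_insert(uint64_t __dst, uint64_t __src) {
  return vsrid_n_u64(__dst, __src, 8);  /* top 8 bits kept from __dst, rest from __src >> 8 */
}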
66565
66566 #ifdef __LITTLE_ENDIAN__
66567 #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
66568 poly64x1_t __s0 = __p0; \
66569 poly64x1_t __s1 = __p1; \
66570 poly64x1_t __ret; \
66571 __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
66572 __ret; \
66573 })
66574 #else
66575 #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
66576 poly64x1_t __s0 = __p0; \
66577 poly64x1_t __s1 = __p1; \
66578 poly64x1_t __ret; \
66579 __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
66580 __ret; \
66581 })
66582 #endif
66583
66584 #ifdef __LITTLE_ENDIAN__
66585 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
66586 poly64x2_t __s0 = __p0; \
66587 poly64x2_t __s1 = __p1; \
66588 poly64x2_t __ret; \
66589 __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
66590 __ret; \
66591 })
66592 #else
66593 #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
66594 poly64x2_t __s0 = __p0; \
66595 poly64x2_t __s1 = __p1; \
66596 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
66597 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66598 poly64x2_t __ret; \
66599 __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
66600 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
66601 __ret; \
66602 })
66603 #endif
66604
66605 #ifdef __LITTLE_ENDIAN__
66606 #define vst1_p64(__p0, __p1) __extension__ ({ \
66607 poly64x1_t __s1 = __p1; \
66608 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
66609 })
66610 #else
66611 #define vst1_p64(__p0, __p1) __extension__ ({ \
66612 poly64x1_t __s1 = __p1; \
66613 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
66614 })
66615 #endif
66616
66617 #ifdef __LITTLE_ENDIAN__
66618 #define vst1q_p64(__p0, __p1) __extension__ ({ \
66619 poly64x2_t __s1 = __p1; \
66620 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
66621 })
66622 #else
66623 #define vst1q_p64(__p0, __p1) __extension__ ({ \
66624 poly64x2_t __s1 = __p1; \
66625 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66626 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
66627 })
66628 #endif
66629
66630 #ifdef __LITTLE_ENDIAN__
66631 #define vst1q_f64(__p0, __p1) __extension__ ({ \
66632 float64x2_t __s1 = __p1; \
66633 __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
66634 })
66635 #else
66636 #define vst1q_f64(__p0, __p1) __extension__ ({ \
66637 float64x2_t __s1 = __p1; \
66638 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66639 __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
66640 })
66641 #endif
66642
66643 #ifdef __LITTLE_ENDIAN__
66644 #define vst1_f64(__p0, __p1) __extension__ ({ \
66645 float64x1_t __s1 = __p1; \
66646 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
66647 })
66648 #else
66649 #define vst1_f64(__p0, __p1) __extension__ ({ \
66650 float64x1_t __s1 = __p1; \
66651 __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
66652 })
66653 #endif
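/*
 * Illustrative sketch (not part of the generated intrinsics above): vst1q_f64
 * stores both double-precision lanes of a 128-bit register to memory; on
 * big-endian targets the wrapper above first restores the lane order the builtin
 * expects. Hypothetical helper storing one vector to a caller-supplied buffer.
 */
static inline void example_store_f64x2(float64_t *__buf, float64x2_t __v) {
  vst1q_f64(__buf, __v);  /* writes __buf[0] and __buf[1] */
}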
66654
66655 #ifdef __LITTLE_ENDIAN__
66656 #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
66657 poly64x1_t __s1 = __p1; \
66658 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
66659 })
66660 #else
66661 #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
66662 poly64x1_t __s1 = __p1; \
66663 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
66664 })
66665 #endif
66666
66667 #ifdef __LITTLE_ENDIAN__
66668 #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
66669 poly64x2_t __s1 = __p1; \
66670 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
66671 })
66672 #else
66673 #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
66674 poly64x2_t __s1 = __p1; \
66675 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66676 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
66677 })
66678 #endif
66679
66680 #ifdef __LITTLE_ENDIAN__
66681 #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
66682 float64x2_t __s1 = __p1; \
66683 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
66684 })
66685 #else
66686 #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
66687 float64x2_t __s1 = __p1; \
66688 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
66689 __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
66690 })
66691 #endif
66692
66693 #ifdef __LITTLE_ENDIAN__
66694 #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
66695 float64x1_t __s1 = __p1; \
66696 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
66697 })
66698 #else
66699 #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
66700 float64x1_t __s1 = __p1; \
66701 __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
66702 })
66703 #endif
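/*
 * Illustrative sketch (not part of the generated intrinsics above): the
 * vst1*_lane macros store a single selected lane rather than the whole register;
 * the lane index must be a constant expression. Hypothetical helper writing
 * lane 0 only.
 */
static inline void example_store_low_lane(float64_t *__dst, float64x2_t __v) {
  vst1q_lane_f64(__dst, __v, 0);  /* writes exactly one double: lane 0 of __v */
}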
66704
66705 #ifdef __LITTLE_ENDIAN__
66706 #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
66707 poly8x8x2_t __s1 = __p1; \
66708 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
66709 })
66710 #else
66711 #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
66712 poly8x8x2_t __s1 = __p1; \
66713 poly8x8x2_t __rev1; \
66714 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
66715 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
66716 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
66717 })
66718 #endif
66719
66720 #ifdef __LITTLE_ENDIAN__
66721 #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
66722 poly64x1x2_t __s1 = __p1; \
66723 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
66724 })
66725 #else
66726 #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
66727 poly64x1x2_t __s1 = __p1; \
66728 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
66729 })
66730 #endif
66731
66732 #ifdef __LITTLE_ENDIAN__
66733 #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
66734 poly16x4x2_t __s1 = __p1; \
66735 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
66736 })
66737 #else
66738 #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
66739 poly16x4x2_t __s1 = __p1; \
66740 poly16x4x2_t __rev1; \
66741 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
66742 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
66743 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
66744 })
66745 #endif
66746
66747 #ifdef __LITTLE_ENDIAN__
66748 #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
66749 poly8x16x2_t __s1 = __p1; \
66750 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
66751 })
66752 #else
66753 #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
66754 poly8x16x2_t __s1 = __p1; \
66755 poly8x16x2_t __rev1; \
66756 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66757 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66758 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
66759 })
66760 #endif
66761
66762 #ifdef __LITTLE_ENDIAN__
66763 #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
66764 poly64x2x2_t __s1 = __p1; \
66765 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
66766 })
66767 #else
66768 #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
66769 poly64x2x2_t __s1 = __p1; \
66770 poly64x2x2_t __rev1; \
66771 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
66772 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
66773 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
66774 })
66775 #endif
66776
66777 #ifdef __LITTLE_ENDIAN__
66778 #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
66779 poly16x8x2_t __s1 = __p1; \
66780 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
66781 })
66782 #else
66783 #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
66784 poly16x8x2_t __s1 = __p1; \
66785 poly16x8x2_t __rev1; \
66786 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
66787 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
66788 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
66789 })
66790 #endif
66791
66792 #ifdef __LITTLE_ENDIAN__
66793 #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
66794 uint8x16x2_t __s1 = __p1; \
66795 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
66796 })
66797 #else
66798 #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
66799 uint8x16x2_t __s1 = __p1; \
66800 uint8x16x2_t __rev1; \
66801 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66802 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66803 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
66804 })
66805 #endif
66806
66807 #ifdef __LITTLE_ENDIAN__
66808 #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
66809 uint32x4x2_t __s1 = __p1; \
66810 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
66811 })
66812 #else
66813 #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
66814 uint32x4x2_t __s1 = __p1; \
66815 uint32x4x2_t __rev1; \
66816 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
66817 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
66818 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
66819 })
66820 #endif
66821
66822 #ifdef __LITTLE_ENDIAN__
66823 #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
66824 uint64x2x2_t __s1 = __p1; \
66825 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
66826 })
66827 #else
66828 #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
66829 uint64x2x2_t __s1 = __p1; \
66830 uint64x2x2_t __rev1; \
66831 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
66832 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
66833 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
66834 })
66835 #endif
66836
66837 #ifdef __LITTLE_ENDIAN__
66838 #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
66839 uint16x8x2_t __s1 = __p1; \
66840 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
66841 })
66842 #else
66843 #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
66844 uint16x8x2_t __s1 = __p1; \
66845 uint16x8x2_t __rev1; \
66846 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
66847 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
66848 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
66849 })
66850 #endif
66851
66852 #ifdef __LITTLE_ENDIAN__
66853 #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
66854 int8x16x2_t __s1 = __p1; \
66855 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
66856 })
66857 #else
66858 #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
66859 int8x16x2_t __s1 = __p1; \
66860 int8x16x2_t __rev1; \
66861 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66862 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
66863 __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
66864 })
66865 #endif
66866
66867 #ifdef __LITTLE_ENDIAN__
66868 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
66869 float64x2x2_t __s1 = __p1; \
66870 __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
66871 })
66872 #else
66873 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
66874 float64x2x2_t __s1 = __p1; \
66875 float64x2x2_t __rev1; \
66876 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
66877 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
66878 __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
66879 })
66880 #endif
66881
66882 #ifdef __LITTLE_ENDIAN__
66883 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
66884 float32x4x2_t __s1 = __p1; \
66885 __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
66886 })
66887 #else
66888 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
66889 float32x4x2_t __s1 = __p1; \
66890 float32x4x2_t __rev1; \
66891 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
66892 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
66893 __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
66894 })
66895 #endif
66896
66897 #ifdef __LITTLE_ENDIAN__
66898 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
66899 float16x8x2_t __s1 = __p1; \
66900 __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
66901 })
66902 #else
66903 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
66904 float16x8x2_t __s1 = __p1; \
66905 float16x8x2_t __rev1; \
66906 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
66907 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
66908 __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
66909 })
66910 #endif
66911
66912 #ifdef __LITTLE_ENDIAN__
66913 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
66914 int32x4x2_t __s1 = __p1; \
66915 __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
66916 })
66917 #else
66918 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
66919 int32x4x2_t __s1 = __p1; \
66920 int32x4x2_t __rev1; \
66921 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
66922 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
66923 __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
66924 })
66925 #endif
66926
66927 #ifdef __LITTLE_ENDIAN__
66928 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
66929 int64x2x2_t __s1 = __p1; \
66930 __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
66931 })
66932 #else
66933 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
66934 int64x2x2_t __s1 = __p1; \
66935 int64x2x2_t __rev1; \
66936 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
66937 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
66938 __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
66939 })
66940 #endif
66941
66942 #ifdef __LITTLE_ENDIAN__
66943 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
66944 int16x8x2_t __s1 = __p1; \
66945 __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
66946 })
66947 #else
66948 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
66949 int16x8x2_t __s1 = __p1; \
66950 int16x8x2_t __rev1; \
66951 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
66952 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
66953 __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
66954 })
66955 #endif
66956
66957 #ifdef __LITTLE_ENDIAN__
66958 #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
66959 uint8x8x2_t __s1 = __p1; \
66960 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
66961 })
66962 #else
66963 #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
66964 uint8x8x2_t __s1 = __p1; \
66965 uint8x8x2_t __rev1; \
66966 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
66967 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
66968 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
66969 })
66970 #endif
66971
66972 #ifdef __LITTLE_ENDIAN__
66973 #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
66974 uint32x2x2_t __s1 = __p1; \
66975 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
66976 })
66977 #else
66978 #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
66979 uint32x2x2_t __s1 = __p1; \
66980 uint32x2x2_t __rev1; \
66981 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
66982 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
66983 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
66984 })
66985 #endif
66986
66987 #ifdef __LITTLE_ENDIAN__
66988 #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
66989 uint64x1x2_t __s1 = __p1; \
66990 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
66991 })
66992 #else
66993 #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
66994 uint64x1x2_t __s1 = __p1; \
66995 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
66996 })
66997 #endif
66998
66999 #ifdef __LITTLE_ENDIAN__
67000 #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
67001 uint16x4x2_t __s1 = __p1; \
67002 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
67003 })
67004 #else
67005 #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
67006 uint16x4x2_t __s1 = __p1; \
67007 uint16x4x2_t __rev1; \
67008 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67009 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67010 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
67011 })
67012 #endif
67013
67014 #ifdef __LITTLE_ENDIAN__
67015 #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
67016 int8x8x2_t __s1 = __p1; \
67017 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
67018 })
67019 #else
67020 #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
67021 int8x8x2_t __s1 = __p1; \
67022 int8x8x2_t __rev1; \
67023 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67024 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67025 __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
67026 })
67027 #endif
67028
67029 #ifdef __LITTLE_ENDIAN__
67030 #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
67031 float64x1x2_t __s1 = __p1; \
67032 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
67033 })
67034 #else
67035 #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
67036 float64x1x2_t __s1 = __p1; \
67037 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
67038 })
67039 #endif
67040
67041 #ifdef __LITTLE_ENDIAN__
67042 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
67043 float32x2x2_t __s1 = __p1; \
67044 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
67045 })
67046 #else
67047 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
67048 float32x2x2_t __s1 = __p1; \
67049 float32x2x2_t __rev1; \
67050 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67051 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67052 __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
67053 })
67054 #endif
67055
67056 #ifdef __LITTLE_ENDIAN__
67057 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
67058 float16x4x2_t __s1 = __p1; \
67059 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
67060 })
67061 #else
67062 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
67063 float16x4x2_t __s1 = __p1; \
67064 float16x4x2_t __rev1; \
67065 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67066 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67067 __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
67068 })
67069 #endif
67070
67071 #ifdef __LITTLE_ENDIAN__
67072 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
67073 int32x2x2_t __s1 = __p1; \
67074 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
67075 })
67076 #else
67077 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
67078 int32x2x2_t __s1 = __p1; \
67079 int32x2x2_t __rev1; \
67080 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67081 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67082 __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
67083 })
67084 #endif
67085
67086 #ifdef __LITTLE_ENDIAN__
67087 #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
67088 int64x1x2_t __s1 = __p1; \
67089 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
67090 })
67091 #else
67092 #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
67093 int64x1x2_t __s1 = __p1; \
67094 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
67095 })
67096 #endif
67097
67098 #ifdef __LITTLE_ENDIAN__
67099 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
67100 int16x4x2_t __s1 = __p1; \
67101 __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
67102 })
67103 #else
67104 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
67105 int16x4x2_t __s1 = __p1; \
67106 int16x4x2_t __rev1; \
67107 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67108 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67109 __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
67110 })
67111 #endif
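/*
 * Illustrative sketch (not part of the generated intrinsics above): the
 * vst1*_x2 macros store two whole registers to consecutive memory, which is how
 * a float32x4x2_t pair is spilled to 8 contiguous floats. Hypothetical helper;
 * __dst must have room for 8 float32_t values.
 */
static inline void example_store_two_q_regs(float32_t *__dst, float32x4x2_t __pair) {
  vst1q_f32_x2(__dst, __pair);  /* __dst[0..3] = val[0], __dst[4..7] = val[1] */
}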
67112
67113 #ifdef __LITTLE_ENDIAN__
67114 #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
67115 poly8x8x3_t __s1 = __p1; \
67116 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
67117 })
67118 #else
67119 #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
67120 poly8x8x3_t __s1 = __p1; \
67121 poly8x8x3_t __rev1; \
67122 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67123 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67124 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67125 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
67126 })
67127 #endif
67128
67129 #ifdef __LITTLE_ENDIAN__
67130 #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
67131 poly64x1x3_t __s1 = __p1; \
67132 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
67133 })
67134 #else
67135 #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
67136 poly64x1x3_t __s1 = __p1; \
67137 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
67138 })
67139 #endif
67140
67141 #ifdef __LITTLE_ENDIAN__
67142 #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
67143 poly16x4x3_t __s1 = __p1; \
67144 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
67145 })
67146 #else
67147 #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
67148 poly16x4x3_t __s1 = __p1; \
67149 poly16x4x3_t __rev1; \
67150 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67151 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67152 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67153 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
67154 })
67155 #endif
67156
67157 #ifdef __LITTLE_ENDIAN__
67158 #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
67159 poly8x16x3_t __s1 = __p1; \
67160 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
67161 })
67162 #else
67163 #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
67164 poly8x16x3_t __s1 = __p1; \
67165 poly8x16x3_t __rev1; \
67166 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67167 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67168 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67169 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
67170 })
67171 #endif
67172
67173 #ifdef __LITTLE_ENDIAN__
67174 #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
67175 poly64x2x3_t __s1 = __p1; \
67176 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
67177 })
67178 #else
67179 #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
67180 poly64x2x3_t __s1 = __p1; \
67181 poly64x2x3_t __rev1; \
67182 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67183 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67184 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67185 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
67186 })
67187 #endif
67188
67189 #ifdef __LITTLE_ENDIAN__
67190 #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
67191 poly16x8x3_t __s1 = __p1; \
67192 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
67193 })
67194 #else
67195 #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
67196 poly16x8x3_t __s1 = __p1; \
67197 poly16x8x3_t __rev1; \
67198 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67199 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67200 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67201 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
67202 })
67203 #endif
67204
67205 #ifdef __LITTLE_ENDIAN__
67206 #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
67207 uint8x16x3_t __s1 = __p1; \
67208 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
67209 })
67210 #else
67211 #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
67212 uint8x16x3_t __s1 = __p1; \
67213 uint8x16x3_t __rev1; \
67214 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67215 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67216 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67217 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
67218 })
67219 #endif
67220
67221 #ifdef __LITTLE_ENDIAN__
67222 #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
67223 uint32x4x3_t __s1 = __p1; \
67224 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
67225 })
67226 #else
67227 #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
67228 uint32x4x3_t __s1 = __p1; \
67229 uint32x4x3_t __rev1; \
67230 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67231 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67232 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67233 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
67234 })
67235 #endif
67236
67237 #ifdef __LITTLE_ENDIAN__
67238 #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
67239 uint64x2x3_t __s1 = __p1; \
67240 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
67241 })
67242 #else
67243 #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
67244 uint64x2x3_t __s1 = __p1; \
67245 uint64x2x3_t __rev1; \
67246 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67247 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67248 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67249 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
67250 })
67251 #endif
67252
67253 #ifdef __LITTLE_ENDIAN__
67254 #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
67255 uint16x8x3_t __s1 = __p1; \
67256 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
67257 })
67258 #else
67259 #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
67260 uint16x8x3_t __s1 = __p1; \
67261 uint16x8x3_t __rev1; \
67262 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67263 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67264 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67265 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
67266 })
67267 #endif
67268
67269 #ifdef __LITTLE_ENDIAN__
67270 #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
67271 int8x16x3_t __s1 = __p1; \
67272 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
67273 })
67274 #else
67275 #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
67276 int8x16x3_t __s1 = __p1; \
67277 int8x16x3_t __rev1; \
67278 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67279 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67280 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67281 __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
67282 })
67283 #endif
67284
67285 #ifdef __LITTLE_ENDIAN__
67286 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
67287 float64x2x3_t __s1 = __p1; \
67288 __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
67289 })
67290 #else
67291 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
67292 float64x2x3_t __s1 = __p1; \
67293 float64x2x3_t __rev1; \
67294 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67295 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67296 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67297 __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
67298 })
67299 #endif
67300
67301 #ifdef __LITTLE_ENDIAN__
67302 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
67303 float32x4x3_t __s1 = __p1; \
67304 __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
67305 })
67306 #else
67307 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
67308 float32x4x3_t __s1 = __p1; \
67309 float32x4x3_t __rev1; \
67310 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67311 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67312 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67313 __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
67314 })
67315 #endif
67316
67317 #ifdef __LITTLE_ENDIAN__
67318 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
67319 float16x8x3_t __s1 = __p1; \
67320 __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
67321 })
67322 #else
67323 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
67324 float16x8x3_t __s1 = __p1; \
67325 float16x8x3_t __rev1; \
67326 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67327 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67328 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67329 __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
67330 })
67331 #endif
67332
67333 #ifdef __LITTLE_ENDIAN__
67334 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
67335 int32x4x3_t __s1 = __p1; \
67336 __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
67337 })
67338 #else
67339 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
67340 int32x4x3_t __s1 = __p1; \
67341 int32x4x3_t __rev1; \
67342 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67343 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67344 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67345 __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
67346 })
67347 #endif
67348
67349 #ifdef __LITTLE_ENDIAN__
67350 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
67351 int64x2x3_t __s1 = __p1; \
67352 __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
67353 })
67354 #else
67355 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
67356 int64x2x3_t __s1 = __p1; \
67357 int64x2x3_t __rev1; \
67358 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67359 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67360 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67361 __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
67362 })
67363 #endif
67364
67365 #ifdef __LITTLE_ENDIAN__
67366 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
67367 int16x8x3_t __s1 = __p1; \
67368 __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
67369 })
67370 #else
67371 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
67372 int16x8x3_t __s1 = __p1; \
67373 int16x8x3_t __rev1; \
67374 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67375 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67376 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67377 __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
67378 })
67379 #endif
67380
67381 #ifdef __LITTLE_ENDIAN__
67382 #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
67383 uint8x8x3_t __s1 = __p1; \
67384 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
67385 })
67386 #else
67387 #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
67388 uint8x8x3_t __s1 = __p1; \
67389 uint8x8x3_t __rev1; \
67390 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67391 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67392 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67393 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
67394 })
67395 #endif
67396
67397 #ifdef __LITTLE_ENDIAN__
67398 #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
67399 uint32x2x3_t __s1 = __p1; \
67400 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
67401 })
67402 #else
67403 #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
67404 uint32x2x3_t __s1 = __p1; \
67405 uint32x2x3_t __rev1; \
67406 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67407 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67408 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67409 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
67410 })
67411 #endif
67412
67413 #ifdef __LITTLE_ENDIAN__
67414 #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
67415 uint64x1x3_t __s1 = __p1; \
67416 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
67417 })
67418 #else
67419 #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
67420 uint64x1x3_t __s1 = __p1; \
67421 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
67422 })
67423 #endif
67424
67425 #ifdef __LITTLE_ENDIAN__
67426 #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
67427 uint16x4x3_t __s1 = __p1; \
67428 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
67429 })
67430 #else
67431 #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
67432 uint16x4x3_t __s1 = __p1; \
67433 uint16x4x3_t __rev1; \
67434 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67435 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67436 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67437 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
67438 })
67439 #endif
67440
67441 #ifdef __LITTLE_ENDIAN__
67442 #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
67443 int8x8x3_t __s1 = __p1; \
67444 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
67445 })
67446 #else
67447 #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
67448 int8x8x3_t __s1 = __p1; \
67449 int8x8x3_t __rev1; \
67450 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67451 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67452 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67453 __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
67454 })
67455 #endif
67456
67457 #ifdef __LITTLE_ENDIAN__
67458 #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
67459 float64x1x3_t __s1 = __p1; \
67460 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
67461 })
67462 #else
67463 #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
67464 float64x1x3_t __s1 = __p1; \
67465 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
67466 })
67467 #endif
67468
67469 #ifdef __LITTLE_ENDIAN__
67470 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
67471 float32x2x3_t __s1 = __p1; \
67472 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
67473 })
67474 #else
67475 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
67476 float32x2x3_t __s1 = __p1; \
67477 float32x2x3_t __rev1; \
67478 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67479 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67480 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67481 __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
67482 })
67483 #endif
67484
67485 #ifdef __LITTLE_ENDIAN__
67486 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
67487 float16x4x3_t __s1 = __p1; \
67488 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
67489 })
67490 #else
67491 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
67492 float16x4x3_t __s1 = __p1; \
67493 float16x4x3_t __rev1; \
67494 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67495 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67496 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67497 __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
67498 })
67499 #endif
67500
67501 #ifdef __LITTLE_ENDIAN__
67502 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
67503 int32x2x3_t __s1 = __p1; \
67504 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
67505 })
67506 #else
67507 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
67508 int32x2x3_t __s1 = __p1; \
67509 int32x2x3_t __rev1; \
67510 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67511 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67512 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67513 __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
67514 })
67515 #endif
67516
67517 #ifdef __LITTLE_ENDIAN__
67518 #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
67519 int64x1x3_t __s1 = __p1; \
67520 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
67521 })
67522 #else
67523 #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
67524 int64x1x3_t __s1 = __p1; \
67525 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
67526 })
67527 #endif
67528
67529 #ifdef __LITTLE_ENDIAN__
67530 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
67531 int16x4x3_t __s1 = __p1; \
67532 __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
67533 })
67534 #else
67535 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
67536 int16x4x3_t __s1 = __p1; \
67537 int16x4x3_t __rev1; \
67538 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67539 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67540 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67541 __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
67542 })
67543 #endif
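/*
 * Illustrative sketch (not part of the generated intrinsics above): the
 * vst1*_x3 macros are the three-register form of the same contiguous store.
 * Hypothetical helper; __dst must have room for 48 bytes (three 16-byte
 * registers back to back).
 */
static inline void example_store_three_q_regs(uint8_t *__dst, uint8x16x3_t __trio) {
  vst1q_u8_x3(__dst, __trio);  /* val[0], val[1], val[2] stored consecutively */
}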
67544
67545 #ifdef __LITTLE_ENDIAN__
67546 #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
67547 poly8x8x4_t __s1 = __p1; \
67548 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
67549 })
67550 #else
67551 #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
67552 poly8x8x4_t __s1 = __p1; \
67553 poly8x8x4_t __rev1; \
67554 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67555 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67556 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67557 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
67558 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
67559 })
67560 #endif
67561
67562 #ifdef __LITTLE_ENDIAN__
67563 #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
67564 poly64x1x4_t __s1 = __p1; \
67565 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
67566 })
67567 #else
67568 #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
67569 poly64x1x4_t __s1 = __p1; \
67570 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
67571 })
67572 #endif
67573
67574 #ifdef __LITTLE_ENDIAN__
67575 #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
67576 poly16x4x4_t __s1 = __p1; \
67577 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
67578 })
67579 #else
67580 #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
67581 poly16x4x4_t __s1 = __p1; \
67582 poly16x4x4_t __rev1; \
67583 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67584 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67585 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67586 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
67587 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
67588 })
67589 #endif
67590
67591 #ifdef __LITTLE_ENDIAN__
67592 #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
67593 poly8x16x4_t __s1 = __p1; \
67594 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
67595 })
67596 #else
67597 #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
67598 poly8x16x4_t __s1 = __p1; \
67599 poly8x16x4_t __rev1; \
67600 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67601 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67602 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67603 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67604 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
67605 })
67606 #endif
67607
67608 #ifdef __LITTLE_ENDIAN__
67609 #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
67610 poly64x2x4_t __s1 = __p1; \
67611 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
67612 })
67613 #else
67614 #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
67615 poly64x2x4_t __s1 = __p1; \
67616 poly64x2x4_t __rev1; \
67617 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67618 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67619 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67620 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
67621 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
67622 })
67623 #endif
67624
67625 #ifdef __LITTLE_ENDIAN__
67626 #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
67627 poly16x8x4_t __s1 = __p1; \
67628 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
67629 })
67630 #else
67631 #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
67632 poly16x8x4_t __s1 = __p1; \
67633 poly16x8x4_t __rev1; \
67634 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67635 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67636 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67637 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
67638 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
67639 })
67640 #endif
67641
67642 #ifdef __LITTLE_ENDIAN__
67643 #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
67644 uint8x16x4_t __s1 = __p1; \
67645 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
67646 })
67647 #else
67648 #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
67649 uint8x16x4_t __s1 = __p1; \
67650 uint8x16x4_t __rev1; \
67651 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67652 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67653 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67654 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67655 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
67656 })
67657 #endif
67658
67659 #ifdef __LITTLE_ENDIAN__
67660 #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
67661 uint32x4x4_t __s1 = __p1; \
67662 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
67663 })
67664 #else
67665 #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
67666 uint32x4x4_t __s1 = __p1; \
67667 uint32x4x4_t __rev1; \
67668 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67669 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67670 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67671 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
67672 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
67673 })
67674 #endif
67675
67676 #ifdef __LITTLE_ENDIAN__
67677 #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
67678 uint64x2x4_t __s1 = __p1; \
67679 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
67680 })
67681 #else
67682 #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
67683 uint64x2x4_t __s1 = __p1; \
67684 uint64x2x4_t __rev1; \
67685 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67686 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67687 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67688 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
67689 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
67690 })
67691 #endif
67692
67693 #ifdef __LITTLE_ENDIAN__
67694 #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
67695 uint16x8x4_t __s1 = __p1; \
67696 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
67697 })
67698 #else
67699 #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
67700 uint16x8x4_t __s1 = __p1; \
67701 uint16x8x4_t __rev1; \
67702 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67703 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67704 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67705 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
67706 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
67707 })
67708 #endif
67709
67710 #ifdef __LITTLE_ENDIAN__
67711 #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
67712 int8x16x4_t __s1 = __p1; \
67713 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
67714 })
67715 #else
67716 #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
67717 int8x16x4_t __s1 = __p1; \
67718 int8x16x4_t __rev1; \
67719 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67720 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67721 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67722 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
67723 __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
67724 })
67725 #endif
67726
67727 #ifdef __LITTLE_ENDIAN__
67728 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
67729 float64x2x4_t __s1 = __p1; \
67730 __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
67731 })
67732 #else
67733 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
67734 float64x2x4_t __s1 = __p1; \
67735 float64x2x4_t __rev1; \
67736 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67737 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67738 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67739 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
67740 __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
67741 })
67742 #endif
67743
67744 #ifdef __LITTLE_ENDIAN__
67745 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
67746 float32x4x4_t __s1 = __p1; \
67747 __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
67748 })
67749 #else
67750 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
67751 float32x4x4_t __s1 = __p1; \
67752 float32x4x4_t __rev1; \
67753 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67754 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67755 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67756 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
67757 __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
67758 })
67759 #endif
67760
67761 #ifdef __LITTLE_ENDIAN__
67762 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
67763 float16x8x4_t __s1 = __p1; \
67764 __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
67765 })
67766 #else
67767 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
67768 float16x8x4_t __s1 = __p1; \
67769 float16x8x4_t __rev1; \
67770 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67771 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67772 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67773 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
67774 __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
67775 })
67776 #endif
67777
67778 #ifdef __LITTLE_ENDIAN__
67779 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
67780 int32x4x4_t __s1 = __p1; \
67781 __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
67782 })
67783 #else
67784 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
67785 int32x4x4_t __s1 = __p1; \
67786 int32x4x4_t __rev1; \
67787 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67788 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67789 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67790 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
67791 __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
67792 })
67793 #endif
67794
67795 #ifdef __LITTLE_ENDIAN__
67796 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
67797 int64x2x4_t __s1 = __p1; \
67798 __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
67799 })
67800 #else
67801 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
67802 int64x2x4_t __s1 = __p1; \
67803 int64x2x4_t __rev1; \
67804 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67805 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67806 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67807 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
67808 __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
67809 })
67810 #endif
67811
67812 #ifdef __LITTLE_ENDIAN__
67813 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
67814 int16x8x4_t __s1 = __p1; \
67815 __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
67816 })
67817 #else
67818 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
67819 int16x8x4_t __s1 = __p1; \
67820 int16x8x4_t __rev1; \
67821 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67822 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67823 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67824 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
67825 __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
67826 })
67827 #endif
67828
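/*
 * The vst1_*_x4 macros that follow are the 64-bit (D-register) counterparts
 * of the quad-register x4 stores above: four vectors are written to
 * consecutive memory, with unsigned and polynomial tuples cast through
 * int8x8_t before reaching the builtin.
 */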
67829 #ifdef __LITTLE_ENDIAN__
67830 #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
67831 uint8x8x4_t __s1 = __p1; \
67832 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
67833 })
67834 #else
67835 #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
67836 uint8x8x4_t __s1 = __p1; \
67837 uint8x8x4_t __rev1; \
67838 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67839 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67840 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67841 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
67842 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
67843 })
67844 #endif
67845
67846 #ifdef __LITTLE_ENDIAN__
67847 #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
67848 uint32x2x4_t __s1 = __p1; \
67849 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
67850 })
67851 #else
67852 #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
67853 uint32x2x4_t __s1 = __p1; \
67854 uint32x2x4_t __rev1; \
67855 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67856 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67857 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67858 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
67859 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
67860 })
67861 #endif
67862
67863 #ifdef __LITTLE_ENDIAN__
67864 #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
67865 uint64x1x4_t __s1 = __p1; \
67866 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
67867 })
67868 #else
67869 #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
67870 uint64x1x4_t __s1 = __p1; \
67871 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
67872 })
67873 #endif
67874
67875 #ifdef __LITTLE_ENDIAN__
67876 #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
67877 uint16x4x4_t __s1 = __p1; \
67878 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
67879 })
67880 #else
67881 #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
67882 uint16x4x4_t __s1 = __p1; \
67883 uint16x4x4_t __rev1; \
67884 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67885 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67886 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67887 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
67888 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
67889 })
67890 #endif
67891
67892 #ifdef __LITTLE_ENDIAN__
67893 #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
67894 int8x8x4_t __s1 = __p1; \
67895 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
67896 })
67897 #else
67898 #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
67899 int8x8x4_t __s1 = __p1; \
67900 int8x8x4_t __rev1; \
67901 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
67902 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
67903 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
67904 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
67905 __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
67906 })
67907 #endif
67908
67909 #ifdef __LITTLE_ENDIAN__
67910 #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
67911 float64x1x4_t __s1 = __p1; \
67912 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
67913 })
67914 #else
67915 #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
67916 float64x1x4_t __s1 = __p1; \
67917 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
67918 })
67919 #endif
67920
67921 #ifdef __LITTLE_ENDIAN__
67922 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
67923 float32x2x4_t __s1 = __p1; \
67924 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
67925 })
67926 #else
67927 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
67928 float32x2x4_t __s1 = __p1; \
67929 float32x2x4_t __rev1; \
67930 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67931 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67932 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67933 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
67934 __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
67935 })
67936 #endif
67937
67938 #ifdef __LITTLE_ENDIAN__
67939 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
67940 float16x4x4_t __s1 = __p1; \
67941 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
67942 })
67943 #else
67944 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
67945 float16x4x4_t __s1 = __p1; \
67946 float16x4x4_t __rev1; \
67947 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67948 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67949 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67950 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
67951 __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
67952 })
67953 #endif
67954
67955 #ifdef __LITTLE_ENDIAN__
67956 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
67957 int32x2x4_t __s1 = __p1; \
67958 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
67959 })
67960 #else
67961 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
67962 int32x2x4_t __s1 = __p1; \
67963 int32x2x4_t __rev1; \
67964 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
67965 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
67966 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
67967 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
67968 __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
67969 })
67970 #endif
67971
67972 #ifdef __LITTLE_ENDIAN__
67973 #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
67974 int64x1x4_t __s1 = __p1; \
67975 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
67976 })
67977 #else
67978 #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
67979 int64x1x4_t __s1 = __p1; \
67980 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
67981 })
67982 #endif
67983
67984 #ifdef __LITTLE_ENDIAN__
67985 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
67986 int16x4x4_t __s1 = __p1; \
67987 __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
67988 })
67989 #else
67990 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
67991 int16x4x4_t __s1 = __p1; \
67992 int16x4x4_t __rev1; \
67993 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
67994 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
67995 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
67996 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
67997 __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
67998 })
67999 #endif
68000
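/*
 * The vst1*_x4 macros above store four vectors to consecutive memory with
 * no interleaving.  The vst2/vst3/vst4 macros below (and their _lane
 * variants, which store a single lane from each vector) perform 2-, 3- and
 * 4-way element-wise interleaving, typically lowering to a single
 * ST2/ST3/ST4 instruction; for example, vst2q_u64 writes val[0][0],
 * val[1][0], val[0][1], val[1][1] in that order.  As elsewhere in this
 * header, the big-endian (#else) branches reverse the lane order of each
 * source vector first, because the builtins assume little-endian lane
 * numbering; one-element vectors such as poly64x1 need no reversal, so
 * both branches are identical there.
 */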
68001 #ifdef __LITTLE_ENDIAN__
68002 #define vst2_p64(__p0, __p1) __extension__ ({ \
68003 poly64x1x2_t __s1 = __p1; \
68004 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
68005 })
68006 #else
68007 #define vst2_p64(__p0, __p1) __extension__ ({ \
68008 poly64x1x2_t __s1 = __p1; \
68009 __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
68010 })
68011 #endif
68012
68013 #ifdef __LITTLE_ENDIAN__
68014 #define vst2q_p64(__p0, __p1) __extension__ ({ \
68015 poly64x2x2_t __s1 = __p1; \
68016 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
68017 })
68018 #else
68019 #define vst2q_p64(__p0, __p1) __extension__ ({ \
68020 poly64x2x2_t __s1 = __p1; \
68021 poly64x2x2_t __rev1; \
68022 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68023 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68024 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
68025 })
68026 #endif
68027
68028 #ifdef __LITTLE_ENDIAN__
68029 #define vst2q_u64(__p0, __p1) __extension__ ({ \
68030 uint64x2x2_t __s1 = __p1; \
68031 __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
68032 })
68033 #else
68034 #define vst2q_u64(__p0, __p1) __extension__ ({ \
68035 uint64x2x2_t __s1 = __p1; \
68036 uint64x2x2_t __rev1; \
68037 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68038 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68039 __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
68040 })
68041 #endif
68042
68043 #ifdef __LITTLE_ENDIAN__
68044 #define vst2q_f64(__p0, __p1) __extension__ ({ \
68045 float64x2x2_t __s1 = __p1; \
68046 __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
68047 })
68048 #else
68049 #define vst2q_f64(__p0, __p1) __extension__ ({ \
68050 float64x2x2_t __s1 = __p1; \
68051 float64x2x2_t __rev1; \
68052 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68053 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68054 __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
68055 })
68056 #endif
68057
68058 #ifdef __LITTLE_ENDIAN__
68059 #define vst2q_s64(__p0, __p1) __extension__ ({ \
68060 int64x2x2_t __s1 = __p1; \
68061 __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
68062 })
68063 #else
68064 #define vst2q_s64(__p0, __p1) __extension__ ({ \
68065 int64x2x2_t __s1 = __p1; \
68066 int64x2x2_t __rev1; \
68067 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68068 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68069 __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
68070 })
68071 #endif
68072
68073 #ifdef __LITTLE_ENDIAN__
68074 #define vst2_f64(__p0, __p1) __extension__ ({ \
68075 float64x1x2_t __s1 = __p1; \
68076 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
68077 })
68078 #else
68079 #define vst2_f64(__p0, __p1) __extension__ ({ \
68080 float64x1x2_t __s1 = __p1; \
68081 __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
68082 })
68083 #endif
68084
68085 #ifdef __LITTLE_ENDIAN__
68086 #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68087 poly64x1x2_t __s1 = __p1; \
68088 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
68089 })
68090 #else
68091 #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68092 poly64x1x2_t __s1 = __p1; \
68093 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
68094 })
68095 #endif
68096
68097 #ifdef __LITTLE_ENDIAN__
68098 #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
68099 poly8x16x2_t __s1 = __p1; \
68100 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
68101 })
68102 #else
68103 #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
68104 poly8x16x2_t __s1 = __p1; \
68105 poly8x16x2_t __rev1; \
68106 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68107 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68108 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
68109 })
68110 #endif
68111
68112 #ifdef __LITTLE_ENDIAN__
68113 #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68114 poly64x2x2_t __s1 = __p1; \
68115 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
68116 })
68117 #else
68118 #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68119 poly64x2x2_t __s1 = __p1; \
68120 poly64x2x2_t __rev1; \
68121 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68122 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68123 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
68124 })
68125 #endif
68126
68127 #ifdef __LITTLE_ENDIAN__
68128 #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
68129 uint8x16x2_t __s1 = __p1; \
68130 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
68131 })
68132 #else
68133 #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
68134 uint8x16x2_t __s1 = __p1; \
68135 uint8x16x2_t __rev1; \
68136 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68137 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68138 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
68139 })
68140 #endif
68141
68142 #ifdef __LITTLE_ENDIAN__
68143 #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68144 uint64x2x2_t __s1 = __p1; \
68145 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
68146 })
68147 #else
68148 #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68149 uint64x2x2_t __s1 = __p1; \
68150 uint64x2x2_t __rev1; \
68151 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68152 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68153 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
68154 })
68155 #endif
68156
68157 #ifdef __LITTLE_ENDIAN__
68158 #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
68159 int8x16x2_t __s1 = __p1; \
68160 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
68161 })
68162 #else
68163 #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
68164 int8x16x2_t __s1 = __p1; \
68165 int8x16x2_t __rev1; \
68166 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68167 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68168 __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
68169 })
68170 #endif
68171
68172 #ifdef __LITTLE_ENDIAN__
68173 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68174 float64x2x2_t __s1 = __p1; \
68175 __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
68176 })
68177 #else
68178 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68179 float64x2x2_t __s1 = __p1; \
68180 float64x2x2_t __rev1; \
68181 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68182 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68183 __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
68184 })
68185 #endif
68186
68187 #ifdef __LITTLE_ENDIAN__
68188 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68189 int64x2x2_t __s1 = __p1; \
68190 __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
68191 })
68192 #else
68193 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68194 int64x2x2_t __s1 = __p1; \
68195 int64x2x2_t __rev1; \
68196 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68197 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68198 __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
68199 })
68200 #endif
68201
68202 #ifdef __LITTLE_ENDIAN__
68203 #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68204 uint64x1x2_t __s1 = __p1; \
68205 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
68206 })
68207 #else
68208 #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68209 uint64x1x2_t __s1 = __p1; \
68210 __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
68211 })
68212 #endif
68213
68214 #ifdef __LITTLE_ENDIAN__
68215 #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68216 float64x1x2_t __s1 = __p1; \
68217 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
68218 })
68219 #else
68220 #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68221 float64x1x2_t __s1 = __p1; \
68222 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
68223 })
68224 #endif
68225
68226 #ifdef __LITTLE_ENDIAN__
68227 #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68228 int64x1x2_t __s1 = __p1; \
68229 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
68230 })
68231 #else
68232 #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68233 int64x1x2_t __s1 = __p1; \
68234 __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
68235 })
68236 #endif
68237
68238 #ifdef __LITTLE_ENDIAN__
68239 #define vst3_p64(__p0, __p1) __extension__ ({ \
68240 poly64x1x3_t __s1 = __p1; \
68241 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
68242 })
68243 #else
68244 #define vst3_p64(__p0, __p1) __extension__ ({ \
68245 poly64x1x3_t __s1 = __p1; \
68246 __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
68247 })
68248 #endif
68249
68250 #ifdef __LITTLE_ENDIAN__
68251 #define vst3q_p64(__p0, __p1) __extension__ ({ \
68252 poly64x2x3_t __s1 = __p1; \
68253 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
68254 })
68255 #else
68256 #define vst3q_p64(__p0, __p1) __extension__ ({ \
68257 poly64x2x3_t __s1 = __p1; \
68258 poly64x2x3_t __rev1; \
68259 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68260 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68261 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68262 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
68263 })
68264 #endif
68265
68266 #ifdef __LITTLE_ENDIAN__
68267 #define vst3q_u64(__p0, __p1) __extension__ ({ \
68268 uint64x2x3_t __s1 = __p1; \
68269 __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
68270 })
68271 #else
68272 #define vst3q_u64(__p0, __p1) __extension__ ({ \
68273 uint64x2x3_t __s1 = __p1; \
68274 uint64x2x3_t __rev1; \
68275 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68276 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68277 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68278 __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
68279 })
68280 #endif
68281
68282 #ifdef __LITTLE_ENDIAN__
68283 #define vst3q_f64(__p0, __p1) __extension__ ({ \
68284 float64x2x3_t __s1 = __p1; \
68285 __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
68286 })
68287 #else
68288 #define vst3q_f64(__p0, __p1) __extension__ ({ \
68289 float64x2x3_t __s1 = __p1; \
68290 float64x2x3_t __rev1; \
68291 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68292 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68293 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68294 __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
68295 })
68296 #endif
68297
68298 #ifdef __LITTLE_ENDIAN__
68299 #define vst3q_s64(__p0, __p1) __extension__ ({ \
68300 int64x2x3_t __s1 = __p1; \
68301 __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
68302 })
68303 #else
68304 #define vst3q_s64(__p0, __p1) __extension__ ({ \
68305 int64x2x3_t __s1 = __p1; \
68306 int64x2x3_t __rev1; \
68307 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68308 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68309 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68310 __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
68311 })
68312 #endif
68313
68314 #ifdef __LITTLE_ENDIAN__
68315 #define vst3_f64(__p0, __p1) __extension__ ({ \
68316 float64x1x3_t __s1 = __p1; \
68317 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
68318 })
68319 #else
68320 #define vst3_f64(__p0, __p1) __extension__ ({ \
68321 float64x1x3_t __s1 = __p1; \
68322 __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
68323 })
68324 #endif
68325
68326 #ifdef __LITTLE_ENDIAN__
68327 #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68328 poly64x1x3_t __s1 = __p1; \
68329 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
68330 })
68331 #else
68332 #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68333 poly64x1x3_t __s1 = __p1; \
68334 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
68335 })
68336 #endif
68337
68338 #ifdef __LITTLE_ENDIAN__
68339 #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
68340 poly8x16x3_t __s1 = __p1; \
68341 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
68342 })
68343 #else
68344 #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
68345 poly8x16x3_t __s1 = __p1; \
68346 poly8x16x3_t __rev1; \
68347 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68348 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68349 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68350 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
68351 })
68352 #endif
68353
68354 #ifdef __LITTLE_ENDIAN__
68355 #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68356 poly64x2x3_t __s1 = __p1; \
68357 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
68358 })
68359 #else
68360 #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68361 poly64x2x3_t __s1 = __p1; \
68362 poly64x2x3_t __rev1; \
68363 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68364 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68365 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68366 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
68367 })
68368 #endif
68369
68370 #ifdef __LITTLE_ENDIAN__
68371 #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
68372 uint8x16x3_t __s1 = __p1; \
68373 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
68374 })
68375 #else
68376 #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
68377 uint8x16x3_t __s1 = __p1; \
68378 uint8x16x3_t __rev1; \
68379 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68380 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68381 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68382 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
68383 })
68384 #endif
68385
68386 #ifdef __LITTLE_ENDIAN__
68387 #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68388 uint64x2x3_t __s1 = __p1; \
68389 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
68390 })
68391 #else
68392 #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68393 uint64x2x3_t __s1 = __p1; \
68394 uint64x2x3_t __rev1; \
68395 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68396 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68397 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68398 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
68399 })
68400 #endif
68401
68402 #ifdef __LITTLE_ENDIAN__
68403 #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
68404 int8x16x3_t __s1 = __p1; \
68405 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
68406 })
68407 #else
68408 #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
68409 int8x16x3_t __s1 = __p1; \
68410 int8x16x3_t __rev1; \
68411 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68412 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68413 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68414 __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
68415 })
68416 #endif
68417
68418 #ifdef __LITTLE_ENDIAN__
68419 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68420 float64x2x3_t __s1 = __p1; \
68421 __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
68422 })
68423 #else
68424 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68425 float64x2x3_t __s1 = __p1; \
68426 float64x2x3_t __rev1; \
68427 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68428 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68429 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68430 __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
68431 })
68432 #endif
68433
68434 #ifdef __LITTLE_ENDIAN__
68435 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68436 int64x2x3_t __s1 = __p1; \
68437 __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
68438 })
68439 #else
68440 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68441 int64x2x3_t __s1 = __p1; \
68442 int64x2x3_t __rev1; \
68443 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68444 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68445 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68446 __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
68447 })
68448 #endif
68449
68450 #ifdef __LITTLE_ENDIAN__
68451 #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68452 uint64x1x3_t __s1 = __p1; \
68453 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
68454 })
68455 #else
68456 #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68457 uint64x1x3_t __s1 = __p1; \
68458 __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
68459 })
68460 #endif
68461
68462 #ifdef __LITTLE_ENDIAN__
68463 #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68464 float64x1x3_t __s1 = __p1; \
68465 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
68466 })
68467 #else
68468 #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68469 float64x1x3_t __s1 = __p1; \
68470 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
68471 })
68472 #endif
68473
68474 #ifdef __LITTLE_ENDIAN__
68475 #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68476 int64x1x3_t __s1 = __p1; \
68477 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
68478 })
68479 #else
68480 #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68481 int64x1x3_t __s1 = __p1; \
68482 __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
68483 })
68484 #endif
68485
68486 #ifdef __LITTLE_ENDIAN__
68487 #define vst4_p64(__p0, __p1) __extension__ ({ \
68488 poly64x1x4_t __s1 = __p1; \
68489 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
68490 })
68491 #else
68492 #define vst4_p64(__p0, __p1) __extension__ ({ \
68493 poly64x1x4_t __s1 = __p1; \
68494 __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
68495 })
68496 #endif
68497
68498 #ifdef __LITTLE_ENDIAN__
68499 #define vst4q_p64(__p0, __p1) __extension__ ({ \
68500 poly64x2x4_t __s1 = __p1; \
68501 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
68502 })
68503 #else
68504 #define vst4q_p64(__p0, __p1) __extension__ ({ \
68505 poly64x2x4_t __s1 = __p1; \
68506 poly64x2x4_t __rev1; \
68507 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68508 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68509 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68510 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68511 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
68512 })
68513 #endif
68514
68515 #ifdef __LITTLE_ENDIAN__
68516 #define vst4q_u64(__p0, __p1) __extension__ ({ \
68517 uint64x2x4_t __s1 = __p1; \
68518 __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
68519 })
68520 #else
68521 #define vst4q_u64(__p0, __p1) __extension__ ({ \
68522 uint64x2x4_t __s1 = __p1; \
68523 uint64x2x4_t __rev1; \
68524 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68525 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68526 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68527 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68528 __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
68529 })
68530 #endif
68531
68532 #ifdef __LITTLE_ENDIAN__
68533 #define vst4q_f64(__p0, __p1) __extension__ ({ \
68534 float64x2x4_t __s1 = __p1; \
68535 __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
68536 })
68537 #else
68538 #define vst4q_f64(__p0, __p1) __extension__ ({ \
68539 float64x2x4_t __s1 = __p1; \
68540 float64x2x4_t __rev1; \
68541 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68542 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68543 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68544 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68545 __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
68546 })
68547 #endif
68548
68549 #ifdef __LITTLE_ENDIAN__
68550 #define vst4q_s64(__p0, __p1) __extension__ ({ \
68551 int64x2x4_t __s1 = __p1; \
68552 __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
68553 })
68554 #else
68555 #define vst4q_s64(__p0, __p1) __extension__ ({ \
68556 int64x2x4_t __s1 = __p1; \
68557 int64x2x4_t __rev1; \
68558 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68559 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68560 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68561 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68562 __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
68563 })
68564 #endif
68565
68566 #ifdef __LITTLE_ENDIAN__
68567 #define vst4_f64(__p0, __p1) __extension__ ({ \
68568 float64x1x4_t __s1 = __p1; \
68569 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
68570 })
68571 #else
68572 #define vst4_f64(__p0, __p1) __extension__ ({ \
68573 float64x1x4_t __s1 = __p1; \
68574 __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
68575 })
68576 #endif
68577
68578 #ifdef __LITTLE_ENDIAN__
68579 #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68580 poly64x1x4_t __s1 = __p1; \
68581 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
68582 })
68583 #else
68584 #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68585 poly64x1x4_t __s1 = __p1; \
68586 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
68587 })
68588 #endif
68589
68590 #ifdef __LITTLE_ENDIAN__
68591 #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
68592 poly8x16x4_t __s1 = __p1; \
68593 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
68594 })
68595 #else
68596 #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
68597 poly8x16x4_t __s1 = __p1; \
68598 poly8x16x4_t __rev1; \
68599 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68600 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68601 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68602 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68603 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
68604 })
68605 #endif
68606
68607 #ifdef __LITTLE_ENDIAN__
68608 #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68609 poly64x2x4_t __s1 = __p1; \
68610 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
68611 })
68612 #else
68613 #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
68614 poly64x2x4_t __s1 = __p1; \
68615 poly64x2x4_t __rev1; \
68616 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68617 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68618 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68619 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68620 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
68621 })
68622 #endif
68623
68624 #ifdef __LITTLE_ENDIAN__
68625 #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
68626 uint8x16x4_t __s1 = __p1; \
68627 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
68628 })
68629 #else
68630 #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
68631 uint8x16x4_t __s1 = __p1; \
68632 uint8x16x4_t __rev1; \
68633 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68634 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68635 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68636 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68637 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
68638 })
68639 #endif
68640
68641 #ifdef __LITTLE_ENDIAN__
68642 #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68643 uint64x2x4_t __s1 = __p1; \
68644 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
68645 })
68646 #else
68647 #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68648 uint64x2x4_t __s1 = __p1; \
68649 uint64x2x4_t __rev1; \
68650 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68651 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68652 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68653 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68654 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
68655 })
68656 #endif
68657
68658 #ifdef __LITTLE_ENDIAN__
68659 #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
68660 int8x16x4_t __s1 = __p1; \
68661 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
68662 })
68663 #else
68664 #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
68665 int8x16x4_t __s1 = __p1; \
68666 int8x16x4_t __rev1; \
68667 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68668 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68669 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68670 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
68671 __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
68672 })
68673 #endif
68674
68675 #ifdef __LITTLE_ENDIAN__
68676 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68677 float64x2x4_t __s1 = __p1; \
68678 __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
68679 })
68680 #else
68681 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68682 float64x2x4_t __s1 = __p1; \
68683 float64x2x4_t __rev1; \
68684 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68685 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68686 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68687 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68688 __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
68689 })
68690 #endif
68691
68692 #ifdef __LITTLE_ENDIAN__
68693 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68694 int64x2x4_t __s1 = __p1; \
68695 __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
68696 })
68697 #else
68698 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68699 int64x2x4_t __s1 = __p1; \
68700 int64x2x4_t __rev1; \
68701 __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
68702 __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
68703 __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
68704 __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
68705 __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
68706 })
68707 #endif
68708
68709 #ifdef __LITTLE_ENDIAN__
68710 #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68711 uint64x1x4_t __s1 = __p1; \
68712 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
68713 })
68714 #else
68715 #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
68716 uint64x1x4_t __s1 = __p1; \
68717 __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
68718 })
68719 #endif
68720
68721 #ifdef __LITTLE_ENDIAN__
68722 #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68723 float64x1x4_t __s1 = __p1; \
68724 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
68725 })
68726 #else
68727 #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
68728 float64x1x4_t __s1 = __p1; \
68729 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
68730 })
68731 #endif
68732
68733 #ifdef __LITTLE_ENDIAN__
68734 #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68735 int64x1x4_t __s1 = __p1; \
68736 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
68737 })
68738 #else
68739 #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
68740 int64x1x4_t __s1 = __p1; \
68741 __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
68742 })
68743 #endif
68744
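/*
 * vstrq_p128 stores a single 128-bit polynomial value to memory; since
 * there are no vector lanes to reverse, the little- and big-endian
 * expansions are identical.
 */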
68745 #ifdef __LITTLE_ENDIAN__
68746 #define vstrq_p128(__p0, __p1) __extension__ ({ \
68747 poly128_t __s1 = __p1; \
68748 __builtin_neon_vstrq_p128(__p0, __s1); \
68749 })
68750 #else
68751 #define vstrq_p128(__p0, __p1) __extension__ ({ \
68752 poly128_t __s1 = __p1; \
68753 __builtin_neon_vstrq_p128(__p0, __s1); \
68754 })
68755 #endif
68756
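/*
 * Scalar subtracts: vsubd_u64 and vsubd_s64 return __p0 - __p1 on 64-bit
 * scalars.  Lane reversal is irrelevant for scalars, so both branches are
 * the same.
 */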
68757 #ifdef __LITTLE_ENDIAN__
68758 __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
68759 uint64_t __ret;
68760 __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
68761 return __ret;
68762 }
68763 #else
68764 __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
68765 uint64_t __ret;
68766 __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
68767 return __ret;
68768 }
68769 #endif
68770
68771 #ifdef __LITTLE_ENDIAN__
68772 __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
68773 int64_t __ret;
68774 __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
68775 return __ret;
68776 }
68777 #else
68778 __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
68779 int64_t __ret;
68780 __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
68781 return __ret;
68782 }
68783 #endif
68784
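/*
 * vsubq_f64/vsub_f64 perform element-wise double-precision subtraction.
 * On big-endian targets the two-element operands are lane-reversed before
 * subtracting and the result is reversed back; the one-element vsub_f64
 * needs no such fixup.
 */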
68785 #ifdef __LITTLE_ENDIAN__
68786 __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
68787 float64x2_t __ret;
68788 __ret = __p0 - __p1;
68789 return __ret;
68790 }
68791 #else
68792 __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
68793 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68794 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
68795 float64x2_t __ret;
68796 __ret = __rev0 - __rev1;
68797 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68798 return __ret;
68799 }
68800 #endif
68801
68802 #ifdef __LITTLE_ENDIAN__
68803 __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
68804 float64x1_t __ret;
68805 __ret = __p0 - __p1;
68806 return __ret;
68807 }
68808 #else
68809 __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
68810 float64x1_t __ret;
68811 __ret = __p0 - __p1;
68812 return __ret;
68813 }
68814 #endif
68815
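/*
 * vsubhn_high_*: subtract-high-narrow into the upper half of a full
 * vector.  Each combines __p0 (the existing low half) with the result of
 * the corresponding vsubhn, which keeps the high half of every
 * element-wise difference.  The big-endian branches call the __noswap_
 * helpers so that lane reversal is applied only once, at the boundary of
 * the composed operation.
 */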
68816 #ifdef __LITTLE_ENDIAN__
68817 __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
68818 uint16x8_t __ret;
68819 __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
68820 return __ret;
68821 }
68822 #else
68823 __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
68824 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68825 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68826 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
68827 uint16x8_t __ret;
68828 __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
68829 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68830 return __ret;
68831 }
68832 #endif
68833
68834 #ifdef __LITTLE_ENDIAN__
68835 __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
68836 uint32x4_t __ret;
68837 __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
68838 return __ret;
68839 }
68840 #else
68841 __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
68842 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68843 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
68844 uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
68845 uint32x4_t __ret;
68846 __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
68847 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68848 return __ret;
68849 }
68850 #endif
68851
68852 #ifdef __LITTLE_ENDIAN__
68853 __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
68854 uint8x16_t __ret;
68855 __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
68856 return __ret;
68857 }
68858 #else
68859 __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
68860 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
68861 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68862 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
68863 uint8x16_t __ret;
68864 __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
68865 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68866 return __ret;
68867 }
68868 #endif
68869
68870 #ifdef __LITTLE_ENDIAN__
68871 __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
68872 int16x8_t __ret;
68873 __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
68874 return __ret;
68875 }
68876 #else
68877 __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
68878 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68879 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68880 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
68881 int16x8_t __ret;
68882 __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
68883 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68884 return __ret;
68885 }
68886 #endif
68887
68888 #ifdef __LITTLE_ENDIAN__
68889 __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
68890 int32x4_t __ret;
68891 __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
68892 return __ret;
68893 }
68894 #else
68895 __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
68896 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
68897 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
68898 int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
68899 int32x4_t __ret;
68900 __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
68901 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68902 return __ret;
68903 }
68904 #endif
68905
68906 #ifdef __LITTLE_ENDIAN__
68907 __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
68908 int8x16_t __ret;
68909 __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
68910 return __ret;
68911 }
68912 #else
68913 __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
68914 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
68915 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68916 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
68917 int8x16_t __ret;
68918 __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
68919 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68920 return __ret;
68921 }
68922 #endif
68923
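/*
 * vsubl_high_*: widening subtract of the upper halves. vmovl_high_* widens
 * the top half of each 128-bit operand to double-width lanes, and the
 * subtraction is then performed at the wider element size, e.g.
 * vsubl_high_u8 yields (uint16_t)__p0[i+8] - (uint16_t)__p1[i+8].
 */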
68924 #ifdef __LITTLE_ENDIAN__
68925 __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
68926 uint16x8_t __ret;
68927 __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
68928 return __ret;
68929 }
68930 #else
68931 __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
68932 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68933 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68934 uint16x8_t __ret;
68935 __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
68936 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68937 return __ret;
68938 }
68939 #endif
68940
68941 #ifdef __LITTLE_ENDIAN__
68942 __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
68943 uint64x2_t __ret;
68944 __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
68945 return __ret;
68946 }
68947 #else
68948 __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
68949 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
68950 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
68951 uint64x2_t __ret;
68952 __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
68953 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
68954 return __ret;
68955 }
68956 #endif
68957
68958 #ifdef __LITTLE_ENDIAN__
68959 __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
68960 uint32x4_t __ret;
68961 __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
68962 return __ret;
68963 }
68964 #else
68965 __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
68966 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
68967 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
68968 uint32x4_t __ret;
68969 __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
68970 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
68971 return __ret;
68972 }
68973 #endif
68974
68975 #ifdef __LITTLE_ENDIAN__
68976 __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
68977 int16x8_t __ret;
68978 __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
68979 return __ret;
68980 }
68981 #else
68982 __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
68983 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68984 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
68985 int16x8_t __ret;
68986 __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
68987 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
68988 return __ret;
68989 }
68990 #endif
68991
68992 #ifdef __LITTLE_ENDIAN__
68993 __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
68994 int64x2_t __ret;
68995 __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
68996 return __ret;
68997 }
68998 #else
68999 __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
69000 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69001 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69002 int64x2_t __ret;
69003 __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
69004 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69005 return __ret;
69006 }
69007 #endif
69008
69009 #ifdef __LITTLE_ENDIAN__
69010 __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
69011 int32x4_t __ret;
69012 __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
69013 return __ret;
69014 }
69015 #else
69016 __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
69017 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69018 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69019 int32x4_t __ret;
69020 __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
69021 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69022 return __ret;
69023 }
69024 #endif
69025
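/*
 * vsubw_high_*: wide subtract. Only the second operand is narrow; its upper
 * half is widened with vmovl_high_* and subtracted lane-wise from the
 * already-wide first operand, e.g. vsubw_high_u8 yields
 * __p0[i] - (uint16_t)__p1[i+8].
 */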
69026 #ifdef __LITTLE_ENDIAN__
69027 __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
69028 uint16x8_t __ret;
69029 __ret = __p0 - vmovl_high_u8(__p1);
69030 return __ret;
69031 }
69032 #else
69033 __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
69034 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69035 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69036 uint16x8_t __ret;
69037 __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
69038 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69039 return __ret;
69040 }
69041 #endif
69042
69043 #ifdef __LITTLE_ENDIAN__
69044 __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
69045 uint64x2_t __ret;
69046 __ret = __p0 - vmovl_high_u32(__p1);
69047 return __ret;
69048 }
69049 #else
69050 __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
69051 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69052 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69053 uint64x2_t __ret;
69054 __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
69055 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69056 return __ret;
69057 }
69058 #endif
69059
69060 #ifdef __LITTLE_ENDIAN__
69061 __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
69062 uint32x4_t __ret;
69063 __ret = __p0 - vmovl_high_u16(__p1);
69064 return __ret;
69065 }
69066 #else
69067 __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
69068 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69069 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69070 uint32x4_t __ret;
69071 __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
69072 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69073 return __ret;
69074 }
69075 #endif
69076
69077 #ifdef __LITTLE_ENDIAN__
69078 __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
69079 int16x8_t __ret;
69080 __ret = __p0 - vmovl_high_s8(__p1);
69081 return __ret;
69082 }
69083 #else
69084 __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
69085 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69086 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69087 int16x8_t __ret;
69088 __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
69089 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69090 return __ret;
69091 }
69092 #endif
69093
69094 #ifdef __LITTLE_ENDIAN__
69095 __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
69096 int64x2_t __ret;
69097 __ret = __p0 - vmovl_high_s32(__p1);
69098 return __ret;
69099 }
69100 #else
69101 __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
69102 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69103 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69104 int64x2_t __ret;
69105 __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
69106 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69107 return __ret;
69108 }
69109 #endif
69110
69111 #ifdef __LITTLE_ENDIAN__
69112 __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
69113 int32x4_t __ret;
69114 __ret = __p0 - vmovl_high_s16(__p1);
69115 return __ret;
69116 }
69117 #else
69118 __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
69119 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69120 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69121 int32x4_t __ret;
69122 __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
69123 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69124 return __ret;
69125 }
69126 #endif
69127
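/*
 * vtrn1_* / vtrn1q_*: transpose-even step. The shuffle masks below pick the
 * even-numbered lanes of both operands and interleave them:
 *   vtrn1(a, b) = { a[0], b[0], a[2], b[2], ... }
 * e.g. vtrn1q_u32({0,1,2,3}, {4,5,6,7}) = {0,4,2,6}.
 */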
69128 #ifdef __LITTLE_ENDIAN__
69129 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
69130 poly8x8_t __ret;
69131 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
69132 return __ret;
69133 }
69134 #else
69135 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
69136 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69137 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69138 poly8x8_t __ret;
69139 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
69140 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69141 return __ret;
69142 }
69143 #endif
69144
69145 #ifdef __LITTLE_ENDIAN__
69146 __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
69147 poly16x4_t __ret;
69148 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
69149 return __ret;
69150 }
69151 #else
69152 __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
69153 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69154 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69155 poly16x4_t __ret;
69156 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
69157 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69158 return __ret;
69159 }
69160 #endif
69161
69162 #ifdef __LITTLE_ENDIAN__
69163 __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
69164 poly8x16_t __ret;
69165 __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
69166 return __ret;
69167 }
69168 #else
69169 __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
69170 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69171 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69172 poly8x16_t __ret;
69173 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
69174 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69175 return __ret;
69176 }
69177 #endif
69178
69179 #ifdef __LITTLE_ENDIAN__
69180 __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
69181 poly64x2_t __ret;
69182 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
69183 return __ret;
69184 }
69185 #else
69186 __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
69187 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69188 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69189 poly64x2_t __ret;
69190 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
69191 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69192 return __ret;
69193 }
69194 #endif
69195
69196 #ifdef __LITTLE_ENDIAN__
69197 __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
69198 poly16x8_t __ret;
69199 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
69200 return __ret;
69201 }
69202 #else
69203 __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
69204 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69205 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69206 poly16x8_t __ret;
69207 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
69208 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69209 return __ret;
69210 }
69211 #endif
69212
69213 #ifdef __LITTLE_ENDIAN__
69214 __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
69215 uint8x16_t __ret;
69216 __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
69217 return __ret;
69218 }
69219 #else
69220 __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
69221 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69222 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69223 uint8x16_t __ret;
69224 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
69225 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69226 return __ret;
69227 }
69228 #endif
69229
69230 #ifdef __LITTLE_ENDIAN__
69231 __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
69232 uint32x4_t __ret;
69233 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
69234 return __ret;
69235 }
69236 #else
69237 __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
69238 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69239 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69240 uint32x4_t __ret;
69241 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
69242 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69243 return __ret;
69244 }
69245 #endif
69246
69247 #ifdef __LITTLE_ENDIAN__
69248 __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
69249 uint64x2_t __ret;
69250 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
69251 return __ret;
69252 }
69253 #else
69254 __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
69255 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69256 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69257 uint64x2_t __ret;
69258 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
69259 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69260 return __ret;
69261 }
69262 #endif
69263
69264 #ifdef __LITTLE_ENDIAN__
69265 __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
69266 uint16x8_t __ret;
69267 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
69268 return __ret;
69269 }
69270 #else
69271 __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
69272 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69273 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69274 uint16x8_t __ret;
69275 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
69276 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69277 return __ret;
69278 }
69279 #endif
69280
69281 #ifdef __LITTLE_ENDIAN__
69282 __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
69283 int8x16_t __ret;
69284 __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
69285 return __ret;
69286 }
69287 #else
69288 __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
69289 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69290 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69291 int8x16_t __ret;
69292 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
69293 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69294 return __ret;
69295 }
69296 #endif
69297
69298 #ifdef __LITTLE_ENDIAN__
69299 __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
69300 float64x2_t __ret;
69301 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
69302 return __ret;
69303 }
69304 #else
69305 __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
69306 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69307 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69308 float64x2_t __ret;
69309 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
69310 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69311 return __ret;
69312 }
69313 #endif
69314
69315 #ifdef __LITTLE_ENDIAN__
69316 __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
69317 float32x4_t __ret;
69318 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
69319 return __ret;
69320 }
69321 #else
69322 __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
69323 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69324 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69325 float32x4_t __ret;
69326 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
69327 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69328 return __ret;
69329 }
69330 #endif
69331
69332 #ifdef __LITTLE_ENDIAN__
69333 __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
69334 int32x4_t __ret;
69335 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
69336 return __ret;
69337 }
69338 #else
69339 __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
69340 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69341 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69342 int32x4_t __ret;
69343 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
69344 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69345 return __ret;
69346 }
69347 #endif
69348
69349 #ifdef __LITTLE_ENDIAN__
69350 __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
69351 int64x2_t __ret;
69352 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
69353 return __ret;
69354 }
69355 #else
69356 __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
69357 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69358 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69359 int64x2_t __ret;
69360 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
69361 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69362 return __ret;
69363 }
69364 #endif
69365
69366 #ifdef __LITTLE_ENDIAN__
69367 __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
69368 int16x8_t __ret;
69369 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
69370 return __ret;
69371 }
69372 #else
69373 __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
69374 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69375 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69376 int16x8_t __ret;
69377 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
69378 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69379 return __ret;
69380 }
69381 #endif
69382
69383 #ifdef __LITTLE_ENDIAN__
69384 __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
69385 uint8x8_t __ret;
69386 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
69387 return __ret;
69388 }
69389 #else
69390 __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
69391 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69392 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69393 uint8x8_t __ret;
69394 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
69395 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69396 return __ret;
69397 }
69398 #endif
69399
69400 #ifdef __LITTLE_ENDIAN__
69401 __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
69402 uint32x2_t __ret;
69403 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
69404 return __ret;
69405 }
69406 #else
69407 __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
69408 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69409 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69410 uint32x2_t __ret;
69411 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
69412 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69413 return __ret;
69414 }
69415 #endif
69416
69417 #ifdef __LITTLE_ENDIAN__
69418 __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
69419 uint16x4_t __ret;
69420 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
69421 return __ret;
69422 }
69423 #else
69424 __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
69425 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69426 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69427 uint16x4_t __ret;
69428 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
69429 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69430 return __ret;
69431 }
69432 #endif
69433
69434 #ifdef __LITTLE_ENDIAN__
69435 __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
69436 int8x8_t __ret;
69437 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
69438 return __ret;
69439 }
69440 #else
69441 __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
69442 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69443 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69444 int8x8_t __ret;
69445 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
69446 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69447 return __ret;
69448 }
69449 #endif
69450
69451 #ifdef __LITTLE_ENDIAN__
69452 __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
69453 float32x2_t __ret;
69454 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
69455 return __ret;
69456 }
69457 #else
69458 __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
69459 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69460 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69461 float32x2_t __ret;
69462 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
69463 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69464 return __ret;
69465 }
69466 #endif
69467
69468 #ifdef __LITTLE_ENDIAN__
69469 __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
69470 int32x2_t __ret;
69471 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
69472 return __ret;
69473 }
69474 #else
69475 __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
69476 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69477 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69478 int32x2_t __ret;
69479 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
69480 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69481 return __ret;
69482 }
69483 #endif
69484
69485 #ifdef __LITTLE_ENDIAN__
69486 __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
69487 int16x4_t __ret;
69488 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
69489 return __ret;
69490 }
69491 #else
69492 __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
69493 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69494 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69495 int16x4_t __ret;
69496 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
69497 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69498 return __ret;
69499 }
69500 #endif
69501
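/*
 * vtrn2_* / vtrn2q_*: transpose-odd step, the companion to vtrn1 above. The
 * shuffle masks pick the odd-numbered lanes of both operands:
 *   vtrn2(a, b) = { a[1], b[1], a[3], b[3], ... }
 * e.g. vtrn2q_u32({0,1,2,3}, {4,5,6,7}) = {1,5,3,7}.
 */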
69502 #ifdef __LITTLE_ENDIAN__
69503 __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
69504 poly8x8_t __ret;
69505 __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
69506 return __ret;
69507 }
69508 #else
69509 __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
69510 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69511 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69512 poly8x8_t __ret;
69513 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
69514 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69515 return __ret;
69516 }
69517 #endif
69518
69519 #ifdef __LITTLE_ENDIAN__
69520 __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
69521 poly16x4_t __ret;
69522 __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
69523 return __ret;
69524 }
69525 #else
69526 __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
69527 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69528 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69529 poly16x4_t __ret;
69530 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
69531 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69532 return __ret;
69533 }
69534 #endif
69535
69536 #ifdef __LITTLE_ENDIAN__
69537 __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
69538 poly8x16_t __ret;
69539 __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
69540 return __ret;
69541 }
69542 #else
69543 __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
69544 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69545 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69546 poly8x16_t __ret;
69547 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
69548 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69549 return __ret;
69550 }
69551 #endif
69552
69553 #ifdef __LITTLE_ENDIAN__
69554 __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
69555 poly64x2_t __ret;
69556 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
69557 return __ret;
69558 }
69559 #else
69560 __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
69561 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69562 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69563 poly64x2_t __ret;
69564 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
69565 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69566 return __ret;
69567 }
69568 #endif
69569
69570 #ifdef __LITTLE_ENDIAN__
69571 __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
69572 poly16x8_t __ret;
69573 __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
69574 return __ret;
69575 }
69576 #else
69577 __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
69578 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69579 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69580 poly16x8_t __ret;
69581 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
69582 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69583 return __ret;
69584 }
69585 #endif
69586
69587 #ifdef __LITTLE_ENDIAN__
69588 __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
69589 uint8x16_t __ret;
69590 __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
69591 return __ret;
69592 }
69593 #else
69594 __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
69595 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69596 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69597 uint8x16_t __ret;
69598 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
69599 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69600 return __ret;
69601 }
69602 #endif
69603
69604 #ifdef __LITTLE_ENDIAN__
69605 __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
69606 uint32x4_t __ret;
69607 __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
69608 return __ret;
69609 }
69610 #else
69611 __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
69612 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69613 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69614 uint32x4_t __ret;
69615 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
69616 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69617 return __ret;
69618 }
69619 #endif
69620
69621 #ifdef __LITTLE_ENDIAN__
69622 __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
69623 uint64x2_t __ret;
69624 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
69625 return __ret;
69626 }
69627 #else
69628 __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
69629 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69630 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69631 uint64x2_t __ret;
69632 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
69633 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69634 return __ret;
69635 }
69636 #endif
69637
69638 #ifdef __LITTLE_ENDIAN__
69639 __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
69640 uint16x8_t __ret;
69641 __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
69642 return __ret;
69643 }
69644 #else
69645 __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
69646 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69647 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69648 uint16x8_t __ret;
69649 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
69650 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69651 return __ret;
69652 }
69653 #endif
69654
69655 #ifdef __LITTLE_ENDIAN__
69656 __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
69657 int8x16_t __ret;
69658 __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
69659 return __ret;
69660 }
69661 #else
69662 __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
69663 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69664 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69665 int8x16_t __ret;
69666 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
69667 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
69668 return __ret;
69669 }
69670 #endif
69671
69672 #ifdef __LITTLE_ENDIAN__
69673 __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
69674 float64x2_t __ret;
69675 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
69676 return __ret;
69677 }
69678 #else
69679 __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
69680 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69681 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69682 float64x2_t __ret;
69683 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
69684 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69685 return __ret;
69686 }
69687 #endif
69688
69689 #ifdef __LITTLE_ENDIAN__
69690 __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
69691 float32x4_t __ret;
69692 __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
69693 return __ret;
69694 }
69695 #else
69696 __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
69697 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69698 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69699 float32x4_t __ret;
69700 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
69701 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69702 return __ret;
69703 }
69704 #endif
69705
69706 #ifdef __LITTLE_ENDIAN__
69707 __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
69708 int32x4_t __ret;
69709 __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
69710 return __ret;
69711 }
69712 #else
69713 __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
69714 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69715 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69716 int32x4_t __ret;
69717 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
69718 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69719 return __ret;
69720 }
69721 #endif
69722
69723 #ifdef __LITTLE_ENDIAN__
69724 __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
69725 int64x2_t __ret;
69726 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
69727 return __ret;
69728 }
69729 #else
69730 __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
69731 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69732 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69733 int64x2_t __ret;
69734 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
69735 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69736 return __ret;
69737 }
69738 #endif
69739
69740 #ifdef __LITTLE_ENDIAN__
69741 __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
69742 int16x8_t __ret;
69743 __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
69744 return __ret;
69745 }
69746 #else
69747 __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
69748 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69749 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69750 int16x8_t __ret;
69751 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
69752 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69753 return __ret;
69754 }
69755 #endif
69756
69757 #ifdef __LITTLE_ENDIAN__
69758 __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
69759 uint8x8_t __ret;
69760 __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
69761 return __ret;
69762 }
69763 #else
69764 __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
69765 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69766 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69767 uint8x8_t __ret;
69768 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
69769 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69770 return __ret;
69771 }
69772 #endif
69773
69774 #ifdef __LITTLE_ENDIAN__
69775 __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
69776 uint32x2_t __ret;
69777 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
69778 return __ret;
69779 }
69780 #else
69781 __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
69782 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69783 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69784 uint32x2_t __ret;
69785 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
69786 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69787 return __ret;
69788 }
69789 #endif
69790
69791 #ifdef __LITTLE_ENDIAN__
69792 __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
69793 uint16x4_t __ret;
69794 __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
69795 return __ret;
69796 }
69797 #else
69798 __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
69799 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69800 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69801 uint16x4_t __ret;
69802 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
69803 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69804 return __ret;
69805 }
69806 #endif
69807
69808 #ifdef __LITTLE_ENDIAN__
69809 __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
69810 int8x8_t __ret;
69811 __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
69812 return __ret;
69813 }
69814 #else
69815 __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
69816 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
69817 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
69818 int8x8_t __ret;
69819 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
69820 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
69821 return __ret;
69822 }
69823 #endif
69824
69825 #ifdef __LITTLE_ENDIAN__
69826 __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
69827 float32x2_t __ret;
69828 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
69829 return __ret;
69830 }
69831 #else
69832 __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
69833 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69834 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69835 float32x2_t __ret;
69836 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
69837 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69838 return __ret;
69839 }
69840 #endif
69841
69842 #ifdef __LITTLE_ENDIAN__
69843 __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
69844 int32x2_t __ret;
69845 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
69846 return __ret;
69847 }
69848 #else
69849 __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
69850 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69851 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69852 int32x2_t __ret;
69853 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
69854 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69855 return __ret;
69856 }
69857 #endif
69858
69859 #ifdef __LITTLE_ENDIAN__
69860 __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
69861 int16x4_t __ret;
69862 __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
69863 return __ret;
69864 }
69865 #else
69866 __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
69867 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
69868 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
69869 int16x4_t __ret;
69870 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
69871 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
69872 return __ret;
69873 }
69874 #endif
69875
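/*
 * vtst*_p64/u64/s64 and vtstd_*: bitwise test. Each result lane is all ones
 * when (__p0 & __p1) has any bit set in that lane and all zeros otherwise.
 * The single-lane 64-bit variants need no lane reversal, so their
 * little-endian and big-endian definitions are identical.
 */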
69876 #ifdef __LITTLE_ENDIAN__
69877 __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
69878 uint64x1_t __ret;
69879 __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
69880 return __ret;
69881 }
69882 #else
69883 __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
69884 uint64x1_t __ret;
69885 __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
69886 return __ret;
69887 }
69888 #endif
69889
69890 #ifdef __LITTLE_ENDIAN__
69891 __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
69892 uint64x2_t __ret;
69893 __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
69894 return __ret;
69895 }
69896 #else
69897 __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
69898 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69899 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69900 uint64x2_t __ret;
69901 __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
69902 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69903 return __ret;
69904 }
69905 #endif
69906
69907 #ifdef __LITTLE_ENDIAN__
69908 __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
69909 uint64x2_t __ret;
69910 __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
69911 return __ret;
69912 }
69913 #else
69914 __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
69915 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69916 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69917 uint64x2_t __ret;
69918 __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
69919 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69920 return __ret;
69921 }
69922 #endif
69923
69924 #ifdef __LITTLE_ENDIAN__
69925 __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
69926 uint64x2_t __ret;
69927 __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
69928 return __ret;
69929 }
69930 #else
69931 __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
69932 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
69933 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
69934 uint64x2_t __ret;
69935 __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
69936 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
69937 return __ret;
69938 }
69939 #endif
69940
69941 #ifdef __LITTLE_ENDIAN__
69942 __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
69943 uint64x1_t __ret;
69944 __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
69945 return __ret;
69946 }
69947 #else
69948 __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
69949 uint64x1_t __ret;
69950 __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
69951 return __ret;
69952 }
69953 #endif
69954
69955 #ifdef __LITTLE_ENDIAN__
69956 __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
69957 uint64x1_t __ret;
69958 __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
69959 return __ret;
69960 }
69961 #else
69962 __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
69963 uint64x1_t __ret;
69964 __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
69965 return __ret;
69966 }
69967 #endif
69968
69969 #ifdef __LITTLE_ENDIAN__
69970 __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
69971 uint64_t __ret;
69972 __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
69973 return __ret;
69974 }
69975 #else
69976 __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
69977 uint64_t __ret;
69978 __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
69979 return __ret;
69980 }
69981 #endif
69982
69983 #ifdef __LITTLE_ENDIAN__
69984 __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
69985 int64_t __ret;
69986 __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
69987 return __ret;
69988 }
69989 #else
69990 __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
69991 int64_t __ret;
69992 __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
69993 return __ret;
69994 }
69995 #endif
69996
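/*
 * vuqadd*_s* (scalar b/h/s/d forms and the vector forms below): signed
 * saturating accumulate of an unsigned value. The second operand is treated
 * as unsigned by the underlying operation (AArch64 SUQADD) and added to the
 * signed accumulator __p0 with saturation to the signed range; note that
 * this header declares both parameters with signed element types.
 */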
69997 #ifdef __LITTLE_ENDIAN__
69998 __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
69999 int8_t __ret;
70000 __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
70001 return __ret;
70002 }
70003 #else
70004 __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
70005 int8_t __ret;
70006 __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
70007 return __ret;
70008 }
70009 #endif
70010
70011 #ifdef __LITTLE_ENDIAN__
70012 __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
70013 int32_t __ret;
70014 __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
70015 return __ret;
70016 }
70017 #else
70018 __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
70019 int32_t __ret;
70020 __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
70021 return __ret;
70022 }
70023 #endif
70024
70025 #ifdef __LITTLE_ENDIAN__
70026 __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
70027 int64_t __ret;
70028 __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
70029 return __ret;
70030 }
70031 #else
70032 __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
70033 int64_t __ret;
70034 __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
70035 return __ret;
70036 }
70037 #endif
70038
70039 #ifdef __LITTLE_ENDIAN__
70040 __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
70041 int16_t __ret;
70042 __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
70043 return __ret;
70044 }
70045 #else
70046 __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
70047 int16_t __ret;
70048 __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
70049 return __ret;
70050 }
70051 #endif
70052
70053 #ifdef __LITTLE_ENDIAN__
70054 __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
70055 int8x16_t __ret;
70056 __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
70057 return __ret;
70058 }
70059 #else
70060 __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
70061 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70062 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70063 int8x16_t __ret;
70064 __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
70065 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70066 return __ret;
70067 }
70068 #endif
70069
70070 #ifdef __LITTLE_ENDIAN__
70071 __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
70072 int32x4_t __ret;
70073 __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
70074 return __ret;
70075 }
70076 #else
70077 __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
70078 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70079 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70080 int32x4_t __ret;
70081 __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
70082 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70083 return __ret;
70084 }
70085 #endif
70086
70087 #ifdef __LITTLE_ENDIAN__
70088 __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
70089 int64x2_t __ret;
70090 __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
70091 return __ret;
70092 }
70093 #else
70094 __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
70095 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70096 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70097 int64x2_t __ret;
70098 __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
70099 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70100 return __ret;
70101 }
70102 #endif
70103
70104 #ifdef __LITTLE_ENDIAN__
70105 __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
70106 int16x8_t __ret;
70107 __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
70108 return __ret;
70109 }
70110 #else
70111 __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
70112 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70113 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70114 int16x8_t __ret;
70115 __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
70116 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70117 return __ret;
70118 }
70119 #endif
70120
70121 #ifdef __LITTLE_ENDIAN__
70122 __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
70123 int8x8_t __ret;
70124 __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
70125 return __ret;
70126 }
70127 #else
70128 __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
70129 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70130 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70131 int8x8_t __ret;
70132 __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
70133 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70134 return __ret;
70135 }
70136 #endif
70137
70138 #ifdef __LITTLE_ENDIAN__
70139 __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
70140 int32x2_t __ret;
70141 __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
70142 return __ret;
70143 }
70144 #else
70145 __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
70146 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70147 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70148 int32x2_t __ret;
70149 __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
70150 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70151 return __ret;
70152 }
70153 #endif
70154
70155 #ifdef __LITTLE_ENDIAN__
70156 __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
70157 int64x1_t __ret;
70158 __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
70159 return __ret;
70160 }
70161 #else
70162 __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
70163 int64x1_t __ret;
70164 __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
70165 return __ret;
70166 }
70167 #endif
70168
70169 #ifdef __LITTLE_ENDIAN__
70170 __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
70171 int16x4_t __ret;
70172 __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
70173 return __ret;
70174 }
70175 #else
70176 __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
70177 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70178 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70179 int16x4_t __ret;
70180 __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
70181 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70182 return __ret;
70183 }
70184 #endif
70185
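/*
 * vuzp1_* / vuzp1q_*: unzip-even step. The shuffle masks concatenate the two
 * operands and keep the even-numbered lanes:
 *   vuzp1(a, b) = { a[0], a[2], ..., b[0], b[2], ... }
 * e.g. vuzp1q_u32({0,1,2,3}, {4,5,6,7}) = {0,2,4,6}.
 */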
70186 #ifdef __LITTLE_ENDIAN__
70187 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
70188 poly8x8_t __ret;
70189 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
70190 return __ret;
70191 }
70192 #else
70193 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
70194 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70195 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70196 poly8x8_t __ret;
70197 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
70198 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70199 return __ret;
70200 }
70201 #endif
70202
70203 #ifdef __LITTLE_ENDIAN__
70204 __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
70205 poly16x4_t __ret;
70206 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
70207 return __ret;
70208 }
70209 #else
70210 __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
70211 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70212 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70213 poly16x4_t __ret;
70214 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
70215 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70216 return __ret;
70217 }
70218 #endif
70219
70220 #ifdef __LITTLE_ENDIAN__
70221 __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
70222 poly8x16_t __ret;
70223 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
70224 return __ret;
70225 }
70226 #else
70227 __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
70228 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70229 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70230 poly8x16_t __ret;
70231 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
70232 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70233 return __ret;
70234 }
70235 #endif
70236
70237 #ifdef __LITTLE_ENDIAN__
70238 __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
70239 poly64x2_t __ret;
70240 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70241 return __ret;
70242 }
70243 #else
70244 __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
70245 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70246 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70247 poly64x2_t __ret;
70248 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70249 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70250 return __ret;
70251 }
70252 #endif
70253
70254 #ifdef __LITTLE_ENDIAN__
70255 __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
70256 poly16x8_t __ret;
70257 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
70258 return __ret;
70259 }
70260 #else
70261 __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
70262 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70263 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70264 poly16x8_t __ret;
70265 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
70266 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70267 return __ret;
70268 }
70269 #endif
70270
70271 #ifdef __LITTLE_ENDIAN__
70272 __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
70273 uint8x16_t __ret;
70274 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
70275 return __ret;
70276 }
70277 #else
70278 __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
70279 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70280 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70281 uint8x16_t __ret;
70282 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
70283 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70284 return __ret;
70285 }
70286 #endif
70287
70288 #ifdef __LITTLE_ENDIAN__
70289 __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
70290 uint32x4_t __ret;
70291 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
70292 return __ret;
70293 }
70294 #else
70295 __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
70296 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70297 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70298 uint32x4_t __ret;
70299 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
70300 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70301 return __ret;
70302 }
70303 #endif
70304
70305 #ifdef __LITTLE_ENDIAN__
70306 __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
70307 uint64x2_t __ret;
70308 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70309 return __ret;
70310 }
70311 #else
70312 __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
70313 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70314 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70315 uint64x2_t __ret;
70316 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70317 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70318 return __ret;
70319 }
70320 #endif
70321
70322 #ifdef __LITTLE_ENDIAN__
70323 __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
70324 uint16x8_t __ret;
70325 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
70326 return __ret;
70327 }
70328 #else
70329 __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
70330 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70331 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70332 uint16x8_t __ret;
70333 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
70334 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70335 return __ret;
70336 }
70337 #endif
70338
70339 #ifdef __LITTLE_ENDIAN__
70340 __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
70341 int8x16_t __ret;
70342 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
70343 return __ret;
70344 }
70345 #else
70346 __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
70347 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70348 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70349 int8x16_t __ret;
70350 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
70351 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70352 return __ret;
70353 }
70354 #endif
70355
70356 #ifdef __LITTLE_ENDIAN__
70357 __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
70358 float64x2_t __ret;
70359 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70360 return __ret;
70361 }
70362 #else
70363 __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
70364 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70365 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70366 float64x2_t __ret;
70367 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70368 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70369 return __ret;
70370 }
70371 #endif
70372
70373 #ifdef __LITTLE_ENDIAN__
70374 __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
70375 float32x4_t __ret;
70376 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
70377 return __ret;
70378 }
70379 #else
70380 __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
70381 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70382 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70383 float32x4_t __ret;
70384 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
70385 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70386 return __ret;
70387 }
70388 #endif
70389
70390 #ifdef __LITTLE_ENDIAN__
70391 __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
70392 int32x4_t __ret;
70393 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
70394 return __ret;
70395 }
70396 #else
70397 __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
70398 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70399 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70400 int32x4_t __ret;
70401 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
70402 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70403 return __ret;
70404 }
70405 #endif
70406
70407 #ifdef __LITTLE_ENDIAN__
70408 __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
70409 int64x2_t __ret;
70410 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70411 return __ret;
70412 }
70413 #else
70414 __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
70415 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70416 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70417 int64x2_t __ret;
70418 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70419 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70420 return __ret;
70421 }
70422 #endif
70423
70424 #ifdef __LITTLE_ENDIAN__
70425 __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
70426 int16x8_t __ret;
70427 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
70428 return __ret;
70429 }
70430 #else
70431 __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
70432 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70433 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70434 int16x8_t __ret;
70435 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
70436 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70437 return __ret;
70438 }
70439 #endif
70440
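/* 64-bit (D-register) vuzp1 forms: the same even-lane selection applied to
 * 8-, 4-, and 2-lane vectors. */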
70441 #ifdef __LITTLE_ENDIAN__
70442 __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
70443 uint8x8_t __ret;
70444 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
70445 return __ret;
70446 }
70447 #else
70448 __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
70449 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70450 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70451 uint8x8_t __ret;
70452 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
70453 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70454 return __ret;
70455 }
70456 #endif
70457
70458 #ifdef __LITTLE_ENDIAN__
70459 __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
70460 uint32x2_t __ret;
70461 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70462 return __ret;
70463 }
70464 #else
70465 __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
70466 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70467 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70468 uint32x2_t __ret;
70469 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70470 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70471 return __ret;
70472 }
70473 #endif
70474
70475 #ifdef __LITTLE_ENDIAN__
70476 __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
70477 uint16x4_t __ret;
70478 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
70479 return __ret;
70480 }
70481 #else
70482 __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
70483 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70484 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70485 uint16x4_t __ret;
70486 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
70487 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70488 return __ret;
70489 }
70490 #endif
70491
70492 #ifdef __LITTLE_ENDIAN__
70493 __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
70494 int8x8_t __ret;
70495 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
70496 return __ret;
70497 }
70498 #else
70499 __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
70500 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70501 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70502 int8x8_t __ret;
70503 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
70504 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70505 return __ret;
70506 }
70507 #endif
70508
70509 #ifdef __LITTLE_ENDIAN__
70510 __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
70511 float32x2_t __ret;
70512 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70513 return __ret;
70514 }
70515 #else
70516 __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
70517 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70518 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70519 float32x2_t __ret;
70520 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70521 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70522 return __ret;
70523 }
70524 #endif
70525
70526 #ifdef __LITTLE_ENDIAN__
70527 __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
70528 int32x2_t __ret;
70529 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70530 return __ret;
70531 }
70532 #else
70533 __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
70534 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70535 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70536 int32x2_t __ret;
70537 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70538 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70539 return __ret;
70540 }
70541 #endif
70542
70543 #ifdef __LITTLE_ENDIAN__
70544 __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
70545 int16x4_t __ret;
70546 __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
70547 return __ret;
70548 }
70549 #else
70550 __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
70551 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70552 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70553 int16x4_t __ret;
70554 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
70555 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70556 return __ret;
70557 }
70558 #endif
70559
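/* The vuzp2 family selects the odd-numbered lanes of the concatenation of
 * its two operands (the AArch64 UZP2 operation); big-endian handling mirrors
 * vuzp1 above. */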
70560 #ifdef __LITTLE_ENDIAN__
70561 __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
70562 poly8x8_t __ret;
70563 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
70564 return __ret;
70565 }
70566 #else
70567 __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
70568 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70569 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70570 poly8x8_t __ret;
70571 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
70572 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70573 return __ret;
70574 }
70575 #endif
70576
70577 #ifdef __LITTLE_ENDIAN__
70578 __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
70579 poly16x4_t __ret;
70580 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
70581 return __ret;
70582 }
70583 #else
70584 __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
70585 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70586 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70587 poly16x4_t __ret;
70588 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
70589 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70590 return __ret;
70591 }
70592 #endif
70593
70594 #ifdef __LITTLE_ENDIAN__
70595 __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
70596 poly8x16_t __ret;
70597 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
70598 return __ret;
70599 }
70600 #else
70601 __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
70602 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70603 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70604 poly8x16_t __ret;
70605 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
70606 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70607 return __ret;
70608 }
70609 #endif
70610
70611 #ifdef __LITTLE_ENDIAN__
70612 __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
70613 poly64x2_t __ret;
70614 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
70615 return __ret;
70616 }
70617 #else
70618 __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
70619 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70620 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70621 poly64x2_t __ret;
70622 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
70623 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70624 return __ret;
70625 }
70626 #endif
70627
70628 #ifdef __LITTLE_ENDIAN__
70629 __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
70630 poly16x8_t __ret;
70631 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
70632 return __ret;
70633 }
70634 #else
70635 __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
70636 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70637 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70638 poly16x8_t __ret;
70639 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
70640 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70641 return __ret;
70642 }
70643 #endif
70644
70645 #ifdef __LITTLE_ENDIAN__
70646 __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
70647 uint8x16_t __ret;
70648 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
70649 return __ret;
70650 }
70651 #else
70652 __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
70653 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70654 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70655 uint8x16_t __ret;
70656 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
70657 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70658 return __ret;
70659 }
70660 #endif
70661
70662 #ifdef __LITTLE_ENDIAN__
70663 __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
70664 uint32x4_t __ret;
70665 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
70666 return __ret;
70667 }
70668 #else
70669 __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
70670 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70671 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70672 uint32x4_t __ret;
70673 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
70674 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70675 return __ret;
70676 }
70677 #endif
70678
70679 #ifdef __LITTLE_ENDIAN__
70680 __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
70681 uint64x2_t __ret;
70682 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
70683 return __ret;
70684 }
70685 #else
70686 __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
70687 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70688 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70689 uint64x2_t __ret;
70690 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
70691 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70692 return __ret;
70693 }
70694 #endif
70695
70696 #ifdef __LITTLE_ENDIAN__
70697 __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
70698 uint16x8_t __ret;
70699 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
70700 return __ret;
70701 }
70702 #else
70703 __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
70704 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70705 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70706 uint16x8_t __ret;
70707 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
70708 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70709 return __ret;
70710 }
70711 #endif
70712
70713 #ifdef __LITTLE_ENDIAN__
70714 __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
70715 int8x16_t __ret;
70716 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
70717 return __ret;
70718 }
70719 #else
70720 __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
70721 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70722 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70723 int8x16_t __ret;
70724 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
70725 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70726 return __ret;
70727 }
70728 #endif
70729
70730 #ifdef __LITTLE_ENDIAN__
70731 __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
70732 float64x2_t __ret;
70733 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
70734 return __ret;
70735 }
70736 #else
70737 __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
70738 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70739 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70740 float64x2_t __ret;
70741 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
70742 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70743 return __ret;
70744 }
70745 #endif
70746
70747 #ifdef __LITTLE_ENDIAN__
70748 __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
70749 float32x4_t __ret;
70750 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
70751 return __ret;
70752 }
70753 #else
70754 __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
70755 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70756 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70757 float32x4_t __ret;
70758 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
70759 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70760 return __ret;
70761 }
70762 #endif
70763
70764 #ifdef __LITTLE_ENDIAN__
70765 __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
70766 int32x4_t __ret;
70767 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
70768 return __ret;
70769 }
70770 #else
70771 __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
70772 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70773 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70774 int32x4_t __ret;
70775 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
70776 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70777 return __ret;
70778 }
70779 #endif
70780
70781 #ifdef __LITTLE_ENDIAN__
70782 __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
70783 int64x2_t __ret;
70784 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
70785 return __ret;
70786 }
70787 #else
70788 __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
70789 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70790 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70791 int64x2_t __ret;
70792 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
70793 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70794 return __ret;
70795 }
70796 #endif
70797
70798 #ifdef __LITTLE_ENDIAN__
70799 __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
70800 int16x8_t __ret;
70801 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
70802 return __ret;
70803 }
70804 #else
70805 __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
70806 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70807 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70808 int16x8_t __ret;
70809 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
70810 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70811 return __ret;
70812 }
70813 #endif
70814
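/* 64-bit (D-register) vuzp2 forms. */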
70815 #ifdef __LITTLE_ENDIAN__
70816 __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
70817 uint8x8_t __ret;
70818 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
70819 return __ret;
70820 }
70821 #else
70822 __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
70823 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70824 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70825 uint8x8_t __ret;
70826 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
70827 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70828 return __ret;
70829 }
70830 #endif
70831
70832 #ifdef __LITTLE_ENDIAN__
70833 __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
70834 uint32x2_t __ret;
70835 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
70836 return __ret;
70837 }
70838 #else
70839 __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
70840 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70841 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70842 uint32x2_t __ret;
70843 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
70844 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70845 return __ret;
70846 }
70847 #endif
70848
70849 #ifdef __LITTLE_ENDIAN__
70850 __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
70851 uint16x4_t __ret;
70852 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
70853 return __ret;
70854 }
70855 #else
70856 __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
70857 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70858 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70859 uint16x4_t __ret;
70860 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
70861 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70862 return __ret;
70863 }
70864 #endif
70865
70866 #ifdef __LITTLE_ENDIAN__
70867 __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
70868 int8x8_t __ret;
70869 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
70870 return __ret;
70871 }
70872 #else
70873 __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
70874 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70875 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70876 int8x8_t __ret;
70877 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
70878 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70879 return __ret;
70880 }
70881 #endif
70882
70883 #ifdef __LITTLE_ENDIAN__
70884 __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
70885 float32x2_t __ret;
70886 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
70887 return __ret;
70888 }
70889 #else
70890 __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
70891 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70892 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70893 float32x2_t __ret;
70894 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
70895 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70896 return __ret;
70897 }
70898 #endif
70899
70900 #ifdef __LITTLE_ENDIAN__
70901 __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
70902 int32x2_t __ret;
70903 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
70904 return __ret;
70905 }
70906 #else
70907 __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
70908 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70909 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70910 int32x2_t __ret;
70911 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
70912 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70913 return __ret;
70914 }
70915 #endif
70916
70917 #ifdef __LITTLE_ENDIAN__
70918 __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
70919 int16x4_t __ret;
70920 __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
70921 return __ret;
70922 }
70923 #else
70924 __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
70925 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70926 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70927 int16x4_t __ret;
70928 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
70929 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70930 return __ret;
70931 }
70932 #endif
70933
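/*
 * The vzip1 family interleaves the low halves of its two operands (the
 * AArch64 ZIP1 operation): result lane 0 is lane 0 of __p0, result lane 1 is
 * lane 0 of __p1, and so on.  Illustrative use (a and b are arbitrary
 * uint8x16_t values):
 *   uint8x16_t even = vuzp1q_u8(a, b);
 *   uint8x16_t odd  = vuzp2q_u8(a, b);
 *   uint8x16_t lo   = vzip1q_u8(even, odd);   // recovers a
 *   uint8x16_t hi   = vzip2q_u8(even, odd);   // recovers b
 */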
70934 #ifdef __LITTLE_ENDIAN__
70935 __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
70936 poly8x8_t __ret;
70937 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
70938 return __ret;
70939 }
70940 #else
70941 __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
70942 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
70943 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
70944 poly8x8_t __ret;
70945 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
70946 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
70947 return __ret;
70948 }
70949 #endif
70950
70951 #ifdef __LITTLE_ENDIAN__
70952 __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
70953 poly16x4_t __ret;
70954 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
70955 return __ret;
70956 }
70957 #else
70958 __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
70959 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
70960 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
70961 poly16x4_t __ret;
70962 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
70963 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
70964 return __ret;
70965 }
70966 #endif
70967
70968 #ifdef __LITTLE_ENDIAN__
70969 __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
70970 poly8x16_t __ret;
70971 __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
70972 return __ret;
70973 }
70974 #else
70975 __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
70976 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70977 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70978 poly8x16_t __ret;
70979 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
70980 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
70981 return __ret;
70982 }
70983 #endif
70984
70985 #ifdef __LITTLE_ENDIAN__
70986 __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
70987 poly64x2_t __ret;
70988 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
70989 return __ret;
70990 }
70991 #else
70992 __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
70993 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
70994 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
70995 poly64x2_t __ret;
70996 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
70997 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
70998 return __ret;
70999 }
71000 #endif
71001
71002 #ifdef __LITTLE_ENDIAN__
71003 __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
71004 poly16x8_t __ret;
71005 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
71006 return __ret;
71007 }
71008 #else
71009 __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
71010 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71011 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71012 poly16x8_t __ret;
71013 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
71014 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71015 return __ret;
71016 }
71017 #endif
71018
71019 #ifdef __LITTLE_ENDIAN__
71020 __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
71021 uint8x16_t __ret;
71022 __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
71023 return __ret;
71024 }
71025 #else
71026 __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
71027 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71028 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71029 uint8x16_t __ret;
71030 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
71031 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71032 return __ret;
71033 }
71034 #endif
71035
71036 #ifdef __LITTLE_ENDIAN__
71037 __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
71038 uint32x4_t __ret;
71039 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
71040 return __ret;
71041 }
71042 #else
71043 __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
71044 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71045 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71046 uint32x4_t __ret;
71047 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
71048 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71049 return __ret;
71050 }
71051 #endif
71052
71053 #ifdef __LITTLE_ENDIAN__
71054 __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
71055 uint64x2_t __ret;
71056 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
71057 return __ret;
71058 }
71059 #else
71060 __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
71061 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71062 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71063 uint64x2_t __ret;
71064 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
71065 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71066 return __ret;
71067 }
71068 #endif
71069
71070 #ifdef __LITTLE_ENDIAN__
71071 __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
71072 uint16x8_t __ret;
71073 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
71074 return __ret;
71075 }
71076 #else
71077 __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
71078 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71079 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71080 uint16x8_t __ret;
71081 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
71082 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71083 return __ret;
71084 }
71085 #endif
71086
71087 #ifdef __LITTLE_ENDIAN__
71088 __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
71089 int8x16_t __ret;
71090 __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
71091 return __ret;
71092 }
71093 #else
71094 __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
71095 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71096 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71097 int8x16_t __ret;
71098 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
71099 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71100 return __ret;
71101 }
71102 #endif
71103
71104 #ifdef __LITTLE_ENDIAN__
71105 __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
71106 float64x2_t __ret;
71107 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
71108 return __ret;
71109 }
71110 #else
71111 __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
71112 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71113 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71114 float64x2_t __ret;
71115 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
71116 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71117 return __ret;
71118 }
71119 #endif
71120
71121 #ifdef __LITTLE_ENDIAN__
71122 __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
71123 float32x4_t __ret;
71124 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
71125 return __ret;
71126 }
71127 #else
71128 __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
71129 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71130 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71131 float32x4_t __ret;
71132 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
71133 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71134 return __ret;
71135 }
71136 #endif
71137
71138 #ifdef __LITTLE_ENDIAN__
71139 __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
71140 int32x4_t __ret;
71141 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
71142 return __ret;
71143 }
71144 #else
71145 __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
71146 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71147 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71148 int32x4_t __ret;
71149 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
71150 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71151 return __ret;
71152 }
71153 #endif
71154
71155 #ifdef __LITTLE_ENDIAN__
71156 __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
71157 int64x2_t __ret;
71158 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
71159 return __ret;
71160 }
71161 #else
71162 __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
71163 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71164 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71165 int64x2_t __ret;
71166 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
71167 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71168 return __ret;
71169 }
71170 #endif
71171
71172 #ifdef __LITTLE_ENDIAN__
71173 __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
71174 int16x8_t __ret;
71175 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
71176 return __ret;
71177 }
71178 #else
71179 __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
71180 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71181 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71182 int16x8_t __ret;
71183 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
71184 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71185 return __ret;
71186 }
71187 #endif
71188
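/* 64-bit (D-register) vzip1 forms. */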
71189 #ifdef __LITTLE_ENDIAN__
71190 __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
71191 uint8x8_t __ret;
71192 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
71193 return __ret;
71194 }
71195 #else
71196 __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
71197 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71198 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71199 uint8x8_t __ret;
71200 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
71201 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71202 return __ret;
71203 }
71204 #endif
71205
71206 #ifdef __LITTLE_ENDIAN__
71207 __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
71208 uint32x2_t __ret;
71209 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
71210 return __ret;
71211 }
71212 #else
71213 __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
71214 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71215 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71216 uint32x2_t __ret;
71217 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
71218 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71219 return __ret;
71220 }
71221 #endif
71222
71223 #ifdef __LITTLE_ENDIAN__
71224 __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
71225 uint16x4_t __ret;
71226 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
71227 return __ret;
71228 }
71229 #else
71230 __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
71231 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71232 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71233 uint16x4_t __ret;
71234 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
71235 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71236 return __ret;
71237 }
71238 #endif
71239
71240 #ifdef __LITTLE_ENDIAN__
71241 __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
71242 int8x8_t __ret;
71243 __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
71244 return __ret;
71245 }
71246 #else
71247 __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
71248 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71249 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71250 int8x8_t __ret;
71251 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
71252 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71253 return __ret;
71254 }
71255 #endif
71256
71257 #ifdef __LITTLE_ENDIAN__
71258 __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
71259 float32x2_t __ret;
71260 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
71261 return __ret;
71262 }
71263 #else
71264 __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
71265 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71266 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71267 float32x2_t __ret;
71268 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
71269 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71270 return __ret;
71271 }
71272 #endif
71273
71274 #ifdef __LITTLE_ENDIAN__
71275 __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
71276 int32x2_t __ret;
71277 __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
71278 return __ret;
71279 }
71280 #else
71281 __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
71282 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71283 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71284 int32x2_t __ret;
71285 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
71286 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71287 return __ret;
71288 }
71289 #endif
71290
71291 #ifdef __LITTLE_ENDIAN__
71292 __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
71293 int16x4_t __ret;
71294 __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
71295 return __ret;
71296 }
71297 #else
71298 __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
71299 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71300 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71301 int16x4_t __ret;
71302 __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
71303 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71304 return __ret;
71305 }
71306 #endif
71307
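/* The vzip2 family interleaves the high halves of its two operands (the
 * AArch64 ZIP2 operation). */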
71308 #ifdef __LITTLE_ENDIAN__
71309 __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
71310 poly8x8_t __ret;
71311 __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
71312 return __ret;
71313 }
71314 #else
71315 __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
71316 poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71317 poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71318 poly8x8_t __ret;
71319 __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
71320 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71321 return __ret;
71322 }
71323 #endif
71324
71325 #ifdef __LITTLE_ENDIAN__
71326 __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
71327 poly16x4_t __ret;
71328 __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
71329 return __ret;
71330 }
71331 #else
71332 __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
71333 poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71334 poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71335 poly16x4_t __ret;
71336 __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
71337 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71338 return __ret;
71339 }
71340 #endif
71341
71342 #ifdef __LITTLE_ENDIAN__
71343 __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
71344 poly8x16_t __ret;
71345 __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
71346 return __ret;
71347 }
71348 #else
71349 __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
71350 poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71351 poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71352 poly8x16_t __ret;
71353 __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
71354 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71355 return __ret;
71356 }
71357 #endif
71358
71359 #ifdef __LITTLE_ENDIAN__
71360 __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
71361 poly64x2_t __ret;
71362 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
71363 return __ret;
71364 }
71365 #else
71366 __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
71367 poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71368 poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71369 poly64x2_t __ret;
71370 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
71371 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71372 return __ret;
71373 }
71374 #endif
71375
71376 #ifdef __LITTLE_ENDIAN__
71377 __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
71378 poly16x8_t __ret;
71379 __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
71380 return __ret;
71381 }
71382 #else
71383 __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
71384 poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71385 poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71386 poly16x8_t __ret;
71387 __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
71388 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71389 return __ret;
71390 }
71391 #endif
71392
71393 #ifdef __LITTLE_ENDIAN__
71394 __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
71395 uint8x16_t __ret;
71396 __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
71397 return __ret;
71398 }
71399 #else
71400 __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
71401 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71402 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71403 uint8x16_t __ret;
71404 __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
71405 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71406 return __ret;
71407 }
71408 #endif
71409
71410 #ifdef __LITTLE_ENDIAN__
71411 __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
71412 uint32x4_t __ret;
71413 __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
71414 return __ret;
71415 }
71416 #else
71417 __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
71418 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71419 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71420 uint32x4_t __ret;
71421 __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
71422 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71423 return __ret;
71424 }
71425 #endif
71426
71427 #ifdef __LITTLE_ENDIAN__
71428 __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
71429 uint64x2_t __ret;
71430 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
71431 return __ret;
71432 }
71433 #else
71434 __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
71435 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71436 uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71437 uint64x2_t __ret;
71438 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
71439 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71440 return __ret;
71441 }
71442 #endif
71443
71444 #ifdef __LITTLE_ENDIAN__
71445 __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
71446 uint16x8_t __ret;
71447 __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
71448 return __ret;
71449 }
71450 #else
71451 __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
71452 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71453 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71454 uint16x8_t __ret;
71455 __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
71456 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71457 return __ret;
71458 }
71459 #endif
71460
71461 #ifdef __LITTLE_ENDIAN__
71462 __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
71463 int8x16_t __ret;
71464 __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
71465 return __ret;
71466 }
71467 #else
71468 __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
71469 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71470 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71471 int8x16_t __ret;
71472 __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
71473 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71474 return __ret;
71475 }
71476 #endif
71477
71478 #ifdef __LITTLE_ENDIAN__
71479 __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
71480 float64x2_t __ret;
71481 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
71482 return __ret;
71483 }
71484 #else
71485 __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
71486 float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71487 float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71488 float64x2_t __ret;
71489 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
71490 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71491 return __ret;
71492 }
71493 #endif
71494
71495 #ifdef __LITTLE_ENDIAN__
71496 __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
71497 float32x4_t __ret;
71498 __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
71499 return __ret;
71500 }
71501 #else
71502 __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
71503 float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71504 float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71505 float32x4_t __ret;
71506 __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
71507 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71508 return __ret;
71509 }
71510 #endif
71511
71512 #ifdef __LITTLE_ENDIAN__
71513 __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
71514 int32x4_t __ret;
71515 __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
71516 return __ret;
71517 }
71518 #else
71519 __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
71520 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71521 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71522 int32x4_t __ret;
71523 __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
71524 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71525 return __ret;
71526 }
71527 #endif
71528
71529 #ifdef __LITTLE_ENDIAN__
71530 __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
71531 int64x2_t __ret;
71532 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
71533 return __ret;
71534 }
71535 #else
71536 __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
71537 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71538 int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71539 int64x2_t __ret;
71540 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
71541 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71542 return __ret;
71543 }
71544 #endif
71545
71546 #ifdef __LITTLE_ENDIAN__
71547 __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
71548 int16x8_t __ret;
71549 __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
71550 return __ret;
71551 }
71552 #else
71553 __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
71554 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71555 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71556 int16x8_t __ret;
71557 __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
71558 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71559 return __ret;
71560 }
71561 #endif
71562
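/* 64-bit (D-register) vzip2 forms. */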
71563 #ifdef __LITTLE_ENDIAN__
71564 __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
71565 uint8x8_t __ret;
71566 __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
71567 return __ret;
71568 }
71569 #else
71570 __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
71571 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71572 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71573 uint8x8_t __ret;
71574 __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
71575 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71576 return __ret;
71577 }
71578 #endif
71579
71580 #ifdef __LITTLE_ENDIAN__
71581 __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
71582 uint32x2_t __ret;
71583 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
71584 return __ret;
71585 }
71586 #else
71587 __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
71588 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71589 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71590 uint32x2_t __ret;
71591 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
71592 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71593 return __ret;
71594 }
71595 #endif
71596
71597 #ifdef __LITTLE_ENDIAN__
71598 __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
71599 uint16x4_t __ret;
71600 __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
71601 return __ret;
71602 }
71603 #else
71604 __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
71605 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71606 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71607 uint16x4_t __ret;
71608 __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
71609 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71610 return __ret;
71611 }
71612 #endif
71613
71614 #ifdef __LITTLE_ENDIAN__
71615 __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
71616 int8x8_t __ret;
71617 __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
71618 return __ret;
71619 }
71620 #else
71621 __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
71622 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71623 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71624 int8x8_t __ret;
71625 __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
71626 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71627 return __ret;
71628 }
71629 #endif
71630
71631 #ifdef __LITTLE_ENDIAN__
71632 __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
71633 float32x2_t __ret;
71634 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
71635 return __ret;
71636 }
71637 #else
71638 __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
71639 float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71640 float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71641 float32x2_t __ret;
71642 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
71643 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71644 return __ret;
71645 }
71646 #endif
71647
71648 #ifdef __LITTLE_ENDIAN__
71649 __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
71650 int32x2_t __ret;
71651 __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
71652 return __ret;
71653 }
71654 #else
71655 __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
71656 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71657 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71658 int32x2_t __ret;
71659 __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
71660 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71661 return __ret;
71662 }
71663 #endif
71664
71665 #ifdef __LITTLE_ENDIAN__
71666 __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
71667 int16x4_t __ret;
71668 __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
71669 return __ret;
71670 }
71671 #else
71672 __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
71673 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71674 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71675 int16x4_t __ret;
71676 __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
71677 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71678 return __ret;
71679 }
71680 #endif
71681
71682 #endif
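/* vaba* accumulates an absolute difference: each result lane is the
 * corresponding lane of __p0 plus the absolute difference of __p1 and __p2
 * (vabd).  The big-endian path calls the __noswap_vabd* helpers defined
 * alongside vabd* earlier in this header, so the operands are lane-reversed
 * only once. */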
71683 #ifdef __LITTLE_ENDIAN__
71684 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
71685 uint8x16_t __ret;
71686 __ret = __p0 + vabdq_u8(__p1, __p2);
71687 return __ret;
71688 }
71689 #else
71690 __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
71691 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71692 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71693 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71694 uint8x16_t __ret;
71695 __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
71696 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71697 return __ret;
71698 }
71699 #endif
71700
71701 #ifdef __LITTLE_ENDIAN__
71702 __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
71703 uint32x4_t __ret;
71704 __ret = __p0 + vabdq_u32(__p1, __p2);
71705 return __ret;
71706 }
71707 #else
71708 __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
71709 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71710 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71711 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
71712 uint32x4_t __ret;
71713 __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
71714 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71715 return __ret;
71716 }
71717 #endif
71718
71719 #ifdef __LITTLE_ENDIAN__
71720 __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
71721 uint16x8_t __ret;
71722 __ret = __p0 + vabdq_u16(__p1, __p2);
71723 return __ret;
71724 }
71725 #else
71726 __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
71727 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71728 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71729 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
71730 uint16x8_t __ret;
71731 __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
71732 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71733 return __ret;
71734 }
71735 #endif
71736
71737 #ifdef __LITTLE_ENDIAN__
71738 __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
71739 int8x16_t __ret;
71740 __ret = __p0 + vabdq_s8(__p1, __p2);
71741 return __ret;
71742 }
71743 #else
71744 __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
71745 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71746 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71747 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71748 int8x16_t __ret;
71749 __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
71750 __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
71751 return __ret;
71752 }
71753 #endif
71754
71755 #ifdef __LITTLE_ENDIAN__
71756 __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
71757 int32x4_t __ret;
71758 __ret = __p0 + vabdq_s32(__p1, __p2);
71759 return __ret;
71760 }
71761 #else
71762 __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
71763 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71764 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71765 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
71766 int32x4_t __ret;
71767 __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
71768 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71769 return __ret;
71770 }
71771 #endif
71772
71773 #ifdef __LITTLE_ENDIAN__
71774 __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
71775 int16x8_t __ret;
71776 __ret = __p0 + vabdq_s16(__p1, __p2);
71777 return __ret;
71778 }
71779 #else
71780 __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
71781 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71782 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71783 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
71784 int16x8_t __ret;
71785 __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
71786 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71787 return __ret;
71788 }
71789 #endif
71790
71791 #ifdef __LITTLE_ENDIAN__
71792 __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
71793 uint8x8_t __ret;
71794 __ret = __p0 + vabd_u8(__p1, __p2);
71795 return __ret;
71796 }
71797 #else
71798 __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
71799 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71800 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71801 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
71802 uint8x8_t __ret;
71803 __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
71804 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71805 return __ret;
71806 }
71807 #endif
71808
71809 #ifdef __LITTLE_ENDIAN__
71810 __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
71811 uint32x2_t __ret;
71812 __ret = __p0 + vabd_u32(__p1, __p2);
71813 return __ret;
71814 }
71815 #else
71816 __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
71817 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71818 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71819 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
71820 uint32x2_t __ret;
71821 __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
71822 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71823 return __ret;
71824 }
71825 #endif
71826
71827 #ifdef __LITTLE_ENDIAN__
71828 __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
71829 uint16x4_t __ret;
71830 __ret = __p0 + vabd_u16(__p1, __p2);
71831 return __ret;
71832 }
71833 #else
71834 __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
71835 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71836 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71837 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
71838 uint16x4_t __ret;
71839 __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
71840 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71841 return __ret;
71842 }
71843 #endif
71844
71845 #ifdef __LITTLE_ENDIAN__
71846 __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
71847 int8x8_t __ret;
71848 __ret = __p0 + vabd_s8(__p1, __p2);
71849 return __ret;
71850 }
71851 #else
71852 __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
71853 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71854 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71855 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
71856 int8x8_t __ret;
71857 __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
71858 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71859 return __ret;
71860 }
71861 #endif
71862
71863 #ifdef __LITTLE_ENDIAN__
71864 __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
71865 int32x2_t __ret;
71866 __ret = __p0 + vabd_s32(__p1, __p2);
71867 return __ret;
71868 }
71869 #else
71870 __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
71871 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71872 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71873 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
71874 int32x2_t __ret;
71875 __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
71876 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71877 return __ret;
71878 }
71879 #endif
71880
71881 #ifdef __LITTLE_ENDIAN__
71882 __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
71883 int16x4_t __ret;
71884 __ret = __p0 + vabd_s16(__p1, __p2);
71885 return __ret;
71886 }
71887 #else
71888 __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
71889 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71890 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71891 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
71892 int16x4_t __ret;
71893 __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
71894 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71895 return __ret;
71896 }
71897 #endif
71898
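/* vabdl: widening absolute difference.  The narrow vabd result is
 * reinterpreted and zero-extended to twice the element width with vmovl, so
 * e.g. vabdl_u8 maps uint8x8_t operands to a uint16x8_t of |a[i] - b[i]|.
 * The __noswap_* helpers are lane-order-preserving versions reused by other
 * big-endian wrappers in this header. */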
71899 #ifdef __LITTLE_ENDIAN__
71900 __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
71901 uint16x8_t __ret;
71902 __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
71903 return __ret;
71904 }
71905 #else
71906 __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
71907 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71908 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71909 uint16x8_t __ret;
71910 __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
71911 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71912 return __ret;
71913 }
71914 __ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
71915 uint16x8_t __ret;
71916 __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
71917 return __ret;
71918 }
71919 #endif
71920
71921 #ifdef __LITTLE_ENDIAN__
71922 __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
71923 uint64x2_t __ret;
71924 __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
71925 return __ret;
71926 }
71927 #else
71928 __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
71929 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71930 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71931 uint64x2_t __ret;
71932 __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
71933 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
71934 return __ret;
71935 }
71936 __ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
71937 uint64x2_t __ret;
71938 __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
71939 return __ret;
71940 }
71941 #endif
71942
71943 #ifdef __LITTLE_ENDIAN__
71944 __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
71945 uint32x4_t __ret;
71946 __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
71947 return __ret;
71948 }
71949 #else
71950 __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
71951 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
71952 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
71953 uint32x4_t __ret;
71954 __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
71955 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
71956 return __ret;
71957 }
71958 __ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
71959 uint32x4_t __ret;
71960 __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
71961 return __ret;
71962 }
71963 #endif
71964
71965 #ifdef __LITTLE_ENDIAN__
71966 __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
71967 int16x8_t __ret;
71968 __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
71969 return __ret;
71970 }
71971 #else
71972 __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
71973 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
71974 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
71975 int16x8_t __ret;
71976 __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
71977 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
71978 return __ret;
71979 }
71980 __ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
71981 int16x8_t __ret;
71982 __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
71983 return __ret;
71984 }
71985 #endif
71986
71987 #ifdef __LITTLE_ENDIAN__
71988 __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
71989 int64x2_t __ret;
71990 __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
71991 return __ret;
71992 }
71993 #else
71994 __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
71995 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
71996 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
71997 int64x2_t __ret;
71998 __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
71999 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72000 return __ret;
72001 }
72002 __ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
72003 int64x2_t __ret;
72004 __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
72005 return __ret;
72006 }
72007 #endif
72008
72009 #ifdef __LITTLE_ENDIAN__
72010 __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
72011 int32x4_t __ret;
72012 __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
72013 return __ret;
72014 }
72015 #else
72016 __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
72017 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72018 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72019 int32x4_t __ret;
72020 __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
72021 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72022 return __ret;
72023 }
72024 __ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
72025 int32x4_t __ret;
72026 __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
72027 return __ret;
72028 }
72029 #endif
72030
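/* vaddl: long (widening) add.  Both narrow operands are widened with vmovl_*
 * and then added, e.g. vaddl_u8(a, b)[i] == (uint16_t)a[i] + (uint16_t)b[i],
 * so the sum cannot overflow the narrow element type. */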
72031 #ifdef __LITTLE_ENDIAN__
72032 __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
72033 uint16x8_t __ret;
72034 __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
72035 return __ret;
72036 }
72037 #else
72038 __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
72039 uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72040 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72041 uint16x8_t __ret;
72042 __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
72043 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72044 return __ret;
72045 }
72046 #endif
72047
72048 #ifdef __LITTLE_ENDIAN__
72049 __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
72050 uint64x2_t __ret;
72051 __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
72052 return __ret;
72053 }
72054 #else
72055 __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
72056 uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72057 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72058 uint64x2_t __ret;
72059 __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
72060 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72061 return __ret;
72062 }
72063 #endif
72064
72065 #ifdef __LITTLE_ENDIAN__
72066 __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
72067 uint32x4_t __ret;
72068 __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
72069 return __ret;
72070 }
72071 #else
72072 __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
72073 uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72074 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72075 uint32x4_t __ret;
72076 __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
72077 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72078 return __ret;
72079 }
72080 #endif
72081
72082 #ifdef __LITTLE_ENDIAN__
72083 __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
72084 int16x8_t __ret;
72085 __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
72086 return __ret;
72087 }
72088 #else
72089 __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
72090 int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72091 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72092 int16x8_t __ret;
72093 __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
72094 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72095 return __ret;
72096 }
72097 #endif
72098
72099 #ifdef __LITTLE_ENDIAN__
72100 __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
72101 int64x2_t __ret;
72102 __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
72103 return __ret;
72104 }
72105 #else
72106 __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
72107 int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72108 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72109 int64x2_t __ret;
72110 __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
72111 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72112 return __ret;
72113 }
72114 #endif
72115
72116 #ifdef __LITTLE_ENDIAN__
72117 __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
72118 int32x4_t __ret;
72119 __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
72120 return __ret;
72121 }
72122 #else
72123 __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
72124 int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72125 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72126 int32x4_t __ret;
72127 __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
72128 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72129 return __ret;
72130 }
72131 #endif
72132
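/* vaddw: wide add.  Only the second operand is widened; the first is already
 * at the result width, so vaddw_u8(acc16, b8) accumulates 8-bit values into a
 * 16-bit vector. */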
72133 #ifdef __LITTLE_ENDIAN__
72134 __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
72135 uint16x8_t __ret;
72136 __ret = __p0 + vmovl_u8(__p1);
72137 return __ret;
72138 }
72139 #else
72140 __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
72141 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72142 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72143 uint16x8_t __ret;
72144 __ret = __rev0 + __noswap_vmovl_u8(__rev1);
72145 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72146 return __ret;
72147 }
72148 #endif
72149
72150 #ifdef __LITTLE_ENDIAN__
72151 __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
72152 uint64x2_t __ret;
72153 __ret = __p0 + vmovl_u32(__p1);
72154 return __ret;
72155 }
72156 #else
72157 __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
72158 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72159 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72160 uint64x2_t __ret;
72161 __ret = __rev0 + __noswap_vmovl_u32(__rev1);
72162 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72163 return __ret;
72164 }
72165 #endif
72166
72167 #ifdef __LITTLE_ENDIAN__
72168 __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
72169 uint32x4_t __ret;
72170 __ret = __p0 + vmovl_u16(__p1);
72171 return __ret;
72172 }
72173 #else
72174 __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
72175 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72176 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72177 uint32x4_t __ret;
72178 __ret = __rev0 + __noswap_vmovl_u16(__rev1);
72179 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72180 return __ret;
72181 }
72182 #endif
72183
72184 #ifdef __LITTLE_ENDIAN__
72185 __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
72186 int16x8_t __ret;
72187 __ret = __p0 + vmovl_s8(__p1);
72188 return __ret;
72189 }
72190 #else
72191 __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
72192 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72193 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72194 int16x8_t __ret;
72195 __ret = __rev0 + __noswap_vmovl_s8(__rev1);
72196 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72197 return __ret;
72198 }
72199 #endif
72200
72201 #ifdef __LITTLE_ENDIAN__
72202 __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
72203 int64x2_t __ret;
72204 __ret = __p0 + vmovl_s32(__p1);
72205 return __ret;
72206 }
72207 #else
72208 __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
72209 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72210 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72211 int64x2_t __ret;
72212 __ret = __rev0 + __noswap_vmovl_s32(__rev1);
72213 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72214 return __ret;
72215 }
72216 #endif
72217
72218 #ifdef __LITTLE_ENDIAN__
72219 __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
72220 int32x4_t __ret;
72221 __ret = __p0 + vmovl_s16(__p1);
72222 return __ret;
72223 }
72224 #else
72225 __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
72226 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72227 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72228 int32x4_t __ret;
72229 __ret = __rev0 + __noswap_vmovl_s16(__rev1);
72230 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72231 return __ret;
72232 }
72233 #endif
72234
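/* vmlal: widening multiply-accumulate, __p0 + vmull(__p1, __p2).  A typical
 * use (illustrative sketch only, not part of this header) is a dot-product
 * style accumulation:
 *   int32x4_t acc = vdupq_n_s32(0);
 *   acc = vmlal_s16(acc, a, b);   // acc[i] += (int32_t)a[i] * (int32_t)b[i]
 */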
72235 #ifdef __LITTLE_ENDIAN__
72236 __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
72237 uint16x8_t __ret;
72238 __ret = __p0 + vmull_u8(__p1, __p2);
72239 return __ret;
72240 }
72241 #else
72242 __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
72243 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72244 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72245 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
72246 uint16x8_t __ret;
72247 __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
72248 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72249 return __ret;
72250 }
72251 __ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
72252 uint16x8_t __ret;
72253 __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
72254 return __ret;
72255 }
72256 #endif
72257
72258 #ifdef __LITTLE_ENDIAN__
72259 __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
72260 uint64x2_t __ret;
72261 __ret = __p0 + vmull_u32(__p1, __p2);
72262 return __ret;
72263 }
72264 #else
72265 __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
72266 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72267 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72268 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
72269 uint64x2_t __ret;
72270 __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
72271 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72272 return __ret;
72273 }
72274 __ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
72275 uint64x2_t __ret;
72276 __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
72277 return __ret;
72278 }
72279 #endif
72280
72281 #ifdef __LITTLE_ENDIAN__
72282 __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
72283 uint32x4_t __ret;
72284 __ret = __p0 + vmull_u16(__p1, __p2);
72285 return __ret;
72286 }
72287 #else
72288 __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
72289 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72290 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72291 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
72292 uint32x4_t __ret;
72293 __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
72294 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72295 return __ret;
72296 }
72297 __ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
72298 uint32x4_t __ret;
72299 __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
72300 return __ret;
72301 }
72302 #endif
72303
72304 #ifdef __LITTLE_ENDIAN__
72305 __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
72306 int16x8_t __ret;
72307 __ret = __p0 + vmull_s8(__p1, __p2);
72308 return __ret;
72309 }
72310 #else
72311 __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
72312 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72313 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72314 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
72315 int16x8_t __ret;
72316 __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
72317 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72318 return __ret;
72319 }
72320 __ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
72321 int16x8_t __ret;
72322 __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
72323 return __ret;
72324 }
72325 #endif
72326
72327 #ifdef __LITTLE_ENDIAN__
72328 __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
72329 int64x2_t __ret;
72330 __ret = __p0 + vmull_s32(__p1, __p2);
72331 return __ret;
72332 }
72333 #else
72334 __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
72335 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72336 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72337 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
72338 int64x2_t __ret;
72339 __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
72340 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72341 return __ret;
72342 }
72343 __ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
72344 int64x2_t __ret;
72345 __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
72346 return __ret;
72347 }
72348 #endif
72349
72350 #ifdef __LITTLE_ENDIAN__
72351 __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
72352 int32x4_t __ret;
72353 __ret = __p0 + vmull_s16(__p1, __p2);
72354 return __ret;
72355 }
72356 #else
72357 __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
72358 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72359 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72360 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
72361 int32x4_t __ret;
72362 __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
72363 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72364 return __ret;
72365 }
72366 __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
72367 int32x4_t __ret;
72368 __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
72369 return __ret;
72370 }
72371 #endif
72372
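/* vmlal_lane: widening multiply-accumulate by a single lane of the third
 * operand; lane __p3 is broadcast with __builtin_shufflevector before the
 * vmull.  These are macros rather than functions so the lane index remains a
 * compile-time constant. */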
72373 #ifdef __LITTLE_ENDIAN__
72374 #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
72375 uint64x2_t __s0 = __p0; \
72376 uint32x2_t __s1 = __p1; \
72377 uint32x2_t __s2 = __p2; \
72378 uint64x2_t __ret; \
72379 __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
72380 __ret; \
72381 })
72382 #else
72383 #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
72384 uint64x2_t __s0 = __p0; \
72385 uint32x2_t __s1 = __p1; \
72386 uint32x2_t __s2 = __p2; \
72387 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
72388 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
72389 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
72390 uint64x2_t __ret; \
72391 __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
72392 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
72393 __ret; \
72394 })
72395 #endif
72396
72397 #ifdef __LITTLE_ENDIAN__
72398 #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
72399 uint32x4_t __s0 = __p0; \
72400 uint16x4_t __s1 = __p1; \
72401 uint16x4_t __s2 = __p2; \
72402 uint32x4_t __ret; \
72403 __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
72404 __ret; \
72405 })
72406 #else
72407 #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
72408 uint32x4_t __s0 = __p0; \
72409 uint16x4_t __s1 = __p1; \
72410 uint16x4_t __s2 = __p2; \
72411 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
72412 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
72413 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
72414 uint32x4_t __ret; \
72415 __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
72416 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
72417 __ret; \
72418 })
72419 #endif
72420
72421 #ifdef __LITTLE_ENDIAN__
72422 #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
72423 int64x2_t __s0 = __p0; \
72424 int32x2_t __s1 = __p1; \
72425 int32x2_t __s2 = __p2; \
72426 int64x2_t __ret; \
72427 __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
72428 __ret; \
72429 })
72430 #else
72431 #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
72432 int64x2_t __s0 = __p0; \
72433 int32x2_t __s1 = __p1; \
72434 int32x2_t __s2 = __p2; \
72435 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
72436 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
72437 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
72438 int64x2_t __ret; \
72439 __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
72440 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
72441 __ret; \
72442 })
72443 #endif
72444
72445 #ifdef __LITTLE_ENDIAN__
72446 #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
72447 int32x4_t __s0 = __p0; \
72448 int16x4_t __s1 = __p1; \
72449 int16x4_t __s2 = __p2; \
72450 int32x4_t __ret; \
72451 __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
72452 __ret; \
72453 })
72454 #else
72455 #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
72456 int32x4_t __s0 = __p0; \
72457 int16x4_t __s1 = __p1; \
72458 int16x4_t __s2 = __p2; \
72459 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
72460 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
72461 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
72462 int32x4_t __ret; \
72463 __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
72464 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
72465 __ret; \
72466 })
72467 #endif
72468
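/* vmlal_n: widening multiply-accumulate by a scalar, which is splatted into a
 * vector literal such as (uint32x2_t){__p2, __p2} before the vmull. */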
72469 #ifdef __LITTLE_ENDIAN__
72470 __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
72471 uint64x2_t __ret;
72472 __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
72473 return __ret;
72474 }
72475 #else
72476 __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
72477 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72478 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72479 uint64x2_t __ret;
72480 __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
72481 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72482 return __ret;
72483 }
72484 __ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
72485 uint64x2_t __ret;
72486 __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
72487 return __ret;
72488 }
72489 #endif
72490
72491 #ifdef __LITTLE_ENDIAN__
72492 __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
72493 uint32x4_t __ret;
72494 __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
72495 return __ret;
72496 }
72497 #else
72498 __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
72499 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72500 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72501 uint32x4_t __ret;
72502 __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
72503 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72504 return __ret;
72505 }
72506 __ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
72507 uint32x4_t __ret;
72508 __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
72509 return __ret;
72510 }
72511 #endif
72512
72513 #ifdef __LITTLE_ENDIAN__
72514 __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
72515 int64x2_t __ret;
72516 __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
72517 return __ret;
72518 }
72519 #else
72520 __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
72521 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72522 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72523 int64x2_t __ret;
72524 __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
72525 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72526 return __ret;
72527 }
72528 __ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
72529 int64x2_t __ret;
72530 __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
72531 return __ret;
72532 }
72533 #endif
72534
72535 #ifdef __LITTLE_ENDIAN__
72536 __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
72537 int32x4_t __ret;
72538 __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
72539 return __ret;
72540 }
72541 #else
72542 __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
72543 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72544 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72545 int32x4_t __ret;
72546 __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
72547 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72548 return __ret;
72549 }
72550 __ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
72551 int32x4_t __ret;
72552 __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
72553 return __ret;
72554 }
72555 #endif
72556
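/* vmlsl: widening multiply-subtract, __p0 - vmull(__p1, __p2); the mirror
 * image of vmlal above. */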
72557 #ifdef __LITTLE_ENDIAN__
72558 __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
72559 uint16x8_t __ret;
72560 __ret = __p0 - vmull_u8(__p1, __p2);
72561 return __ret;
72562 }
72563 #else
72564 __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
72565 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72566 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72567 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
72568 uint16x8_t __ret;
72569 __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
72570 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72571 return __ret;
72572 }
72573 __ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
72574 uint16x8_t __ret;
72575 __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
72576 return __ret;
72577 }
72578 #endif
72579
72580 #ifdef __LITTLE_ENDIAN__
72581 __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
72582 uint64x2_t __ret;
72583 __ret = __p0 - vmull_u32(__p1, __p2);
72584 return __ret;
72585 }
72586 #else
72587 __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
72588 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72589 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72590 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
72591 uint64x2_t __ret;
72592 __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
72593 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72594 return __ret;
72595 }
72596 __ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
72597 uint64x2_t __ret;
72598 __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
72599 return __ret;
72600 }
72601 #endif
72602
72603 #ifdef __LITTLE_ENDIAN__
72604 __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
72605 uint32x4_t __ret;
72606 __ret = __p0 - vmull_u16(__p1, __p2);
72607 return __ret;
72608 }
72609 #else
72610 __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
72611 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72612 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72613 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
72614 uint32x4_t __ret;
72615 __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
72616 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72617 return __ret;
72618 }
72619 __ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
72620 uint32x4_t __ret;
72621 __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
72622 return __ret;
72623 }
72624 #endif
72625
72626 #ifdef __LITTLE_ENDIAN__
72627 __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
72628 int16x8_t __ret;
72629 __ret = __p0 - vmull_s8(__p1, __p2);
72630 return __ret;
72631 }
72632 #else
72633 __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
72634 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
72635 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
72636 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
72637 int16x8_t __ret;
72638 __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
72639 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
72640 return __ret;
72641 }
72642 __ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
72643 int16x8_t __ret;
72644 __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
72645 return __ret;
72646 }
72647 #endif
72648
72649 #ifdef __LITTLE_ENDIAN__
72650 __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
72651 int64x2_t __ret;
72652 __ret = __p0 - vmull_s32(__p1, __p2);
72653 return __ret;
72654 }
72655 #else
72656 __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
72657 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72658 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72659 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
72660 int64x2_t __ret;
72661 __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
72662 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72663 return __ret;
72664 }
72665 __ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
72666 int64x2_t __ret;
72667 __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
72668 return __ret;
72669 }
72670 #endif
72671
72672 #ifdef __LITTLE_ENDIAN__
72673 __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
72674 int32x4_t __ret;
72675 __ret = __p0 - vmull_s16(__p1, __p2);
72676 return __ret;
72677 }
72678 #else
72679 __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
72680 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72681 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72682 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
72683 int32x4_t __ret;
72684 __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
72685 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72686 return __ret;
72687 }
72688 __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
72689 int32x4_t __ret;
72690 __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
72691 return __ret;
72692 }
72693 #endif
72694
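/* vmlsl_lane: widening multiply-subtract by a broadcast lane of __p2,
 * analogous to vmlal_lane. */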
72695 #ifdef __LITTLE_ENDIAN__
72696 #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
72697 uint64x2_t __s0 = __p0; \
72698 uint32x2_t __s1 = __p1; \
72699 uint32x2_t __s2 = __p2; \
72700 uint64x2_t __ret; \
72701 __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
72702 __ret; \
72703 })
72704 #else
72705 #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
72706 uint64x2_t __s0 = __p0; \
72707 uint32x2_t __s1 = __p1; \
72708 uint32x2_t __s2 = __p2; \
72709 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
72710 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
72711 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
72712 uint64x2_t __ret; \
72713 __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
72714 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
72715 __ret; \
72716 })
72717 #endif
72718
72719 #ifdef __LITTLE_ENDIAN__
72720 #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
72721 uint32x4_t __s0 = __p0; \
72722 uint16x4_t __s1 = __p1; \
72723 uint16x4_t __s2 = __p2; \
72724 uint32x4_t __ret; \
72725 __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
72726 __ret; \
72727 })
72728 #else
72729 #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
72730 uint32x4_t __s0 = __p0; \
72731 uint16x4_t __s1 = __p1; \
72732 uint16x4_t __s2 = __p2; \
72733 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
72734 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
72735 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
72736 uint32x4_t __ret; \
72737 __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
72738 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
72739 __ret; \
72740 })
72741 #endif
72742
72743 #ifdef __LITTLE_ENDIAN__
72744 #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
72745 int64x2_t __s0 = __p0; \
72746 int32x2_t __s1 = __p1; \
72747 int32x2_t __s2 = __p2; \
72748 int64x2_t __ret; \
72749 __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
72750 __ret; \
72751 })
72752 #else
72753 #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
72754 int64x2_t __s0 = __p0; \
72755 int32x2_t __s1 = __p1; \
72756 int32x2_t __s2 = __p2; \
72757 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
72758 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
72759 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
72760 int64x2_t __ret; \
72761 __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
72762 __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
72763 __ret; \
72764 })
72765 #endif
72766
72767 #ifdef __LITTLE_ENDIAN__
72768 #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
72769 int32x4_t __s0 = __p0; \
72770 int16x4_t __s1 = __p1; \
72771 int16x4_t __s2 = __p2; \
72772 int32x4_t __ret; \
72773 __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
72774 __ret; \
72775 })
72776 #else
72777 #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
72778 int32x4_t __s0 = __p0; \
72779 int16x4_t __s1 = __p1; \
72780 int16x4_t __s2 = __p2; \
72781 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
72782 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
72783 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
72784 int32x4_t __ret; \
72785 __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
72786 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
72787 __ret; \
72788 })
72789 #endif
72790
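/* vmlsl_n: widening multiply-subtract by a splatted scalar, analogous to
 * vmlal_n. */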
72791 #ifdef __LITTLE_ENDIAN__
72792 __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
72793 uint64x2_t __ret;
72794 __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
72795 return __ret;
72796 }
72797 #else
72798 __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
72799 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72800 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72801 uint64x2_t __ret;
72802 __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
72803 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72804 return __ret;
72805 }
72806 __ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
72807 uint64x2_t __ret;
72808 __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
72809 return __ret;
72810 }
72811 #endif
72812
72813 #ifdef __LITTLE_ENDIAN__
72814 __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
72815 uint32x4_t __ret;
72816 __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
72817 return __ret;
72818 }
72819 #else
72820 __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
72821 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72822 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72823 uint32x4_t __ret;
72824 __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
72825 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72826 return __ret;
72827 }
72828 __ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
72829 uint32x4_t __ret;
72830 __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
72831 return __ret;
72832 }
72833 #endif
72834
72835 #ifdef __LITTLE_ENDIAN__
72836 __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
72837 int64x2_t __ret;
72838 __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
72839 return __ret;
72840 }
72841 #else
72842 __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
72843 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
72844 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
72845 int64x2_t __ret;
72846 __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
72847 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
72848 return __ret;
72849 }
72850 __ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
72851 int64x2_t __ret;
72852 __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
72853 return __ret;
72854 }
72855 #endif
72856
72857 #ifdef __LITTLE_ENDIAN__
72858 __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
72859 int32x4_t __ret;
72860 __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
72861 return __ret;
72862 }
72863 #else
72864 __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
72865 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
72866 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
72867 int32x4_t __ret;
72868 __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
72869 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
72870 return __ret;
72871 }
72872 __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
72873 int32x4_t __ret;
72874 __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
72875 return __ret;
72876 }
72877 #endif
72878
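/* Half-precision scalar fused multiply-subtract by lane (vfmsh_lane /
 * vfmsh_laneq), available only when __ARM_FEATURE_FP16_SCALAR_ARITHMETIC is
 * defined; the selected lane is extracted with vget_lane_f16 / vgetq_lane_f16
 * and passed to the scalar vfmsh_f16. */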
72879 #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC)
72880 #ifdef __LITTLE_ENDIAN__
72881 #define vfmsh_lane_f16(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
72882 float16_t __s0_258 = __p0_258; \
72883 float16_t __s1_258 = __p1_258; \
72884 float16x4_t __s2_258 = __p2_258; \
72885 float16_t __ret_258; \
72886 __ret_258 = vfmsh_f16(__s0_258, __s1_258, vget_lane_f16(__s2_258, __p3_258)); \
72887 __ret_258; \
72888 })
72889 #else
72890 #define vfmsh_lane_f16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
72891 float16_t __s0_259 = __p0_259; \
72892 float16_t __s1_259 = __p1_259; \
72893 float16x4_t __s2_259 = __p2_259; \
72894 float16x4_t __rev2_259; __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 3, 2, 1, 0); \
72895 float16_t __ret_259; \
72896 __ret_259 = __noswap_vfmsh_f16(__s0_259, __s1_259, __noswap_vget_lane_f16(__rev2_259, __p3_259)); \
72897 __ret_259; \
72898 })
72899 #endif
72900
72901 #ifdef __LITTLE_ENDIAN__
72902 #define vfmsh_laneq_f16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
72903 float16_t __s0_260 = __p0_260; \
72904 float16_t __s1_260 = __p1_260; \
72905 float16x8_t __s2_260 = __p2_260; \
72906 float16_t __ret_260; \
72907 __ret_260 = vfmsh_f16(__s0_260, __s1_260, vgetq_lane_f16(__s2_260, __p3_260)); \
72908 __ret_260; \
72909 })
72910 #else
72911 #define vfmsh_laneq_f16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
72912 float16_t __s0_261 = __p0_261; \
72913 float16_t __s1_261 = __p1_261; \
72914 float16x8_t __s2_261 = __p2_261; \
72915 float16x8_t __rev2_261; __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 7, 6, 5, 4, 3, 2, 1, 0); \
72916 float16_t __ret_261; \
72917 __ret_261 = __noswap_vfmsh_f16(__s0_261, __s1_261, __noswap_vgetq_lane_f16(__rev2_261, __p3_261)); \
72918 __ret_261; \
72919 })
72920 #endif
72921
72922 #endif
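/* ARMv8.1 QRDMLAH/QRDMLSH scalar forms (guarded by __ARM_FEATURE_QRDMX on
 * AArch64): a saturating rounding doubling multiply returning the high half,
 * followed by a saturating add (vqrdmlah*) or subtract (vqrdmlsh*) into the
 * accumulator, with *_lane / *_laneq variants that pick one lane of a vector
 * multiplier. */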
72923 #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
72924 #ifdef __LITTLE_ENDIAN__
72925 __ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
72926 int32_t __ret;
72927 __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
72928 return __ret;
72929 }
72930 #else
72931 __ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
72932 int32_t __ret;
72933 __ret = __noswap_vqadds_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
72934 return __ret;
72935 }
72936 #endif
72937
72938 #ifdef __LITTLE_ENDIAN__
72939 __ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
72940 int16_t __ret;
72941 __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
72942 return __ret;
72943 }
72944 #else
72945 __ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
72946 int16_t __ret;
72947 __ret = __noswap_vqaddh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
72948 return __ret;
72949 }
72950 #endif
72951
72952 #ifdef __LITTLE_ENDIAN__
72953 #define vqrdmlahs_lane_s32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
72954 int32_t __s0_262 = __p0_262; \
72955 int32_t __s1_262 = __p1_262; \
72956 int32x2_t __s2_262 = __p2_262; \
72957 int32_t __ret_262; \
72958 __ret_262 = vqadds_s32(__s0_262, vqrdmulhs_s32(__s1_262, vget_lane_s32(__s2_262, __p3_262))); \
72959 __ret_262; \
72960 })
72961 #else
72962 #define vqrdmlahs_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
72963 int32_t __s0_263 = __p0_263; \
72964 int32_t __s1_263 = __p1_263; \
72965 int32x2_t __s2_263 = __p2_263; \
72966 int32x2_t __rev2_263; __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 1, 0); \
72967 int32_t __ret_263; \
72968 __ret_263 = __noswap_vqadds_s32(__s0_263, __noswap_vqrdmulhs_s32(__s1_263, __noswap_vget_lane_s32(__rev2_263, __p3_263))); \
72969 __ret_263; \
72970 })
72971 #endif
72972
72973 #ifdef __LITTLE_ENDIAN__
72974 #define vqrdmlahh_lane_s16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
72975 int16_t __s0_264 = __p0_264; \
72976 int16_t __s1_264 = __p1_264; \
72977 int16x4_t __s2_264 = __p2_264; \
72978 int16_t __ret_264; \
72979 __ret_264 = vqaddh_s16(__s0_264, vqrdmulhh_s16(__s1_264, vget_lane_s16(__s2_264, __p3_264))); \
72980 __ret_264; \
72981 })
72982 #else
72983 #define vqrdmlahh_lane_s16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
72984 int16_t __s0_265 = __p0_265; \
72985 int16_t __s1_265 = __p1_265; \
72986 int16x4_t __s2_265 = __p2_265; \
72987 int16x4_t __rev2_265; __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 3, 2, 1, 0); \
72988 int16_t __ret_265; \
72989 __ret_265 = __noswap_vqaddh_s16(__s0_265, __noswap_vqrdmulhh_s16(__s1_265, __noswap_vget_lane_s16(__rev2_265, __p3_265))); \
72990 __ret_265; \
72991 })
72992 #endif
72993
72994 #ifdef __LITTLE_ENDIAN__
72995 #define vqrdmlahs_laneq_s32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
72996 int32_t __s0_266 = __p0_266; \
72997 int32_t __s1_266 = __p1_266; \
72998 int32x4_t __s2_266 = __p2_266; \
72999 int32_t __ret_266; \
73000 __ret_266 = vqadds_s32(__s0_266, vqrdmulhs_s32(__s1_266, vgetq_lane_s32(__s2_266, __p3_266))); \
73001 __ret_266; \
73002 })
73003 #else
73004 #define vqrdmlahs_laneq_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
73005 int32_t __s0_267 = __p0_267; \
73006 int32_t __s1_267 = __p1_267; \
73007 int32x4_t __s2_267 = __p2_267; \
73008 int32x4_t __rev2_267; __rev2_267 = __builtin_shufflevector(__s2_267, __s2_267, 3, 2, 1, 0); \
73009 int32_t __ret_267; \
73010 __ret_267 = __noswap_vqadds_s32(__s0_267, __noswap_vqrdmulhs_s32(__s1_267, __noswap_vgetq_lane_s32(__rev2_267, __p3_267))); \
73011 __ret_267; \
73012 })
73013 #endif
73014
73015 #ifdef __LITTLE_ENDIAN__
73016 #define vqrdmlahh_laneq_s16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
73017 int16_t __s0_268 = __p0_268; \
73018 int16_t __s1_268 = __p1_268; \
73019 int16x8_t __s2_268 = __p2_268; \
73020 int16_t __ret_268; \
73021 __ret_268 = vqaddh_s16(__s0_268, vqrdmulhh_s16(__s1_268, vgetq_lane_s16(__s2_268, __p3_268))); \
73022 __ret_268; \
73023 })
73024 #else
73025 #define vqrdmlahh_laneq_s16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
73026 int16_t __s0_269 = __p0_269; \
73027 int16_t __s1_269 = __p1_269; \
73028 int16x8_t __s2_269 = __p2_269; \
73029 int16x8_t __rev2_269; __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 7, 6, 5, 4, 3, 2, 1, 0); \
73030 int16_t __ret_269; \
73031 __ret_269 = __noswap_vqaddh_s16(__s0_269, __noswap_vqrdmulhh_s16(__s1_269, __noswap_vgetq_lane_s16(__rev2_269, __p3_269))); \
73032 __ret_269; \
73033 })
73034 #endif
73035
73036 #ifdef __LITTLE_ENDIAN__
73037 __ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
73038 int32_t __ret;
73039 __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
73040 return __ret;
73041 }
73042 #else
73043 __ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
73044 int32_t __ret;
73045 __ret = __noswap_vqsubs_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
73046 return __ret;
73047 }
73048 #endif
73049
73050 #ifdef __LITTLE_ENDIAN__
73051 __ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
73052 int16_t __ret;
73053 __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
73054 return __ret;
73055 }
73056 #else
73057 __ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
73058 int16_t __ret;
73059 __ret = __noswap_vqsubh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
73060 return __ret;
73061 }
73062 #endif
73063
73064 #ifdef __LITTLE_ENDIAN__
73065 #define vqrdmlshs_lane_s32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
73066 int32_t __s0_270 = __p0_270; \
73067 int32_t __s1_270 = __p1_270; \
73068 int32x2_t __s2_270 = __p2_270; \
73069 int32_t __ret_270; \
73070 __ret_270 = vqsubs_s32(__s0_270, vqrdmulhs_s32(__s1_270, vget_lane_s32(__s2_270, __p3_270))); \
73071 __ret_270; \
73072 })
73073 #else
73074 #define vqrdmlshs_lane_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
73075 int32_t __s0_271 = __p0_271; \
73076 int32_t __s1_271 = __p1_271; \
73077 int32x2_t __s2_271 = __p2_271; \
73078 int32x2_t __rev2_271; __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 1, 0); \
73079 int32_t __ret_271; \
73080 __ret_271 = __noswap_vqsubs_s32(__s0_271, __noswap_vqrdmulhs_s32(__s1_271, __noswap_vget_lane_s32(__rev2_271, __p3_271))); \
73081 __ret_271; \
73082 })
73083 #endif
73084
73085 #ifdef __LITTLE_ENDIAN__
73086 #define vqrdmlshh_lane_s16(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
73087 int16_t __s0_272 = __p0_272; \
73088 int16_t __s1_272 = __p1_272; \
73089 int16x4_t __s2_272 = __p2_272; \
73090 int16_t __ret_272; \
73091 __ret_272 = vqsubh_s16(__s0_272, vqrdmulhh_s16(__s1_272, vget_lane_s16(__s2_272, __p3_272))); \
73092 __ret_272; \
73093 })
73094 #else
73095 #define vqrdmlshh_lane_s16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
73096 int16_t __s0_273 = __p0_273; \
73097 int16_t __s1_273 = __p1_273; \
73098 int16x4_t __s2_273 = __p2_273; \
73099 int16x4_t __rev2_273; __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 3, 2, 1, 0); \
73100 int16_t __ret_273; \
73101 __ret_273 = __noswap_vqsubh_s16(__s0_273, __noswap_vqrdmulhh_s16(__s1_273, __noswap_vget_lane_s16(__rev2_273, __p3_273))); \
73102 __ret_273; \
73103 })
73104 #endif
73105
73106 #ifdef __LITTLE_ENDIAN__
73107 #define vqrdmlshs_laneq_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
73108 int32_t __s0_274 = __p0_274; \
73109 int32_t __s1_274 = __p1_274; \
73110 int32x4_t __s2_274 = __p2_274; \
73111 int32_t __ret_274; \
73112 __ret_274 = vqsubs_s32(__s0_274, vqrdmulhs_s32(__s1_274, vgetq_lane_s32(__s2_274, __p3_274))); \
73113 __ret_274; \
73114 })
73115 #else
73116 #define vqrdmlshs_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
73117 int32_t __s0_275 = __p0_275; \
73118 int32_t __s1_275 = __p1_275; \
73119 int32x4_t __s2_275 = __p2_275; \
73120 int32x4_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 3, 2, 1, 0); \
73121 int32_t __ret_275; \
73122 __ret_275 = __noswap_vqsubs_s32(__s0_275, __noswap_vqrdmulhs_s32(__s1_275, __noswap_vgetq_lane_s32(__rev2_275, __p3_275))); \
73123 __ret_275; \
73124 })
73125 #endif
73126
73127 #ifdef __LITTLE_ENDIAN__
73128 #define vqrdmlshh_laneq_s16(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
73129 int16_t __s0_276 = __p0_276; \
73130 int16_t __s1_276 = __p1_276; \
73131 int16x8_t __s2_276 = __p2_276; \
73132 int16_t __ret_276; \
73133 __ret_276 = vqsubh_s16(__s0_276, vqrdmulhh_s16(__s1_276, vgetq_lane_s16(__s2_276, __p3_276))); \
73134 __ret_276; \
73135 })
73136 #else
73137 #define vqrdmlshh_laneq_s16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
73138 int16_t __s0_277 = __p0_277; \
73139 int16_t __s1_277 = __p1_277; \
73140 int16x8_t __s2_277 = __p2_277; \
73141 int16x8_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 7, 6, 5, 4, 3, 2, 1, 0); \
73142 int16_t __ret_277; \
73143 __ret_277 = __noswap_vqsubh_s16(__s0_277, __noswap_vqrdmulhh_s16(__s1_277, __noswap_vgetq_lane_s16(__rev2_277, __p3_277))); \
73144 __ret_277; \
73145 })
73146 #endif
73147
73148 #endif
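/* AArch64-only widening intrinsics follow.  Each one has two definitions:
 * the little-endian form calls the underlying builtins directly, while the
 * big-endian form lane-reverses its inputs with __builtin_shufflevector,
 * calls the __noswap_ helpers, and lane-reverses the result back. */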
73149 #if defined(__aarch64__)
73150 #ifdef __LITTLE_ENDIAN__
73151 __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
73152 uint16x8_t __ret;
73153 __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
73154 return __ret;
73155 }
73156 #else
73157 __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
73158 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73159 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73160 uint16x8_t __ret;
73161 __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
73162 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73163 return __ret;
73164 }
73165 #endif
73166
73167 #ifdef __LITTLE_ENDIAN__
73168 __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
73169 uint64x2_t __ret;
73170 __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
73171 return __ret;
73172 }
73173 #else
73174 __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
73175 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73176 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73177 uint64x2_t __ret;
73178 __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
73179 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73180 return __ret;
73181 }
73182 #endif
73183
73184 #ifdef __LITTLE_ENDIAN__
73185 __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
73186 uint32x4_t __ret;
73187 __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
73188 return __ret;
73189 }
73190 #else
73191 __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
73192 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73193 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73194 uint32x4_t __ret;
73195 __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
73196 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73197 return __ret;
73198 }
73199 #endif
73200
73201 #ifdef __LITTLE_ENDIAN__
73202 __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
73203 int16x8_t __ret;
73204 __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
73205 return __ret;
73206 }
73207 #else
73208 __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
73209 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73210 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73211 int16x8_t __ret;
73212 __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
73213 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73214 return __ret;
73215 }
73216 #endif
73217
73218 #ifdef __LITTLE_ENDIAN__
73219 __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
73220 int64x2_t __ret;
73221 __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
73222 return __ret;
73223 }
73224 #else
73225 __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
73226 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73227 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73228 int64x2_t __ret;
73229 __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
73230 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73231 return __ret;
73232 }
73233 #endif
73234
73235 #ifdef __LITTLE_ENDIAN__
73236 __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
73237 int32x4_t __ret;
73238 __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
73239 return __ret;
73240 }
73241 #else
73242 __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
73243 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73244 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73245 int32x4_t __ret;
73246 __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
73247 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73248 return __ret;
73249 }
73250 #endif
73251
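/* vaddl_high widens the upper halves of both 128-bit sources with
 * vmovl_high before adding; vaddw_high widens only the second operand.
 * Both are expressed with plain vector addition, so no dedicated builtin
 * is needed. */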
73252 #ifdef __LITTLE_ENDIAN__
73253 __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
73254 uint16x8_t __ret;
73255 __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
73256 return __ret;
73257 }
73258 #else
73259 __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
73260 uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73261 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73262 uint16x8_t __ret;
73263 __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
73264 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73265 return __ret;
73266 }
73267 #endif
73268
73269 #ifdef __LITTLE_ENDIAN__
73270 __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
73271 uint64x2_t __ret;
73272 __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
73273 return __ret;
73274 }
73275 #else
73276 __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
73277 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73278 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73279 uint64x2_t __ret;
73280 __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
73281 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73282 return __ret;
73283 }
73284 #endif
73285
73286 #ifdef __LITTLE_ENDIAN__
73287 __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
73288 uint32x4_t __ret;
73289 __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
73290 return __ret;
73291 }
73292 #else
73293 __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
73294 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73295 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73296 uint32x4_t __ret;
73297 __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
73298 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73299 return __ret;
73300 }
73301 #endif
73302
73303 #ifdef __LITTLE_ENDIAN__
73304 __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
73305 int16x8_t __ret;
73306 __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
73307 return __ret;
73308 }
73309 #else
73310 __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
73311 int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73312 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73313 int16x8_t __ret;
73314 __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
73315 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73316 return __ret;
73317 }
73318 #endif
73319
73320 #ifdef __LITTLE_ENDIAN__
73321 __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
73322 int64x2_t __ret;
73323 __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
73324 return __ret;
73325 }
73326 #else
73327 __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
73328 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73329 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73330 int64x2_t __ret;
73331 __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
73332 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73333 return __ret;
73334 }
73335 #endif
73336
73337 #ifdef __LITTLE_ENDIAN__
73338 __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
73339 int32x4_t __ret;
73340 __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
73341 return __ret;
73342 }
73343 #else
73344 __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
73345 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73346 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73347 int32x4_t __ret;
73348 __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
73349 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73350 return __ret;
73351 }
73352 #endif
73353
73354 #ifdef __LITTLE_ENDIAN__
73355 __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
73356 uint16x8_t __ret;
73357 __ret = __p0 + vmovl_high_u8(__p1);
73358 return __ret;
73359 }
73360 #else
73361 __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
73362 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73363 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73364 uint16x8_t __ret;
73365 __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
73366 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73367 return __ret;
73368 }
73369 #endif
73370
73371 #ifdef __LITTLE_ENDIAN__
73372 __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
73373 uint64x2_t __ret;
73374 __ret = __p0 + vmovl_high_u32(__p1);
73375 return __ret;
73376 }
73377 #else
73378 __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
73379 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73380 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73381 uint64x2_t __ret;
73382 __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
73383 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73384 return __ret;
73385 }
73386 #endif
73387
73388 #ifdef __LITTLE_ENDIAN__
73389 __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
73390 uint32x4_t __ret;
73391 __ret = __p0 + vmovl_high_u16(__p1);
73392 return __ret;
73393 }
73394 #else
73395 __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
73396 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73397 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73398 uint32x4_t __ret;
73399 __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
73400 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73401 return __ret;
73402 }
73403 #endif
73404
73405 #ifdef __LITTLE_ENDIAN__
73406 __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
73407 int16x8_t __ret;
73408 __ret = __p0 + vmovl_high_s8(__p1);
73409 return __ret;
73410 }
73411 #else
73412 __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
73413 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73414 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73415 int16x8_t __ret;
73416 __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
73417 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73418 return __ret;
73419 }
73420 #endif
73421
73422 #ifdef __LITTLE_ENDIAN__
73423 __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
73424 int64x2_t __ret;
73425 __ret = __p0 + vmovl_high_s32(__p1);
73426 return __ret;
73427 }
73428 #else
73429 __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
73430 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73431 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73432 int64x2_t __ret;
73433 __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
73434 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73435 return __ret;
73436 }
73437 #endif
73438
73439 #ifdef __LITTLE_ENDIAN__
73440 __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
73441 int32x4_t __ret;
73442 __ret = __p0 + vmovl_high_s16(__p1);
73443 return __ret;
73444 }
73445 #else
73446 __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
73447 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73448 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73449 int32x4_t __ret;
73450 __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
73451 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73452 return __ret;
73453 }
73454 #endif
73455
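/* vcopy_lane and vcopyq_lane (plus the _laneq forms) for poly64 and float64:
 * insert lane __p3 of the second vector into lane __p1 of the first,
 * expressed as a get-lane followed by a set-lane. */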
73456 #ifdef __LITTLE_ENDIAN__
73457 #define vcopyq_lane_p64(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
73458 poly64x2_t __s0_278 = __p0_278; \
73459 poly64x1_t __s2_278 = __p2_278; \
73460 poly64x2_t __ret_278; \
73461 __ret_278 = vsetq_lane_p64(vget_lane_p64(__s2_278, __p3_278), __s0_278, __p1_278); \
73462 __ret_278; \
73463 })
73464 #else
73465 #define vcopyq_lane_p64(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
73466 poly64x2_t __s0_279 = __p0_279; \
73467 poly64x1_t __s2_279 = __p2_279; \
73468 poly64x2_t __rev0_279; __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 1, 0); \
73469 poly64x2_t __ret_279; \
73470 __ret_279 = __noswap_vsetq_lane_p64(__noswap_vget_lane_p64(__s2_279, __p3_279), __rev0_279, __p1_279); \
73471 __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 1, 0); \
73472 __ret_279; \
73473 })
73474 #endif
73475
73476 #ifdef __LITTLE_ENDIAN__
73477 #define vcopyq_lane_f64(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
73478 float64x2_t __s0_280 = __p0_280; \
73479 float64x1_t __s2_280 = __p2_280; \
73480 float64x2_t __ret_280; \
73481 __ret_280 = vsetq_lane_f64(vget_lane_f64(__s2_280, __p3_280), __s0_280, __p1_280); \
73482 __ret_280; \
73483 })
73484 #else
73485 #define vcopyq_lane_f64(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
73486 float64x2_t __s0_281 = __p0_281; \
73487 float64x1_t __s2_281 = __p2_281; \
73488 float64x2_t __rev0_281; __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 1, 0); \
73489 float64x2_t __ret_281; \
73490 __ret_281 = __noswap_vsetq_lane_f64(__noswap_vget_lane_f64(__s2_281, __p3_281), __rev0_281, __p1_281); \
73491 __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 1, 0); \
73492 __ret_281; \
73493 })
73494 #endif
73495
73496 #ifdef __LITTLE_ENDIAN__
73497 #define vcopy_lane_p64(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
73498 poly64x1_t __s0_282 = __p0_282; \
73499 poly64x1_t __s2_282 = __p2_282; \
73500 poly64x1_t __ret_282; \
73501 __ret_282 = vset_lane_p64(vget_lane_p64(__s2_282, __p3_282), __s0_282, __p1_282); \
73502 __ret_282; \
73503 })
73504 #else
73505 #define vcopy_lane_p64(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
73506 poly64x1_t __s0_283 = __p0_283; \
73507 poly64x1_t __s2_283 = __p2_283; \
73508 poly64x1_t __ret_283; \
73509 __ret_283 = __noswap_vset_lane_p64(__noswap_vget_lane_p64(__s2_283, __p3_283), __s0_283, __p1_283); \
73510 __ret_283; \
73511 })
73512 #endif
73513
73514 #ifdef __LITTLE_ENDIAN__
73515 #define vcopy_lane_f64(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
73516 float64x1_t __s0_284 = __p0_284; \
73517 float64x1_t __s2_284 = __p2_284; \
73518 float64x1_t __ret_284; \
73519 __ret_284 = vset_lane_f64(vget_lane_f64(__s2_284, __p3_284), __s0_284, __p1_284); \
73520 __ret_284; \
73521 })
73522 #else
73523 #define vcopy_lane_f64(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
73524 float64x1_t __s0_285 = __p0_285; \
73525 float64x1_t __s2_285 = __p2_285; \
73526 float64x1_t __ret_285; \
73527 __ret_285 = __noswap_vset_lane_f64(__noswap_vget_lane_f64(__s2_285, __p3_285), __s0_285, __p1_285); \
73528 __ret_285; \
73529 })
73530 #endif
73531
73532 #ifdef __LITTLE_ENDIAN__
73533 #define vcopyq_laneq_p64(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
73534 poly64x2_t __s0_286 = __p0_286; \
73535 poly64x2_t __s2_286 = __p2_286; \
73536 poly64x2_t __ret_286; \
73537 __ret_286 = vsetq_lane_p64(vgetq_lane_p64(__s2_286, __p3_286), __s0_286, __p1_286); \
73538 __ret_286; \
73539 })
73540 #else
73541 #define vcopyq_laneq_p64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
73542 poly64x2_t __s0_287 = __p0_287; \
73543 poly64x2_t __s2_287 = __p2_287; \
73544 poly64x2_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \
73545 poly64x2_t __rev2_287; __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 1, 0); \
73546 poly64x2_t __ret_287; \
73547 __ret_287 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_287, __p3_287), __rev0_287, __p1_287); \
73548 __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \
73549 __ret_287; \
73550 })
73551 #endif
73552
73553 #ifdef __LITTLE_ENDIAN__
73554 #define vcopyq_laneq_f64(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
73555 float64x2_t __s0_288 = __p0_288; \
73556 float64x2_t __s2_288 = __p2_288; \
73557 float64x2_t __ret_288; \
73558 __ret_288 = vsetq_lane_f64(vgetq_lane_f64(__s2_288, __p3_288), __s0_288, __p1_288); \
73559 __ret_288; \
73560 })
73561 #else
73562 #define vcopyq_laneq_f64(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
73563 float64x2_t __s0_289 = __p0_289; \
73564 float64x2_t __s2_289 = __p2_289; \
73565 float64x2_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 1, 0); \
73566 float64x2_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 1, 0); \
73567 float64x2_t __ret_289; \
73568 __ret_289 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_289, __p3_289), __rev0_289, __p1_289); \
73569 __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 1, 0); \
73570 __ret_289; \
73571 })
73572 #endif
73573
73574 #ifdef __LITTLE_ENDIAN__
73575 #define vcopy_laneq_p64(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
73576 poly64x1_t __s0_290 = __p0_290; \
73577 poly64x2_t __s2_290 = __p2_290; \
73578 poly64x1_t __ret_290; \
73579 __ret_290 = vset_lane_p64(vgetq_lane_p64(__s2_290, __p3_290), __s0_290, __p1_290); \
73580 __ret_290; \
73581 })
73582 #else
73583 #define vcopy_laneq_p64(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
73584 poly64x1_t __s0_291 = __p0_291; \
73585 poly64x2_t __s2_291 = __p2_291; \
73586 poly64x2_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 1, 0); \
73587 poly64x1_t __ret_291; \
73588 __ret_291 = __noswap_vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_291, __p3_291), __s0_291, __p1_291); \
73589 __ret_291; \
73590 })
73591 #endif
73592
73593 #ifdef __LITTLE_ENDIAN__
73594 #define vcopy_laneq_f64(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
73595 float64x1_t __s0_292 = __p0_292; \
73596 float64x2_t __s2_292 = __p2_292; \
73597 float64x1_t __ret_292; \
73598 __ret_292 = vset_lane_f64(vgetq_lane_f64(__s2_292, __p3_292), __s0_292, __p1_292); \
73599 __ret_292; \
73600 })
73601 #else
73602 #define vcopy_laneq_f64(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \
73603 float64x1_t __s0_293 = __p0_293; \
73604 float64x2_t __s2_293 = __p2_293; \
73605 float64x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \
73606 float64x1_t __ret_293; \
73607 __ret_293 = __noswap_vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_293, __p3_293), __s0_293, __p1_293); \
73608 __ret_293; \
73609 })
73610 #endif
73611
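/* vmlal_high: widening multiply-accumulate on the upper halves, built on the
 * 64-bit vmlal intrinsics via vget_high. */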
73612 #ifdef __LITTLE_ENDIAN__
73613 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
73614 uint16x8_t __ret;
73615 __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
73616 return __ret;
73617 }
73618 #else
73619 __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
73620 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73621 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73622 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73623 uint16x8_t __ret;
73624 __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
73625 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73626 return __ret;
73627 }
73628 #endif
73629
73630 #ifdef __LITTLE_ENDIAN__
73631 __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
73632 uint64x2_t __ret;
73633 __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
73634 return __ret;
73635 }
73636 #else
73637 __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
73638 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73639 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73640 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
73641 uint64x2_t __ret;
73642 __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
73643 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73644 return __ret;
73645 }
73646 #endif
73647
73648 #ifdef __LITTLE_ENDIAN__
73649 __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
73650 uint32x4_t __ret;
73651 __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
73652 return __ret;
73653 }
73654 #else
73655 __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
73656 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73657 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73658 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
73659 uint32x4_t __ret;
73660 __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
73661 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73662 return __ret;
73663 }
73664 #endif
73665
73666 #ifdef __LITTLE_ENDIAN__
73667 __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
73668 int16x8_t __ret;
73669 __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
73670 return __ret;
73671 }
73672 #else
73673 __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
73674 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73675 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73676 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73677 int16x8_t __ret;
73678 __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
73679 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73680 return __ret;
73681 }
73682 #endif
73683
73684 #ifdef __LITTLE_ENDIAN__
73685 __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
73686 int64x2_t __ret;
73687 __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
73688 return __ret;
73689 }
73690 #else
73691 __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
73692 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73693 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73694 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
73695 int64x2_t __ret;
73696 __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
73697 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73698 return __ret;
73699 }
73700 #endif
73701
73702 #ifdef __LITTLE_ENDIAN__
73703 __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
73704 int32x4_t __ret;
73705 __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
73706 return __ret;
73707 }
73708 #else
73709 __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
73710 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73711 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73712 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
73713 int32x4_t __ret;
73714 __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
73715 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73716 return __ret;
73717 }
73718 #endif
73719
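/* The vmlal_high_n forms multiply the upper half of the vector operand by a
 * scalar before accumulating. */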
73720 #ifdef __LITTLE_ENDIAN__
73721 __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
73722 uint64x2_t __ret;
73723 __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
73724 return __ret;
73725 }
73726 #else
73727 __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
73728 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73729 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73730 uint64x2_t __ret;
73731 __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
73732 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73733 return __ret;
73734 }
73735 #endif
73736
73737 #ifdef __LITTLE_ENDIAN__
73738 __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
73739 uint32x4_t __ret;
73740 __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
73741 return __ret;
73742 }
73743 #else
73744 __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
73745 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73746 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73747 uint32x4_t __ret;
73748 __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
73749 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73750 return __ret;
73751 }
73752 #endif
73753
73754 #ifdef __LITTLE_ENDIAN__
73755 __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
73756 int64x2_t __ret;
73757 __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
73758 return __ret;
73759 }
73760 #else
73761 __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
73762 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73763 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73764 int64x2_t __ret;
73765 __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
73766 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73767 return __ret;
73768 }
73769 #endif
73770
73771 #ifdef __LITTLE_ENDIAN__
73772 __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
73773 int32x4_t __ret;
73774 __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
73775 return __ret;
73776 }
73777 #else
73778 __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
73779 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73780 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73781 int32x4_t __ret;
73782 __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
73783 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73784 return __ret;
73785 }
73786 #endif
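/* vmlsl_high: widening multiply-subtract on the upper halves, the mirror
 * image of vmlal_high above. */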
73787
73788 #ifdef __LITTLE_ENDIAN__
73789 __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
73790 uint16x8_t __ret;
73791 __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
73792 return __ret;
73793 }
73794 #else
73795 __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
73796 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73797 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73798 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73799 uint16x8_t __ret;
73800 __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
73801 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73802 return __ret;
73803 }
73804 #endif
73805
73806 #ifdef __LITTLE_ENDIAN__
73807 __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
73808 uint64x2_t __ret;
73809 __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
73810 return __ret;
73811 }
73812 #else
73813 __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
73814 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73815 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73816 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
73817 uint64x2_t __ret;
73818 __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
73819 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73820 return __ret;
73821 }
73822 #endif
73823
73824 #ifdef __LITTLE_ENDIAN__
73825 __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
73826 uint32x4_t __ret;
73827 __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
73828 return __ret;
73829 }
73830 #else
73831 __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
73832 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73833 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73834 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
73835 uint32x4_t __ret;
73836 __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
73837 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73838 return __ret;
73839 }
73840 #endif
73841
73842 #ifdef __LITTLE_ENDIAN__
73843 __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
73844 int16x8_t __ret;
73845 __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
73846 return __ret;
73847 }
73848 #else
73849 __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
73850 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
73851 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73852 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
73853 int16x8_t __ret;
73854 __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
73855 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
73856 return __ret;
73857 }
73858 #endif
73859
73860 #ifdef __LITTLE_ENDIAN__
73861 __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
73862 int64x2_t __ret;
73863 __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
73864 return __ret;
73865 }
73866 #else
73867 __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
73868 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73869 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73870 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
73871 int64x2_t __ret;
73872 __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
73873 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73874 return __ret;
73875 }
73876 #endif
73877
73878 #ifdef __LITTLE_ENDIAN__
73879 __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
73880 int32x4_t __ret;
73881 __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
73882 return __ret;
73883 }
73884 #else
73885 __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
73886 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73887 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73888 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
73889 int32x4_t __ret;
73890 __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
73891 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73892 return __ret;
73893 }
73894 #endif
73895
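/* vmlsl_high_n: as above, but with a scalar multiplier. */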
73896 #ifdef __LITTLE_ENDIAN__
73897 __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
73898 uint64x2_t __ret;
73899 __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
73900 return __ret;
73901 }
73902 #else
73903 __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
73904 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73905 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73906 uint64x2_t __ret;
73907 __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
73908 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73909 return __ret;
73910 }
73911 #endif
73912
73913 #ifdef __LITTLE_ENDIAN__
73914 __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
73915 uint32x4_t __ret;
73916 __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
73917 return __ret;
73918 }
73919 #else
73920 __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
73921 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73922 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73923 uint32x4_t __ret;
73924 __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
73925 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73926 return __ret;
73927 }
73928 #endif
73929
73930 #ifdef __LITTLE_ENDIAN__
73931 __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
73932 int64x2_t __ret;
73933 __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
73934 return __ret;
73935 }
73936 #else
73937 __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
73938 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
73939 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
73940 int64x2_t __ret;
73941 __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
73942 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
73943 return __ret;
73944 }
73945 #endif
73946
73947 #ifdef __LITTLE_ENDIAN__
73948 __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
73949 int32x4_t __ret;
73950 __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
73951 return __ret;
73952 }
73953 #else
73954 __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
73955 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
73956 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
73957 int32x4_t __ret;
73958 __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
73959 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
73960 return __ret;
73961 }
73962 #endif
73963
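/* vmulx_lane_f64 and vmulx_laneq_f64 on float64x1_t are folded to scalar
 * vmulxd_f64 calls; only the two-lane _laneq source needs reversing in the
 * big-endian variant. */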
73964 #ifdef __LITTLE_ENDIAN__
73965 #define vmulx_lane_f64(__p0_294, __p1_294, __p2_294) __extension__ ({ \
73966 float64x1_t __s0_294 = __p0_294; \
73967 float64x1_t __s1_294 = __p1_294; \
73968 float64x1_t __ret_294; \
73969 float64_t __x_294 = vget_lane_f64(__s0_294, 0); \
73970 float64_t __y_294 = vget_lane_f64(__s1_294, __p2_294); \
73971 float64_t __z_294 = vmulxd_f64(__x_294, __y_294); \
73972 __ret_294 = vset_lane_f64(__z_294, __s0_294, __p2_294); \
73973 __ret_294; \
73974 })
73975 #else
73976 #define vmulx_lane_f64(__p0_295, __p1_295, __p2_295) __extension__ ({ \
73977 float64x1_t __s0_295 = __p0_295; \
73978 float64x1_t __s1_295 = __p1_295; \
73979 float64x1_t __ret_295; \
73980 float64_t __x_295 = __noswap_vget_lane_f64(__s0_295, 0); \
73981 float64_t __y_295 = __noswap_vget_lane_f64(__s1_295, __p2_295); \
73982 float64_t __z_295 = __noswap_vmulxd_f64(__x_295, __y_295); \
73983 __ret_295 = __noswap_vset_lane_f64(__z_295, __s0_295, __p2_295); \
73984 __ret_295; \
73985 })
73986 #endif
73987
73988 #ifdef __LITTLE_ENDIAN__
73989 #define vmulx_laneq_f64(__p0_296, __p1_296, __p2_296) __extension__ ({ \
73990 float64x1_t __s0_296 = __p0_296; \
73991 float64x2_t __s1_296 = __p1_296; \
73992 float64x1_t __ret_296; \
73993 float64_t __x_296 = vget_lane_f64(__s0_296, 0); \
73994 float64_t __y_296 = vgetq_lane_f64(__s1_296, __p2_296); \
73995 float64_t __z_296 = vmulxd_f64(__x_296, __y_296); \
73996 __ret_296 = vset_lane_f64(__z_296, __s0_296, 0); \
73997 __ret_296; \
73998 })
73999 #else
74000 #define vmulx_laneq_f64(__p0_297, __p1_297, __p2_297) __extension__ ({ \
74001 float64x1_t __s0_297 = __p0_297; \
74002 float64x2_t __s1_297 = __p1_297; \
74003 float64x2_t __rev1_297; __rev1_297 = __builtin_shufflevector(__s1_297, __s1_297, 1, 0); \
74004 float64x1_t __ret_297; \
74005 float64_t __x_297 = __noswap_vget_lane_f64(__s0_297, 0); \
74006 float64_t __y_297 = __noswap_vgetq_lane_f64(__rev1_297, __p2_297); \
74007 float64_t __z_297 = __noswap_vmulxd_f64(__x_297, __y_297); \
74008 __ret_297 = __noswap_vset_lane_f64(__z_297, __s0_297, 0); \
74009 __ret_297; \
74010 })
74011 #endif
74012
74013 #endif
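/* vabal: absolute-difference-and-accumulate, expressed as a plain add of
 * vabdl onto the accumulator.  The __noswap_ helpers defined alongside the
 * big-endian forms are reused by the big-endian vabal_high wrappers below. */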
74014 #ifdef __LITTLE_ENDIAN__
74015 __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
74016 uint16x8_t __ret;
74017 __ret = __p0 + vabdl_u8(__p1, __p2);
74018 return __ret;
74019 }
74020 #else
74021 __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
74022 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
74023 uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
74024 uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
74025 uint16x8_t __ret;
74026 __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
74027 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
74028 return __ret;
74029 }
74030 __ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
74031 uint16x8_t __ret;
74032 __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
74033 return __ret;
74034 }
74035 #endif
74036
74037 #ifdef __LITTLE_ENDIAN__
74038 __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
74039 uint64x2_t __ret;
74040 __ret = __p0 + vabdl_u32(__p1, __p2);
74041 return __ret;
74042 }
74043 #else
74044 __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
74045 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
74046 uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
74047 uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
74048 uint64x2_t __ret;
74049 __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
74050 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
74051 return __ret;
74052 }
74053 __ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
74054 uint64x2_t __ret;
74055 __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
74056 return __ret;
74057 }
74058 #endif
74059
74060 #ifdef __LITTLE_ENDIAN__
74061 __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
74062 uint32x4_t __ret;
74063 __ret = __p0 + vabdl_u16(__p1, __p2);
74064 return __ret;
74065 }
74066 #else
74067 __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
74068 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
74069 uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
74070 uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
74071 uint32x4_t __ret;
74072 __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
74073 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
74074 return __ret;
74075 }
74076 __ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
74077 uint32x4_t __ret;
74078 __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
74079 return __ret;
74080 }
74081 #endif
74082
74083 #ifdef __LITTLE_ENDIAN__
74084 __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
74085 int16x8_t __ret;
74086 __ret = __p0 + vabdl_s8(__p1, __p2);
74087 return __ret;
74088 }
74089 #else
74090 __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
74091 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
74092 int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
74093 int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
74094 int16x8_t __ret;
74095 __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
74096 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
74097 return __ret;
74098 }
74099 __ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
74100 int16x8_t __ret;
74101 __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
74102 return __ret;
74103 }
74104 #endif
74105
74106 #ifdef __LITTLE_ENDIAN__
74107 __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
74108 int64x2_t __ret;
74109 __ret = __p0 + vabdl_s32(__p1, __p2);
74110 return __ret;
74111 }
74112 #else
74113 __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
74114 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
74115 int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
74116 int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
74117 int64x2_t __ret;
74118 __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
74119 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
74120 return __ret;
74121 }
74122 __ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
74123 int64x2_t __ret;
74124 __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
74125 return __ret;
74126 }
74127 #endif
74128
74129 #ifdef __LITTLE_ENDIAN__
74130 __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
74131 int32x4_t __ret;
74132 __ret = __p0 + vabdl_s16(__p1, __p2);
74133 return __ret;
74134 }
74135 #else
74136 __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
74137 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
74138 int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
74139 int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
74140 int32x4_t __ret;
74141 __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
74142 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
74143 return __ret;
74144 }
74145 __ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
74146 int32x4_t __ret;
74147 __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
74148 return __ret;
74149 }
74150 #endif
74151
74152 #if defined(__aarch64__)
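/* vabal_high: accumulate absolute differences of the upper halves of the two
 * 128-bit sources.  Illustrative usage (not part of the generated header),
 * with uint8x16_t a, b:
 *
 *   uint16x8_t acc = vdupq_n_u16(0);
 *   acc = vabal_high_u8(acc, a, b);  // acc[i] += |a[8 + i] - b[8 + i]|
 */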
74153 #ifdef __LITTLE_ENDIAN__
74154 __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
74155 uint16x8_t __ret;
74156 __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
74157 return __ret;
74158 }
74159 #else
74160 __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
74161 uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
74162 uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
74163 uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
74164 uint16x8_t __ret;
74165 __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
74166 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
74167 return __ret;
74168 }
74169 #endif
74170
74171 #ifdef __LITTLE_ENDIAN__
74172 __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
74173 uint64x2_t __ret;
74174 __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
74175 return __ret;
74176 }
74177 #else
74178 __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
74179 uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
74180 uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
74181 uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
74182 uint64x2_t __ret;
74183 __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
74184 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
74185 return __ret;
74186 }
74187 #endif
74188
74189 #ifdef __LITTLE_ENDIAN__
74190 __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
74191 uint32x4_t __ret;
74192 __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
74193 return __ret;
74194 }
74195 #else
74196 __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
74197 uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
74198 uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
74199 uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
74200 uint32x4_t __ret;
74201 __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
74202 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
74203 return __ret;
74204 }
74205 #endif
74206
74207 #ifdef __LITTLE_ENDIAN__
74208 __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
74209 int16x8_t __ret;
74210 __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
74211 return __ret;
74212 }
74213 #else
74214 __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
74215 int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
74216 int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
74217 int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
74218 int16x8_t __ret;
74219 __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
74220 __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
74221 return __ret;
74222 }
74223 #endif
74224
74225 #ifdef __LITTLE_ENDIAN__
74226 __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
74227 int64x2_t __ret;
74228 __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
74229 return __ret;
74230 }
74231 #else
74232 __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
74233 int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
74234 int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
74235 int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
74236 int64x2_t __ret;
74237 __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
74238 __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
74239 return __ret;
74240 }
74241 #endif
74242
74243 #ifdef __LITTLE_ENDIAN__
74244 __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
74245 int32x4_t __ret;
74246 __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
74247 return __ret;
74248 }
74249 #else
74250 __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
74251 int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
74252 int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
74253 int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
74254 int32x4_t __ret;
74255 __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
74256 __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
74257 return __ret;
74258 }
74259 #endif
74260
74261 #endif
74262
74263 #undef __ai
74264
74265 #pragma clang diagnostic pop
74266
74267 #endif /* __ARM_NEON_H */