/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8vadd/
  neon.c
    97   vsraq_n_s32(vacc0_lo, vbicq_s32(vacc0_lo, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    99   vsraq_n_s32(vacc1_lo, vbicq_s32(vacc1_lo, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    101  vsraq_n_s32(vacc2_lo, vbicq_s32(vacc2_lo, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    103  vsraq_n_s32(vacc3_lo, vbicq_s32(vacc3_lo, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    105  vsraq_n_s32(vacc0_hi, vbicq_s32(vacc0_hi, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    107  vsraq_n_s32(vacc1_hi, vbicq_s32(vacc1_hi, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    109  vsraq_n_s32(vacc2_hi, vbicq_s32(vacc2_hi, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    111  vsraq_n_s32(vacc3_hi, vbicq_s32(vacc3_hi, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    186  vsraq_n_s32(vacc0_lo, vbicq_s32(vacc0_lo, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    188  vsraq_n_s32(vacc1_lo, vbicq_s32(vacc1_lo, vzero_shift_mask), 31);  in pytorch_q8vadd_ukernel__neon()
    [all …]
/aosp_15_r20/external/XNNPACK/src/qu8-requantization/
  gemmlowp-neon.c
    71  const int32x4_t x_adjusted_product = vsraq_n_s32(x_product, vbicq_s32(x, vshift_eq_0_mask), 31);  in xnn_qu8_requantize_gemmlowp__neon()
    72  const int32x4_t y_adjusted_product = vsraq_n_s32(y_product, vbicq_s32(y, vshift_eq_0_mask), 31);  in xnn_qu8_requantize_gemmlowp__neon()
    73  const int32x4_t z_adjusted_product = vsraq_n_s32(z_product, vbicq_s32(z, vshift_eq_0_mask), 31);  in xnn_qu8_requantize_gemmlowp__neon()
    74  const int32x4_t w_adjusted_product = vsraq_n_s32(w_product, vbicq_s32(w, vshift_eq_0_mask), 31);  in xnn_qu8_requantize_gemmlowp__neon()
/aosp_15_r20/external/XNNPACK/src/qs8-requantization/
  gemmlowp-neon.c
    71  const int32x4_t x_adjusted_product = vsraq_n_s32(x_product, vbicq_s32(x, vshift_eq_0_mask), 31);  in xnn_qs8_requantize_gemmlowp__neon()
    72  const int32x4_t y_adjusted_product = vsraq_n_s32(y_product, vbicq_s32(y, vshift_eq_0_mask), 31);  in xnn_qs8_requantize_gemmlowp__neon()
    73  const int32x4_t z_adjusted_product = vsraq_n_s32(z_product, vbicq_s32(z, vshift_eq_0_mask), 31);  in xnn_qs8_requantize_gemmlowp__neon()
    74  const int32x4_t w_adjusted_product = vsraq_n_s32(w_product, vbicq_s32(w, vshift_eq_0_mask), 31);  in xnn_qs8_requantize_gemmlowp__neon()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/
  q31-neon.c
    79  vsraq_n_s32(x_product, vbicq_s32(x, vshift_eq_0_mask), 31);  in pytorch_qnnp_requantize_q31__neon()
    81  vsraq_n_s32(y_product, vbicq_s32(y, vshift_eq_0_mask), 31);  in pytorch_qnnp_requantize_q31__neon()
    83  vsraq_n_s32(z_product, vbicq_s32(z, vshift_eq_0_mask), 31);  in pytorch_qnnp_requantize_q31__neon()
    85  vsraq_n_s32(w_product, vbicq_s32(w, vshift_eq_0_mask), 31);  in pytorch_qnnp_requantize_q31__neon()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/
  4x8c2-xzp-neon.c
    395  vsraq_n_s32(vacc0x0123, vbicq_s32(vacc0x0123, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
    397  vsraq_n_s32(vacc0x4567, vbicq_s32(vacc0x4567, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
    399  vsraq_n_s32(vacc1x0123, vbicq_s32(vacc1x0123, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
    401  vsraq_n_s32(vacc1x4567, vbicq_s32(vacc1x4567, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
    403  vsraq_n_s32(vacc2x0123, vbicq_s32(vacc2x0123, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
    405  vsraq_n_s32(vacc2x4567, vbicq_s32(vacc2x4567, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
    407  vsraq_n_s32(vacc3x0123, vbicq_s32(vacc3x0123, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
    409  vsraq_n_s32(vacc3x4567, vbicq_s32(vacc3x4567, vzero_shift_mask), 31);  in pytorch_q8gemm_xzp_ukernel_4x8c2__neon()
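Note on the QNNPACK and XNNPACK hits above: they all use the same gemmlowp-style rounding fix-up. vbicq_s32 clears the sign bits of the pre-scaled value in lanes whose shift amount is zero, and vsraq_n_s32(..., 31) adds the remaining sign bit (-1 for negative lanes, 0 otherwise) to the vqrdmulhq_s32 product before the final rounding right shift, turning that shift's round-half-toward-+infinity behaviour into round-half-away-from-zero. A minimal sketch of the idiom follows; the helper name, parameter layout, and surrounding setup are illustrative assumptions, not code from any of the files listed.

    #include <arm_neon.h>

    /* Illustrative sketch only: one lane-group of a gemmlowp/Q31 requantization
     * step, assuming vmultiplier is the Q31 fixed-point multiplier, vshift holds
     * the (negative) right-shift amount for vrshlq_s32, and vshift_eq_0_mask has
     * all bits set in lanes where the shift amount is zero. */
    static inline int32x4_t requantize_q31_step(int32x4_t x, int32x4_t vmultiplier,
                                                int32x4_t vshift,
                                                int32x4_t vshift_eq_0_mask) {
      /* Saturating rounding doubling multiply-high: roughly (2*x*multiplier + 2^31) >> 32. */
      const int32x4_t x_product = vqrdmulhq_s32(x, vmultiplier);
      /* Add the sign bit of x (only where shift != 0) so the rounding right shift
       * below rounds ties away from zero instead of toward +infinity. */
      const int32x4_t x_adjusted_product =
          vsraq_n_s32(x_product, vbicq_s32(x, vshift_eq_0_mask), 31);
      /* Rounding right shift by the magnitude of vshift. */
      return vrshlq_s32(x_adjusted_product, vshift);
    }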
/aosp_15_r20/external/libaom/aom_dsp/arm/
  obmc_variance_neon.c
    47  diff_s32_lo = vsraq_n_s32(diff_s32_lo, diff_s32_lo, 31);  in obmc_variance_8x1_s16_neon()
    48  diff_s32_hi = vsraq_n_s32(diff_s32_hi, diff_s32_hi, 31);  in obmc_variance_8x1_s16_neon()
    86  diff_lo = vsraq_n_s32(diff_lo, diff_lo, 31);  in obmc_variance_8x1_s32_neon()
    87  diff_hi = vsraq_n_s32(diff_hi, diff_hi, 31);  in obmc_variance_8x1_s32_neon()
  highbd_obmc_variance_neon.c
    47  diff_lo = vsraq_n_s32(diff_lo, diff_lo, 31);  in highbd_obmc_variance_8x1_s16_neon()
    48  diff_hi = vsraq_n_s32(diff_hi, diff_hi, 31);  in highbd_obmc_variance_8x1_s16_neon()
/aosp_15_r20/external/libgav1/src/dsp/arm/
  motion_vector_search_neon.cc
    41  const int32x4_t add_sign = vsraq_n_s32(m, m, 31);  in MvProjection()
  motion_field_projection_neon.cc
    57  const int32x4_t add_sign = vsraq_n_s32(m, m, 31);  in MvProjection()
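Note on the libaom and libgav1 hits above: here vsraq_n_s32 is applied to a value and itself, i.e. x + (x >> 31), which subtracts 1 from negative lanes and leaves the rest unchanged. Followed by a NEON rounding right shift (which rounds ties toward +infinity), the pair behaves as a signed round-to-nearest division by a power of two with ties away from zero. A small sketch of that combination, with the helper name and the shift amount of 12 chosen purely for illustration rather than taken from the listed files:

    #include <arm_neon.h>

    /* Illustrative sketch only: signed round-to-nearest division by 2^12 with
     * ties rounded away from zero, built from the add-sign idiom above. */
    static inline int32x4_t round_shift_signed_12(int32x4_t x) {
      /* x + (x >> 31): subtract 1 from negative lanes only. */
      const int32x4_t x_adj = vsraq_n_s32(x, x, 31);
      /* Rounding right shift: per lane, (x_adj + (1 << 11)) >> 12. */
      return vrshrq_n_s32(x_adj, 12);
    }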
/aosp_15_r20/external/clang/test/CodeGen/
  aarch64-neon-intrinsics.c
    5554   return vsraq_n_s32(a, b, 3);  in test_vsraq_n_s32()
  arm_neon_intrinsics.c
    17692  return vsraq_n_s32(a, b, 1);  in test_vsraq_n_s32()
/aosp_15_r20/external/neon_2_sse/
  NEON_2_SSE.h
    1085  _NEON2SSESTORAGE int32x4_t vsraq_n_s32(int32x4_t a, int32x4_t b, __constrange(1,32) int c); // VSRA…
    8025  _NEON2SSESTORAGE int32x4_t vsraq_n_s32(int32x4_t a, int32x4_t b, __constrange(1,32) int c); // VSRA…
    8026  _NEON2SSE_INLINE int32x4_t vsraq_n_s32(int32x4_t a, int32x4_t b, __constrange(1,32) int c) // VSRA.…
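Note on the prototypes above (and the compiler headers and stdarch bindings below): they all describe the same operation. vsraq_n_s32(a, b, n) is a per-lane shift-right-accumulate, a[i] + (b[i] >> n), using an arithmetic shift with n constrained to 1..32. A scalar reference sketch of that semantics, written from the Arm VSRA description as an assumption rather than copied from any of the listed files:

    #include <stdint.h>

    /* Illustrative scalar model of vsraq_n_s32: per-lane a + (b >> n) with an
     * arithmetic (sign-propagating) shift.  The shift is done in 64 bits so
     * that n == 32, which the intrinsic allows, stays well defined in C. */
    static inline void vsraq_n_s32_ref(const int32_t a[4], const int32_t b[4],
                                       int n, int32_t out[4]) {
      for (int i = 0; i < 4; ++i) {
        out[i] = (int32_t)(a[i] + (int32_t)((int64_t)b[i] >> n));
      }
    }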
/aosp_15_r20/prebuilts/rust/linux-x86/1.81.0.u1/lib/rustlib/src/rust/library/stdarch/crates/core_arch/src/arm_shared/neon/
  generated.rs
    30256  pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {  in vsraq_n_s32() function
    46246  let r: i32x4 = transmute(vsraq_n_s32::<2>(transmute(a), transmute(b)));  in test_vsraq_n_s32()
/aosp_15_r20/prebuilts/rust/linux-musl-x86/1.80.1/lib/rustlib/src/rust/library/stdarch/crates/core_arch/src/arm_shared/neon/
  generated.rs
    30256  pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {  in vsraq_n_s32() function
    46246  let r: i32x4 = transmute(vsraq_n_s32::<2>(transmute(a), transmute(b)));  in test_vsraq_n_s32()
/aosp_15_r20/prebuilts/rust/linux-musl-x86/1.81.0/lib/rustlib/src/rust/library/stdarch/crates/core_arch/src/arm_shared/neon/
  generated.rs
    30256  pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {  in vsraq_n_s32() function
    46246  let r: i32x4 = transmute(vsraq_n_s32::<2>(transmute(a), transmute(b)));  in test_vsraq_n_s32()
/aosp_15_r20/prebuilts/rust/linux-x86/1.81.0/lib/rustlib/src/rust/library/stdarch/crates/core_arch/src/arm_shared/neon/
  generated.rs
    30256  pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {  in vsraq_n_s32() function
    46246  let r: i32x4 = transmute(vsraq_n_s32::<2>(transmute(a), transmute(b)));  in test_vsraq_n_s32()
/aosp_15_r20/prebuilts/rust/linux-x86/1.80.1/lib/rustlib/src/rust/library/stdarch/crates/core_arch/src/arm_shared/neon/
  generated.rs
    30256  pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {  in vsraq_n_s32() function
    46246  let r: i32x4 = transmute(vsraq_n_s32::<2>(transmute(a), transmute(b)));  in test_vsraq_n_s32()
/aosp_15_r20/out/soong/.intermediates/external/clang/clang-gen-arm-neon/gen/clang/Basic/
  arm_neon.h
    24325  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    24333  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
/aosp_15_r20/prebuilts/clang/host/linux-x86/clang-3289846/lib64/clang/3.8/include/
  arm_neon.h
    24325  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    24333  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
/aosp_15_r20/prebuilts/sdk/renderscript/clang-include/
  arm_neon.h
    24373  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    24381  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
/aosp_15_r20/prebuilts/clang/host/linux-x86/clang-r530567b/lib/clang/19/include/
  arm_neon.h
    25829  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    25837  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
/aosp_15_r20/prebuilts/clang/host/linux-x86/clang-r522817/lib/clang/18/include/
  arm_neon.h
    25829  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    25837  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
/aosp_15_r20/prebuilts/clang/host/linux-x86/clang-r536225/lib/clang/19/include/
  arm_neon.h
    25939  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    25947  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
/aosp_15_r20/prebuilts/clang-tools/linux-x86/lib64/clang/19/include/
  arm_neon.h
    25829  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    25837  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
/aosp_15_r20/prebuilts/clang/host/linux-x86/clang-r530567/lib/clang/19/include/
  arm_neon.h
    25829  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro
    25837  #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \  macro