/aosp_15_r20/external/XNNPACK/src/qs8-gemm/gen/
(in every match, in both this listing and the qs8-igemm one below, vsum1xAB is a local variable of the kernel function shown in parentheses)

2x16c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c16__neon_mlal)
  220: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  279: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

2x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mull)
  188: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  248: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

2x16c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c8__neon_mlal)
  327: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  387: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

3x16c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c16__neon_mlal)
  291: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  362: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

3x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mull)
  243: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  315: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

4x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mull)
  298: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  382: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

3x16c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c8__neon_mlal)
  432: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  504: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

4x16c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c16__neon_mlal)
  362: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  445: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

2x16c4s2-minmax-rndnu-neon-mull.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull)
  188: const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));

4x16c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c8__neon_mlal)
  537: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  621: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

2x16c4-minmax-rndnu-neon-mull-ld1r.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r)
  245: const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));

2x16c4-minmax-rndnu-neon-mull-ld2r.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r)
  243: const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));

2x16c4-minmax-rndnu-neon-mull-dup.c  (xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_dup)
  243: const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
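The paired matches in the c8/c16 files above (and in the qs8-igemm listing below) are the horizontal reduction of the per-channel accumulators for output channels 0xA and 0xB of row 1; the vpaddq_s32 line and the vpadd_s32 line appear to be the 64-bit and 32-bit NEON paths of the same step. The sketch below is a minimal stand-alone illustration of that pattern, not the generated XNNPACK code: the helper name reduce_channels_89AB and the __aarch64__ guard are assumptions, and the four int32x4_t accumulators stand in for vacc1x8 through vacc1x11.

/* Minimal illustrative sketch, not the generated XNNPACK code.  Assumes four
 * int32x4_t accumulators, one per output channel 8/9/A/B, and a hypothetical
 * helper name.  The AArch64 branch uses the vpaddq_s32 form matched above;
 * the fallback uses the vpsum/vpadd_s32 chain matched alongside it. */
#include <arm_neon.h>

static inline int32x4_t reduce_channels_89AB(
    int32x4_t vacc8, int32x4_t vacc9, int32x4_t vaccA, int32x4_t vaccB)
{
#if defined(__aarch64__)
  /* Each vpaddq_s32 leaves two partial sums per channel:
   * vsumAB = {A0+A1, A2+A3, B0+B1, B2+B3}. */
  const int32x4_t vsum89 = vpaddq_s32(vacc8, vacc9);
  const int32x4_t vsumAB = vpaddq_s32(vaccA, vaccB);
  /* A second pairwise add finishes the reduction: {sum8, sum9, sumA, sumB}. */
  return vpaddq_s32(vsum89, vsumAB);
#else
  /* AArch32 NEON has no vpaddq_s32: fold each accumulator to 64 bits first. */
  const int32x2_t vpsum8 = vadd_s32(vget_low_s32(vacc8), vget_high_s32(vacc8));
  const int32x2_t vpsum9 = vadd_s32(vget_low_s32(vacc9), vget_high_s32(vacc9));
  const int32x2_t vpsumA = vadd_s32(vget_low_s32(vaccA), vget_high_s32(vaccA));
  const int32x2_t vpsumB = vadd_s32(vget_low_s32(vaccB), vget_high_s32(vaccB));
  const int32x2_t vsum89 = vpadd_s32(vpsum8, vpsum9);  /* {sum8, sum9} */
  const int32x2_t vsumAB = vpadd_s32(vpsumA, vpsumB);  /* {sumA, sumB} */
  return vcombine_s32(vsum89, vsumAB);
#endif
}

Two rounds of pairwise addition collapse eight lanes per channel pair into one sum per channel, which is why every 16-channel kernel in the listing carries an instance of this pattern per row.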
/aosp_15_r20/external/XNNPACK/src/qs8-igemm/gen/

2x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mull)
  204: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  264: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

2x16c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c16__neon_mlal)
  236: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  295: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

2x16c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal)
  343: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  403: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

3x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mull)
  261: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  333: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

3x16c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c16__neon_mlal)
  309: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  380: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

4x16c8-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mull)
  318: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  402: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

4x16c16-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c16__neon_mlal)
  382: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  465: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

3x16c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c8__neon_mlal)
  450: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  522: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

4x16c8-minmax-rndnu-neon-mlal.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c8__neon_mlal)
  557: const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
  641: const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);

2x16c4s2-minmax-rndnu-neon-mull.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull)
  204: const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));

2x16c4-minmax-rndnu-neon-mull-ld2r.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld2r)
  258: const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));

2x16c4-minmax-rndnu-neon-mull-ld1r.c  (xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4__neon_mull_ld1r)
  260: const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
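The single match in each c4 and c4s2 file reduces one accumulator that already interleaves two output channels. The sketch below illustrates that step under an assumed lane layout (lanes 0-1 of the accumulator hold partial sums for channel A, lanes 2-3 for channel B, which is what a low/high vpadd_s32 reduction implies); the helper name is hypothetical, not an XNNPACK symbol.

/* Minimal sketch for the c4/c4s2 matches.  Assumes lanes 0-1 of vaccAB are
 * channel-A partial sums and lanes 2-3 are channel-B partial sums. */
#include <arm_neon.h>

static inline int32x2_t reduce_interleaved_AB(int32x4_t vaccAB)
{
  /* Pairwise add of the low and high halves gives {A0+A1, B0+B1}, i.e.
   * {sumA, sumB}.  A vcombine_s32 with the matching 8/9 pair would then
   * rebuild the four-channel int32x4_t that feeds rndnu requantization. */
  return vpadd_s32(vget_low_s32(vaccAB), vget_high_s32(vaccAB));
}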