// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert REQUANTIZATION in ["FP32", "RNDNU"]
$assert BATCH_TILE % (16 if LD128 else 8) == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

$if REQUANTIZATION == "FP32" and ARMV8:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>


$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("neonv8" if ARMV8 else "neon")
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$XINT8X8_T = {"QS8": "int8x8_t", "QU8": "uint8x8_t"}[DATATYPE]
$XINT8X16_T = {"QS8": "int8x16_t", "QU8": "uint8x16_t"}[DATATYPE]
$VLD1_X8 = {"QS8": "vld1_s8", "QU8": "vld1_u8"}[DATATYPE]
$VLD1Q_X8 = {"QS8": "vld1q_s8", "QU8": "vld1q_u8"}[DATATYPE]
$VLD1_DUP_X8 = {"QS8": "vld1_dup_s8", "QU8": "vld1_dup_u8"}[DATATYPE]
$VLD1Q_DUP_X8 = {"QS8": "vld1q_dup_s8", "QU8": "vld1q_dup_u8"}[DATATYPE]
$VST1_LANE_X8 = {"QS8": "vst1_lane_s8", "QU8": "vst1_lane_u8"}[DATATYPE]
$VST1_X8 = {"QS8": "vst1_s8", "QU8": "vst1_u8"}[DATATYPE]
$VST1Q_X8 = {"QS8": "vst1q_s8", "QU8": "vst1q_u8"}[DATATYPE]
$VMIN_X8 = {"QS8": "vmin_s8", "QU8": "vmin_u8"}[DATATYPE]
$VMAX_X8 = {"QS8": "vmax_s8", "QU8": "vmax_u8"}[DATATYPE]
$VMINQ_X8 = {"QS8": "vminq_s8", "QU8": "vminq_u8"}[DATATYPE]
$VMAXQ_X8 = {"QS8": "vmaxq_s8", "QU8": "vmaxq_u8"}[DATATYPE]
$VQMOVXN_S16 = {"QS8": "vqmovn_s16", "QU8": "vqmovun_s16"}[DATATYPE]
$VQMOVXN_HIGH_S16 = {"QS8": "vqmovn_high_s16", "QU8": "vqmovun_high_s16"}[DATATYPE]
$VEXT_X8 = {"QS8": "vext_s8", "QU8": "vext_u8"}[DATATYPE]
$VGET_LOW_X8 = {"QS8": "vget_low_s8", "QU8": "vget_low_u8"}[DATATYPE]
$VCOMBINE_X8 = {"QS8": "vcombine_s8", "QU8": "vcombine_u8"}[DATATYPE]
$VREINTERPRET_U32_X8 = {"QS8": "vreinterpret_u32_s8", "QU8": "vreinterpret_u32_u8"}[DATATYPE]
$VREINTERPRET_U16_X8 = {"QS8": "vreinterpret_u16_s8", "QU8": "vreinterpret_u16_u8"}[DATATYPE]
void xnn_${DATATYPE.lower()}_vmulc_minmax_${REQUANTIZATION.lower()}_ukernel__${"neonv8" if ARMV8 else "neon"}_${"ld128" if LD128 else "ld64"}_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* input_a,
    const ${XINT8_T}* input_b,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  $if LD128:
    #if XNN_ARCH_ARM64
      const ${XINT8X16_T} va_zero_point = ${VLD1Q_DUP_X8}(params->${PARAMS_STRUCT}.a_zero_point);
    #else
      const ${XINT8X8_T} va_zero_point = ${VLD1_DUP_X8}(params->${PARAMS_STRUCT}.a_zero_point);
    #endif
  $else:
    const ${XINT8X8_T} va_zero_point = ${VLD1_DUP_X8}(params->${PARAMS_STRUCT}.a_zero_point);
  $if REQUANTIZATION == "FP32":
    const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
    $if ARMV8:
      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
    $else:
      const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
      const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  $elif REQUANTIZATION == "RNDNU":
    const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  $if BATCH_TILE > 8:
    const ${XINT8X16_T} voutput_min = ${VLD1Q_DUP_X8}(&params->${PARAMS_STRUCT}.output_min);
    const ${XINT8X16_T} voutput_max = ${VLD1Q_DUP_X8}(&params->${PARAMS_STRUCT}.output_max);
  $else:
    const ${XINT8X8_T} voutput_min = ${VLD1_DUP_X8}(&params->${PARAMS_STRUCT}.output_min);
    const ${XINT8X8_T} voutput_max = ${VLD1_DUP_X8}(&params->${PARAMS_STRUCT}.output_max);

  // input_b points to a single broadcast element: load it and subtract its zero point once, outside the loop.
  const ${XINT8X8_T} vb = ${VLD1_DUP_X8}(input_b);
  const ${XINT8X8_T} vb_zero_point = ${VLD1_DUP_X8}(params->${PARAMS_STRUCT}.b_zero_point);
  $if DATATYPE == "QU8":
    const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
  $else:
    const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
  // Main loop: process ${BATCH_TILE} elements per iteration.
  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
    $if LD128:
      $for N in range(0, BATCH_TILE, 16):
        const ${XINT8X16_T} va${ABC[N:N+16]} = ${VLD1Q_X8}(input_a); input_a += 16;

      #if XNN_ARCH_ARM64
        $for N in range(0, BATCH_TILE, 16):
          $if DATATYPE == "QU8":
            const int16x8_t vxa${ABC[N:N+8]} = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va${ABC[N:N+16]}), vget_low_u8(va_zero_point)));
            const int16x8_t vxa${ABC[N+8:N+16]} = vreinterpretq_s16_u16(vsubl_high_u8(va${ABC[N:N+16]}, va_zero_point));
          $else:
            const int16x8_t vxa${ABC[N:N+8]} = vsubl_s8(vget_low_s8(va${ABC[N:N+16]}), vget_low_s8(va_zero_point));
            const int16x8_t vxa${ABC[N+8:N+16]} = vsubl_high_s8(va${ABC[N:N+16]}, va_zero_point);
      #else  // !XNN_ARCH_ARM64
        $for N in range(0, BATCH_TILE, 16):
          $if DATATYPE == "QU8":
            const int16x8_t vxa${ABC[N:N+8]} = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va${ABC[N:N+16]}), va_zero_point));
            const int16x8_t vxa${ABC[N+8:N+16]} = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va${ABC[N:N+16]}), va_zero_point));
          $else:
            const int16x8_t vxa${ABC[N:N+8]} = vsubl_s8(vget_low_s8(va${ABC[N:N+16]}), va_zero_point);
            const int16x8_t vxa${ABC[N+8:N+16]} = vsubl_s8(vget_high_s8(va${ABC[N:N+16]}), va_zero_point);
      #endif  // XNN_ARCH_ARM64
    $else:
      $for N in range(0, BATCH_TILE, 8):
        const ${XINT8X8_T} va${ABC[N:N+8]} = ${VLD1_X8}(input_a); input_a += 8;

      $for N in range(0, BATCH_TILE, 8):
        $if DATATYPE == "QU8":
          const int16x8_t vxa${ABC[N:N+8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[N:N+8]}, va_zero_point));
        $else:
          const int16x8_t vxa${ABC[N:N+8]} = vsubl_s8(va${ABC[N:N+8]}, va_zero_point);

    $for N in range(0, BATCH_TILE, 8):
      int32x4_t vacc${ABC[N:N+4]} = vmull_s16(vget_low_s16(vxa${ABC[N:N+8]}), vget_low_s16(vxb));
      int32x4_t vacc${ABC[N+4:N+8]} = vmull_s16(vget_high_s16(vxa${ABC[N:N+8]}), vget_high_s16(vxb));

    $if REQUANTIZATION == "FP32":
      // FP32 requantization: scale in floating point, then round back to int32.
      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vfpacc${ABC[N:N+4]} = vcvtq_f32_s32(vacc${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vfpacc${ABC[N:N+4]} = vmulq_f32(vfpacc${ABC[N:N+4]}, vscale);

      $if ARMV8:
        $for N in range(0, BATCH_TILE, 4):
          vacc${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${ABC[N:N+4]});
      $else:
        // Round via the magic-bias trick; subtracting (magic bias - output zero point)
        // removes the bias and folds in the output zero point in one step.
        $for N in range(0, BATCH_TILE, 4):
          vacc${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${ABC[N:N+4]}, vmagic_bias));

        $for N in range(0, BATCH_TILE, 4):
          vacc${ABC[N:N+4]} = vqsubq_s32(vacc${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);
    $elif REQUANTIZATION == "RNDNU":
      // RNDNU requantization: saturating pre-shift, saturating doubling multiply-high, rounding post-shift.
      $for N in range(0, BATCH_TILE, 4):
        vacc${ABC[N:N+4]} = vqshlq_s32(vacc${ABC[N:N+4]}, vleft_pre_shift);

      $for N in range(0, BATCH_TILE, 4):
        vacc${ABC[N:N+4]} = vqdmulhq_s32(vacc${ABC[N:N+4]}, vmultiplier);

      $for N in range(0, BATCH_TILE, 4):
        vacc${ABC[N:N+4]} = vrshlq_s32(vacc${ABC[N:N+4]}, vleft_post_shift);

    #if XNN_ARCH_ARM64
      $for N in range(0, BATCH_TILE, 8):
        int16x8_t vacc${ABC[N:N+8]} = vqmovn_high_s32(vqmovn_s32(vacc${ABC[N:N+4]}), vacc${ABC[N+4:N+8]});
    #else
      $for N in range(0, BATCH_TILE, 8):
        int16x8_t vacc${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${ABC[N:N+4]}), vqmovn_s32(vacc${ABC[N+4:N+8]}));
    #endif

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = vqaddq_s16(vacc${ABC[N:N+8]}, voutput_zero_point);

    #if XNN_ARCH_ARM64
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          ${XINT8X16_T} vout${ABC[N:N+16]} = ${VQMOVXN_HIGH_S16}(${VQMOVXN_S16}(vacc${ABC[N:N+8]}), vacc${ABC[N+8:N+16]});
        $else:
          ${XINT8X8_T} vout${ABC[N:N+8]} = ${VQMOVXN_S16}(vacc${ABC[N:N+8]});
    #else
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          ${XINT8X16_T} vout${ABC[N:N+16]} = ${VCOMBINE_X8}(${VQMOVXN_S16}(vacc${ABC[N:N+8]}), ${VQMOVXN_S16}(vacc${ABC[N+8:N+16]}));
        $else:
          ${XINT8X8_T} vout${ABC[N:N+8]} = ${VQMOVXN_S16}(vacc${ABC[N:N+8]});
    #endif

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = ${VMAXQ_X8}(vout${ABC[N:N+16]}, voutput_min);
      $elif BATCH_TILE > 8:
        vout${ABC[N:N+8]} = ${VMAX_X8}(vout${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_min));
      $else:
        vout${ABC[N:N+8]} = ${VMAX_X8}(vout${ABC[N:N+8]}, voutput_min);

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = ${VMINQ_X8}(vout${ABC[N:N+16]}, voutput_max);
      $elif BATCH_TILE > 8:
        vout${ABC[N:N+8]} = ${VMIN_X8}(vout${ABC[N:N+8]}, ${VGET_LOW_X8}(voutput_max));
      $else:
        vout${ABC[N:N+8]} = ${VMIN_X8}(vout${ABC[N:N+8]}, voutput_max);

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        ${VST1Q_X8}(output, vout${ABC[N:N+16]}); output += 16;
      $else:
        ${VST1_X8}(output, vout${ABC[N:N+8]}); output += 8;
  }
  // Remainder: handle the final elements (fewer than ${BATCH_TILE}).
  if XNN_UNLIKELY(n != 0) {
    ${"do " if BATCH_TILE > 8 else ""}{
      $if BATCH_TILE > 8:
        const ${XINT8X8_T} va${ABC[0:8]} = ${VLD1_X8}(input_a); input_a += 8;
      $else:
        const ${XINT8X8_T} va${ABC[0:8]} = ${VLD1_X8}(input_a);

      $if LD128:
        $if DATATYPE == "QU8":
          #if XNN_ARCH_ARM64
            const int16x8_t vxa${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[0:8]}, vget_low_u8(va_zero_point)));
          #else  // !XNN_ARCH_ARM64
            const int16x8_t vxa${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[0:8]}, va_zero_point));
          #endif  // XNN_ARCH_ARM64
        $else:
          #if XNN_ARCH_ARM64
            const int16x8_t vxa${ABC[0:8]} = vsubl_s8(va${ABC[0:8]}, vget_low_s8(va_zero_point));
          #else  // !XNN_ARCH_ARM64
            const int16x8_t vxa${ABC[0:8]} = vsubl_s8(va${ABC[0:8]}, va_zero_point);
          #endif  // XNN_ARCH_ARM64
      $else:
        $if DATATYPE == "QU8":
          const int16x8_t vxa${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(va${ABC[0:8]}, va_zero_point));
        $else:
          const int16x8_t vxa${ABC[0:8]} = vsubl_s8(va${ABC[0:8]}, va_zero_point);

      int32x4_t vacc${ABC[0:4]} = vmull_s16(vget_low_s16(vxa${ABC[0:8]}), vget_low_s16(vxb));
      int32x4_t vacc${ABC[4:8]} = vmull_s16(vget_high_s16(vxa${ABC[0:8]}), vget_high_s16(vxb));

      $if REQUANTIZATION == "FP32":
        float32x4_t vfpacc${ABC[0:4]} = vcvtq_f32_s32(vacc${ABC[0:4]});
        float32x4_t vfpacc${ABC[4:8]} = vcvtq_f32_s32(vacc${ABC[4:8]});

        vfpacc${ABC[0:4]} = vmulq_f32(vfpacc${ABC[0:4]}, vscale);
        vfpacc${ABC[4:8]} = vmulq_f32(vfpacc${ABC[4:8]}, vscale);

        $if ARMV8:
          vacc${ABC[0:4]} = vcvtnq_s32_f32(vfpacc${ABC[0:4]});
          vacc${ABC[4:8]} = vcvtnq_s32_f32(vfpacc${ABC[4:8]});
        $else:
          vacc${ABC[0:4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${ABC[0:4]}, vmagic_bias));
          vacc${ABC[4:8]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${ABC[4:8]}, vmagic_bias));

          vacc${ABC[0:4]} = vqsubq_s32(vacc${ABC[0:4]}, vmagic_bias_less_output_zero_point);
          vacc${ABC[4:8]} = vqsubq_s32(vacc${ABC[4:8]}, vmagic_bias_less_output_zero_point);
      $elif REQUANTIZATION == "RNDNU":
        vacc${ABC[0:4]} = vqshlq_s32(vacc${ABC[0:4]}, vleft_pre_shift);
        vacc${ABC[4:8]} = vqshlq_s32(vacc${ABC[4:8]}, vleft_pre_shift);

        vacc${ABC[0:4]} = vqdmulhq_s32(vacc${ABC[0:4]}, vmultiplier);
        vacc${ABC[4:8]} = vqdmulhq_s32(vacc${ABC[4:8]}, vmultiplier);

        vacc${ABC[0:4]} = vrshlq_s32(vacc${ABC[0:4]}, vleft_post_shift);
        vacc${ABC[4:8]} = vrshlq_s32(vacc${ABC[4:8]}, vleft_post_shift);

      #if XNN_ARCH_ARM64
        int16x8_t vacc${ABC[0:8]} = vqmovn_high_s32(vqmovn_s32(vacc${ABC[0:4]}), vacc${ABC[4:8]});
      #else
        int16x8_t vacc${ABC[0:8]} = vcombine_s16(vqmovn_s32(vacc${ABC[0:4]}), vqmovn_s32(vacc${ABC[4:8]}));
      #endif

      $if REQUANTIZATION != "FP32" or ARMV8:
        vacc${ABC[0:8]} = vqaddq_s16(vacc${ABC[0:8]}, voutput_zero_point);

      ${XINT8X8_T} vout${ABC[0:8]} = ${VQMOVXN_S16}(vacc${ABC[0:8]});

      $if BATCH_TILE > 8:
        vout${ABC[0:8]} = ${VMAX_X8}(vout${ABC[0:8]}, ${VGET_LOW_X8}(voutput_min));
        vout${ABC[0:8]} = ${VMIN_X8}(vout${ABC[0:8]}, ${VGET_LOW_X8}(voutput_max));
        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
          ${VST1_X8}(output, vout${ABC[0:8]}); output += 8;
          n -= 8 * sizeof(${XINT8_T});
        } else {
          if (n & (4 * sizeof(${XINT8_T}))) {
            vst1_lane_u32((void*) output, ${VREINTERPRET_U32_X8}(vout${ABC[0:8]}), 0); output += 4;
            vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
          }
          if (n & (2 * sizeof(${XINT8_T}))) {
            vst1_lane_u16((void*) output, ${VREINTERPRET_U16_X8}(vout${ABC[0:8]}), 0); output += 2;
            vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
          }
          if (n & (1 * sizeof(${XINT8_T}))) {
            ${VST1_LANE_X8}(output, vout${ABC[0:8]}, 0);
          }
          n = 0;
        }
      $else:
        vout${ABC[0:8]} = ${VMAX_X8}(vout${ABC[0:8]}, voutput_min);
        vout${ABC[0:8]} = ${VMIN_X8}(vout${ABC[0:8]}, voutput_max);
        if (n & (4 * sizeof(${XINT8_T}))) {
          vst1_lane_u32((void*) output, ${VREINTERPRET_U32_X8}(vout${ABC[0:8]}), 0); output += 4;
          vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
        }
        if (n & (2 * sizeof(${XINT8_T}))) {
          vst1_lane_u16((void*) output, ${VREINTERPRET_U16_X8}(vout${ABC[0:8]}), 0); output += 2;
          vout${ABC[0:8]} = ${VEXT_X8}(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
        }
        if (n & (1 * sizeof(${XINT8_T}))) {
          ${VST1_LANE_X8}(output, vout${ABC[0:8]}, 0);
        }
    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
  }
}