// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
$assert REQUANTIZATION in ["FP32", "RNDNU"]
$assert not CHANNELWISE or REQUANTIZATION == "FP32"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
$if REQUANTIZATION == "FP32" and ARMV8:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>


$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("neonv8" if REQUANTIZATION == "FP32" and ARMV8 else "neon")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$ISA = "neonv8" if ARMV8 else "neon"
void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2s4__${ISA}_${"mlal" if MLA else "mull"}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;
  $for M in range(1, MR):
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    $for N in range(0, NR, 4):
      int32x4_t vacc0x${ABC[N:N+4]} = vld1q_s32(w); w = (const int32_t*) w + 4;
    $for M in range(1, MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};

    size_t p = ks;
    do {
      $for M in range(MR):
        const int8_t* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      $if MLA:
        while (k >= 16 * sizeof(int8_t)) {
          $for M in range(MR):
            int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
            int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;

          $for K in range(4):
            $for N in range(0, NR, 4):
              const int8x8_t vb${ABC[N:N+4]}c${K}x0 = vld1_s8(w); w = (const int8_t*) w + 8;

          $for K in range(4):
            $for N in range(0, NR, 4):
              $for M in range(MR):
                int16x8_t vprod${M}x${ABC[N:N+4]}c${K} = vmull_s8(vb${ABC[N:N+4]}c${K}x0, va${M}x0);
              const int8x8_t vb${ABC[N:N+4]}c${K}x1 = vld1_s8(w); w = (const int8_t*) w + 8;
              $for M in range(MR):
                vprod${M}x${ABC[N:N+4]}c${K} = vmlal_s8(vprod${M}x${ABC[N:N+4]}c${K}, vb${ABC[N:N+4]}c${K}x1, va${M}x1);
              $for M in range(MR):
                vacc${M}x${ABC[N:N+4]} = vpadalq_s16(vacc${M}x${ABC[N:N+4]}, vprod${M}x${ABC[N:N+4]}c${K});
            $if K + 1 != 4:
              $for M in range(MR):
                va${M}x0 = vext_s8(va${M}x0, va${M}x0, 2);
                va${M}x1 = vext_s8(va${M}x1, va${M}x1, 2);

          k -= 16 * sizeof(int8_t);
        }
      ${"if (k != 0)" if MLA else "do"} {
        $for M in range(MR):
          int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;

        $for K in range(4):
          $for N in range(0, NR, 4):
            const int8x8_t vb${ABC[N:N+4]}c${K}x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        $for K in range(4):
          $for N in range(0, NR, 4):
            $for M in range(MR):
              int16x8_t vprod${M}x${ABC[N:N+4]}c${K} = vmull_s8(vb${ABC[N:N+4]}c${K}x0, va${M}x0);
            $for M in range(MR):
              vacc${M}x${ABC[N:N+4]} = vpadalq_s16(vacc${M}x${ABC[N:N+4]}, vprod${M}x${ABC[N:N+4]}c${K});
          $if K + 1 != 4:
            $for M in range(MR):
              va${M}x0 = vext_s8(va${M}x0, va${M}x0, 2);

        $if not MLA:
          k -= 8 * sizeof(int8_t);
      }${"" if MLA else " while (k != 0);"}

      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    $if REQUANTIZATION == "RNDNU":
      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
    $elif REQUANTIZATION == "FP32":
      $for M in range(MR):
        $for N in range(0, NR, 4):
          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});

      $if CHANNELWISE:
        $for N in range(0, NR, 4):
          const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
          $for M in range(MR):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
      $else:
        const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);

      $if ARMV8:
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
      $else:
        const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));

        const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vqsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);

    $if REQUANTIZATION != "FP32" or ARMV8:
      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]});

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]}));

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif

    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $if NR == 8 and MR == 1:
      const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $else:
      const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

    if (nc >= ${NR}) {
      $for M in reversed(range(MR)):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

      $for M in reversed(range(MR)):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= ${NR};
    } else {
      $if NR == 16:
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
      if (nc & 4) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u32((void*) c${M}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
            vst1q_lane_u32((void*) c${M-1}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32((void*) c${M}, vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u16((void*) c${M}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
            vst1q_lane_u16((void*) c${M-1}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16((void*) c${M}, vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
      if (nc & 1) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}