// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert REQUANTIZATION == "RNDNU"
$assert DATATYPE == "QU8"
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert KERNEL_TILE >= 2
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_qu8_dwconv_minmax_rndnu_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__neon_mul8(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->rndnu_neon.kernel_zero_point);
  const uint16x8_t vkernel_zero_point16 = vmovl_u8(vkernel_zero_point);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  $if CHANNEL_TILE == 8:
    const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
    const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
  $else:
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
  do {
    $for K in range(KERNEL_TILE):
      const uint8_t* i${K} = input[${K}];
      assert(i${K} != NULL);
      if XNN_UNPREDICTABLE(i${K} != zero) {
        i${K} = (const uint8_t*) ((uintptr_t) i${K} + input_offset);
      }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);


    size_t c = channels;
    const void* w = weights;
    for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
      $for C in range(0, CHANNEL_TILE, 4):
        int32x4_t vacc${ABC[C:C+4]} = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);


      // Multiply-accumulate the 8-bit inputs and kernel taps, and accumulate per-channel
      // input sums for the kernel zero-point correction below.
      $for K in range(KERNEL_TILE):
        $for C in range(0, CHANNEL_TILE, 8):
          const uint8x8_t vi${K}x${ABC[C:C+8]} = vld1_u8(i${K}); i${K} += 8;
          const uint8x8_t vk${K}x${ABC[C:C+8]} = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);

        $for C in range(0, CHANNEL_TILE, 8):
          $if K == 0:
            uint16x8_t vprod${ABC[C:C+8]} = vmull_u8(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]});
          $else:
            vprod${ABC[C:C+8]} = vmull_u8(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]});
          $if KERNEL_TILE == 1:
            uint16x8_t vsum${ABC[0:8]} = vmovl_u8(vi${K}x${ABC[0:8]});
          $if K == 1:
            uint16x8_t vsum${ABC[C:C+8]} = vaddl_u8(vi0x${ABC[C:C+8]}, vi1x${ABC[C:C+8]});
          $elif K > 1:
            vsum${ABC[C:C+8]} = vaddw_u8(vsum${ABC[C:C+8]}, vi${K}x${ABC[C:C+8]});

        $for C in range(0, CHANNEL_TILE, 8):
          vacc${ABC[C:C+4]} = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc${ABC[C:C+4]}), vget_low_u16(vprod${ABC[C:C+8]})));
          vacc${ABC[C+4:C+8]} = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc${ABC[C+4:C+8]}), vget_high_u16(vprod${ABC[C:C+8]})));

      // Kernel zero-point correction: vacc -= kernel_zero_point * sum(inputs).
      $for C in range(0, CHANNEL_TILE, 8):
        vacc${ABC[C:C+4]} = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc${ABC[C:C+4]}), vget_low_u16(vsum${ABC[C:C+8]}), vget_low_u16(vkernel_zero_point16)));
        vacc${ABC[C+4:C+8]} = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc${ABC[C+4:C+8]}), vget_high_u16(vsum${ABC[C:C+8]}), vget_high_u16(vkernel_zero_point16)));

      // Requantize (RNDNU): pre-shift, saturating doubling multiply-high, rounding post-shift.
      $for C in range(0, CHANNEL_TILE, 4):
        vacc${ABC[C:C+4]} = vshlq_s32(vacc${ABC[C:C+4]}, vright_pre_shift);

      $for C in range(0, CHANNEL_TILE, 4):
        vacc${ABC[C:C+4]} = vqdmulhq_s32(vacc${ABC[C:C+4]}, vmultiplier);

      $for C in range(0, CHANNEL_TILE, 4):
        vacc${ABC[C:C+4]} = vrshlq_s32(vacc${ABC[C:C+4]}, vright_post_shift);

#if XNN_ARCH_ARM64
      $for C in range(0, CHANNEL_TILE, 8):
        const int16x8_t vacc${ABC[C:C+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${ABC[C:C+4]}), vacc${ABC[C+4:C+8]}), voutput_zero_point);

      $for C in range(0, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          uint8x16_t vout${ABC[C:C+16]} = vqmovun_high_s16(vqmovun_s16(vacc${ABC[C:C+8]}), vacc${ABC[C+8:C+16]});
        $else:
          uint8x8_t vout${ABC[C:C+8]} = vqmovun_s16(vacc${ABC[C:C+8]});
#else
      $for C in range(0, CHANNEL_TILE, 8):
        const int16x8_t vacc${ABC[C:C+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${ABC[C:C+4]}), vqmovn_s32(vacc${ABC[C+4:C+8]})), voutput_zero_point);

      $for C in range(0, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          uint8x16_t vout${ABC[C:C+16]} = vcombine_u8(vqmovun_s16(vacc${ABC[C:C+8]}), vqmovun_s16(vacc${ABC[C+8:C+16]}));
        $else:
          uint8x8_t vout${ABC[C:C+8]} = vqmovun_s16(vacc${ABC[C:C+8]});
#endif

      $for C in range(0, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          vout${ABC[C:C+16]} = vmaxq_u8(vout${ABC[C:C+16]}, voutput_min);
        $else:
          $if CHANNEL_TILE == 8:
            vout${ABC[C:C+8]} = vmax_u8(vout${ABC[C:C+8]}, voutput_min);
          $else:
            vout${ABC[C:C+8]} = vmax_u8(vout${ABC[C:C+8]}, vget_low_u8(voutput_min));

      $for C in range(0, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          vout${ABC[C:C+16]} = vminq_u8(vout${ABC[C:C+16]}, voutput_max);
        $else:
          $if CHANNEL_TILE == 8:
            vout${ABC[C:C+8]} = vmin_u8(vout${ABC[C:C+8]}, voutput_max);
          $else:
            vout${ABC[C:C+8]} = vmin_u8(vout${ABC[C:C+8]}, vget_low_u8(voutput_max));

      $for C in range(0, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          vst1q_u8(output, vout${ABC[C:C+16]}); output += 16;
        $else:
          vst1_u8(output, vout${ABC[C:C+8]}); output += 8;
    }
    // Handle the remaining channels (fewer than ${CHANNEL_TILE}).
    if XNN_UNLIKELY(c != 0) {
      $if CHANNEL_TILE > 8:
        const uint8_t* k = (const uint8_t*) ((const int32_t*) w + ${CHANNEL_TILE});
      ${"do " if CHANNEL_TILE > 8 else ""}{
        int32x4_t vacc${ABC[0:4]} = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc${ABC[4:8]} = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        // The remainder path subtracts the kernel zero point up front and accumulates
        // with signed 16-bit multiply-adds.
        $for K in range(KERNEL_TILE):
          $if CHANNEL_TILE > 8:
            const int16x8_t vi${K}x${ABC[0:8]} = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i${K}))); i${K} += 8;
          $else:
            const int16x8_t vi${K}x${ABC[0:8]} = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i${K})));
          $if CHANNEL_TILE > 8:
            $if K == 0:
              const int16x8_t vk${K}x${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(k), vkernel_zero_point)); k += 8;
            $else:
              const int16x8_t vk${K}x${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + ${K * CHANNEL_TILE - 8})), vkernel_zero_point));
          $else:
            $if K == 0:
              const int16x8_t vk${K}x${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(w), vkernel_zero_point));
            $else:
              const int16x8_t vk${K}x${ABC[0:8]} = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) ((const uint8_t*) w + ${K * CHANNEL_TILE})), vkernel_zero_point));

          vacc${ABC[0:4]} = vmlal_s16(vacc${ABC[0:4]}, vget_low_s16(vi${K}x${ABC[0:8]}), vget_low_s16(vk${K}x${ABC[0:8]}));
          vacc${ABC[4:8]} = vmlal_s16(vacc${ABC[4:8]}, vget_high_s16(vi${K}x${ABC[0:8]}), vget_high_s16(vk${K}x${ABC[0:8]}));

        vacc${ABC[0:4]} = vshlq_s32(vacc${ABC[0:4]}, vright_pre_shift);
        vacc${ABC[4:8]} = vshlq_s32(vacc${ABC[4:8]}, vright_pre_shift);

        vacc${ABC[0:4]} = vqdmulhq_s32(vacc${ABC[0:4]}, vmultiplier);
        vacc${ABC[4:8]} = vqdmulhq_s32(vacc${ABC[4:8]}, vmultiplier);

        vacc${ABC[0:4]} = vrshlq_s32(vacc${ABC[0:4]}, vright_post_shift);
        vacc${ABC[4:8]} = vrshlq_s32(vacc${ABC[4:8]}, vright_post_shift);

#if XNN_ARCH_ARM64
        const int16x8_t vacc${ABC[0:8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${ABC[0:4]}), vacc${ABC[4:8]}), voutput_zero_point);
        uint8x8_t vout${ABC[0:8]} = vqmovun_s16(vacc${ABC[0:8]});
#else
        const int16x8_t vacc${ABC[0:8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${ABC[0:4]}), vqmovn_s32(vacc${ABC[4:8]})), voutput_zero_point);
        uint8x8_t vout${ABC[0:8]} = vqmovun_s16(vacc${ABC[0:8]});
#endif

        $if CHANNEL_TILE == 8:
          vout${ABC[0:8]} = vmax_u8(vout${ABC[0:8]}, voutput_min);
          vout${ABC[0:8]} = vmin_u8(vout${ABC[0:8]}, voutput_max);
        $else:
          vout${ABC[0:8]} = vmax_u8(vout${ABC[0:8]}, vget_low_u8(voutput_min));
          vout${ABC[0:8]} = vmin_u8(vout${ABC[0:8]}, vget_low_u8(voutput_max));

        $if CHANNEL_TILE > 8:
          if XNN_LIKELY(c >= 8) {
            vst1_u8(output, vout${ABC[0:8]}); output += 8;
            c -= 8;
          } else {
            if (c & 4) {
              vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout${ABC[0:8]}), 0); output += 4;
              vout${ABC[0:8]} = vext_u8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
            }
            if (c & 2) {
              vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout${ABC[0:8]}), 0); output += 2;
              vout${ABC[0:8]} = vext_u8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
            }
            if (c & 1) {
              vst1_lane_u8(output, vout${ABC[0:8]}, 0); output += 1;
            }
            c = 0;
          }
        $else:
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout${ABC[0:8]}), 0); output += 4;
            vout${ABC[0:8]} = vext_u8(vout${ABC[0:8]}, vout${ABC[0:8]}, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout${ABC[0:8]}), 0); output += 2;
            vout${ABC[0:8]} = vext_u8(vout${ABC[0:8]}, vout${ABC[0:8]}, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout${ABC[0:8]}, 0); output += 1;
          }
      }${" while (c != 0);" if CHANNEL_TILE > 8 else ""}
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}