// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["S8", "U8"]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert PIXEL_TILE == 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>


$XINT8_T = {"S8": "int8_t", "U8": "uint8_t"}[DATATYPE]
$WASM_X16X8_LOAD_8X8 = {"S8": "wasm_i16x8_load8x8", "U8": "wasm_u16x8_load8x8"}[DATATYPE]
$WASM_X32X4_SHR = {"S8": "wasm_i32x4_shr", "U8": "wasm_u32x4_shr"}[DATATYPE]
$WASM_X8X16_NARROW_I16X8 = {"S8": "wasm_i8x16_narrow_i16x8", "U8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
void xnn_${DATATYPE.lower()}_ibilinear_ukernel__wasmsimd_mul32_c${CHANNEL_TILE}${"" if PIXEL_TILE == 1 else "x%d" % PIXEL_TILE}(
    size_t output_pixels,
    size_t channels,
    const ${XINT8_T}**restrict input,
    size_t input_offset,
    const int16_t*restrict weights,
    ${XINT8_T}*restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const ${XINT8_T}* i0 = (const ${XINT8_T}*) ((uintptr_t) input[0] + input_offset);
    const ${XINT8_T}* i1 = (const ${XINT8_T}*) ((uintptr_t) input[1] + input_offset);
    const ${XINT8_T}* i2 = (const ${XINT8_T}*) ((uintptr_t) input[2] + input_offset);
    const ${XINT8_T}* i3 = (const ${XINT8_T}*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Each output pixel carries two Q11 fixed-point weights:
    // alphah (horizontal blend) and alphav (vertical blend).
    const v128_t valphah = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights));
    const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
    weights += 2;

    // 2^21: rounding term for the final arithmetic shift of the Q22 accumulator by 22.
    const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);

    size_t c = channels;
    $if CHANNEL_TILE > 8:
      for (; c >= ${CHANNEL_TILE} * sizeof(${XINT8_T}); c -= ${CHANNEL_TILE} * sizeof(${XINT8_T})) {
        const v128_t vtl${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i0);
        const v128_t vtr${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i1);
        const v128_t vbl${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i2);
        const v128_t vbr${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i3);
        $for C in range(8, CHANNEL_TILE, 8):
          const v128_t vtl${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i0 + ${C});
          const v128_t vtr${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i1 + ${C});
          const v128_t vbl${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i2 + ${C});
          const v128_t vbr${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i3 + ${C});
        i0 += ${CHANNEL_TILE};
        i1 += ${CHANNEL_TILE};
        i2 += ${CHANNEL_TILE};
        i3 += ${CHANNEL_TILE};

        $for C in range(0, CHANNEL_TILE, 8):
          const v128_t vtd${ABC[C:C+8]} = wasm_i16x8_sub(vtr${ABC[C:C+8]}, vtl${ABC[C:C+8]});
          const v128_t vbd${ABC[C:C+8]} = wasm_i16x8_sub(vbr${ABC[C:C+8]}, vbl${ABC[C:C+8]});
          const v128_t vdl${ABC[C:C+8]} = wasm_i16x8_sub(vbl${ABC[C:C+8]}, vtl${ABC[C:C+8]});
          const v128_t vdd${ABC[C:C+8]} = wasm_i16x8_sub(vbd${ABC[C:C+8]}, vtd${ABC[C:C+8]});

        $for C in range(0, CHANNEL_TILE, 8):
          const v128_t vt${ABC[C:C+4]} = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vtl${ABC[C:C+8]}), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vtd${ABC[C:C+8]}), valphah));
          const v128_t vt${ABC[C+4:C+8]} = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vtl${ABC[C:C+8]}), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vtd${ABC[C:C+8]}), valphah));
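          // vd: horizontally interpolated bottom-minus-top difference, kept in
          // Q11 so the later multiply by the Q11 alphav produces a Q22 result.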
          const v128_t vd${ABC[C:C+4]} = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vdl${ABC[C:C+8]}), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vdd${ABC[C:C+8]}), valphah));
          const v128_t vd${ABC[C+4:C+8]} = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vdl${ABC[C:C+8]}), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vdd${ABC[C:C+8]}), valphah));

        $for C in range(0, CHANNEL_TILE, 4):
          v128_t vacc${ABC[C:C+4]} = wasm_i32x4_mul(vd${ABC[C:C+4]}, valphav);

        $for C in range(0, CHANNEL_TILE, 4):
          vacc${ABC[C:C+4]} = wasm_i32x4_add(wasm_i32x4_shl(vt${ABC[C:C+4]}, 11), vacc${ABC[C:C+4]});

        $for C in range(0, CHANNEL_TILE, 4):
          vacc${ABC[C:C+4]} = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc${ABC[C:C+4]}, vrounding), 22);

        $for C in range(0, CHANNEL_TILE, 8):
          const v128_t vacc${ABC[C:C+8]} = wasm_i16x8_narrow_i32x4(vacc${ABC[C:C+4]}, vacc${ABC[C+4:C+8]});

        $for C in range(0, CHANNEL_TILE, 16):
          $if C + 8 < CHANNEL_TILE:
            const v128_t vo${ABC[C:C+16]} = ${WASM_X8X16_NARROW_I16X8}(vacc${ABC[C:C+8]}, vacc${ABC[C+8:C+16]});
          $else:
            const v128_t vo${ABC[C:C+8]} = ${WASM_X8X16_NARROW_I16X8}(vacc${ABC[C:C+8]}, vacc${ABC[C:C+8]});

        wasm_v128_store(output, vo${ABC[0:16]});
        $for C in range(16, CHANNEL_TILE, 16):
          $if C + 8 < CHANNEL_TILE:
            wasm_v128_store(output + ${C}, vo${ABC[C:C+16]});
          $else:
            *((double*) (output + ${C})) = wasm_f64x2_extract_lane(vo${ABC[C:C+8]}, 0);
        output += ${CHANNEL_TILE};
      }
    for (; c >= 8 * sizeof(${XINT8_T}); c -= 8 * sizeof(${XINT8_T})) {
      const v128_t vtl01234567 = ${WASM_X16X8_LOAD_8X8}(i0);
      i0 += 8;
      const v128_t vtr01234567 = ${WASM_X16X8_LOAD_8X8}(i1);
      i1 += 8;
      const v128_t vbl01234567 = ${WASM_X16X8_LOAD_8X8}(i2);
      i2 += 8;
      const v128_t vbr01234567 = ${WASM_X16X8_LOAD_8X8}(i3);
      i3 += 8;

      const v128_t vtd01234567 = wasm_i16x8_sub(vtr01234567, vtl01234567);
      const v128_t vbd01234567 = wasm_i16x8_sub(vbr01234567, vbl01234567);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vdd01234567 = wasm_i16x8_sub(vbd01234567, vtd01234567);

      const v128_t vt0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vtd01234567), valphah));
      const v128_t vt4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vtd01234567), valphah));
      const v128_t vd0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vdd01234567), valphah));
      const v128_t vd4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vdd01234567), valphah));

      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);

      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      vacc0123 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc0123, vrounding), 22);
      vacc4567 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc4567, vrounding), 22);

      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);

      const v128_t vo01234567 = ${WASM_X8X16_NARROW_I16X8}(vacc01234567, vacc01234567);

      *((double*) output) = wasm_f64x2_extract_lane(vo01234567, 0);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
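      // Remainder of 1-7 channels: compute a full 8-channel result (the loads
      // may read past the end of the row, which XNN_OOB_READS declares safe)
      // and store it with the 4-, 2-, and 1-byte tail stores below.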
      const v128_t vtl01234567 = ${WASM_X16X8_LOAD_8X8}(i0);
      const v128_t vtr01234567 = ${WASM_X16X8_LOAD_8X8}(i1);
      const v128_t vbl01234567 = ${WASM_X16X8_LOAD_8X8}(i2);
      const v128_t vbr01234567 = ${WASM_X16X8_LOAD_8X8}(i3);

      const v128_t vtd01234567 = wasm_i16x8_sub(vtr01234567, vtl01234567);
      const v128_t vbd01234567 = wasm_i16x8_sub(vbr01234567, vbl01234567);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vdd01234567 = wasm_i16x8_sub(vbd01234567, vtd01234567);

      const v128_t vt0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vtd01234567), valphah));
      const v128_t vt4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vtd01234567), valphah));
      const v128_t vd0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vdd01234567), valphah));
      const v128_t vd4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vdd01234567), valphah));

      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);

      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      vacc0123 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc0123, vrounding), 22);
      vacc4567 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc4567, vrounding), 22);

      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);

      v128_t vo01234567 = ${WASM_X8X16_NARROW_I16X8}(vacc01234567, vacc01234567);

      if (c & (4 * sizeof(${XINT8_T}))) {
        *((float*) output) = wasm_f32x4_extract_lane(vo01234567, 0);
        output += 4;
        vo01234567 = wasm_u64x2_shr(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) wasm_i32x4_extract_lane(vo01234567, 0);
      if (c & (2 * sizeof(${XINT8_T}))) {
        *((uint16_t*) output) = (uint16_t) vo0123;
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(${XINT8_T}))) {
        *output++ = (uint8_t) vo0123;
      }
    }

    output = (${XINT8_T}*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}