// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 8
$assert BATCH_TILE == 8 or BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$WASM_X16X8_LOAD8X8 = {"QS8": "wasm_i16x8_load8x8", "QU8": "wasm_u16x8_load8x8"}[DATATYPE]
$WASM_I16X8_Q15MULR = "__builtin_wasm_relaxed_q15mulr_s_i16x8" if RELAXED else "wasm_i16x8_q15mulr_sat"
$WASM_X8X16_NARROW_I16X8 = {"QS8": "wasm_i8x16_narrow_i16x8", "QU8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
$ISA = "wasmrelaxedsimd" if RELAXED else "wasmsimd"
void xnn_${DATATYPE.lower()}_vlrelu_ukernel__${ISA}_x86_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    ${XINT8_T}* y,
    const union xnn_${DATATYPE.lower()}_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
  const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
  const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
  const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      v128_t vacc${ABC[0]} = ${WASM_X16X8_LOAD8X8}(x);
      $for N in range(1, 2*SIMD_TILE):
        v128_t vacc${ABC[N]} = ${WASM_X16X8_LOAD8X8}(x + ${N * 8});
      x += ${BATCH_TILE};

      $for N in range(2*SIMD_TILE):
        v128_t vmultiplier${ABC[N]} = wasm_i16x8_gt(vacc${ABC[N]}, vinput_zero_point);
        vacc${ABC[N]} = wasm_i16x8_sub(vinput_zero_point, vacc${ABC[N]});

      $for N in range(2*SIMD_TILE):
        vmultiplier${ABC[N]} = wasm_v128_and(vmultiplier${ABC[N]}, vmultiplier_diff);
        vacc${ABC[N]} = wasm_i16x8_shl(vacc${ABC[N]}, 7);
        vmultiplier${ABC[N]} = wasm_v128_xor(vmultiplier${ABC[N]}, vmultiplier_base);

      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = ${WASM_I16X8_Q15MULR}(vacc${ABC[N]}, vmultiplier${ABC[N]});

      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = wasm_i16x8_add_sat(vacc${ABC[N]}, voutput_zero_point);

      $for N in range(SIMD_TILE):
        const v128_t vy${ABC[N]} = ${WASM_X8X16_NARROW_I16X8}(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

      wasm_v128_store(y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        wasm_v128_store((y + ${N * 16}), vy${ABC[N]});
      y += ${BATCH_TILE};
    }
  for (; n >= 8 * sizeof(${XINT8_T}); n -= 8 * sizeof(${XINT8_T})) {
    // Load 8 quantized elements and widen them to 16 bits.
    v128_t vacc = ${WASM_X16X8_LOAD8X8}(x);
    // Per-lane mask: all ones where the input is above the input zero point.
    v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
    // Negate the input relative to the zero point.
    vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
    // Select the per-lane Q15 multiplier: multiplier_base for inputs at or below the
    // zero point, multiplier_base ^ multiplier_diff for inputs above it.
    vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
    // Pre-shift by 7 bits to retain extra precision in the Q15 multiplication.
    vacc = wasm_i16x8_shl(vacc, 7);
    vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
    // Rounding Q15 multiplication by the selected slope multiplier.
    vacc = ${WASM_I16X8_Q15MULR}(vacc, vmultiplier);
    // Add the output zero point with saturation.
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
    x += 8;

    // Narrow back to 8 bits and store the 8 results.
    const v128_t vy = ${WASM_X8X16_NARROW_I16X8}(vacc, vacc);
    wasm_v128_store64_lane(y, vy, 0);
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    // Process the final 1-7 elements with the same computation and partial stores.
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 7 * sizeof(${XINT8_T}));

    v128_t vacc = ${WASM_X16X8_LOAD8X8}(x);
    v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
    vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
    vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
    vacc = wasm_i16x8_shl(vacc, 7);
    vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
    vacc = ${WASM_I16X8_Q15MULR}(vacc, vmultiplier);
    vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);

    v128_t vy = ${WASM_X8X16_NARROW_I16X8}(vacc, vacc);
    if (n & (4 * sizeof(${XINT8_T}))) {
      wasm_v128_store32_lane(y, vy, 0);
      vy = wasm_u64x2_shr(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(${XINT8_T}))) {
      wasm_v128_store16_lane(y, vy, 0);
      vy = wasm_u32x4_shr(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      wasm_v128_store8_lane(y, vy, 0);
    }
  }
}