// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$WASM_X8X16_NARROW_I16X8 = {"QS8": "wasm_i8x16_narrow_i16x8", "QU8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
$WASM_X8X16_MIN = {"QS8": "wasm_i8x16_min", "QU8": "wasm_u8x16_min"}[DATATYPE]
void xnn_f32_${DATATYPE.lower()}_vcvt_ukernel__wasmsimd_magic_x${BATCH_TILE}(
    size_t n,
    const float* x,
    ${XINT8_T}* y,
    const union xnn_f32_${DATATYPE.lower()}_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  // This kernel uses the "magic bias" trick: scale the input, add a large
  // power-of-two bias so that float rounding leaves the rounded integer in
  // the low mantissa bits, clamp from below in the integer domain, subtract
  // the bias bit pattern less the zero point, then narrow to 8 bits and
  // clamp from above.
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd_magic.scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias);
  const v128_t vmagic_min = wasm_v128_load64_splat(params->wasmsimd_magic.magic_min);
  const v128_t vmagic_bias_less_zero_point = wasm_v128_load64_splat(params->wasmsimd_magic.magic_bias_less_zero_point);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd_magic.output_max);
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 4):
        vx${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vscale);

      $for N in range(0, BATCH_TILE, 4):
        vx${ABC[N:N+4]} = wasm_f32x4_add(vx${ABC[N:N+4]}, vmagic_bias);

      $for N in range(0, BATCH_TILE, 4):
        v128_t vacc${ABC[N:N+4]} = wasm_i32x4_max(vx${ABC[N:N+4]}, vmagic_min);

      $for N in range(0, BATCH_TILE, 4):
        vacc${ABC[N:N+4]} = wasm_i32x4_sub(vacc${ABC[N:N+4]}, vmagic_bias_less_zero_point);

      $for N in range(0, BATCH_TILE, 8):
        const v128_t vacc${ABC[N:N+8]} = wasm_i16x8_narrow_i32x4(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]});

      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          v128_t vy${ABC[N:N+16]} = ${WASM_X8X16_NARROW_I16X8}(vacc${ABC[N:N+8]}, vacc${ABC[N+8:N+16]});
        $else:
          v128_t vy${ABC[N:N+8]} = ${WASM_X8X16_NARROW_I16X8}(vacc${ABC[N:N+8]}, vacc${ABC[N:N+8]});

      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          vy${ABC[N:N+16]} = ${WASM_X8X16_MIN}(vy${ABC[N:N+16]}, voutput_max);
        $else:
          vy${ABC[N:N+8]} = ${WASM_X8X16_MIN}(vy${ABC[N:N+8]}, voutput_max);

      wasm_v128_store(y, vy${ABC[0:16]});
      $for N in range(16, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          wasm_v128_store(y + ${N}, vy${ABC[N:N+16]});
        $else:
          *((double*) (y + ${N})) = wasm_f64x2_extract_lane(vy${ABC[N:N+8]}, 0);
      y += ${BATCH_TILE};
    }
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    v128_t vx_lo = wasm_v128_load(x);
    v128_t vx_hi = wasm_v128_load(x + 4);
    x += 8;

    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);

    vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
    vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);

    v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
    v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);

    vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
    vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);

    const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);

    v128_t vy = ${WASM_X8X16_NARROW_I16X8}(vacc, vacc);
    vy = ${WASM_X8X16_MIN}(vy, voutput_max);
    *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    // Load 8 floats even though only 1-7 remain: the kernel is declared
    // XNN_OOB_READS, so reading past the end of x is permitted. When fewer
    // than 4 elements remain, x_hi simply aliases x.
    v128_t vx_lo = wasm_v128_load(x);
    const float* x_hi = (const float*) ((uintptr_t) x + (n & (4 * sizeof(float))));
    v128_t vx_hi = wasm_v128_load(x_hi);

    vx_lo = wasm_f32x4_mul(vx_lo, vscale);
    vx_hi = wasm_f32x4_mul(vx_hi, vscale);

    vx_lo = wasm_f32x4_add(vx_lo, vmagic_bias);
    vx_hi = wasm_f32x4_add(vx_hi, vmagic_bias);

    v128_t vacc_lo = wasm_i32x4_max(vx_lo, vmagic_min);
    v128_t vacc_hi = wasm_i32x4_max(vx_hi, vmagic_min);

    vacc_lo = wasm_i32x4_sub(vacc_lo, vmagic_bias_less_zero_point);
    vacc_hi = wasm_i32x4_sub(vacc_hi, vmagic_bias_less_zero_point);

    const v128_t vacc = wasm_i16x8_narrow_i32x4(vacc_lo, vacc_hi);

    v128_t vy = ${WASM_X8X16_NARROW_I16X8}(vacc, vacc);
    vy = ${WASM_X8X16_MIN}(vy, voutput_max);

    // Store the remaining 1-7 bytes: 4 at a time, then 2, then 1, shifting
    // the already-stored bytes out between steps.
    if (n & (4 * sizeof(float))) {
      *((float*) y) = wasm_f32x4_extract_lane(vy, 0);
      y += 4;
      vy = wasm_u64x2_shr(vy, 32);
    }
    uint32_t vy_lo = (uint32_t) wasm_i32x4_extract_lane(vy, 0);
    if (n & (2 * sizeof(float))) {
      *((uint16_t*) y) = (uint16_t) vy_lo;
      y += 2;
      vy_lo >>= 16;
    }
    if (n & (1 * sizeof(float))) {
      *y = (${XINT8_T}) vy_lo;
    }
  }
}
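// Below is a minimal scalar sketch of the magic-bias conversion implemented
// above, assuming the 12582912.0f (0x1.8p+23f) bias that XNNPACK's magic-bias
// variants use elsewhere; in this kernel the actual values come from the
// params struct. The helper is purely illustrative (not part of this kernel
// or the XNNPACK API) and omits the [output_min, output_max] clamping that
// the vector code performs with vmagic_min and voutput_max.
#include <stdint.h>
#include <string.h>

static inline int32_t illustrative_f32_magic_cvt(float x, float scale, int32_t zero_point) {
  // Scale, then add the magic bias. For |x * scale| < 2**22 the sum stays in
  // [2**23, 2**24), so round-to-nearest-even leaves round(x * scale) encoded
  // in the low mantissa bits of the float.
  const float vf = x * scale + 12582912.0f;  // bit pattern: 0x4B400000 + round(x * scale)
  int32_t vi;
  memcpy(&vi, &vf, sizeof(vi));  // reinterpret the float bits as an int32
  // Subtracting the bias bit pattern less the zero point leaves the
  // zero-point-adjusted quantized value.
  return vi - (INT32_C(0x4B400000) - zero_point);
}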