xref: /aosp_15_r20/external/XNNPACK/src/qs8-vmulc/wasmsimd-mul32-ld64.c.in (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

6$assert DATATYPE in ["QS8", "QU8"]
7$assert REQUANTIZATION == "FP32"
8$assert BATCH_TILE % 8 == 0
9$assert BATCH_TILE >= 8
10$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
11#include <assert.h>
12
13#include <wasm_simd128.h>
14
15#include <xnnpack/vmul.h>
16
17
18$PARAMS_STRUCT = REQUANTIZATION.lower() + "_wasmsimd"
19$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
20$WASM_X16X8_LOAD8X8 = "wasm_u16x8_load8x8" if DATATYPE == "QU8" else "wasm_i16x8_load8x8"
21$WASM_X32X4_EXTEND_LOW_X16X8 = "wasm_u32x4_extend_low_u16x8" if DATATYPE == "QU8" else "wasm_i32x4_extend_low_i16x8"
22$WASM_X32X4_EXTEND_HIGH_X16X8 = "wasm_u32x4_extend_high_u16x8" if DATATYPE == "QU8" else "wasm_i32x4_extend_high_i16x8"
23$WASM_X8X16_NARROW_I16X8 = {"QS8": "wasm_i8x16_narrow_i16x8", "QU8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
24$WASM_X8X16_MIN = {"QS8": "wasm_i8x16_min", "QU8": "wasm_u8x16_min"}[DATATYPE]
25void xnn_${DATATYPE.lower()}_vmulc_minmax_${REQUANTIZATION.lower()}_ukernel__wasmsimd_mul32_ld64_x${BATCH_TILE}(
26    size_t n,
27    const ${XINT8_T}* input_a,
28    const ${XINT8_T}* input_b,
29    ${XINT8_T}* output,
30    const union xnn_${DATATYPE.lower()}_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
31
32{
33  const v128_t va_zero_point = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.a_zero_point);
34  const v128_t vscale = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.scale);
35  const v128_t vmagic_bias = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias);
36  const v128_t vmagic_min = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_min);
37  const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
38  const v128_t voutput_max = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.output_max);
39
40  const v128_t vxb = wasm_i16x8_sub(
41    wasm_i16x8_splat((int16_t) *input_b), wasm_v128_load64_splat(params->${PARAMS_STRUCT}.b_zero_point));
42  const v128_t vxblo = wasm_i32x4_extend_low_i16x8(vxb);
43  const v128_t vxbhi = wasm_i32x4_extend_high_i16x8(vxb);
44  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
45    const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
46    $for N in range(8, BATCH_TILE, 8):
47      const v128_t va${ABC[N:N+8]} = ${WASM_X16X8_LOAD8X8}(input_a + ${N});
48    input_a += ${BATCH_TILE};
49
50    $for N in range(0, BATCH_TILE, 8):
51      const v128_t vxa${ABC[N:N+8]} = wasm_i16x8_sub(va${ABC[N:N+8]}, va_zero_point);
52
53    $for N in range(0, BATCH_TILE, 8):
54      v128_t vacc${ABC[N:N+4]} = wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vxa${ABC[N:N+8]}), vxblo);
55      v128_t vacc${ABC[N+4:N+8]} = wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vxa${ABC[N:N+8]}), vxbhi);
56
57    $for N in range(0, BATCH_TILE, 4):
58      vacc${ABC[N:N+4]} = wasm_f32x4_convert_i32x4(vacc${ABC[N:N+4]});
59
60    $for N in range(0, BATCH_TILE, 4):
61      vacc${ABC[N:N+4]} = wasm_f32x4_mul(vacc${ABC[N:N+4]}, vscale);
62
63    $for N in range(0, BATCH_TILE, 4):
64      vacc${ABC[N:N+4]} = wasm_f32x4_add(vacc${ABC[N:N+4]}, vmagic_bias);
65
66    $for N in range(0, BATCH_TILE, 4):
67      vacc${ABC[N:N+4]} = wasm_i32x4_max(vacc${ABC[N:N+4]}, vmagic_min);
68
69    $for N in range(0, BATCH_TILE, 4):
70      vacc${ABC[N:N+4]} = wasm_i32x4_sub(vacc${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);
71
72    $for N in range(0, BATCH_TILE, 8):
73      v128_t vout${ABC[N:N+8]} = wasm_i16x8_narrow_i32x4(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]});
74
75    $for N in range(0, BATCH_TILE, 16):
76      $if N + 8 < BATCH_TILE:
77        v128_t vout${ABC[N:N+16]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
78      $else:
79        v128_t vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
80
81    $for N in range(0, BATCH_TILE, 16):
82      $if N + 8 < BATCH_TILE:
83        vout${ABC[N:N+16]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+16]}, voutput_max);
84      $else:
85        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${WASM_X8X16_MIN}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);
86
87    $if BATCH_TILE >= 16:
88      wasm_v128_store(output, vout${ABC[0:16]});
89    $else:
90      *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
91    $for N in range(16, BATCH_TILE, 16):
92      $if N + 8 < BATCH_TILE:
93        wasm_v128_store(output + ${N}, vout${ABC[N:N+16]});
94      $else:
95        *((double*) output) = wasm_f64x2_extract_lane(output + ${N}, vout${ABC[N:N+8]}${ABC[N:N+8]});
96    output += ${BATCH_TILE};
97  }
98  if XNN_UNLIKELY(n != 0) {
99    ${"do " if BATCH_TILE > 8 else ""}{
100      const v128_t va${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(input_a);
101      $if BATCH_TILE > 8:
102        input_a += 8;
103
104      const v128_t vxa${ABC[0:8]} = wasm_i16x8_sub(va${ABC[0:8]}, va_zero_point);
105
106      v128_t vacc${ABC[0:4]} = wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vxa${ABC[0:8]}), vxblo);
107      v128_t vacc${ABC[4:8]} = wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vxa${ABC[0:8]}), vxbhi);
108
109      vacc${ABC[0:4]} = wasm_f32x4_convert_i32x4(vacc${ABC[0:4]});
110      vacc${ABC[4:8]} = wasm_f32x4_convert_i32x4(vacc${ABC[4:8]});
111
112      vacc${ABC[0:4]} = wasm_f32x4_mul(vacc${ABC[0:4]}, vscale);
113      vacc${ABC[4:8]} = wasm_f32x4_mul(vacc${ABC[4:8]}, vscale);
114
115      vacc${ABC[0:4]} = wasm_f32x4_add(vacc${ABC[0:4]}, vmagic_bias);
116      vacc${ABC[4:8]} = wasm_f32x4_add(vacc${ABC[4:8]}, vmagic_bias);
117
118      vacc${ABC[0:4]} = wasm_i32x4_max(vacc${ABC[0:4]}, vmagic_min);
119      vacc${ABC[4:8]} = wasm_i32x4_max(vacc${ABC[4:8]}, vmagic_min);
120
121      vacc${ABC[0:4]} = wasm_i32x4_sub(vacc${ABC[0:4]}, vmagic_bias_less_output_zero_point);
122      vacc${ABC[4:8]} = wasm_i32x4_sub(vacc${ABC[4:8]}, vmagic_bias_less_output_zero_point);
123
124      v128_t vout${ABC[0:8]} = wasm_i16x8_narrow_i32x4(vacc${ABC[0:4]}, vacc${ABC[4:8]});
125      v128_t vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_NARROW_I16X8}(vout${ABC[0:8]}, vout${ABC[0:8]});
126      vout${ABC[0:8]}${ABC[0:8]} = ${WASM_X8X16_MIN}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
127
128      $if BATCH_TILE > 8:
129        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
130          *((double*) output) = wasm_f64x2_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
131          output += 8;
132          n -= 8 * sizeof(${XINT8_T});
133        } else {
134          if (n & (4 * sizeof(${XINT8_T}))) {
135            *((float*) output) = wasm_f32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
136            vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
137            output += 4;
138          }
139          uint32_t vout${ABC[0:4]} = wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
140          if (n & (2 * sizeof(${XINT8_T}))) {
141            *((uint16_t*) output) = (uint16_t) vout${ABC[0:4]};
142            vout${ABC[0:4]} >>= 16;
143            output += 2;
144          }
145          if (n & (1 * sizeof(${XINT8_T}))) {
146            *output = (${XINT8_T}) vout${ABC[0:4]};
147          }
148          n = 0;
149        }
150      $else:
151        if (n & (4 * sizeof(${XINT8_T}))) {
152          *((float*) output) = wasm_f32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
153          vout${ABC[0:8]}${ABC[0:8]} = wasm_u64x2_shr(vout${ABC[0:8]}${ABC[0:8]}, 32);
154          output += 4;
155        }
156        uint32_t vout${ABC[0:4]} = wasm_i32x4_extract_lane(vout${ABC[0:8]}${ABC[0:8]}, 0);
157        if (n & (2 * sizeof(${XINT8_T}))) {
158          *((uint16_t*) output) = (uint16_t) vout${ABC[0:4]};
159          vout${ABC[0:4]} >>= 16;
160          output += 2;
161        }
162        if (n & (1 * sizeof(${XINT8_T}))) {
163          *output = (${XINT8_T}) vout${ABC[0:4]};
164        }
165    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
166  }
167}