// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["S8", "U8"]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert PIXEL_TILE == 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>


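// Bilinear interpolation ("ibilinear") microkernel for quantized 8-bit pixels.
// For every output pixel the caller supplies pointers to the four corner rows
// (top-left, top-right, bottom-left, bottom-right) and a pair of int16_t
// weights (alphah, alphav) in Q11 fixed point, i.e. scaled by 2^11 = 2048.
// Per channel the kernel evaluates:
//   top   = tl * (2048 - alphah) + tr * alphah
//   delta = (bl - tl) * (2048 - alphah) + (br - tr) * alphah
//   out   = ((top << 11) + delta * alphav + 2^21) >> 22
// For example, alphah = alphav = 1024 (0.5 in Q11) averages the four corners.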
$XINT8_T = {"S8": "int8_t", "U8": "uint8_t"}[DATATYPE]
$WASM_X16X8_LOAD_8X8 = {"S8": "wasm_i16x8_load8x8", "U8": "wasm_u16x8_load8x8"}[DATATYPE]
$WASM_X32X4_SHR = {"S8": "wasm_i32x4_shr", "U8": "wasm_u32x4_shr"}[DATATYPE]
$WASM_X8X16_NARROW_I16X8 = {"S8": "wasm_i8x16_narrow_i16x8", "U8": "wasm_u8x16_narrow_i16x8"}[DATATYPE]
void xnn_${DATATYPE.lower()}_ibilinear_ukernel__wasmsimd_dot16x2_c${CHANNEL_TILE}${"" if PIXEL_TILE == 1 else "x%d" % PIXEL_TILE}(
    size_t output_pixels,
    size_t channels,
    const ${XINT8_T}**restrict input,
    size_t input_offset,
    const int16_t*restrict weights,
    ${XINT8_T}*restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const ${XINT8_T}* i0 = (const ${XINT8_T}*) ((uintptr_t) input[0] + input_offset);
    const ${XINT8_T}* i1 = (const ${XINT8_T}*) ((uintptr_t) input[1] + input_offset);
    const ${XINT8_T}* i2 = (const ${XINT8_T}*) ((uintptr_t) input[2] + input_offset);
    const ${XINT8_T}* i3 = (const ${XINT8_T}*) ((uintptr_t) input[3] + input_offset);
    input += 4;

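    // Build interleaved 16-bit (alphah, 2048 - alphah) pairs in one step:
    // XOR with 0xFFFF in the upper half of each 32-bit lane is a one's
    // complement (~a = -a - 1), so adding 0x0801 = 2049 there yields
    // 2048 - alphah, while the untouched lower half keeps alphah.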
    const v128_t valphah =
      wasm_i16x8_add(
        wasm_v128_xor(
          wasm_v128_load16_splat(weights),
          wasm_i32x4_const_splat(0xFFFF0000)),
        wasm_i32x4_const_splat(0x08010000));
    const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
    weights += 2;

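    // The product of two Q11 weights is Q22, so round with 2^21 before the
    // final shift right by 22.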
    const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);

    size_t c = channels;
    $if CHANNEL_TILE > 8:
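      // Main loop: interpolate ${CHANNEL_TILE} channels per iteration.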
      for (; c >= ${CHANNEL_TILE} * sizeof(${XINT8_T}); c -= ${CHANNEL_TILE} * sizeof(${XINT8_T})) {
        const v128_t vtl${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i0);
        const v128_t vtr${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i1);
        const v128_t vbl${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i2);
        const v128_t vbr${ABC[0:8]} = ${WASM_X16X8_LOAD_8X8}(i3);
        $for C in range(8, CHANNEL_TILE, 8):
          const v128_t vtl${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i0 + ${C});
          const v128_t vtr${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i1 + ${C});
          const v128_t vbl${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i2 + ${C});
          const v128_t vbr${ABC[C:C+8]} = ${WASM_X16X8_LOAD_8X8}(i3 + ${C});
        i0 += ${CHANNEL_TILE};
        i1 += ${CHANNEL_TILE};
        i2 += ${CHANNEL_TILE};
        i3 += ${CHANNEL_TILE};

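        // Blend the top edge horizontally: interleave (tr, tl) pairs and dot
        // them with the (alphah, 2048 - alphah) pairs in valphah; also form
        // the bottom-minus-top deltas for the vertical blend.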
        $for C in range(0, CHANNEL_TILE, 8):
          const v128_t vdr${ABC[C:C+8]} = wasm_i16x8_sub(vbr${ABC[C:C+8]}, vtr${ABC[C:C+8]});
          const v128_t vt${ABC[C:C+4]} = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr${ABC[C:C+8]}, vtl${ABC[C:C+8]}, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
          const v128_t vdl${ABC[C:C+8]} = wasm_i16x8_sub(vbl${ABC[C:C+8]}, vtl${ABC[C:C+8]});
          const v128_t vt${ABC[C+4:C+8]} = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr${ABC[C:C+8]}, vtl${ABC[C:C+8]}, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

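        // Blend the bottom-minus-top deltas horizontally with the same weights.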
        $for C in range(0, CHANNEL_TILE, 8):
          const v128_t vd${ABC[C:C+4]} = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr${ABC[C:C+8]}, vdl${ABC[C:C+8]}, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
          const v128_t vd${ABC[C+4:C+8]} = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr${ABC[C:C+8]}, vdl${ABC[C:C+8]}, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

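        // Blend vertically: acc = (top << 11) + delta * alphav, in Q22.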
        $for C in range(0, CHANNEL_TILE, 4):
          v128_t vacc${ABC[C:C+4]} = wasm_i32x4_mul(vd${ABC[C:C+4]}, valphav);

        $for C in range(0, CHANNEL_TILE, 4):
          vacc${ABC[C:C+4]} = wasm_i32x4_add(wasm_i32x4_shl(vt${ABC[C:C+4]}, 11), vacc${ABC[C:C+4]});

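        // Round and shift the Q22 accumulators down to integer pixel values.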
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${ABC[C:C+4]} = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc${ABC[C:C+4]}, vrounding), 22);

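        // Narrow the 32-bit accumulators to 16 bits with signed saturation.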
        $for C in range(0, CHANNEL_TILE, 8):
          const v128_t vacc${ABC[C:C+8]} = wasm_i16x8_narrow_i32x4(vacc${ABC[C:C+4]}, vacc${ABC[C+4:C+8]});

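        // Narrow to 8 bits; a trailing half-vector is paired with itself so
        // its low 8 bytes can be stored via a 64-bit lane extract below.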
        $for C in range(0, CHANNEL_TILE, 16):
          $if C + 8 < CHANNEL_TILE:
            const v128_t vo${ABC[C:C+16]} = ${WASM_X8X16_NARROW_I16X8}(vacc${ABC[C:C+8]}, vacc${ABC[C+8:C+16]});
          $else:
            const v128_t vo${ABC[C:C+8]} = ${WASM_X8X16_NARROW_I16X8}(vacc${ABC[C:C+8]}, vacc${ABC[C:C+8]});

        wasm_v128_store(output, vo${ABC[0:16]});
        $for C in range(16, CHANNEL_TILE, 16):
          $if C + 8 < CHANNEL_TILE:
            wasm_v128_store(output + ${C}, vo${ABC[C:C+16]});
          $else:
            *((double*) (output + ${C})) = wasm_f64x2_extract_lane(vo${ABC[C:C+8]}, 0);
        output += ${CHANNEL_TILE};
      }
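    // Interpolate blocks of 8 channels.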
    for (; c >= 8 * sizeof(${XINT8_T}); c -= 8 * sizeof(${XINT8_T})) {
      const v128_t vtl01234567 = ${WASM_X16X8_LOAD_8X8}(i0);
      i0 += 8;
      const v128_t vtr01234567 = ${WASM_X16X8_LOAD_8X8}(i1);
      i1 += 8;
      const v128_t vbl01234567 = ${WASM_X16X8_LOAD_8X8}(i2);
      i2 += 8;
      const v128_t vbr01234567 = ${WASM_X16X8_LOAD_8X8}(i3);
      i3 += 8;

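      // Blend horizontally (top edge and bottom-minus-top deltas),
      // then vertically in Q22 fixed point.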
      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);

      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      vacc0123 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc0123, vrounding), 22);
      vacc4567 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc4567, vrounding), 22);

      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);

      const v128_t vo01234567 = ${WASM_X8X16_NARROW_I16X8}(vacc01234567, vacc01234567);

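      // Store the low 8 bytes of the narrowed result.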
      *((double*) output) = wasm_f64x2_extract_lane(vo01234567, 0);
      output += 8;
    }
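    // Tail: 1-7 leftover channels. The full 8-lane loads may read past the
    // end of each row; the XNN_OOB_READS annotation declares this safe.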
    if XNN_UNLIKELY(c != 0) {
      const v128_t vtl01234567 = ${WASM_X16X8_LOAD_8X8}(i0);
      const v128_t vtr01234567 = ${WASM_X16X8_LOAD_8X8}(i1);
      const v128_t vbl01234567 = ${WASM_X16X8_LOAD_8X8}(i2);
      const v128_t vbr01234567 = ${WASM_X16X8_LOAD_8X8}(i3);

      const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
      const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
      const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
      const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);

      v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
      v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);

      vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
      vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);

      vacc0123 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc0123, vrounding), 22);
      vacc4567 = ${WASM_X32X4_SHR}(wasm_i32x4_add(vacc4567, vrounding), 22);

      const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);

      v128_t vo01234567 = ${WASM_X8X16_NARROW_I16X8}(vacc01234567, vacc01234567);

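      // Store the remaining 1-7 bytes in 4-, 2-, and 1-byte chunks, shifting
      // the consumed lanes out of the vector after each store.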
      if (c & (4 * sizeof(${XINT8_T}))) {
        *((float*) output) = wasm_f32x4_extract_lane(vo01234567, 0);
        output += 4;
        vo01234567 = wasm_u64x2_shr(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) wasm_i32x4_extract_lane(vo01234567, 0);
      if (c & (2 * sizeof(${XINT8_T}))) {
        *((uint16_t*) output) = (uint16_t) vo0123;
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(${XINT8_T}))) {
        *output++ = (uint8_t) vo0123;
      }
    }

    output = (${XINT8_T}*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}