// Auto-generated file. Do not edit!
//   Template: src/x8-lut/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


void xnn_x8_lut_ukernel__wasmsimd_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

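  // Load the 256-entry lookup table as 16 sub-tables of 16 bytes each.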
  const v128_t vtable0 = wasm_v128_load(t);
  const v128_t vtable1 = wasm_v128_load(t + 16);
  const v128_t vtable2 = wasm_v128_load(t + 32);
  const v128_t vtable3 = wasm_v128_load(t + 48);
  const v128_t vtable4 = wasm_v128_load(t + 64);
  const v128_t vtable5 = wasm_v128_load(t + 80);
  const v128_t vtable6 = wasm_v128_load(t + 96);
  const v128_t vtable7 = wasm_v128_load(t + 112);
  const v128_t vtable8 = wasm_v128_load(t + 128);
  const v128_t vtable9 = wasm_v128_load(t + 144);
  const v128_t vtable10 = wasm_v128_load(t + 160);
  const v128_t vtable11 = wasm_v128_load(t + 176);
  const v128_t vtable12 = wasm_v128_load(t + 192);
  const v128_t vtable13 = wasm_v128_load(t + 208);
  const v128_t vtable14 = wasm_v128_load(t + 224);
  const v128_t vtable15 = wasm_v128_load(t + 240);
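  // wasm_i8x16_swizzle zeroes lanes whose index byte is out of range (>= 16).  Subtracting
  // this offset before each subsequent sub-table lookup therefore makes exactly one lookup
  // select each table entry, and OR-ing combines the per-sub-table results.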
  const v128_t voffset = wasm_i8x16_const_splat(16);
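  // Main loop: translate 16 input bytes per iteration.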
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    v128_t vx = wasm_v128_load(x);
    x += 16;

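    // Look up indices 0-15 in the first sub-table.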
    v128_t vy = wasm_i8x16_swizzle(vtable0, vx);

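    // For each remaining sub-table, shift the indices down by 16 and OR in its lookups.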
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));

    wasm_v128_store(y, vy);
    y += 16;
  }
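  // Remainder of 1 to 15 bytes: run one more full 16-byte lookup (note that wasm_v128_load
  // reads a full 16 bytes of input here), then store only the valid low bytes of the result.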
  if XNN_UNLIKELY(n != 0) {
    v128_t vx = wasm_v128_load(x);

    v128_t vy = wasm_i8x16_swizzle(vtable0, vx);

    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable1, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable2, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable3, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable4, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable5, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable6, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable7, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable8, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable9, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable10, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable11, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable12, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable13, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable14, vx));
    vx = wasm_i8x16_sub(vx, voffset);
    vy = wasm_v128_or(vy, wasm_i8x16_swizzle(vtable15, vx));

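    // Store the low 8, 4, 2, and/or 1 bytes of the result, as selected by the bits of n.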
    if (n & (8 * sizeof(uint8_t))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      *((float*) y) = wasm_f32x4_extract_lane(vy, 0);
      vy = wasm_u64x2_shr(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = wasm_i32x4_extract_lane(vy, 0);
    if (n & (2 * sizeof(uint8_t))) {
      *((uint16_t*) y) = (uint16_t) vy_lo;
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy_lo;
    }
  }
}