// xref: /aosp_15_r20/external/XNNPACK/src/u8-ibilinear/gen/wasmsimd-dot16x2-c16.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/s8-ibilinear/wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>

xnn_u8_ibilinear_ukernel__wasmsimd_dot16x2_c16(size_t output_pixels,size_t channels,const uint8_t ** restrict input,size_t input_offset,const int16_t * restrict weights,uint8_t * restrict output,size_t output_increment)18 void xnn_u8_ibilinear_ukernel__wasmsimd_dot16x2_c16(
19     size_t output_pixels,
20     size_t channels,
21     const uint8_t**restrict input,
22     size_t input_offset,
23     const int16_t*restrict weights,
24     uint8_t*restrict output,
25     size_t output_increment) XNN_OOB_READS
26 {
27   assert(output_pixels != 0);
28   assert(channels != 0);
29 
30   do {
31     const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
32     const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
33     const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
34     const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
35     input += 4;
36 
37     const v128_t valphah =
38       wasm_i16x8_add(
39         wasm_v128_xor(
40           wasm_v128_load16_splat(weights),
41           wasm_i32x4_const_splat(0xFFFF0000)),
42         wasm_i32x4_const_splat(0x08010000));
43     const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
44     weights += 2;
45 
46     const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);
47 
48     size_t c = channels;
49     for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
50       const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
51       const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
52       const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
53       const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
54       const v128_t vtl89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
55       const v128_t vtr89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
56       const v128_t vbl89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
57       const v128_t vbr89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
58       i0 += 16;
59       i1 += 16;
60       i2 += 16;
61       i3 += 16;
62 
63       const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
64       const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
65       const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
66       const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
67       const v128_t vdr89ABCDEF = wasm_i16x8_sub(vbr89ABCDEF, vtr89ABCDEF);
68       const v128_t vt89AB = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr89ABCDEF, vtl89ABCDEF, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
69       const v128_t vdl89ABCDEF = wasm_i16x8_sub(vbl89ABCDEF, vtl89ABCDEF);
70       const v128_t vtCDEF = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr89ABCDEF, vtl89ABCDEF, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
71 
72       const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
73       const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
74       const v128_t vd89AB = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr89ABCDEF, vdl89ABCDEF, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
75       const v128_t vdCDEF = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr89ABCDEF, vdl89ABCDEF, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
76 
77       v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
78       v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
79       v128_t vacc89AB = wasm_i32x4_mul(vd89AB, valphav);
80       v128_t vaccCDEF = wasm_i32x4_mul(vdCDEF, valphav);
81 
82       vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
83       vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
84       vacc89AB = wasm_i32x4_add(wasm_i32x4_shl(vt89AB, 11), vacc89AB);
85       vaccCDEF = wasm_i32x4_add(wasm_i32x4_shl(vtCDEF, 11), vaccCDEF);
86 
87       vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
88       vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
89       vacc89AB = wasm_u32x4_shr(wasm_i16x8_add(vacc89AB, vrounding), 22);
90       vaccCDEF = wasm_u32x4_shr(wasm_i16x8_add(vaccCDEF, vrounding), 22);
91 
92       const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
93       const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
94 
95       const v128_t vo0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
96 
97       wasm_v128_store(output, vo0123456789ABCDEF);
98       output += 16;
99     }
100     for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
101       const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
102       i0 += 8;
103       const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
104       i1 += 8;
105       const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
106       i2 += 8;
107       const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
108       i3 += 8;
109 
110       const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
111       const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
112       const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
113       const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
114 
115       const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
116       const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
117 
118       v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
119       v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
120 
121       vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
122       vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
123 
124       vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
125       vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
126 
127       const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
128 
129       const v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
130 
131       *((double*) output) = wasm_f64x2_extract_lane(vo01234567, 0);
132       output += 8;
133     }
134     if XNN_UNLIKELY(c != 0) {
135       const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
136       const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
137       const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
138       const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
139 
140       const v128_t vdr01234567 = wasm_i16x8_sub(vbr01234567, vtr01234567);
141       const v128_t vt0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
142       const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
143       const v128_t vt4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vtr01234567, vtl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
144 
145       const v128_t vd0123 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 0, 8, 1, 9, 2, 10, 3, 11), valphah);
146       const v128_t vd4567 = wasm_i32x4_dot_i16x8(wasm_v16x8_shuffle(vdr01234567, vdl01234567, 4, 12, 5, 13, 6, 14, 7, 15), valphah);
147 
148       v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
149       v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
150 
151       vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
152       vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
153 
154       vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
155       vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
156 
157       const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
158 
159       v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
160 
161       if (c & (4 * sizeof(uint8_t))) {
162         *((float*) output) = wasm_f32x4_extract_lane(vo01234567, 0);
163         output += 4;
164         vo01234567 = wasm_u64x2_shr(vo01234567, 32);
165       }
166       uint32_t vo0123 = (uint32_t) wasm_i32x4_extract_lane(vo01234567, 0);
167       if (c & (2 * sizeof(uint8_t))) {
168         *((uint16_t*) output) = (uint16_t) vo0123;
169         output += 2;
170         vo0123 >>= 16;
171       }
172       if (c & (1 * sizeof(uint8_t))) {
173         *output++ = (uint8_t) vo0123;
174       }
175     }
176 
177     output = (uint8_t*) ((uintptr_t) output + output_increment);
178   } while (--output_pixels != 0);
179 }
180