// Auto-generated file. Do not edit!
//   Template: src/s8-ibilinear/wasmsimd-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>
17
xnn_u8_ibilinear_ukernel__wasmsimd_mul32_c16(size_t output_pixels,size_t channels,const uint8_t ** restrict input,size_t input_offset,const int16_t * restrict weights,uint8_t * restrict output,size_t output_increment)18 void xnn_u8_ibilinear_ukernel__wasmsimd_mul32_c16(
19 size_t output_pixels,
20 size_t channels,
21 const uint8_t**restrict input,
22 size_t input_offset,
23 const int16_t*restrict weights,
24 uint8_t*restrict output,
25 size_t output_increment) XNN_OOB_READS
26 {
27 assert(output_pixels != 0);
28 assert(channels != 0);
29
30 do {
31 const uint8_t* i0 = (const uint8_t*) ((uintptr_t) input[0] + input_offset);
32 const uint8_t* i1 = (const uint8_t*) ((uintptr_t) input[1] + input_offset);
33 const uint8_t* i2 = (const uint8_t*) ((uintptr_t) input[2] + input_offset);
34 const uint8_t* i3 = (const uint8_t*) ((uintptr_t) input[3] + input_offset);
35 input += 4;
36
37 const v128_t valphah = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights));
38 const v128_t valphav = wasm_i32x4_extend_low_i16x8(wasm_v128_load16_splat(weights + 1));
39 weights += 2;
40
41 const v128_t vrounding = wasm_i32x4_const_splat(0x00200000);
42
43 size_t c = channels;
44 for (; c >= 16 * sizeof(uint8_t); c -= 16 * sizeof(uint8_t)) {
45 const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
46 const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
47 const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
48 const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
49 const v128_t vtl89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
50 const v128_t vtr89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
51 const v128_t vbl89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
52 const v128_t vbr89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
53 i0 += 16;
54 i1 += 16;
55 i2 += 16;
56 i3 += 16;
57
58 const v128_t vtd01234567 = wasm_i16x8_sub(vtr01234567, vtl01234567);
59 const v128_t vbd01234567 = wasm_i16x8_sub(vbr01234567, vbl01234567);
60 const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
61 const v128_t vdd01234567 = wasm_i16x8_sub(vbd01234567, vtd01234567);
62 const v128_t vtd89ABCDEF = wasm_i16x8_sub(vtr89ABCDEF, vtl89ABCDEF);
63 const v128_t vbd89ABCDEF = wasm_i16x8_sub(vbr89ABCDEF, vbl89ABCDEF);
64 const v128_t vdl89ABCDEF = wasm_i16x8_sub(vbl89ABCDEF, vtl89ABCDEF);
65 const v128_t vdd89ABCDEF = wasm_i16x8_sub(vbd89ABCDEF, vtd89ABCDEF);
66
67 const v128_t vt0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vtd01234567), valphah));
68 const v128_t vt4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vtd01234567), valphah));
69 const v128_t vd0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vdd01234567), valphah));
70 const v128_t vd4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vdd01234567), valphah));
71 const v128_t vt89AB = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vtl89ABCDEF), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vtd89ABCDEF), valphah));
72 const v128_t vtCDEF = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vtl89ABCDEF), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vtd89ABCDEF), valphah));
73 const v128_t vd89AB = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vdl89ABCDEF), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vdd89ABCDEF), valphah));
74 const v128_t vdCDEF = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vdl89ABCDEF), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vdd89ABCDEF), valphah));
75
76 v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
77 v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
78 v128_t vacc89AB = wasm_i32x4_mul(vd89AB, valphav);
79 v128_t vaccCDEF = wasm_i32x4_mul(vdCDEF, valphav);
80
81 vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
82 vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
83 vacc89AB = wasm_i32x4_add(wasm_i32x4_shl(vt89AB, 11), vacc89AB);
84 vaccCDEF = wasm_i32x4_add(wasm_i32x4_shl(vtCDEF, 11), vaccCDEF);
85
86 vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
87 vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
88 vacc89AB = wasm_u32x4_shr(wasm_i16x8_add(vacc89AB, vrounding), 22);
89 vaccCDEF = wasm_u32x4_shr(wasm_i16x8_add(vaccCDEF, vrounding), 22);
90
91 const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
92 const v128_t vacc89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
93
94 const v128_t vo0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vacc01234567, vacc89ABCDEF);
95
96 wasm_v128_store(output, vo0123456789ABCDEF);
97 output += 16;
98 }
99 for (; c >= 8 * sizeof(uint8_t); c -= 8 * sizeof(uint8_t)) {
100 const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
101 i0 += 8;
102 const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
103 i1 += 8;
104 const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
105 i2 += 8;
106 const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
107 i3 += 8;
108
109 const v128_t vtd01234567 = wasm_i16x8_sub(vtr01234567, vtl01234567);
110 const v128_t vbd01234567 = wasm_i16x8_sub(vbr01234567, vbl01234567);
111 const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
112 const v128_t vdd01234567 = wasm_i16x8_sub(vbd01234567, vtd01234567);
113
114 const v128_t vt0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vtd01234567), valphah));
115 const v128_t vt4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vtd01234567), valphah));
116 const v128_t vd0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vdd01234567), valphah));
117 const v128_t vd4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vdd01234567), valphah));
118
119 v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
120 v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
121
122 vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
123 vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
124
125 vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
126 vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
127
128 const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
129
130 const v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
131
132 *((double*) output) = wasm_f64x2_extract_lane(vo01234567, 0);
133 output += 8;
134 }
135 if XNN_UNLIKELY(c != 0) {
136 const v128_t vtl01234567 = wasm_u16x8_load8x8(i0);
137 const v128_t vtr01234567 = wasm_u16x8_load8x8(i1);
138 const v128_t vbl01234567 = wasm_u16x8_load8x8(i2);
139 const v128_t vbr01234567 = wasm_u16x8_load8x8(i3);
140
141 const v128_t vtd01234567 = wasm_i16x8_sub(vtr01234567, vtl01234567);
142 const v128_t vbd01234567 = wasm_i16x8_sub(vbr01234567, vbl01234567);
143 const v128_t vdl01234567 = wasm_i16x8_sub(vbl01234567, vtl01234567);
144 const v128_t vdd01234567 = wasm_i16x8_sub(vbd01234567, vtd01234567);
145
146 const v128_t vt0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vtd01234567), valphah));
147 const v128_t vt4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vtl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vtd01234567), valphah));
148 const v128_t vd0123 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_low_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_low_i16x8(vdd01234567), valphah));
149 const v128_t vd4567 = wasm_i32x4_add(wasm_i32x4_shl(wasm_i32x4_extend_high_i16x8(vdl01234567), 11), wasm_i32x4_mul(wasm_i32x4_extend_high_i16x8(vdd01234567), valphah));
150
151 v128_t vacc0123 = wasm_i32x4_mul(vd0123, valphav);
152 v128_t vacc4567 = wasm_i32x4_mul(vd4567, valphav);
153
154 vacc0123 = wasm_i32x4_add(wasm_i32x4_shl(vt0123, 11), vacc0123);
155 vacc4567 = wasm_i32x4_add(wasm_i32x4_shl(vt4567, 11), vacc4567);
156
157 vacc0123 = wasm_u32x4_shr(wasm_i16x8_add(vacc0123, vrounding), 22);
158 vacc4567 = wasm_u32x4_shr(wasm_i16x8_add(vacc4567, vrounding), 22);
159
160 const v128_t vacc01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
161
162 v128_t vo01234567 = wasm_u8x16_narrow_i16x8(vacc01234567, vacc01234567);
163
164 if (c & (4 * sizeof(uint8_t))) {
165 *((float*) output) = wasm_f32x4_extract_lane(vo01234567, 0);
166 output += 4;
167 vo01234567 = wasm_u64x2_shr(vo01234567, 32);
168 }
169 uint32_t vo0123 = (uint32_t) wasm_i32x4_extract_lane(vo01234567, 0);
170 if (c & (2 * sizeof(uint8_t))) {
171 *((uint16_t*) output) = (uint16_t) vo0123;
172 output += 2;
173 vo0123 >>= 16;
174 }
175 if (c & (1 * sizeof(uint8_t))) {
176 *output++ = (uint8_t) vo0123;
177 }
178 }
179
180 output = (uint8_t*) ((uintptr_t) output + output_increment);
181 } while (--output_pixels != 0);
182 }
183