// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


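// IGEMM microkernel computing a 3x8 (MR x NR) tile of the output with min/max
// clamping. A rows are fetched through an indirection buffer; the "s4" scheme
// loads 4 consecutive K elements per row into one vector and rotates its lanes
// between sub-steps instead of broadcasting individual elements.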
void xnn_f32_igemm_minmax_ukernel_3x8s4__wasmsimd_x86(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

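  // Set up one output pointer per row. If fewer than 3 rows are requested,
  // the unused row pointers alias a lower row so the stores below stay in bounds.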
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
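  // Outer loop: produce the output in blocks of NR = 8 columns.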
  do {
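    // Seed all three row accumulators with the 8 values that lead the packed
    // weights (the per-column bias), then advance w past them.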
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    w += 8;

    size_t p = ks;
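    // Walk the indirection buffer: each iteration consumes MR = 3 row pointers
    // and accumulates one kc-long slice of A into the tile.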
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
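      // Main loop: process 4 elements of K per iteration. Each of the four
      // sub-steps multiplies the current A vectors by one packed group of B,
      // then rotates the A lanes (1, 2, 3, 0) so the next group lines up; the
      // weights are packed to match this rotation, so no broadcasts are needed.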
      while (k >= 4 * sizeof(float)) {
        v128_t va0 = wasm_v128_load(a0);
        a0 += 4;
        v128_t va1 = wasm_v128_load(a1);
        a1 += 4;
        v128_t va2 = wasm_v128_load(a2);
        a2 += 4;


        const v128_t vb0123c0 = wasm_v128_load(w + 0);
        const v128_t vb4567c0 = wasm_v128_load(w + 4);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c0));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c0));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c0));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c0));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c0));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c0));

        va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
        va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
        va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

        const v128_t vb0123c1 = wasm_v128_load(w + 8);
        const v128_t vb4567c1 = wasm_v128_load(w + 12);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c1));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c1));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c1));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c1));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c1));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c1));

        va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
        va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
        va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

        const v128_t vb0123c2 = wasm_v128_load(w + 16);
        const v128_t vb4567c2 = wasm_v128_load(w + 20);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c2));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c2));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c2));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c2));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c2));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c2));

        va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
        va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
        va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

        const v128_t vb0123c3 = wasm_v128_load(w + 24);
        const v128_t vb4567c3 = wasm_v128_load(w + 28);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c3));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c3));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c3));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c3));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c3));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c3));


        w += 32;
        k -= 4 * sizeof(float);
      }
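      // Remainder: kc is not a multiple of 4 floats. The A loads may read past
      // the end of the row (permitted by XNN_OOB_READS); the packed weights
      // hold zeros in the missing K positions, and the andnot below zeroes the
      // matching A lanes so out-of-range data cannot contribute NaN/Inf.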
      if XNN_UNLIKELY(k != 0) {
        v128_t va0 = wasm_v128_load(a0);
        a0 = (const float*) ((uintptr_t) a0 + k);
        v128_t va1 = wasm_v128_load(a1);
        a1 = (const float*) ((uintptr_t) a1 + k);
        v128_t va2 = wasm_v128_load(a2);
        a2 = (const float*) ((uintptr_t) a2 + k);

        const v128_t vzero = wasm_f32x4_const_splat(0.0f);

        const v128_t vb0123c0 = wasm_v128_load(w + 0);
        const v128_t vb4567c0 = wasm_v128_load(w + 4);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));

        va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
        va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
        va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

        const v128_t vb0123c1 = wasm_v128_load(w + 8);
        const v128_t vb4567c1 = wasm_v128_load(w + 12);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));

        va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
        va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
        va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

        const v128_t vb0123c2 = wasm_v128_load(w + 16);
        const v128_t vb4567c2 = wasm_v128_load(w + 20);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));

        va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
        va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
        va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

        const v128_t vb0123c3 = wasm_v128_load(w + 24);
        const v128_t vb4567c3 = wasm_v128_load(w + 28);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));


        w += 32;
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

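    // Clamp the tile to [min, max] with pseudo-min/max, which lower to single
    // minps/maxps instructions on x86 WebAssembly engines (the __x86 suffix of
    // this kernel refers to that choice).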
    vacc0x0123 = wasm_f32x4_pmax(vmin, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmax(vmin, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmax(vmin, vacc2x0123);
    vacc0x4567 = wasm_f32x4_pmax(vmin, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmax(vmin, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmax(vmin, vacc2x4567);

    vacc0x0123 = wasm_f32x4_pmin(vmax, vacc0x0123);
    vacc1x0123 = wasm_f32x4_pmin(vmax, vacc1x0123);
    vacc2x0123 = wasm_f32x4_pmin(vmax, vacc2x0123);
    vacc0x4567 = wasm_f32x4_pmin(vmax, vacc0x4567);
    vacc1x4567 = wasm_f32x4_pmin(vmax, vacc1x4567);
    vacc2x4567 = wasm_f32x4_pmin(vmax, vacc2x4567);

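    // Store a full 8-column tile. Rows are written from last to first, and the
    // indirection pointer is rewound by ks so the same A rows are reused for
    // the next block of columns.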
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float** restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
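      // Partial tile: write the remaining nc (1 to 7) columns in chunks of
      // 4, 2, and 1, shifting the surviving lanes down after each chunk.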
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}