// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

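// 6x8 GEMM microkernel: MR=6 rows of A, NR=8 columns of B per output tile.
// The "s4" suffix denotes the shuffle variant that lane-rotates the A
// registers between four passes over packed B; "gemminc" means the initial
// accumulators are read from the `acc` buffer rather than from packed bias.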
void xnn_f32_gemminc_minmax_ukernel_6x8s4__wasmrelaxedsimd(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

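  // Set up per-row pointers into A and C. When mr < 6, the pointers for the
  // unused rows alias the previous row, so the kernel can unconditionally
  // compute all six rows without reading or writing out of bounds.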
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

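  // Broadcast the output clamping bounds from the parameters structure.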
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
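    // Load the initial accumulators (12 vectors of 4 floats = 6 rows x 8
    // columns) from the dedicated accumulator buffer.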
    v128_t vacc0x0123 = wasm_v128_load(acc + 0);
    v128_t vacc0x4567 = wasm_v128_load(acc + 4);
    v128_t vacc1x0123 = wasm_v128_load(acc + 8);
    v128_t vacc1x4567 = wasm_v128_load(acc + 12);
    v128_t vacc2x0123 = wasm_v128_load(acc + 16);
    v128_t vacc2x4567 = wasm_v128_load(acc + 20);
    v128_t vacc3x0123 = wasm_v128_load(acc + 24);
    v128_t vacc3x4567 = wasm_v128_load(acc + 28);
    v128_t vacc4x0123 = wasm_v128_load(acc + 32);
    v128_t vacc4x4567 = wasm_v128_load(acc + 36);
    v128_t vacc5x0123 = wasm_v128_load(acc + 40);
    v128_t vacc5x4567 = wasm_v128_load(acc + 44);
    acc += 48;

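    // Main loop: consume 4 elements of K per iteration. Each of the four
    // passes below multiplies a lane rotation of the A registers against a
    // fresh 8-wide slice of packed B.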
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      v128_t va5 = wasm_v128_load(a5);
      a5 += 4;

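      // Pass 0: A lanes in their original order.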
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c0));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c0));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c0));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c0));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c0));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c0));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c0));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

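      // Pass 1: A lanes rotated left by one position.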
      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c1));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c1));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c1));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c1));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c1));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c1));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

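      // Pass 2: A lanes rotated left by two positions.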
      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c2));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c2));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c2));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c2));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c2));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c2));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

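      // Pass 3: A lanes rotated left by three positions.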
      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c3));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c3));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c3));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c3));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c3));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c3));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
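    // Remainder: 1-3 K elements left. The full-vector A loads read past the
    // k remaining floats; lanes of packed B beyond the true K appear to be
    // zero-filled, and the andnot masking below clears the matching A lanes
    // so the over-read (possibly non-finite) values cannot contaminate the
    // accumulators.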
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);
      v128_t va5 = wasm_v128_load(a5);
      a5 = (const float*) ((uintptr_t) a5 + k);

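      // vzero is compared against B; wasm_v128_andnot(va, mask) clears the
      // A lanes wherever the corresponding B lane is zero.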
      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));

      w += 32;
    }

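    // Clamp the accumulators to [min, max]. The relaxed-SIMD min/max builtins
    // may differ from the strict WAsm SIMD ops for NaN inputs, which is
    // acceptable here because the clamping bounds are well-defined floats.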
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x0123);
    vacc5x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc4x4567);
    vacc5x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc5x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc1x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x0123);
    vacc2x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x0123);
    vacc3x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x0123);
    vacc4x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x0123);
    vacc5x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);
    vacc1x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc1x4567);
    vacc2x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc2x4567);
    vacc3x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc3x4567);
    vacc4x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc4x4567);
    vacc5x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc5x4567);

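    // Store a full 6x8 tile when at least 8 columns remain; rows are written
    // from c5 down to c0, then the A pointers are rewound for the next tile.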
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
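      // Partial tile: write 4, then 2, then 1 remaining column(s) according
      // to the bits of nc, shifting the surviving lanes down after each step.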
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}