// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemm_relu_ukernel_6x8s4__wasmsimd(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

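  // Set up per-row pointers into A and C. When mr < 6, the unused rows alias
  // the last valid row, so their redundant results land on the same memory.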
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

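  // Outer loop: produce the output in blocks of 8 columns (NR = 8) across all
  // 6 rows (MR = 6) per iteration. The accumulators start from the bias
  // values packed at the head of the weights block w.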
  do {
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    v128_t vacc3x0123 = vacc0x0123;
    v128_t vacc3x4567 = vacc0x4567;
    v128_t vacc4x0123 = vacc0x0123;
    v128_t vacc4x4567 = vacc0x4567;
    v128_t vacc5x0123 = vacc0x0123;
    v128_t vacc5x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
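    // Main loop: consume 4 elements of K per iteration. This is the "s4"
    // shuffle variant: after each of the 4 sub-steps the A vectors are
    // rotated by one lane, so every A element meets 4 different packed B
    // vectors without reloading A.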
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;
      v128_t va3 = wasm_v128_load(a3);
      a3 += 4;
      v128_t va4 = wasm_v128_load(a4);
      a4 += 4;
      v128_t va5 = wasm_v128_load(a5);
      a5 += 4;


      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c0));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c0));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c0));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c0));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c0));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c0));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c0));

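      // Rotate each A vector left by one lane (1, 2, 3, 0) so the next group
      // of packed B vectors lines up with the next A elements.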
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c1));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c1));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c1));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c1));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c1));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c1));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c2));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c2));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c2));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c2));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c2));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c2));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c3));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3, vb0123c3));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4, vb0123c3));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5, vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c3));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3, vb4567c3));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4, vb4567c3));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5, vb4567c3));


      w += 32;
      k -= 4 * sizeof(float);
    }
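    // Remainder: 1-3 K elements are left. A full 16-byte vector is still
    // loaded from each row of A, which may read beyond the valid elements;
    // the packed weights are zero-padded for those positions, and the masking
    // below keeps the out-of-range lanes from contributing.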
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      v128_t va3 = wasm_v128_load(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      v128_t va4 = wasm_v128_load(a4);
      a4 = (const float*) ((uintptr_t) a4 + k);
      v128_t va5 = wasm_v128_load(a5);
      a5 = (const float*) ((uintptr_t) a5 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

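      // Zero the A lanes wherever the corresponding B lane is zero-padded, so
      // that garbage (possibly Inf/NaN) read past the end of A multiplies to
      // exactly 0 instead of poisoning the accumulators.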
      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);
      va3 = wasm_v32x4_shuffle(va3, va3, 1, 2, 3, 0);
      va4 = wasm_v32x4_shuffle(va4, va4, 1, 2, 3, 0);
      va5 = wasm_v32x4_shuffle(va5, va5, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(wasm_v128_andnot(va3, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(wasm_v128_andnot(va4, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(wasm_v128_andnot(va5, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));


      w += 32;
    }

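    // ReLU: clamp the accumulators to zero with an integer max. For non-NaN
    // IEEE-754 floats this works because non-negative floats order the same
    // as signed 32-bit integers, while negative floats have the sign bit set
    // and compare below zero.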
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vzero);
    vacc4x0123 = wasm_i32x4_max(vacc4x0123, vzero);
    vacc5x0123 = wasm_i32x4_max(vacc5x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);
    vacc3x4567 = wasm_i32x4_max(vacc3x4567, vzero);
    vacc4x4567 = wasm_i32x4_max(vacc4x4567, vzero);
    vacc5x4567 = wasm_i32x4_max(vacc5x4567, vzero);

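    // Full 8-column store. Rows are written from c5 down to c0, so that when
    // rows alias for mr < 6 the store for the valid row lands last.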
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c5, vacc5x0123);
      wasm_v128_store(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      wasm_v128_store(c4, vacc4x0123);
      wasm_v128_store(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      wasm_v128_store(c3, vacc3x0123);
      wasm_v128_store(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

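      // Rewind the A pointers to the start of each row for the next block of
      // 8 output columns.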
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
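      // Partial store for the last 1-7 columns: write 4, then 2, then 1
      // element, shifting the remaining results into the low lanes after each
      // partial store.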
      if (nc & 4) {
        wasm_v128_store(c5, vacc5x0123);
        wasm_v128_store(c4, vacc4x0123);
        wasm_v128_store(c3, vacc3x0123);
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        *((double*) c5) = wasm_f64x2_extract_lane(vacc5x0123, 0);
        *((double*) c4) = wasm_f64x2_extract_lane(vacc4x0123, 0);
        *((double*) c3) = wasm_f64x2_extract_lane(vacc3x0123, 0);
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc5x0123 = wasm_v32x4_shuffle(vacc5x0123, vacc5x0123, 2, 3, 2, 3);
        vacc4x0123 = wasm_v32x4_shuffle(vacc4x0123, vacc4x0123, 2, 3, 2, 3);
        vacc3x0123 = wasm_v32x4_shuffle(vacc3x0123, vacc3x0123, 2, 3, 2, 3);
        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c5 = wasm_f32x4_extract_lane(vacc5x0123, 0);
        *c4 = wasm_f32x4_extract_lane(vacc4x0123, 0);
        *c3 = wasm_f32x4_extract_lane(vacc3x0123, 0);
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}