// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>

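// Sparse-matrix times dense-matrix multiplication (SpMM) micro-kernel:
// a 32x1 tile variant with min/max output clamping, unrolled 4x over the
// non-zero weights (the "x4" suffix). As used below:
// - mc is the size of the dense dimension, in bytes of float;
// - nc is the number of output channels;
// - weights interleaves, per output channel, a bias value followed by the
//   channel's non-zero weight values;
// - widx_dmap gives the byte offset from each non-zero weight's input
//   element to the next, so the input pointer is walked incrementally;
// - nidx_nnzmap gives the number of non-zero weights per output channel.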
void xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86_x4(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  size_t output_decrement = output_stride * nc - 32 * sizeof(float);
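  // Main loop: process the dense dimension in tiles of 32 floats (eight
  // v128 registers of 4 lanes each). output_decrement rewinds output from
  // the end of the last channel back to the start of the next 32-float tile.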
  while XNN_LIKELY(mc >= 32 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      v128_t vacc0123x0 = wasm_v128_load32_splat(w);
      w += 1;
      v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc4567x0 = vacc0123x0;
      v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc89ABx0 = vacc0123x0;
      v128_t vacc89ABx1 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc89ABx2 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc89ABx3 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccCDEFx0 = vacc0123x0;
      v128_t vaccCDEFx1 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccCDEFx2 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccCDEFx3 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccGHIJx0 = vacc0123x0;
      v128_t vaccGHIJx1 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccGHIJx2 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccGHIJx3 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccKLMNx0 = vacc0123x0;
      v128_t vaccKLMNx1 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccKLMNx2 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccKLMNx3 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccOPQRx0 = vacc0123x0;
      v128_t vaccOPQRx1 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccOPQRx2 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccOPQRx3 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccSTUVx0 = vacc0123x0;
      v128_t vaccSTUVx1 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccSTUVx2 = wasm_f32x4_const_splat(0.0f);
      v128_t vaccSTUVx3 = wasm_f32x4_const_splat(0.0f);
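      // Unrolled-by-4 loop over this channel's non-zero weights. Four
      // independent accumulator sets (x0..x3) keep the mul/add chains
      // independent; only the x0 set carries the bias, the rest start at
      // zero and are summed into the x0 totals after the loop.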
      for (; nnz >= 4; nnz -= 4) {
        const intptr_t diff0 = dmap[0];
        const intptr_t diff1 = dmap[1];
        const intptr_t diff2 = dmap[2];
        const intptr_t diff3 = dmap[3];
        dmap += 4;
        const v128_t vi0123x0 = wasm_v128_load(input);
        const v128_t vi4567x0 = wasm_v128_load(input + 4);
        const v128_t vi89ABx0 = wasm_v128_load(input + 8);
        const v128_t viCDEFx0 = wasm_v128_load(input + 12);
        const v128_t viGHIJx0 = wasm_v128_load(input + 16);
        const v128_t viKLMNx0 = wasm_v128_load(input + 20);
        const v128_t viOPQRx0 = wasm_v128_load(input + 24);
        const v128_t viSTUVx0 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
        const v128_t vw0 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
        vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
        vacc89ABx0 = wasm_f32x4_add(vacc89ABx0, wasm_f32x4_mul(vi89ABx0, vw0));
        vaccCDEFx0 = wasm_f32x4_add(vaccCDEFx0, wasm_f32x4_mul(viCDEFx0, vw0));
        vaccGHIJx0 = wasm_f32x4_add(vaccGHIJx0, wasm_f32x4_mul(viGHIJx0, vw0));
        vaccKLMNx0 = wasm_f32x4_add(vaccKLMNx0, wasm_f32x4_mul(viKLMNx0, vw0));
        vaccOPQRx0 = wasm_f32x4_add(vaccOPQRx0, wasm_f32x4_mul(viOPQRx0, vw0));
        vaccSTUVx0 = wasm_f32x4_add(vaccSTUVx0, wasm_f32x4_mul(viSTUVx0, vw0));
        const v128_t vi0123x1 = wasm_v128_load(input);
        const v128_t vi4567x1 = wasm_v128_load(input + 4);
        const v128_t vi89ABx1 = wasm_v128_load(input + 8);
        const v128_t viCDEFx1 = wasm_v128_load(input + 12);
        const v128_t viGHIJx1 = wasm_v128_load(input + 16);
        const v128_t viKLMNx1 = wasm_v128_load(input + 20);
        const v128_t viOPQRx1 = wasm_v128_load(input + 24);
        const v128_t viSTUVx1 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
        const v128_t vw1 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
        vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
        vacc89ABx1 = wasm_f32x4_add(vacc89ABx1, wasm_f32x4_mul(vi89ABx1, vw1));
        vaccCDEFx1 = wasm_f32x4_add(vaccCDEFx1, wasm_f32x4_mul(viCDEFx1, vw1));
        vaccGHIJx1 = wasm_f32x4_add(vaccGHIJx1, wasm_f32x4_mul(viGHIJx1, vw1));
        vaccKLMNx1 = wasm_f32x4_add(vaccKLMNx1, wasm_f32x4_mul(viKLMNx1, vw1));
        vaccOPQRx1 = wasm_f32x4_add(vaccOPQRx1, wasm_f32x4_mul(viOPQRx1, vw1));
        vaccSTUVx1 = wasm_f32x4_add(vaccSTUVx1, wasm_f32x4_mul(viSTUVx1, vw1));
        const v128_t vi0123x2 = wasm_v128_load(input);
        const v128_t vi4567x2 = wasm_v128_load(input + 4);
        const v128_t vi89ABx2 = wasm_v128_load(input + 8);
        const v128_t viCDEFx2 = wasm_v128_load(input + 12);
        const v128_t viGHIJx2 = wasm_v128_load(input + 16);
        const v128_t viKLMNx2 = wasm_v128_load(input + 20);
        const v128_t viOPQRx2 = wasm_v128_load(input + 24);
        const v128_t viSTUVx2 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
        const v128_t vw2 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
        vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
        vacc89ABx2 = wasm_f32x4_add(vacc89ABx2, wasm_f32x4_mul(vi89ABx2, vw2));
        vaccCDEFx2 = wasm_f32x4_add(vaccCDEFx2, wasm_f32x4_mul(viCDEFx2, vw2));
        vaccGHIJx2 = wasm_f32x4_add(vaccGHIJx2, wasm_f32x4_mul(viGHIJx2, vw2));
        vaccKLMNx2 = wasm_f32x4_add(vaccKLMNx2, wasm_f32x4_mul(viKLMNx2, vw2));
        vaccOPQRx2 = wasm_f32x4_add(vaccOPQRx2, wasm_f32x4_mul(viOPQRx2, vw2));
        vaccSTUVx2 = wasm_f32x4_add(vaccSTUVx2, wasm_f32x4_mul(viSTUVx2, vw2));
        const v128_t vi0123x3 = wasm_v128_load(input);
        const v128_t vi4567x3 = wasm_v128_load(input + 4);
        const v128_t vi89ABx3 = wasm_v128_load(input + 8);
        const v128_t viCDEFx3 = wasm_v128_load(input + 12);
        const v128_t viGHIJx3 = wasm_v128_load(input + 16);
        const v128_t viKLMNx3 = wasm_v128_load(input + 20);
        const v128_t viOPQRx3 = wasm_v128_load(input + 24);
        const v128_t viSTUVx3 = wasm_v128_load(input + 28);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
        const v128_t vw3 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
        vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
        vacc89ABx3 = wasm_f32x4_add(vacc89ABx3, wasm_f32x4_mul(vi89ABx3, vw3));
        vaccCDEFx3 = wasm_f32x4_add(vaccCDEFx3, wasm_f32x4_mul(viCDEFx3, vw3));
        vaccGHIJx3 = wasm_f32x4_add(vaccGHIJx3, wasm_f32x4_mul(viGHIJx3, vw3));
        vaccKLMNx3 = wasm_f32x4_add(vaccKLMNx3, wasm_f32x4_mul(viKLMNx3, vw3));
        vaccOPQRx3 = wasm_f32x4_add(vaccOPQRx3, wasm_f32x4_mul(viOPQRx3, vw3));
        vaccSTUVx3 = wasm_f32x4_add(vaccSTUVx3, wasm_f32x4_mul(viSTUVx3, vw3));
      }
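      // Reduce the four partial accumulator sets into one per register.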
      v128_t vacc0123 = vacc0123x0;
      v128_t vacc4567 = vacc4567x0;
      v128_t vacc89AB = vacc89ABx0;
      v128_t vaccCDEF = vaccCDEFx0;
      v128_t vaccGHIJ = vaccGHIJx0;
      v128_t vaccKLMN = vaccKLMNx0;
      v128_t vaccOPQR = vaccOPQRx0;
      v128_t vaccSTUV = vaccSTUVx0;
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx1);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx1);
      vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx1);
      vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx1);
      vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx1);
      vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx1);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx2);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx2);
      vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx2);
      vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx2);
      vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx2);
      vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx2);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
      vacc89AB = wasm_f32x4_add(vacc89AB, vacc89ABx3);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vaccCDEFx3);
      vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vaccGHIJx3);
      vaccKLMN = wasm_f32x4_add(vaccKLMN, vaccKLMNx3);
      vaccOPQR = wasm_f32x4_add(vaccOPQR, vaccOPQRx3);
      vaccSTUV = wasm_f32x4_add(vaccSTUV, vaccSTUVx3);
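      // Up to 3 non-zero weights remain; process them one at a time.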
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const v128_t vi0123 = wasm_v128_load(input);
          const v128_t vi4567 = wasm_v128_load(input + 4);
          const v128_t vi89AB = wasm_v128_load(input + 8);
          const v128_t viCDEF = wasm_v128_load(input + 12);
          const v128_t viGHIJ = wasm_v128_load(input + 16);
          const v128_t viKLMN = wasm_v128_load(input + 20);
          const v128_t viOPQR = wasm_v128_load(input + 24);
          const v128_t viSTUV = wasm_v128_load(input + 28);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          const v128_t vw = wasm_v128_load32_splat(w); w += 1;
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
          vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
          vaccGHIJ = wasm_f32x4_add(vaccGHIJ, wasm_f32x4_mul(viGHIJ, vw));
          vaccKLMN = wasm_f32x4_add(vaccKLMN, wasm_f32x4_mul(viKLMN, vw));
          vaccOPQR = wasm_f32x4_add(vaccOPQR, wasm_f32x4_mul(viOPQR, vw));
          vaccSTUV = wasm_f32x4_add(vaccSTUV, wasm_f32x4_mul(viSTUV, vw));
        } while (--nnz != 0);
      }
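      // Clamp to [min, max]. wasm_f32x4_pmin/pmax compute the pseudo
      // min/max (b < a ? b : a and b > a ? b : a), which map directly to
      // x86 MINPS/MAXPS; this appears to be the reason for the _x86 suffix
      // (sibling _arm kernels use wasm_f32x4_min/max instead).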
      v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
      v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
      v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
      v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
      v128_t voutGHIJ = wasm_f32x4_pmin(vmax, vaccGHIJ);
      v128_t voutKLMN = wasm_f32x4_pmin(vmax, vaccKLMN);
      v128_t voutOPQR = wasm_f32x4_pmin(vmax, vaccOPQR);
      v128_t voutSTUV = wasm_f32x4_pmin(vmax, vaccSTUV);
      vout0123 = wasm_f32x4_pmax(vmin, vout0123);
      vout4567 = wasm_f32x4_pmax(vmin, vout4567);
      vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
      voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
      voutGHIJ = wasm_f32x4_pmax(vmin, voutGHIJ);
      voutKLMN = wasm_f32x4_pmax(vmin, voutKLMN);
      voutOPQR = wasm_f32x4_pmax(vmin, voutOPQR);
      voutSTUV = wasm_f32x4_pmax(vmin, voutSTUV);
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      wasm_v128_store(output + 8, vout89AB);
      wasm_v128_store(output + 12, voutCDEF);
      wasm_v128_store(output + 16, voutGHIJ);
      wasm_v128_store(output + 20, voutKLMN);
      wasm_v128_store(output + 24, voutOPQR);
      wasm_v128_store(output + 28, voutSTUV);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 32;
    mc -= 32 * sizeof(float);
  }
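  // Tail: mc was not a multiple of 32 floats. Handle it with successively
  // smaller tiles of 16, 8, 4, 2 and 1 floats; each bit of mc selects at
  // most one tile, and output_decrement is re-adjusted for each tile width.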
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 16 * sizeof(float);
    if (mc & (16 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        v128_t vacc89AB = vacc0123;
        v128_t vaccCDEF = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            const v128_t vi89AB = wasm_v128_load(input + 8);
            const v128_t viCDEF = wasm_v128_load(input + 12);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
            vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
            vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
        v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
        v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        vout4567 = wasm_f32x4_pmax(vmin, vout4567);
        vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
        voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
        wasm_v128_store(output, vout0123);
        wasm_v128_store(output + 4, vout4567);
        wasm_v128_store(output + 8, vout89AB);
        wasm_v128_store(output + 12, voutCDEF);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 16;
    }
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        vout4567 = wasm_f32x4_pmax(vmin, vout4567);
        wasm_v128_store(output, vout0123);
        wasm_v128_store(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        wasm_v128_store(output, vout0123);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
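    // 2-float tile: both inputs are loaded with a single 64-bit splat, and
    // the two result lanes are stored back with one f64x2 lane extract.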
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v128_load64_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
        vout01 = wasm_f32x4_pmax(vmin, vout01);
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
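    // 1-float tile: scalar tail; the arithmetic still uses splatted SIMD
    // registers, and only lane 0 is stored.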
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v128_load32_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
        vout0 = wasm_f32x4_pmax(vmin, vout0);
        *output = wasm_f32x4_extract_lane(vout0, 0);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}