// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd-pipelined.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>

void xnn_f32_spmm_minmax_ukernel_16x1__wasmsimd_x86_pipelined(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

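  // Output clamping bounds from the minmax params, duplicated across all SIMD lanes.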
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  size_t output_decrement = output_stride * nc - 16 * sizeof(float);
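  // Main loop: process the M dimension in 16-float tiles. After all nc output
  // channels of a tile have been written, output_decrement rewinds the output
  // pointer back to the first channel, advanced past the 16 floats just written.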
  while XNN_LIKELY(mc >= 16 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    v128_t vw = wasm_v128_load32_splat(w); w += 1;
    intptr_t diff = *dmap++;
    v128_t vi0123 = wasm_v128_load(input + 0);
    v128_t vi4567 = wasm_v128_load(input + 4);
    v128_t vi89AB = wasm_v128_load(input + 8);
    v128_t viCDEF = wasm_v128_load(input + 12);
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      v128_t vacc0123 = vw;
      v128_t vacc4567 = vw;
      v128_t vacc89AB = vw;
      v128_t vaccCDEF = vw;
      vw = wasm_v128_load32_splat(w); w += 1;

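      // Software-pipelined inner loop: the multiply-adds consume the weight and
      // inputs loaded on the previous iteration, while the loads for the next
      // weight, index delta, and inputs are issued before the loop repeats.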
      if XNN_LIKELY(nnz != 0) {
        do {
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          vacc89AB = wasm_f32x4_add(vacc89AB, wasm_f32x4_mul(vi89AB, vw));
          vaccCDEF = wasm_f32x4_add(vaccCDEF, wasm_f32x4_mul(viCDEF, vw));
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);

          diff = *dmap++;
          vw = wasm_v128_load32_splat(w); w += 1;
          vi0123 = wasm_v128_load(input + 0);
          vi4567 = wasm_v128_load(input + 4);
          vi89AB = wasm_v128_load(input + 8);
          viCDEF = wasm_v128_load(input + 12);
        } while (--nnz != 0);
      }
      v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
      v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
      v128_t vout89AB = wasm_f32x4_pmin(vmax, vacc89AB);
      v128_t voutCDEF = wasm_f32x4_pmin(vmax, vaccCDEF);
      vout0123 = wasm_f32x4_pmax(vmin, vout0123);
      vout4567 = wasm_f32x4_pmax(vmin, vout4567);
      vout89AB = wasm_f32x4_pmax(vmin, vout89AB);
      voutCDEF = wasm_f32x4_pmax(vmin, voutCDEF);
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      wasm_v128_store(output + 8, vout89AB);
      wasm_v128_store(output + 12, voutCDEF);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 16;
    mc -= 16 * sizeof(float);
  }
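  // Handle the remainder of the M dimension with progressively smaller tiles
  // (8, 4, 2, then 1 floats). These tiles are not software-pipelined.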
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 8 * sizeof(float);
    if (mc & (8 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        v128_t vacc4567 = vacc0123;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            const v128_t vi4567 = wasm_v128_load(input + 4);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
            vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        vout4567 = wasm_f32x4_pmax(vmin, vout4567);
        wasm_v128_store(output, vout0123);

        wasm_v128_store(output + 4, vout4567);
        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 8;
    }
    output_decrement += 4 * sizeof(float);
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        wasm_v128_store(output, vout0123);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v128_load64_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
        vout01 = wasm_f32x4_pmax(vmin, vout01);
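        // Store the two result floats with a single 64-bit write.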
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v128_load32_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
        vout0 = wasm_f32x4_pmax(vmin, vout0);
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}