// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>


void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_x4(
    size_t mc,
    size_t nc,
    const float* restrict input,
    const float* restrict weights,
    const int32_t* restrict widx_dmap,
    const uint32_t* restrict nidx_nnzmap,
    float* restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

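  // Broadcast the output clamping bounds (min and max) into SIMD registers.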
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  size_t output_decrement = output_stride * nc - 8 * sizeof(float);
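  // Main loop: process the MC dimension 8 elements at a time.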
  while XNN_LIKELY(mc >= 8 * sizeof(float)) {
    const float* restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      v128_t vacc0123x0 = wasm_v128_load32_splat(w);
      w += 1;
      v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc4567x0 = vacc0123x0;
      v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
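      // Loop over the nonzero weights of this output channel, unrolled by 4;
      // four independent accumulator pairs break up the multiply-add dependency chain.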
      for (; nnz >= 4; nnz -= 4) {
        const intptr_t diff0 = dmap[0];
        const intptr_t diff1 = dmap[1];
        const intptr_t diff2 = dmap[2];
        const intptr_t diff3 = dmap[3];
        dmap += 4;
        const v128_t vi0123x0 = wasm_v128_load(input);
        const v128_t vi4567x0 = wasm_v128_load(input + 4);
        input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff0);
        const v128_t vw0 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
        vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
        const v128_t vi0123x1 = wasm_v128_load(input);
        const v128_t vi4567x1 = wasm_v128_load(input + 4);
        input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff1);
        const v128_t vw1 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
        vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
        const v128_t vi0123x2 = wasm_v128_load(input);
        const v128_t vi4567x2 = wasm_v128_load(input + 4);
        input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff2);
        const v128_t vw2 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
        vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
        const v128_t vi0123x3 = wasm_v128_load(input);
        const v128_t vi4567x3 = wasm_v128_load(input + 4);
        input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff3);
        const v128_t vw3 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
        vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
      }
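      // Sum the four partial accumulators into a single accumulator pair.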
      v128_t vacc0123 = vacc0123x0;
      v128_t vacc4567 = vacc4567x0;
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
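      // Process the remaining (nnz % 4) nonzero weights one at a time.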
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const v128_t vi0123 = wasm_v128_load(input);
          const v128_t vi4567 = wasm_v128_load(input + 4);
          input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff);
          const v128_t vw = wasm_v128_load32_splat(w); w += 1;
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
        } while (--nnz != 0);
      }
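      // Clamp the results to [min, max] and store 8 outputs for this output channel.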
      v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
      v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
      vout0123 = wasm_f32x4_max(vmin, vout0123);
      vout4567 = wasm_f32x4_max(vmin, vout4567);
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      output = (float* restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float* restrict) ((uintptr_t) output - output_decrement);
    input += 8;
    mc -= 8 * sizeof(float);
  }
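  // Tail handling: process any leftover MC elements in blocks of 4, 2, and 1.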
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 4 * sizeof(float);
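    // Process a block of 4 leftover elements.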
    if (mc & (4 * sizeof(float))) {
      const float* restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
        vout0123 = wasm_f32x4_max(vmin, vout0123);
        wasm_v128_store(output, vout0123);

        output = (float* restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float* restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
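    // Process a block of 2 leftover elements.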
    if (mc & (2 * sizeof(float))) {
      const float* restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v128_load64_splat(input);
            input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
        vout01 = wasm_f32x4_max(vmin, vout01);
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float* restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float* restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
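    // Process the final leftover element.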
    if (mc & (1 * sizeof(float))) {
      const float* restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v128_load32_splat(input);
            input = (const float* restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
        vout0 = wasm_f32x4_max(vmin, vout0);
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float* restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float* restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}