// Auto-generated file. Do not edit!
//   Template: src/f32-spmm/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>

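// 8x1 sparse-matrix times dense-matrix multiplication (SpMM) micro-kernel with
// min/max clamping: processes the mc dimension in tiles of 8 floats, walking the
// non-zero weights via widx_dmap (input pointer deltas, in bytes) and
// nidx_nnzmap (per-output-channel non-zero counts), with the inner non-zero
// loop unrolled by 2.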
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_x2(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  size_t output_decrement = output_stride * nc - 8 * sizeof(float);
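  // Main loop: process the mc dimension in tiles of 8 floats.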
  while XNN_LIKELY(mc >= 8 * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
    size_t n = nc;
    do {
      uint32_t nnz = *nnzmap++;
      v128_t vacc0123x0 = wasm_v128_load32_splat(w);
      w += 1;
      v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
      v128_t vacc4567x0 = vacc0123x0;
      v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
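      // Non-zero loop, unrolled by 2: each unrolled step uses its own
      // accumulator set (x0 and x1) so the adds are independent.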
      for (; nnz >= 2; nnz -= 2) {
        const intptr_t diff0 = dmap[0];
        const intptr_t diff1 = dmap[1];
        dmap += 2;
        const v128_t vi0123x0 = wasm_v128_load(input);
        const v128_t vi4567x0 = wasm_v128_load(input + 4);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
        const v128_t vw0 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
        vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
        const v128_t vi0123x1 = wasm_v128_load(input);
        const v128_t vi4567x1 = wasm_v128_load(input + 4);
        input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
        const v128_t vw1 = wasm_v128_load32_splat(w);
        w += 1;
        vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
        vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
      }
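      // Merge the two partial accumulator sets.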
      v128_t vacc0123 = vacc0123x0;
      v128_t vacc4567 = vacc4567x0;
      vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
      vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
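      // Process the remaining (odd) non-zero weight, if any.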
      if XNN_LIKELY(nnz != 0) {
        do {
          const intptr_t diff = *dmap++;
          const v128_t vi0123 = wasm_v128_load(input);
          const v128_t vi4567 = wasm_v128_load(input + 4);
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          const v128_t vw = wasm_v128_load32_splat(w); w += 1;
          vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
        } while (--nnz != 0);
      }
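      // Clamp the accumulators to [min, max] and store 8 outputs.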
      v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
      v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
      vout0123 = wasm_f32x4_pmax(vmin, vout0123);
      vout4567 = wasm_f32x4_pmax(vmin, vout4567);
      wasm_v128_store(output, vout0123);
      wasm_v128_store(output + 4, vout4567);
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += 8;
    mc -= 8 * sizeof(float);
  }
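  // Remainder: handle the last elements when mc is not a multiple of 8,
  // using progressively smaller tiles of 4, 2, and 1 floats.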
  if XNN_UNLIKELY(mc != 0) {
    output_decrement += 4 * sizeof(float);
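    // 4-float tile.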
    if (mc & (4 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0123 = wasm_v128_load(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
          } while (--nnz != 0);
        }
        v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
        vout0123 = wasm_f32x4_pmax(vmin, vout0123);
        wasm_v128_store(output, vout0123);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 4;
    }
    output_decrement += 2 * sizeof(float);
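    // 2-float tile.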
    if (mc & (2 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi01 = wasm_v128_load64_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
          } while (--nnz != 0);
        }
        v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
        vout01 = wasm_f32x4_pmax(vmin, vout01);
        *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 2;
    }
    output_decrement += 1 * sizeof(float);
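    // 1-float tile.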
    if (mc & (1 * sizeof(float))) {
      const float*restrict w = weights;
      const int32_t* dmap = widx_dmap;
      const uint32_t* nnzmap = nidx_nnzmap;
      size_t n = nc;
      do {
        uint32_t nnz = *nnzmap++;
        v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
        if XNN_LIKELY(nnz != 0) {
          do {
            const intptr_t diff = *dmap++;
            const v128_t vi0 = wasm_v128_load32_splat(input);
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            const v128_t vw = wasm_v128_load32_splat(w); w += 1;
            vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
          } while (--nnz != 0);
        }
        v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
        vout0 = wasm_f32x4_pmax(vmin, vout0);
        *output = wasm_f32x4_extract_lane(vout0, 0);

        output = (float*restrict) ((uintptr_t) output + output_stride);
      } while (--n != 0);
      output = (float*restrict) ((uintptr_t) output - output_decrement);
      input += 1;
    }
  }
}