// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>  // size_t
#include <stdint.h>  // uintptr_t

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

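// Computes a 1x8 tile of C = clamp(A * W, min, max) in single precision,
// starting from the packed bias values. The "s4" suffix denotes the shuffled
// weight layout: weights are packed in groups of 4 K-elements so that one
// 4-lane register of A can be reused across all 4 groups via lane rotations
// instead of per-element broadcasts. Targets WebAssembly Relaxed SIMD and its
// FMA and relaxed min/max builtins.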
void xnn_f32_gemm_minmax_ukernel_1x8s4__wasmrelaxedsimd_fma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

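  // mr == 1: the kernel reads a single row of A and writes a single row of C,
  // so a_stride and cm_stride go unused below.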
  const float* a0 = a;
  float* c0 = c;

  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
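  // Outer loop: produce one 1x8 output tile per iteration. The accumulators
  // start from the bias values packed at the front of each weight block.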
  do {
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    w += 8;

    size_t k = kc;
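    // Main loop: consume 4 K-elements per iteration -- one 4-lane load of A
    // and 32 packed weights (4 shuffled groups of 8 columns each).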
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, va0, vb0123c0);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, va0, vb4567c0);

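      // Rotate the A lanes left by one (1,2,3,0). The s4 packing applies the
      // matching rotation to the weights, so each A element still multiplies
      // the weight of its own K index.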
      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, va0, vb0123c1);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, va0, vb4567c1);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, va0, vb0123c2);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, va0, vb4567c2);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, va0, vb0123c3);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, va0, vb4567c3);

      w += 32;
      k -= 4 * sizeof(float);
    }
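    // Remainder: 1-3 K-elements left when kc is not a multiple of 4. The
    // packed weights are zero-padded out to a full group of 4.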
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

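      // Zero the A lanes wherever the corresponding weight lane is zero:
      // andnot(va0, vb == 0) turns each padded product into 0 * 0 = 0, so the
      // lanes of va0 beyond the valid K range cannot contaminate the
      // accumulators (e.g. by multiplying Inf or NaN against a padded zero).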
      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2);

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = __builtin_wasm_fma_f32x4(vacc0x0123, wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3);
      vacc0x4567 = __builtin_wasm_fma_f32x4(vacc0x4567, wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3);

      w += 32;
    }

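    // Clamp the accumulators to [min, max] with the relaxed min/max builtins,
    // whose results are implementation-defined for NaN and -0.0 inputs,
    // letting them lower to a single native instruction.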
    vacc0x0123 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_max_f32x4(vmin, vacc0x4567);

    vacc0x0123 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x0123);
    vacc0x4567 = __builtin_wasm_relaxed_min_f32x4(vmax, vacc0x4567);

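    // Full tile: store all 8 outputs, advance c0 to the next column block, and
    // rewind a0 to the start of the row for the next pass over K.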
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
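      // Partial tile: store 4, 2, and/or 1 trailing columns, shifting the
      // already-stored lanes out of the accumulator at each step.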
      if (nc & 4) {
        wasm_v128_store(c0, vacc0x0123);

        vacc0x0123 = vacc0x4567;

        c0 += 4;
      }
      if (nc & 2) {
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c0 += 2;
      }
      if (nc & 1) {
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
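
// A minimal sketch of a hypothetical call site (not part of the generated
// kernel). It assumes packed_w already holds the bias followed by the shuffled
// s4 weight groups consumed above, and that params->wasmsimd.min/max are
// two-element float arrays, as implied by the wasm_v128_load64_splat() calls:
//
//   union xnn_f32_minmax_params params;
//   params.wasmsimd.min[0] = params.wasmsimd.min[1] = 0.0f;  // e.g. ReLU lower bound
//   params.wasmsimd.max[0] = params.wasmsimd.max[1] = 6.0f;  // e.g. ReLU6 upper bound
//   xnn_f32_gemm_minmax_ukernel_1x8s4__wasmrelaxedsimd_fma(
//       /*mr=*/1, /*nc=*/n, /*kc=*/k * sizeof(float),
//       /*a=*/a, /*a_stride=*/k * sizeof(float),
//       /*w=*/packed_w,
//       /*c=*/c, /*cm_stride=*/n * sizeof(float), /*cn_stride=*/8 * sizeof(float),
//       &params);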