// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/wasmsimd-s4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

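// 3x8 GEMM microkernel with ReLU activation, using WebAssembly SIMD.
// Computes up to 3 rows by 8 columns of the output C from A (mr x kc rows)
// and packed weights w (bias followed by interleaved B panels). The "s4"
// scheme loads 4 floats of A at a time and rotates the vector one lane
// after each of 4 multiply steps, so every A element meets 4 consecutive
// packed-B vectors without scalar broadcasts.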
void xnn_f32_gemm_relu_ukernel_3x8s4__wasmsimd(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

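  // Set up per-row input and output pointers. When fewer than 3 rows are
  // requested, the unused row pointers alias the previous row, so the
  // redundant computation stays in bounds and its stores are harmless.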
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
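    // Initialize all accumulators with the bias, stored as the first
    // 8 floats of the packed weights, broadcast to every row.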
    v128_t vacc0x0123 = wasm_v128_load(w + 0);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
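    // Main loop: process 4 elements of K per iteration. After each of the
    // 4 multiply steps the A vectors are rotated one lane left
    // (shuffle 1, 2, 3, 0) to line up with the next pair of packed-B
    // vectors; a full iteration consumes 32 floats of w.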
    while (k >= 4 * sizeof(float)) {
      v128_t va0 = wasm_v128_load(a0);
      a0 += 4;
      v128_t va1 = wasm_v128_load(a1);
      a1 += 4;
      v128_t va2 = wasm_v128_load(a2);
      a2 += 4;

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c0));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
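    // Remainder: kc is not a multiple of 4 floats. The trailing packed
    // weights are zero-padded, so A lanes that fall past the end of the
    // row are zeroed out (andnot with the b == 0 mask) to keep garbage
    // values, including NaN * 0, out of the accumulators.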
    if XNN_UNLIKELY(k != 0) {
      v128_t va0 = wasm_v128_load(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      v128_t va1 = wasm_v128_load(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      v128_t va2 = wasm_v128_load(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);

      const v128_t vb0123c0 = wasm_v128_load(w + 0);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c0, vzero)), vb0123c0));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c0, vzero)), vb4567c0));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

      const v128_t vb0123c1 = wasm_v128_load(w + 8);
      const v128_t vb4567c1 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c1, vzero)), vb0123c1));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c1, vzero)), vb4567c1));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

      const v128_t vb0123c2 = wasm_v128_load(w + 16);
      const v128_t vb4567c2 = wasm_v128_load(w + 20);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c2, vzero)), vb0123c2));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c2, vzero)), vb4567c2));

      va0 = wasm_v32x4_shuffle(va0, va0, 1, 2, 3, 0);
      va1 = wasm_v32x4_shuffle(va1, va1, 1, 2, 3, 0);
      va2 = wasm_v32x4_shuffle(va2, va2, 1, 2, 3, 0);

      const v128_t vb0123c3 = wasm_v128_load(w + 24);
      const v128_t vb4567c3 = wasm_v128_load(w + 28);

      vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb0123c3, vzero)), vb0123c3));
      vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(wasm_v128_andnot(va0, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(wasm_v128_andnot(va1, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));
      vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(wasm_v128_andnot(va2, wasm_f32x4_eq(vb4567c3, vzero)), vb4567c3));

      w += 32;
    }

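    // ReLU: clamp negative accumulators to zero. A signed integer max
    // against zero works on IEEE-754 floats, because any negative float
    // has its sign bit set and therefore compares below zero as a signed
    // integer, while non-negative floats compare as non-negative integers.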
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);

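    // Common case: store a full 3x8 tile, then rewind the A pointers to the
    // start of each row for the next 8-column block.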
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
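      // Partial tile: write the remaining nc (< 8) columns in chunks of 4,
      // 2, and 1 floats, shifting the surviving lanes down after each
      // store. The 2-element store reinterprets the low two float lanes as
      // one double so they go out in a single 64-bit write.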
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}