// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


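// Computes an up-to-3-row by up-to-8-column tile of the output using indirect
// input pointers (IGEMM): `a` supplies groups of 3 input-row pointers, `w`
// holds packed initialization (bias) and weight data, and a ReLU (max with
// zero) is applied before the results are stored to `c`.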
void xnn_f32_igemm_relu_ukernel_3x8__wasmsimd_splat(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

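  // Output row pointers; if fewer than 3 rows are requested (mr < 3), the
  // out-of-range pointers alias the previous row so their stores are harmless.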
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

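  // Outer loop: one iteration per block of up to 8 output columns (nc).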
  do {
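    // The packed weights begin with 8 per-column initialization (bias) values;
    // load them into the row-0 accumulators and duplicate them for rows 1-2.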
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc0x4567 = wasm_v128_load(w + 4);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc1x4567 = vacc0x4567;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc2x4567 = vacc0x4567;
    w += 8;

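    // Loop over the indirection buffer: each iteration consumes 3 input-row
    // pointers; a pointer equal to `zero` selects the zero buffer and is not
    // adjusted by a_offset.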
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
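      // Main loop: process 4 elements of k per iteration. Each input lane is
      // splatted and multiply-accumulated against two 4-wide weight vectors
      // (columns 0-3 and 4-7), consuming 32 packed weight floats per iteration.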
      while (k >= 4 * sizeof(float)) {
        const v128_t va0 = wasm_v128_load(a0);
        a0 += 4;
        const v128_t va1 = wasm_v128_load(a1);
        a1 += 4;
        const v128_t va2 = wasm_v128_load(a2);
        a2 += 4;

        const v128_t va0c0 = wasm_v32x4_shuffle(va0, va0, 0, 0, 0, 0);
        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
        const v128_t va2c0 = wasm_v32x4_shuffle(va2, va2, 0, 0, 0, 0);

        const v128_t vb0123c0 = wasm_v128_load(w + 0);
        const v128_t vb4567c0 = wasm_v128_load(w + 4);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c0, vb0123c0));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c0, vb0123c0));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c0, vb0123c0));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c0, vb4567c0));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c0, vb4567c0));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c0, vb4567c0));
        const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);
        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
        const v128_t va2c1 = wasm_v32x4_shuffle(va2, va2, 1, 1, 1, 1);

        const v128_t vb0123c1 = wasm_v128_load(w + 8);
        const v128_t vb4567c1 = wasm_v128_load(w + 12);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c1, vb0123c1));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c1, vb0123c1));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c1, vb4567c1));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c1, vb4567c1));
        const v128_t va0c2 = wasm_v32x4_shuffle(va0, va0, 2, 2, 2, 2);
        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
        const v128_t va2c2 = wasm_v32x4_shuffle(va2, va2, 2, 2, 2, 2);

        const v128_t vb0123c2 = wasm_v128_load(w + 16);
        const v128_t vb4567c2 = wasm_v128_load(w + 20);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c2, vb0123c2));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c2, vb0123c2));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c2, vb4567c2));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c2, vb4567c2));
        const v128_t va0c3 = wasm_v32x4_shuffle(va0, va0, 3, 3, 3, 3);
        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);
        const v128_t va2c3 = wasm_v32x4_shuffle(va2, va2, 3, 3, 3, 3);

        const v128_t vb0123c3 = wasm_v128_load(w + 24);
        const v128_t vb4567c3 = wasm_v128_load(w + 28);

        vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c3, vb0123c3));
        vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c3, vb0123c3));
        vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2c3, vb0123c3));
        vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c3, vb4567c3));
        vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c3, vb4567c3));
        vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2c3, vb4567c3));

        w += 32;
        k -= 4 * sizeof(float);
      }
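      // Remainder: handle the last kc % 4 elements one at a time, broadcasting
      // each input element to all 4 lanes with a single-element splat load.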
      if XNN_UNLIKELY(k != 0) {
        do {
          const v128_t vb0123 = wasm_v128_load(w);
          const v128_t vb4567 = wasm_v128_load(w + 4);
          w += 8;

          const v128_t va0 = wasm_v128_load32_splat(a0);
          a0 += 1;
          const v128_t va1 = wasm_v128_load32_splat(a1);
          a1 += 1;
          const v128_t va2 = wasm_v128_load32_splat(a2);
          a2 += 1;

          vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0, vb0123));
          vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0, vb4567));
          vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1, vb0123));
          vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1, vb4567));
          vacc2x0123 = wasm_f32x4_add(vacc2x0123, wasm_f32x4_mul(va2, vb0123));
          vacc2x4567 = wasm_f32x4_add(vacc2x4567, wasm_f32x4_mul(va2, vb4567));
          k -= sizeof(float);
        } while (k != 0);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

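    // ReLU: interpreting the IEEE-754 bit patterns as signed 32-bit integers,
    // max with 0 clamps every negative float (sign bit set) to +0 while
    // leaving non-negative values unchanged.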
    const v128_t vzero = wasm_i32x4_const_splat(0);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vzero);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vzero);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vzero);
    vacc0x4567 = wasm_i32x4_max(vacc0x4567, vzero);
    vacc1x4567 = wasm_i32x4_max(vacc1x4567, vzero);
    vacc2x4567 = wasm_i32x4_max(vacc2x4567, vzero);

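    // Store the tile. For a full block of 8 columns, write both vectors per
    // row, advance the output pointers by cn_stride, and rewind `a` by ks
    // bytes so the same indirection entries are reused for the next block;
    // otherwise store the remaining 4/2/1 columns and finish.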
    if XNN_LIKELY(nc >= 8) {
      wasm_v128_store(c2, vacc2x0123);
      wasm_v128_store(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      wasm_v128_store(c1, vacc1x0123);
      wasm_v128_store(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      wasm_v128_store(c0, vacc0x0123);
      wasm_v128_store(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
      if (nc & 4) {
        wasm_v128_store(c2, vacc2x0123);
        wasm_v128_store(c1, vacc1x0123);
        wasm_v128_store(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        *((double*) c2) = wasm_f64x2_extract_lane(vacc2x0123, 0);
        *((double*) c1) = wasm_f64x2_extract_lane(vacc1x0123, 0);
        *((double*) c0) = wasm_f64x2_extract_lane(vacc0x0123, 0);

        vacc2x0123 = wasm_v32x4_shuffle(vacc2x0123, vacc2x0123, 2, 3, 2, 3);
        vacc1x0123 = wasm_v32x4_shuffle(vacc1x0123, vacc1x0123, 2, 3, 2, 3);
        vacc0x0123 = wasm_v32x4_shuffle(vacc0x0123, vacc0x0123, 2, 3, 2, 3);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c2 = wasm_f32x4_extract_lane(vacc2x0123, 0);
        *c1 = wasm_f32x4_extract_lane(vacc1x0123, 0);
        *c0 = wasm_f32x4_extract_lane(vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}