// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>


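// 3x16 IGEMM (indirect GEMM) micro-kernel with fused min/max clamping: computes
// up to 3 rows x 16 columns of output from an indirection buffer of A-row
// pointers and packed weights. "s4" refers to the shuffle-by-4 weight layout of
// the avx-shuffle4 template, and "broadcast" to loading each 4-float group of A
// into both 128-bit lanes via _mm256_broadcast_ps.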
void xnn_f32_igemm_minmax_ukernel_3x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

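  // Output row pointers for the 3-row micro-tile; when mr < 3, unused rows
  // alias the preceding row, so their stores are harmless duplicates.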
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

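  // Column-block loop: each iteration handles up to 16 output columns. The
  // packed weights start with 16 bias values, which seed the row-0 accumulators
  // and are copied to the other rows.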
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(w);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
    w += 16;

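    // Walk the indirection buffer: each step supplies one A pointer per row.
    // Pointers equal to `zero` reference the shared zero buffer (padding) and
    // are not displaced by a_offset.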
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

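      // Main loop: consume 4 k-elements per iteration. Each 4-float group of A
      // is broadcast to both 128-bit lanes and then rotated across the 4 packed
      // B panels below.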
      size_t k = kc;
      while (k >= 4 * sizeof(float)) {
        __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
        a0 += 4;
        __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
        a1 += 4;
        __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
        a2 += 4;


        const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
        const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);

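        // Rotate each 128-bit lane of the A registers left by one element so
        // that the next packed B panel multiplies the next k-element.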
        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
        const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
        const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
        const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);


        w += 64;
        k -= 4 * sizeof(float);
      }
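      // Remainder (kc % 4 != 0): reuse the 4-panel sequence, but zero every A
      // lane whose (zero-padded) B lane is zero, so values read past the end of
      // the row (see XNN_OOB_READS) cannot reach the accumulators, even if they
      // are Inf or NaN.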
      if XNN_UNLIKELY(k != 0) {
        __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
        a0 = (const float*) ((uintptr_t) a0 + k);
        __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
        a1 = (const float*) ((uintptr_t) a1 + k);
        __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
        a2 = (const float*) ((uintptr_t) a2 + k);

        const __m256 vzero = _mm256_setzero_ps();

        const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
        const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc2x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
        const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc2x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
        const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc2x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
        const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc2x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc2x89ABCDEF);


        w += 64;
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

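    // Fused activation: clamp all accumulators to [min, max] from the params.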
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);

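    // Full-width path: store all 16 columns of the 3 rows (bottom row first),
    // then rewind the indirection buffer by ks for the next column block.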
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
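      // Tail path for nc < 16: store progressively halved chunks of 8, 4, 2,
      // and 1 columns, shifting the surviving lanes down after each store.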
      if (nc & 8) {
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}