// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

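// GEMM microkernel with input accumulation ("gemminc"): updates a tile of up
// to 3 rows x 16 columns of C, starting from the partial sums in `acc` and
// clamping the result to the [min, max] range in `params`. The "s4" suffix
// refers to the shuffle-by-4 scheme used in the K loop below: four
// consecutive A elements are broadcast into both 128-bit lanes of a YMM
// register and rotated between FMA rounds, with the packed B panel laid out
// to match.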
void xnn_f32_gemminc_minmax_ukernel_3x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float* restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

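  // Derive per-row A/C pointers. When mr < 3, the pointers for the absent
  // rows alias the previous row, so those rows redundantly recompute the
  // same results instead of touching memory out of range.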
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

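  // Outer loop: each iteration produces up to 16 output columns.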
  do {
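    // Load the 3x16 tile of partial accumulators supplied by the caller;
    // gemminc kernels resume from `acc` rather than initializing from the
    // packed bias.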
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
    __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
    acc += 48;

    size_t k = kc;
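    // Main K loop: consume 4 floats of A per row per iteration. One
    // _mm256_broadcast_ps copies 4 A elements into both 128-bit lanes; each
    // of the 4 FMA rounds then multiplies against a pre-shuffled group of B
    // columns, and _mm256_permute_ps rotates A by one element between rounds.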
    while (k >= 4 * sizeof(float)) {
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 += 4;
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 += 4;
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 += 4;

      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);

      w += 64;
      k -= 4 * sizeof(float);
    }
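    // Remainder: 1 to 3 K elements are left. The same four shuffled FMA
    // rounds run, but each A vector is first masked to zero wherever the
    // corresponding B value is zero (the packed B panel zero-pads the K
    // remainder), so the out-of-bounds A reads permitted by XNN_OOB_READS
    // cannot inject NaN/Inf into the accumulators via 0 * garbage products.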
    if XNN_UNLIKELY(k != 0) {
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 = (const float*) ((uintptr_t) a2 + k);

      const __m256 vzero = _mm256_setzero_ps();

      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc2x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc2x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc2x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc2x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc2x89ABCDEF);

      w += 64;
    }

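    // Apply the min/max activation clamp.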
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);

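    // Full-width store path: write all 16 columns per row, then rewind the
    // A pointers by kc bytes so the same rows can be reused for the next
    // block of columns.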
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
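      // Tail: fewer than 16 columns remain. Store progressively narrower
      // pieces (8, 4, 2, 1 floats), shifting the surviving accumulator
      // values into the low lanes after each step.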
      if (nc & 8) {
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}