// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>

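// 4x8 GEMM micro-kernel, s4 "shuffle" variant: computes a 4-row by 8-column
// tile of C = clamp(A * B + bias) in single precision using SSE. Instead of
// broadcasting one A element per multiply, each step multiplies a whole
// 4-lane A register against a B panel packed for this rotation, then rotates
// the A lanes for the next step.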
void xnn_f32_gemm_minmax_ukernel_4x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

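  // Per-row A/C pointers. When mr < 4, the pointers of the unused rows alias
  // the last valid row, so the kernel computes (and harmlessly re-stores)
  // duplicate rows instead of branching inside the hot loop.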
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
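    // Initialize all eight accumulators from the bias values packed at the
    // start of w; rows 1-3 start from the same bias as row 0.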
    __m128 vacc0x0123 = _mm_load_ps(w + 0);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    __m128 vacc3x0123 = vacc0x0123;
    __m128 vacc3x4567 = vacc0x4567;
    w += 8;

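    // Main loop over K, 4 elements at a time: each of the 4 steps multiplies
    // the A registers by one packed 8-wide B panel, then rotates the A lanes
    // left by one (_MM_SHUFFLE(0, 3, 2, 1)) so the next lane lines up.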
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;
      __m128 va1 = _mm_loadu_ps(a1);
      a1 += 4;
      __m128 va2 = _mm_loadu_ps(a2);
      a2 += 4;
      __m128 va3 = _mm_loadu_ps(a3);
      a3 += 4;


      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));


      w += 32;
      k -= 4 * sizeof(float);
    }
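    // K remainder (1..3 floats): a full 4-lane load is used (XNN_OOB_READS
    // permits reading past the row end) and the A pointers advance by only k
    // bytes. A lanes are masked to zero wherever the packed weight is zero,
    // so the zero-padded tail of w cannot pull garbage (or Inf/NaN) into the
    // accumulators.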
    if XNN_UNLIKELY(k != 0) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      __m128 va1 = _mm_loadu_ps(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      __m128 va2 = _mm_loadu_ps(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      __m128 va3 = _mm_loadu_ps(a3);
      a3 = (const float*) ((uintptr_t) a3 + k);


      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
      vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
      vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));


      w += 32;
    }

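    // Apply the output clamp: min() against params->sse.max, then max()
    // against params->sse.min.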
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);

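    // Store the 4x8 output tile. For a full tile (nc >= 8) the rows are
    // written highest-first and the A pointers rewind by kc for the next
    // column block; otherwise the nc remainder is written in 4/2/1-column
    // pieces, shifting the surviving lanes down after each partial store.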
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c3, vacc3x0123);
      _mm_storeu_ps(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}