// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

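// 8x8 GEMM microkernel: computes an 8-row by 8-column tile of the output
// using FMA3, broadcasting one element of A at a time against a packed
// 8-wide row of weights. The "inc" variant initializes the accumulators
// from the acc buffer (partial results, e.g. from an earlier pass over a
// slice of K) instead of starting fresh.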
void xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

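  // Set up per-row pointers into A and C. When mr < 8, pointers for the
  // absent rows alias the previous row, so their loads and stores are
  // redundant but stay within valid memory.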
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

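  // Outer loop: produce the output in column blocks of up to 8 elements.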
  do {
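    // Initialize the 8x8 accumulator tile from the caller-provided acc buffer.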
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
    __m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
    __m256 vacc5x01234567 = _mm256_load_ps(acc + 40);
    __m256 vacc6x01234567 = _mm256_load_ps(acc + 48);
    __m256 vacc7x01234567 = _mm256_load_ps(acc + 56);
    acc += 64;

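    // Inner loop over K: broadcast one float from each of the 8 rows of A
    // and fuse a multiply-add against the same 8-wide row of packed weights.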
    size_t k = kc;
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5);
      a5 += 1;
      const __m256 va6 = _mm256_broadcast_ss(a6);
      a6 += 1;
      const __m256 va7 = _mm256_broadcast_ss(a7);
      a7 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
      vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
      vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
      vacc7x01234567 = _mm256_fmadd_ps(va7, vb01234567, vacc7x01234567);

      k -= sizeof(float);
    } while (k != 0);

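    // Clamp the accumulators to the [min, max] output range from params.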
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);
    vacc7x01234567 = _mm256_max_ps(vacc7x01234567, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);
    vacc7x01234567 = _mm256_min_ps(vacc7x01234567, vmax);

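    // Common case: a full 8-column block. Store all 8 rows, advance the C
    // pointers to the next block, and rewind the A pointers by kc bytes for
    // the next pass over the same rows.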
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c7, vacc7x01234567);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a7 = (const float*) ((uintptr_t) a7 - kc);
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
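      // Remainder: fewer than 8 columns left. Start from the low 128-bit
      // half of each accumulator, then issue 4-, 2-, and 1-element stores
      // driven by the bits of nc, shifting the remaining lanes down as we go.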
      __m128 vacc7x0123 = _mm256_castps256_ps128(vacc7x01234567);
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c7, vacc7x0123);
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc7x0123 = _mm256_extractf128_ps(vacc7x01234567, 1);
        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c7 += 4;
        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c7, vacc7x0123);
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc7x0123 = _mm_movehl_ps(vacc7x0123, vacc7x0123);
        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c7 += 2;
        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c7, vacc7x0123);
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}