// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>

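// IGEMM (indirect GEMM) microkernel: computes a 6-row by 16-column tile of
// 32-bit floating-point output with AVX-512F FMA, broadcasting one input
// element at a time, and clamps the result to the [min, max] range in params.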
void xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

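  // Set up one output row pointer per MR row. When mr < 6, pointers for the
  // unused rows alias the last valid row, so the kernel can unconditionally
  // compute and store 6 rows without writing out of bounds.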
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

  do {
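    // Initialize all 6 row accumulators from the first 16 packed weights
    // (the per-channel bias values), then advance w past them.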
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

    size_t p = ks;
    do {
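      // Each pass consumes 6 input row pointers from the indirection buffer.
      // Pointers equal to `zero` designate the padding row and are not shifted
      // by a_offset.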
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      const float* restrict a5 = a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const float*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

      size_t k = kc;
      do {
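        // Inner loop over kc: broadcast one float from each of the 6 input
        // rows and FMA it against the same 16 packed weights.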
        const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
        w += 16;

        const __m512 va0 = _mm512_set1_ps(*a0);
        vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
        const __m512 va1 = _mm512_set1_ps(*a1);
        vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
        const __m512 va2 = _mm512_set1_ps(*a2);
        vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
        const __m512 va3 = _mm512_set1_ps(*a3);
        vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
        const __m512 va4 = _mm512_set1_ps(*a4);
        vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
        const __m512 va5 = _mm512_set1_ps(*a5);
        vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);

        a0 += 1;
        a1 += 1;
        a2 += 1;
        a3 += 1;
        a4 += 1;
        a5 += 1;

        k -= sizeof(float);
      } while (k != 0);
      p -= 6 * sizeof(void*);
    } while (p != 0);

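    // Clamp the accumulators to the [min, max] output range from params.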
    const __m512 vmin = _mm512_set1_ps(params->scalar.min);
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vacc5x0123456789ABCDEF, vmin);

    const __m512 vmax = _mm512_set1_ps(params->scalar.max);
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vacc5x0123456789ABCDEF, vmax);

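    // Common case: a full 16-column tile. Store all 6 rows, step each output
    // pointer by cn_stride, and rewind the indirection pointer by ks for the
    // next column block.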
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
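      // Remainder: fewer than 16 columns left. Store only the low nc lanes of
      // each row with a masked store, then finish.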
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}