// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>

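// GEMM microkernel: each outer-loop iteration produces a tile of up to
// 5 rows x 16 columns of C = clamp(A * B + bias, min, max), with the clamping
// bounds taken from params->scalar. kc and all strides (a_stride, cm_stride,
// cn_stride) are in bytes. The packed weights w are laid out per 16-column
// block as 16 floats of bias followed by 16 floats of weights for every
// element of K; columns beyond a multiple of 16 are handled with an AVX-512
// store mask.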
void xnn_f32_gemm_minmax_ukernel_5x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

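  // Set up one row pointer into A and C for each of the (up to) 5 rows.
  // When mr < 5, the pointers for the unused rows alias the previous row:
  // those rows are computed redundantly, and their stores simply rewrite
  // values already written for the aliased row, keeping all accesses in bounds.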
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

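  // Outer loop: produce one 5x16 tile of C per iteration, advancing 16
  // columns (cn_stride bytes) across the output each time.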
  do {
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

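    // Inner loop over K, one float at a time: load a 16-wide block of packed
    // weights, broadcast one scalar from each row of A, and accumulate with
    // a fused multiply-add.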
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      const __m512 va0 = _mm512_set1_ps(*a0);
      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      const __m512 va1 = _mm512_set1_ps(*a1);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      const __m512 va2 = _mm512_set1_ps(*a2);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      const __m512 va3 = _mm512_set1_ps(*a3);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      const __m512 va4 = _mm512_set1_ps(*a4);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;

      k -= sizeof(float);
    } while (k != 0);

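    // Clamp the accumulators to the [min, max] output range.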
    const __m512 vmin = _mm512_set1_ps(params->scalar.min);
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);

    const __m512 vmax = _mm512_set1_ps(params->scalar.max);
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);

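    // Store the tile. The common case writes full 16-wide rows, advances the
    // C pointers by cn_stride, and rewinds the A pointers by kc bytes so the
    // same rows of A are replayed for the next 16-column block. The remainder
    // path (nc < 16) uses a masked store for the final columns.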
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
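        // (UINT32_C(1) << nc) - 1 sets the low nc bits; here 1 <= nc <= 15,
        // so only the valid output lanes are written.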
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}