// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>

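// 8x16 f32 GEMM microkernel with fused min/max clamping for AVX512F.
// Computes an mr x nc block of C = A * W, reading A with scalar broadcasts.
// W holds packed weights: each 16-column tile starts with 16 bias values
// followed by kc/sizeof(float) groups of 16 weights. Note that kc and all
// strides (a_stride, cm_stride, cn_stride) are expressed in bytes.
//
// A hypothetical invocation (shapes chosen for illustration only), computing
// a full 8x16 tile with K = 32 reduction elements over row-major buffers:
//
//   xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast(
//       /*mr=*/8, /*nc=*/16, /*kc=*/32 * sizeof(float),
//       a, /*a_stride=*/32 * sizeof(float),
//       w, c, /*cm_stride=*/16 * sizeof(float),
//       /*cn_stride=*/16 * sizeof(float), &params);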
void xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

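  // Set up one A (input) and one C (output) pointer per row of the tile.
  // When mr < 8, pointers for the absent rows alias the previous row, so the
  // extra rows compute redundant results and store them over valid memory
  // instead of branching inside the hot loop.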
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const float* a7 = (const float*) ((uintptr_t) a6 + a_stride);
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

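  // Outer loop: one iteration per tile of up to 16 output columns; nc counts
  // the columns that remain.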
  do {
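    // Initialize all 8 row accumulators from the 16 packed bias values that
    // precede this tile's weights.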
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc7x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

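    // Inner reduction loop over kc bytes of A: broadcast one f32 from each
    // A row and fused-multiply-add it against the same 16 packed weights.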
    size_t k = kc;
    do {
      const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
      w += 16;

      const __m512 va0 = _mm512_set1_ps(*a0);
      vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
      const __m512 va1 = _mm512_set1_ps(*a1);
      vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
      const __m512 va2 = _mm512_set1_ps(*a2);
      vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
      const __m512 va3 = _mm512_set1_ps(*a3);
      vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
      const __m512 va4 = _mm512_set1_ps(*a4);
      vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
      const __m512 va5 = _mm512_set1_ps(*a5);
      vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
      const __m512 va6 = _mm512_set1_ps(*a6);
      vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);
      const __m512 va7 = _mm512_set1_ps(*a7);
      vacc7x0123456789ABCDEF = _mm512_fmadd_ps(va7, vb0123456789ABCDEF, vacc7x0123456789ABCDEF);

      a0 += 1;
      a1 += 1;
      a2 += 1;
      a3 += 1;
      a4 += 1;
      a5 += 1;
      a6 += 1;
      a7 += 1;

      k -= sizeof(float);
    } while (k != 0);

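    // Apply the fused activation: clamp every accumulator to
    // [params->scalar.min, params->scalar.max].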
    const __m512 vmin = _mm512_set1_ps(params->scalar.min);
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vacc5x0123456789ABCDEF, vmin);
    vacc6x0123456789ABCDEF = _mm512_max_ps(vacc6x0123456789ABCDEF, vmin);
    vacc7x0123456789ABCDEF = _mm512_max_ps(vacc7x0123456789ABCDEF, vmin);

    const __m512 vmax = _mm512_set1_ps(params->scalar.max);
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vacc5x0123456789ABCDEF, vmax);
    vacc6x0123456789ABCDEF = _mm512_min_ps(vacc6x0123456789ABCDEF, vmax);
    vacc7x0123456789ABCDEF = _mm512_min_ps(vacc7x0123456789ABCDEF, vmax);

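    // Main path: at least 16 columns remain, so store full 512-bit rows
    // (written from the last row to the first) and advance each C pointer by
    // cn_stride to the next tile.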
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c7, vacc7x0123456789ABCDEF);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

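      // Rewind the A pointers by kc bytes so the same input rows are reused
      // for the next 16-column tile of C.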
      a7 = (const float*) ((uintptr_t) a7 - kc);
      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
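      // Final tile with nc < 16 columns: store only the valid columns using
      // a per-element write mask; nc is then set to 0 to exit the loop.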
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c7, vmask, vacc7x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}