// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx512-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>

void xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (7 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

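  // Set up the 7 output row pointers. When mr < 7, unused pointers alias the previous
  // row; rows are later stored from c6 down to c0, so valid rows are written last and
  // overwrite any aliased duplicates.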
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }

  do {
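    // Initialize all 7 row accumulators with the bias, stored as the first 16 floats
    // of the packed weights for this 16-column block.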
    __m512 vacc0x0123456789ABCDEF = _mm512_load_ps(w);
    __m512 vacc1x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc2x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc3x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc4x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc5x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    __m512 vacc6x0123456789ABCDEF = vacc0x0123456789ABCDEF;
    w += 16;

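    // Loop over the indirection buffer: each iteration fetches 7 input row pointers
    // and accumulates a kc-long slice. Pointers equal to `zero` reference the zero
    // buffer and are not adjusted by a_offset.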
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      const float* restrict a5 = a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const float*) ((uintptr_t) a5 + a_offset);
      }
      const float* restrict a6 = a[6];
      assert(a6 != NULL);
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const float*) ((uintptr_t) a6 + a_offset);
      }
      a += 7;

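      // Main K loop: broadcast one float from each of the 7 input rows and
      // multiply-accumulate it against 16 packed weight columns per iteration.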
      size_t k = kc;
      do {
        const __m512 vb0123456789ABCDEF = _mm512_load_ps(w);
        w += 16;

        const __m512 va0 = _mm512_set1_ps(*a0);
        vacc0x0123456789ABCDEF = _mm512_fmadd_ps(va0, vb0123456789ABCDEF, vacc0x0123456789ABCDEF);
        const __m512 va1 = _mm512_set1_ps(*a1);
        vacc1x0123456789ABCDEF = _mm512_fmadd_ps(va1, vb0123456789ABCDEF, vacc1x0123456789ABCDEF);
        const __m512 va2 = _mm512_set1_ps(*a2);
        vacc2x0123456789ABCDEF = _mm512_fmadd_ps(va2, vb0123456789ABCDEF, vacc2x0123456789ABCDEF);
        const __m512 va3 = _mm512_set1_ps(*a3);
        vacc3x0123456789ABCDEF = _mm512_fmadd_ps(va3, vb0123456789ABCDEF, vacc3x0123456789ABCDEF);
        const __m512 va4 = _mm512_set1_ps(*a4);
        vacc4x0123456789ABCDEF = _mm512_fmadd_ps(va4, vb0123456789ABCDEF, vacc4x0123456789ABCDEF);
        const __m512 va5 = _mm512_set1_ps(*a5);
        vacc5x0123456789ABCDEF = _mm512_fmadd_ps(va5, vb0123456789ABCDEF, vacc5x0123456789ABCDEF);
        const __m512 va6 = _mm512_set1_ps(*a6);
        vacc6x0123456789ABCDEF = _mm512_fmadd_ps(va6, vb0123456789ABCDEF, vacc6x0123456789ABCDEF);

        a0 += 1;
        a1 += 1;
        a2 += 1;
        a3 += 1;
        a4 += 1;
        a5 += 1;
        a6 += 1;

        k -= sizeof(float);
      } while (k != 0);
      p -= 7 * sizeof(void*);
    } while (p != 0);

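    // Clamp the accumulators to the [min, max] range from the microkernel parameters.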
    const __m512 vmin = _mm512_set1_ps(params->scalar.min);
    vacc0x0123456789ABCDEF = _mm512_max_ps(vacc0x0123456789ABCDEF, vmin);
    vacc1x0123456789ABCDEF = _mm512_max_ps(vacc1x0123456789ABCDEF, vmin);
    vacc2x0123456789ABCDEF = _mm512_max_ps(vacc2x0123456789ABCDEF, vmin);
    vacc3x0123456789ABCDEF = _mm512_max_ps(vacc3x0123456789ABCDEF, vmin);
    vacc4x0123456789ABCDEF = _mm512_max_ps(vacc4x0123456789ABCDEF, vmin);
    vacc5x0123456789ABCDEF = _mm512_max_ps(vacc5x0123456789ABCDEF, vmin);
    vacc6x0123456789ABCDEF = _mm512_max_ps(vacc6x0123456789ABCDEF, vmin);

    const __m512 vmax = _mm512_set1_ps(params->scalar.max);
    vacc0x0123456789ABCDEF = _mm512_min_ps(vacc0x0123456789ABCDEF, vmax);
    vacc1x0123456789ABCDEF = _mm512_min_ps(vacc1x0123456789ABCDEF, vmax);
    vacc2x0123456789ABCDEF = _mm512_min_ps(vacc2x0123456789ABCDEF, vmax);
    vacc3x0123456789ABCDEF = _mm512_min_ps(vacc3x0123456789ABCDEF, vmax);
    vacc4x0123456789ABCDEF = _mm512_min_ps(vacc4x0123456789ABCDEF, vmax);
    vacc5x0123456789ABCDEF = _mm512_min_ps(vacc5x0123456789ABCDEF, vmax);
    vacc6x0123456789ABCDEF = _mm512_min_ps(vacc6x0123456789ABCDEF, vmax);

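    // Store the output tile: full 16-wide stores while nc >= 16, otherwise a masked
    // store covering only the remaining nc columns.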
    if XNN_LIKELY(nc >= 16) {
      _mm512_storeu_ps(c6, vacc6x0123456789ABCDEF);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm512_storeu_ps(c5, vacc5x0123456789ABCDEF);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm512_storeu_ps(c4, vacc4x0123456789ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm512_storeu_ps(c3, vacc3x0123456789ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm512_storeu_ps(c2, vacc2x0123456789ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm512_storeu_ps(c1, vacc1x0123456789ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm512_storeu_ps(c0, vacc0x0123456789ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      if (nc & 15) {
        // Prepare mask for valid 32-bit elements (depends on nc).
        const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << nc) - UINT32_C(1)));

        _mm512_mask_storeu_ps(c6, vmask, vacc6x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c5, vmask, vacc5x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c4, vmask, vacc4x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c3, vmask, vacc3x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c2, vmask, vacc2x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c1, vmask, vacc1x0123456789ABCDEF);
        _mm512_mask_storeu_ps(c0, vmask, vacc0x0123456789ABCDEF);
      }

      nc = 0;
    }
  } while (nc != 0);
}