// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>

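// 5x16 IGEMM microkernel with fused min/max clamping: computes a 5-row by
// 16-column output tile from an indirection list of input row pointers,
// using FMA3 and the "shuffle4" (s4) scheme, in which each group of 4 input
// values is broadcast once and then rotated in-register between the four
// multiply steps instead of being re-broadcast from memory for every step.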
void xnn_f32_igemm_minmax_ukernel_5x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (5 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

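  // One output row pointer per row of the tile. When mr < 5, unused pointers
  // alias the previous valid row: those rows are computed redundantly and
  // their stores are overwritten by the valid row, which is stored last.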
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }

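  // Outer loop: one iteration per 16-column block of output channels.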
  do {
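    // Initialize all 10 accumulators (5 rows x 16 columns) from the first 16
    // floats of the packed weights, which hold the per-channel bias.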
    __m256 vacc0x01234567 = _mm256_load_ps(w);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
    w += 16;

    size_t p = ks;
    do {
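      // Fetch the 5 input row pointers for this indirection group. Pointers
      // equal to the zero buffer are used as-is (a_offset is not applied), so
      // padding taps read zeros from the dedicated buffer.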
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      a += 5;

      size_t k = kc;
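      // Shuffle4 main loop: consume 4 floats of each input row per iteration.
      // Each 4-float group is broadcast to both 128-bit lanes once, then
      // rotated by one element with _mm256_permute_ps between the four
      // sub-steps; the 4 x 16 weights consumed per iteration are packed to
      // match that rotation.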
      while (k >= 4 * sizeof(float)) {
        __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
        a0 += 4;
        __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
        a1 += 4;
        __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
        a2 += 4;
        __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
        a3 += 4;
        __m256 va4 = _mm256_broadcast_ps((const __m128*) a4);
        a4 += 4;

        const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
        const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c0, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c0, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc0, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc0, vacc4x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
        const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c1, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c1, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc1, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc1, vacc4x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
        const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c2, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c2, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc2, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc2, vacc4x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
        const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c3, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c3, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc3, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc3, vacc4x89ABCDEF);

        w += 64;
        k -= 4 * sizeof(float);
      }
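      // Remainder handling for kc % 4 != 0: _mm256_broadcast_ps still reads a
      // full 16 bytes even when fewer than 4 floats remain (hence the
      // XNN_OOB_READS marker on this kernel). The NEQ-zero compare masks the
      // input lanes wherever the packed weight is zero, so values read past
      // the end, which line up with zero-padded weights, are zeroed before the
      // multiply instead of producing 0 * Inf = NaN in the accumulators.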
      if XNN_UNLIKELY(k != 0) {
        __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
        a0 = (const float*) ((uintptr_t) a0 + k);
        __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
        a1 = (const float*) ((uintptr_t) a1 + k);
        __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
        a2 = (const float*) ((uintptr_t) a2 + k);
        __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
        a3 = (const float*) ((uintptr_t) a3 + k);
        __m256 va4 = _mm256_broadcast_ps((const __m128*) a4);
        a4 = (const float*) ((uintptr_t) a4 + k);

        const __m256 vzero = _mm256_setzero_ps();

        const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
        const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc4x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
        const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc4x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
        const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc4x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
        const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc4x89ABCDEF);

        w += 64;
      }
      p -= 5 * sizeof(void*);
    } while (p != 0);

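    // Fused activation: clamp all accumulators to [min, max] from params.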
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);

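    // Main store path: write the full 5x16 tile (rows in reverse order),
    // advance each row pointer by cn_stride, and rewind the indirection list
    // by ks so the next 16-column block revisits the same input pointers.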
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c4, vacc4x01234567);
      _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float** restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
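      // Tail store path: write the remaining nc < 16 columns by binary
      // decomposition (8, then 4, then 2, then 1), shifting the surviving
      // lanes down after each partial store.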
      if (nc & 8) {
        _mm256_storeu_ps(c4, vacc4x01234567);
        _mm256_storeu_ps(c3, vacc3x01234567);
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc4x01234567 = vacc4x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c4 += 8;
        c3 += 8;
        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}