// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

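// GEMM microkernel computing a 5x16 tile of the output (MR=5, NR=16) with
// min/max clamping, using FMA3. The "s4" (shuffle-4) variant loads 4
// consecutive A values per row and rotates them through the lanes with
// _mm256_permute_ps between FMA steps, consuming 4 K elements per main-loop
// iteration against the packed weights w.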
void xnn_f32_gemm_minmax_ukernel_5x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

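  // Set up per-row pointers into A and C. When mr < 5, each unused row
  // aliases the previous one, so its redundant computations and stores are
  // harmless: stores below run from row 4 down to row 0, leaving row 0's
  // results in place last.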
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

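  // Process the output in column blocks of up to 16 columns (NR = 16).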
  do {
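    // Initialize all accumulators from the 16 bias values packed at the
    // start of w; every row of the tile starts from the same biases.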
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
    w += 16;

    size_t k = kc;
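    // Main loop: consume 4 K elements per iteration. Each row broadcasts 4
    // consecutive A values into both 128-bit lanes of a YMM register.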
    while (k >= 4 * sizeof(float)) {
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 += 4;
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 += 4;
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 += 4;
      __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
      a3 += 4;
      __m256 va4 = _mm256_broadcast_ps((const __m128*) a4);
      a4 += 4;


      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c0, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c0, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc0, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc0, vacc4x89ABCDEF);

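      // Rotate the 4 A values within each 128-bit lane by one position; the
      // packed weights for the next step are laid out to match this rotation.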
      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c1, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c1, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc1, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc1, vacc4x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c2, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c2, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc2, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc2, vacc4x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c3, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c3, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc3, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc3, vacc4x89ABCDEF);


      w += 64;
      k -= 4 * sizeof(float);
    }
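    // Remainder: 1-3 K elements are left. The 4-element broadcast may read
    // past the end of A (hence XNN_OOB_READS); wherever the packed weights
    // are zero-padded, A is masked to zero first so that NaN/Inf garbage in
    // the over-read lanes cannot leak into the accumulators (0 * NaN != 0).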
    if XNN_UNLIKELY(k != 0) {
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
      a3 = (const float*) ((uintptr_t) a3 + k);
      __m256 va4 = _mm256_broadcast_ps((const __m128*) a4);
      a4 = (const float*) ((uintptr_t) a4 + k);

      const __m256 vzero = _mm256_setzero_ps();

      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc4x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc4x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc4x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va4, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc4x89ABCDEF);


      w += 64;
    }

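    // Clamp the accumulators to the [min, max] range from params.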
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);

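    // Store the tile. The common case writes all 16 columns per row; the
    // tail path below peels off 8, 4, 2, and 1 columns per the bits of nc.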
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c4, vacc4x01234567);
      _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

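      // Rewind the A pointers to the start of each row for the next column block.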
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
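      // Tail: write the remaining columns in progressively smaller chunks,
      // shifting the surviving lanes down after each partial store.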
      if (nc & 8) {
        _mm256_storeu_ps(c4, vacc4x01234567);
        _mm256_storeu_ps(c3, vacc3x01234567);
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc4x01234567 = vacc4x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c4 += 8;
        c3 += 8;
        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}