// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/igemm.h>

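// IGEMM (indirect GEMM) microkernel: accumulates a 5x8 tile of output from
// an indirection buffer of input-row pointers `a` and packed weights `w`,
// then clamps the results with the min/max activation parameters. The "s4"
// suffix marks the shuffle variant: each 4-float slice of the input is
// rotated between multiply-add passes instead of being broadcast per element.
//
// Call-site sketch (argument shapes inferred from the asserts below; the
// buffer names are hypothetical):
//   xnn_f32_igemm_minmax_ukernel_5x8s4__sse(
//       mr, nc, kc * sizeof(float), ks * 5 * sizeof(void*),
//       indirection, packed_w, out, cm_stride, cn_stride,
//       a_offset, zero_buffer, &params);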
void xnn_f32_igemm_minmax_ukernel_5x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (5 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

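  // When mr < 5, alias each unused output row pointer to the previous row.
  // The stores below run from c4 down to c0, so the real row is written last
  // and overwrites anything the aliased rows stored.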
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }

  do {
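    // The packed weights start with the 8 bias values for this column block;
    // seed row 0's accumulators with them and copy to the other four rows.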
    __m128 vacc0x0123 = _mm_load_ps(w);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    __m128 vacc3x0123 = vacc0x0123;
    __m128 vacc3x4567 = vacc0x4567;
    __m128 vacc4x0123 = vacc0x0123;
    __m128 vacc4x4567 = vacc0x4567;
    w += 8;

    size_t p = ks;
    do {
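      // Dereference the next 5 input-row pointers from the indirection
      // buffer. Every pointer except the shared `zero` row is shifted by
      // a_offset (the offset of this batch element within the input).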
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      a += 5;

      size_t k = kc;
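      // Main loop: consume 4 input floats per row per iteration. Four
      // multiply-add passes each use 8 packed weights (32 floats total);
      // between passes the input vectors are rotated one lane
      // (_MM_SHUFFLE(0, 3, 2, 1)) so every input element lines up with the
      // weight column packed for it.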
      while (k >= 4 * sizeof(float)) {
        __m128 va0 = _mm_loadu_ps(a0);
        a0 += 4;
        __m128 va1 = _mm_loadu_ps(a1);
        a1 += 4;
        __m128 va2 = _mm_loadu_ps(a2);
        a2 += 4;
        __m128 va3 = _mm_loadu_ps(a3);
        a3 += 4;
        __m128 va4 = _mm_loadu_ps(a4);
        a4 += 4;

        const __m128 vb0123c0 = _mm_load_ps(w + 0);
        const __m128 vb4567c0 = _mm_load_ps(w + 4);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c0));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c0));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c0));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c0));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c1 = _mm_load_ps(w + 8);
        const __m128 vb4567c1 = _mm_load_ps(w + 12);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c1));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c1));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c1));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c1));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c2 = _mm_load_ps(w + 16);
        const __m128 vb4567c2 = _mm_load_ps(w + 20);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c2));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c2));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c2));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c2));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c3 = _mm_load_ps(w + 24);
        const __m128 vb4567c3 = _mm_load_ps(w + 28);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(va3, vb0123c3));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(va4, vb0123c3));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(va3, vb4567c3));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(va4, vb4567c3));

        w += 32;
        k -= 4 * sizeof(float);
      }
      if XNN_UNLIKELY(k != 0) {
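        // Remainder (kc % 4 floats): run the same four passes, but clear any
        // input lane whose packed weight lane is zero. cmpeq against zero
        // builds the mask and andnot zeroes those lanes, so the zero padding
        // in the weights contributes exactly zero even where the input lanes
        // hold garbage from the over-read (hence XNN_OOB_READS above).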
        __m128 va0 = _mm_loadu_ps(a0);
        a0 = (const float*) ((uintptr_t) a0 + k);
        __m128 va1 = _mm_loadu_ps(a1);
        a1 = (const float*) ((uintptr_t) a1 + k);
        __m128 va2 = _mm_loadu_ps(a2);
        a2 = (const float*) ((uintptr_t) a2 + k);
        __m128 va3 = _mm_loadu_ps(a3);
        a3 = (const float*) ((uintptr_t) a3 + k);
        __m128 va4 = _mm_loadu_ps(a4);
        a4 = (const float*) ((uintptr_t) a4 + k);

        const __m128 vb0123c0 = _mm_load_ps(w + 0);
        const __m128 vb4567c0 = _mm_load_ps(w + 4);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va3), vb0123c0));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va4), vb0123c0));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va3), vb4567c0));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va4), vb4567c0));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c1 = _mm_load_ps(w + 8);
        const __m128 vb4567c1 = _mm_load_ps(w + 12);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va3), vb0123c1));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va4), vb0123c1));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va3), vb4567c1));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va4), vb4567c1));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c2 = _mm_load_ps(w + 16);
        const __m128 vb4567c2 = _mm_load_ps(w + 20);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va3), vb0123c2));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va4), vb0123c2));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va3), vb4567c2));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va4), vb4567c2));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm_shuffle_ps(va3, va3, _MM_SHUFFLE(0, 3, 2, 1));
        va4 = _mm_shuffle_ps(va4, va4, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c3 = _mm_load_ps(w + 24);
        const __m128 vb4567c3 = _mm_load_ps(w + 28);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
        vacc3x0123 = _mm_add_ps(vacc3x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va3), vb0123c3));
        vacc4x0123 = _mm_add_ps(vacc4x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va4), vb0123c3));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));
        vacc3x4567 = _mm_add_ps(vacc3x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va3), vb4567c3));
        vacc4x4567 = _mm_add_ps(vacc4x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va4), vb4567c3));

        w += 32;
      }
      p -= 5 * sizeof(void*);
    } while (p != 0);

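    // Clamp the accumulators to the activation bounds from params.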
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc3x0123 = _mm_min_ps(vacc3x0123, vmax);
    vacc4x0123 = _mm_min_ps(vacc4x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);
    vacc3x4567 = _mm_min_ps(vacc3x4567, vmax);
    vacc4x4567 = _mm_min_ps(vacc4x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc3x0123 = _mm_max_ps(vacc3x0123, vmin);
    vacc4x0123 = _mm_max_ps(vacc4x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);
    vacc3x4567 = _mm_max_ps(vacc3x4567, vmin);
    vacc4x4567 = _mm_max_ps(vacc4x4567, vmin);

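    // Full 8-column tile: store rows from c4 down to c0 and advance each
    // pointer by cn_stride; rewind the indirection buffer by ks bytes for
    // the next column block.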
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c4, vacc4x0123);
      _mm_storeu_ps(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_ps(c3, vacc3x0123);
      _mm_storeu_ps(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
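      // Partial tile (nc < 8): store the remaining columns in chunks of
      // 4, 2, and 1, shifting the surviving lanes down after each chunk.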
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}