// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_4x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

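  // Set up one output row pointer per row of the 4-row tile. When mr < 4,
  // the surplus pointers alias the last valid row; since rows are stored
  // highest-index first, the aliased stores are overwritten by the valid
  // row's store, so the duplication is benign.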
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

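  // Process the output in tiles of up to 4 rows x 16 columns. Each tile
  // begins by loading the 16 per-column biases, packed at the head of w,
  // into the accumulators of every row.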
  do {
    __m256 vacc0x01234567 = _mm256_load_ps(w);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
    w += 16;

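    // Walk the indirection buffer: p counts the remaining A pointers in
    // bytes, and each pass consumes one pointer per row. Pointers equal to
    // `zero` reference the shared zero buffer and must not be shifted by
    // a_offset.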
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

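      // Main loop: consume k in blocks of 4 floats (the shuffle-4 scheme).
      // Each row's 4-float A block is broadcast to both 128-bit lanes of a
      // YMM register and reused across 4 FMA passes against weights that
      // were packed in the matching rotated order.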
      size_t k = kc;
      while (k >= 4 * sizeof(float)) {
        __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
        a0 += 4;
        __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
        a1 += 4;
        __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
        a2 += 4;
        __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
        a3 += 4;


        const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
        const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c0, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc0, vacc3x89ABCDEF);

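        // Rotate each 128-bit lane of A left by one float so the next FMA
        // pass pairs the following k element with its pre-rotated weight
        // column; four rotations return A to its original order.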
        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
        const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c1, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc1, vacc3x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
        const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c2, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc2, vacc3x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
        const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c3, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc3, vacc3x89ABCDEF);


        w += 64;
        k -= 4 * sizeof(float);
      }
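      // Remainder: 1-3 k elements are left. A full 4-float block of A is
      // still loaded (this is why the kernel is annotated XNN_OOB_READS);
      // the masking below keeps the over-read lanes out of the result.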
      if XNN_UNLIKELY(k != 0) {
        __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
        a0 = (const float*) ((uintptr_t) a0 + k);
        __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
        a1 = (const float*) ((uintptr_t) a1 + k);
        __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
        a2 = (const float*) ((uintptr_t) a2 + k);
        __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
        a3 = (const float*) ((uintptr_t) a3 + k);

        const __m256 vzero = _mm256_setzero_ps();

        const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
        const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

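        // Weight entries past the end of kc are packed as zeros. Zeroing the
        // A lanes wherever the weight is zero makes those products an exact
        // 0.0f, so NaN/Inf in the over-read A lanes cannot leak in through
        // a 0 * NaN product.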
        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc3x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
        const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc3x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
        const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc3x89ABCDEF);

        va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
        va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

        const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
        const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

        vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc3x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc3x89ABCDEF);


        w += 64;
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

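    // Clamp the accumulators to the [min, max] range supplied in params.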
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);

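    // Common case: a full 16-column tile. Store all rows, advance the
    // column pointers by cn_stride, and rewind the indirection buffer by ks
    // for the next tile of 16 output columns.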
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
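      // Final partial tile: write the remaining 1-15 columns with
      // progressively narrower stores (8, then 4, 2, and 1 float).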
289 if (nc & 8) {
290 _mm256_storeu_ps(c3, vacc3x01234567);
291 _mm256_storeu_ps(c2, vacc2x01234567);
292 _mm256_storeu_ps(c1, vacc1x01234567);
293 _mm256_storeu_ps(c0, vacc0x01234567);
294
295 vacc3x01234567 = vacc3x89ABCDEF;
296 vacc2x01234567 = vacc2x89ABCDEF;
297 vacc1x01234567 = vacc1x89ABCDEF;
298 vacc0x01234567 = vacc0x89ABCDEF;
299
300 c3 += 8;
301 c2 += 8;
302 c1 += 8;
303 c0 += 8;
304 }
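      // Narrow to the low 128-bit halves for the remaining 0-7 columns.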
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}