// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>

void xnn_f32_gemm_minmax_ukernel_4x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

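  // One A (input) row pointer and one C (output) row pointer per row of the
  // 4-row tile. When mr < 4, the pointers for the missing rows alias the
  // previous row: those rows are computed redundantly, and their stores are
  // harmless because the valid rows are written last.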
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
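    // The packed weights w begin with the bias for these 16 output columns.
    // Load it into the row-0 accumulators and copy it to the other rows.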
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
    w += 16;

    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
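      // Main loop: process 4 elements of k per iteration. Each A register
      // holds the same 4 values in both 128-bit halves; they are reused for
      // 4 FMA rounds, rotated one lane between rounds (the "s4" shuffle) so
      // that each A element meets the B block packed for that rotation.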
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 += 4;
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 += 4;
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 += 4;
      __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
      a3 += 4;

      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c0, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc0, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c1, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc1, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c2, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc2, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c3, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc3, vacc3x89ABCDEF);

      w += 64;
      k -= 4 * sizeof(float);
    }
    if XNN_UNLIKELY(k != 0) {
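      // Remainder: fewer than 4 k elements left. The 128-bit A loads may read
      // past the end of the row (permitted under XNN_OOB_READS); the packed B
      // values for positions beyond kc are zero-padded, and the NEQ-zero
      // masks below zero out the corresponding A lanes so that garbage (even
      // NaN/Inf) in the over-read lanes never reaches the accumulators.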
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 = (const float*) ((uintptr_t) a2 + k);
      __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
      a3 = (const float*) ((uintptr_t) a3 + k);

      const __m256 vzero = _mm256_setzero_ps();

      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c0, vzero, _CMP_NEQ_OQ)), vb01234567c0, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc0, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc0, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c1, vzero, _CMP_NEQ_OQ)), vb01234567c1, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc1, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc1, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c2, vzero, _CMP_NEQ_OQ)), vb01234567c2, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc2, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc2, vacc3x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb01234567c3, vzero, _CMP_NEQ_OQ)), vb01234567c3, vacc3x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va0, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va1, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va2, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(_mm256_and_ps(va3, _mm256_cmp_ps(vb89ABCDEFc3, vzero, _CMP_NEQ_OQ)), vb89ABCDEFc3, vacc3x89ABCDEF);

      w += 64;
    }

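    // Clamp the accumulators to the [min, max] output range supplied in the
    // microkernel parameters.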
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);

    if XNN_LIKELY(nc >= 16) {
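      // Full 16-column tile: store rows in reverse order (c3 first, c0 last)
      // so that aliased duplicate rows are overwritten by the valid ones,
      // then rewind the A pointers for the next group of 16 columns.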
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
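      // Partial tile: write the remaining nc (< 16) columns in power-of-two
      // chunks of 8, 4, 2, and 1 elements.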
      if (nc & 8) {
        _mm256_storeu_ps(c3, vacc3x01234567);
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c3 += 8;
        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
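      // At most 7 columns remain: continue with 128-bit registers, starting
      // from the low half of each accumulator and switching to the high half
      // once 4 more columns have been stored.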
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}