// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_minmax_ukernel_3x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float** restrict a,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

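  // Compute output row pointers from cm_stride; when mr < 3, alias the unused
  // rows onto the previous row so the stores below stay in bounds.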
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
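    // Initialize all three row accumulators from the bias values packed at
    // the start of the weights block.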
    __m128 vacc0x0123 = _mm_load_ps(w);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    w += 8;

    size_t p = ks;
    do {
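      // Load the next 3 input-row pointers from the indirection buffer.
      // Pointers equal to `zero` reference the zero/padding buffer and are
      // deliberately not adjusted by a_offset.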
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
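      // Main K loop: consume 4 input elements per row per iteration. The
      // weights are packed for the s4 ("shuffle by 4") layout, so each of the
      // four multiply-add rounds below uses a different rotation of va.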
      while (k >= 4 * sizeof(float)) {
        __m128 va0 = _mm_loadu_ps(a0);
        a0 += 4;
        __m128 va1 = _mm_loadu_ps(a1);
        a1 += 4;
        __m128 va2 = _mm_loadu_ps(a2);
        a2 += 4;


        const __m128 vb0123c0 = _mm_load_ps(w + 0);
        const __m128 vb4567c0 = _mm_load_ps(w + 4);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));

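        // Rotate each va register left by one lane so the next round of
        // packed weights lines up with the next K element.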
        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c1 = _mm_load_ps(w + 8);
        const __m128 vb4567c1 = _mm_load_ps(w + 12);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c2 = _mm_load_ps(w + 16);
        const __m128 vb4567c2 = _mm_load_ps(w + 20);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c3 = _mm_load_ps(w + 24);
        const __m128 vb4567c3 = _mm_load_ps(w + 28);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));


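        // Advance past the 4x8 block of packed weights (32 floats).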
        w += 32;
        k -= 4 * sizeof(float);
      }
      if XNN_UNLIKELY(k != 0) {
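        // Remainder K loop (k < 4 floats). The full-width loads below may read
        // past the end of each row (hence XNN_OOB_READS). Positions past kc
        // are zero-padded in the packed weights, and the andnot/cmpeq mask
        // zeroes the matching va lanes so out-of-bounds garbage (Inf/NaN)
        // cannot corrupt the accumulators.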
        __m128 va0 = _mm_loadu_ps(a0);
        a0 = (const float*) ((uintptr_t) a0 + k);
        __m128 va1 = _mm_loadu_ps(a1);
        a1 = (const float*) ((uintptr_t) a1 + k);
        __m128 va2 = _mm_loadu_ps(a2);
        a2 = (const float*) ((uintptr_t) a2 + k);


        const __m128 vb0123c0 = _mm_load_ps(w + 0);
        const __m128 vb4567c0 = _mm_load_ps(w + 4);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c1 = _mm_load_ps(w + 8);
        const __m128 vb4567c1 = _mm_load_ps(w + 12);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c2 = _mm_load_ps(w + 16);
        const __m128 vb4567c2 = _mm_load_ps(w + 20);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));

        va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
        va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
        va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128 vb0123c3 = _mm_load_ps(w + 24);
        const __m128 vb4567c3 = _mm_load_ps(w + 28);

        vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
        vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
        vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
        vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
        vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
        vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));


        w += 32;
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

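    // Clamp the accumulators to the [min, max] output range from the params.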
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);

    if XNN_LIKELY(nc >= 8) {
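      // Full 8-column tile: store all three rows and advance by cn_stride.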
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

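      // Rewind the indirection buffer so the next column tile reuses the same
      // input rows.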
      a = (const float** restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
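      // Partial tile: write the remaining 1-7 columns in 4/2/1 chunks,
      // shifting the surviving lanes down after each store.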
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}