// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/sse-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xmmintrin.h>

#include <xnnpack/gemm.h>

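// GEMM microkernel: computes a 3x8 tile of C = A * B with min/max clamping,
// using SSE and the 4-step lane-rotation ("s4") packed-weight layout.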
void xnn_f32_gemm_minmax_ukernel_3x8s4__sse(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const float* a0 = a;
  float* c0 = c;
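  // When mr < 3, the pointers for the unused rows alias the row above them,
  // so their loads stay in bounds and their stores harmlessly rewrite the
  // same values.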
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

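  // Each iteration of the outer loop computes a 3x8 tile of the output.
  // The accumulators are initialized from the packed bias values at the
  // head of w.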
  do {
    __m128 vacc0x0123 = _mm_load_ps(w + 0);
    __m128 vacc0x4567 = _mm_load_ps(w + 4);
    __m128 vacc1x0123 = vacc0x0123;
    __m128 vacc1x4567 = vacc0x4567;
    __m128 vacc2x0123 = vacc0x0123;
    __m128 vacc2x4567 = vacc0x4567;
    w += 8;

    size_t k = kc;
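    // Main loop: consume 4 elements of K per iteration. Each row's 4 A values
    // are loaded once; between the four sub-steps the lanes are rotated by one
    // position (the "s4" shuffle), so every A value meets its matching block
    // of 8 packed weights without extra loads.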
    while (k >= 4 * sizeof(float)) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 += 4;
      __m128 va1 = _mm_loadu_ps(a1);
      a1 += 4;
      __m128 va2 = _mm_loadu_ps(a2);
      a2 += 4;

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(va0, vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(va1, vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(va2, vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(va0, vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(va1, vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(va2, vb4567c3));

      w += 32;
      k -= 4 * sizeof(float);
    }
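    // Remainder (0 < k < 4 floats): the unaligned A loads may read past the
    // end of the row, which XNN_OOB_READS permits. The weights packed for the
    // K padding are zero, so A lanes are zeroed wherever the matching weight
    // is zero; this keeps NaN/Inf garbage in the over-read lanes from
    // polluting the accumulators (0 * NaN would otherwise produce NaN).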
    if XNN_UNLIKELY(k != 0) {
      __m128 va0 = _mm_loadu_ps(a0);
      a0 = (const float*) ((uintptr_t) a0 + k);
      __m128 va1 = _mm_loadu_ps(a1);
      a1 = (const float*) ((uintptr_t) a1 + k);
      __m128 va2 = _mm_loadu_ps(a2);
      a2 = (const float*) ((uintptr_t) a2 + k);

      const __m128 vb0123c0 = _mm_load_ps(w + 0);
      const __m128 vb4567c0 = _mm_load_ps(w + 4);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va0), vb0123c0));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va1), vb0123c0));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c0), va2), vb0123c0));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va0), vb4567c0));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va1), vb4567c0));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c0), va2), vb4567c0));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c1 = _mm_load_ps(w + 8);
      const __m128 vb4567c1 = _mm_load_ps(w + 12);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va0), vb0123c1));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va1), vb0123c1));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c1), va2), vb0123c1));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va0), vb4567c1));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va1), vb4567c1));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c1), va2), vb4567c1));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c2 = _mm_load_ps(w + 16);
      const __m128 vb4567c2 = _mm_load_ps(w + 20);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va0), vb0123c2));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va1), vb0123c2));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c2), va2), vb0123c2));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va0), vb4567c2));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va1), vb4567c2));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c2), va2), vb4567c2));

      va0 = _mm_shuffle_ps(va0, va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm_shuffle_ps(va1, va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm_shuffle_ps(va2, va2, _MM_SHUFFLE(0, 3, 2, 1));

      const __m128 vb0123c3 = _mm_load_ps(w + 24);
      const __m128 vb4567c3 = _mm_load_ps(w + 28);

      vacc0x0123 = _mm_add_ps(vacc0x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va0), vb0123c3));
      vacc1x0123 = _mm_add_ps(vacc1x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va1), vb0123c3));
      vacc2x0123 = _mm_add_ps(vacc2x0123, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb0123c3), va2), vb0123c3));
      vacc0x4567 = _mm_add_ps(vacc0x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va0), vb4567c3));
      vacc1x4567 = _mm_add_ps(vacc1x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va1), vb4567c3));
      vacc2x4567 = _mm_add_ps(vacc2x4567, _mm_mul_ps(_mm_andnot_ps(_mm_cmpeq_ps(_mm_setzero_ps(), vb4567c3), va2), vb4567c3));

      w += 32;
    }

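    // Clamp the accumulated values to the [min, max] range supplied in params.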
    const __m128 vmax = _mm_load_ps(params->sse.max);
    vacc0x0123 = _mm_min_ps(vacc0x0123, vmax);
    vacc1x0123 = _mm_min_ps(vacc1x0123, vmax);
    vacc2x0123 = _mm_min_ps(vacc2x0123, vmax);
    vacc0x4567 = _mm_min_ps(vacc0x4567, vmax);
    vacc1x4567 = _mm_min_ps(vacc1x4567, vmax);
    vacc2x4567 = _mm_min_ps(vacc2x4567, vmax);

    const __m128 vmin = _mm_load_ps(params->sse.min);
    vacc0x0123 = _mm_max_ps(vacc0x0123, vmin);
    vacc1x0123 = _mm_max_ps(vacc1x0123, vmin);
    vacc2x0123 = _mm_max_ps(vacc2x0123, vmin);
    vacc0x4567 = _mm_max_ps(vacc0x4567, vmin);
    vacc1x4567 = _mm_max_ps(vacc1x4567, vmin);
    vacc2x4567 = _mm_max_ps(vacc2x4567, vmin);

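    // Store the 3x8 tile. For a full tile, advance the C pointers by
    // cn_stride and rewind the A pointers by kc for the next tile; for a
    // partial tile (nc < 8), store in 4/2/1-column pieces, shifting the
    // surviving lanes down after each piece.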
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_ps(c2, vacc2x0123);
      _mm_storeu_ps(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_ps(c1, vacc1x0123);
      _mm_storeu_ps(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_ps(c0, vacc0x0123);
      _mm_storeu_ps(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;

        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}