// Auto-generated file. Do not edit!
//   Template: src/x32-transposec/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <immintrin.h>

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>

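// Transposes a block_height x block_width block of 16-bit elements from
// `input` (rows `input_stride` bytes apart) into `output` (rows
// `output_stride` bytes apart), working on 8x8 tiles with SSE2 unpack
// instructions. The XNN_OOB_READS annotation indicates that the full 16-byte
// vector loads may read past the logical end of a row.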
void xnn_x16_transposec_ukernel__8x8_multi_mov_sse2(
    const uint16_t* input,
    uint16_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint16_t));
  assert(input_stride >= block_width * sizeof(uint16_t));

  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(uint16_t);
  const size_t tile_wbytes = tile_width * sizeof(uint16_t);
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t input_offset = tile_height * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;

  const uint16_t* i0 = input;
  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
  const uint16_t* i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
  const uint16_t* i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
  const uint16_t* i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
  const uint16_t* i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
  const uint16_t* i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
  const uint16_t* i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
  uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;

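  // The outer loop covers the input 8 columns at a time (tile_width); the
  // inner loop walks down the input 8 rows at a time (tile_height),
  // transposing one 8x8 tile per iteration.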
  do {
    const size_t rem = min(block_width - 1, 7);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 8; bh -= 8) {
      const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_offset);
      const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
      i1 = (uint16_t*) ((uintptr_t) i1 + input_offset);
      const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
      i2 = (uint16_t*) ((uintptr_t) i2 + input_offset);
      const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
      i3 = (uint16_t*) ((uintptr_t) i3 + input_offset);
      const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
      i4 = (uint16_t*) ((uintptr_t) i4 + input_offset);
      const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
      i5 = (uint16_t*) ((uintptr_t) i5 + input_offset);
      const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
      i6 = (uint16_t*) ((uintptr_t) i6 + input_offset);
      const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i7);
      i7 = (uint16_t*) ((uintptr_t) i7 + input_offset);

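      // Transpose stage 1: interleave 16-bit lanes of adjacent row pairs.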
      const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
      const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
      const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
      const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
      const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
      const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
      const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
      const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);

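      // Transpose stage 2: interleave 32-bit pairs across the stage-1 results.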
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
      const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
      const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
      const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
      const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);

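      // Transpose stage 3: interleave 64-bit halves. After this stage v0_k
      // holds input column k of the tile, i.e. one transposed output row.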
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
      const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
      const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
      const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
      const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);

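      // Store the transposed rows from the highest output row of the strip
      // (v0_7) down to row 0, stepping back one output row per store. The
      // pointer only advances when block_width actually covers that row, so
      // for narrow blocks the out-of-range rows are overwritten in place and
      // only the valid ones survive.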
      o = (uint16_t*) ((uintptr_t) o + oN_offset);
      _mm_storeu_si128((__m128i*) o, v0_7);
      uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 7) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_6);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 7) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_5);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 5) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_4);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 5) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_3);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_2);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_1);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      _mm_storeu_si128((__m128i*) o, v0_0);
    }
    o = (uint16_t*) ((uintptr_t) o + tile_hbytes);
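    // Handle the remaining 1-7 rows of the strip. Input pointers past the
    // last valid row are aliased to i0 so the loads stay in bounds; only the
    // first bh lanes of each transposed vector are stored below.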
    if (bh != 0) {
      const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i0;
      }
      const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i0;
      }
      const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i0;
      }
      const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i0;
      }
      const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i0;
      }
      const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
      const __m128i v3_7 = _mm_undefined_si128();

      const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
      const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
      const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
      const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
      const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
      const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
      const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
      const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);

      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
      const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
      const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
      const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
      const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);

      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
      __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
      __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
      __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
      __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);

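      // Write the transposed tail in decreasing power-of-two chunks: 4
      // elements per vector, then 2, then 1, shifting each vector down after
      // every pass so the next chunk sits in the low lanes.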
      if (bh & 4) {
        o = (uint16_t*) ((uintptr_t) o + oN_stride);
        _mm_storel_epi64((__m128i*) o, v0_7);
        uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_6);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_5);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_4);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_3);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_2);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_1);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        _mm_storel_epi64((__m128i*) o, v0_0);
        o += 4;
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
        v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
        v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
        v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
        v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
      }

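      // At least 2 rows left: store the next 2 elements (32 bits) of each
      // vector, then shift the vectors down by 32 bits.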
      if (bh & 2) {
        o = (uint16_t*) ((uintptr_t) o + oN_stride);
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_7));
        uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_6));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_5));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_4));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_3));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_2));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_1));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(v0_0));
        o += 2;
        v0_0 = _mm_srli_epi64(v0_0, 32);
        v0_1 = _mm_srli_epi64(v0_1, 32);
        v0_2 = _mm_srli_epi64(v0_2, 32);
        v0_3 = _mm_srli_epi64(v0_3, 32);
        v0_4 = _mm_srli_epi64(v0_4, 32);
        v0_5 = _mm_srli_epi64(v0_5, 32);
        v0_6 = _mm_srli_epi64(v0_6, 32);
        v0_7 = _mm_srli_epi64(v0_7, 32);
      }
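      // One last row: store the remaining element (16 bits) of each vector.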
      if (bh & 1) {
        o = (uint16_t*) ((uintptr_t) o + oN_stride);
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_7));
        uint16_t* oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = oN;
        }
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_6));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = oN;
        }
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_5));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = oN;
        }
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_4));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = oN;
        }
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_3));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_2));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_1));
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        unaligned_store_u16(o, (uint16_t) _mm_cvtsi128_si32(v0_0));
      }
    }

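    // Advance the input pointers to the next 8 input columns and reposition
    // the output pointer for the corresponding next strip of 8 output rows.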
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
    i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
    i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
    i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
    i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
    i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
    i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
    i7 = (const uint16_t*) ((uintptr_t) i6 + input_stride);
    o = (uint16_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}