// Auto-generated file. Do not edit!
//   Template: src/x32-transposec/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <immintrin.h>

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>
#include <xnnpack/unaligned.h>


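// Transposes a block_width x block_height block of 16-bit elements from
// input (rows input_stride bytes apart) into output (rows output_stride
// bytes apart), working in 8x8 tiles. Rows are always loaded as full
// 16-byte vectors, so the kernel may read past the end of a short row;
// XNN_OOB_READS marks these out-of-bounds reads as intentional.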
void xnn_x16_transposec_ukernel__8x8_reuse_multi_sse2(
    const uint16_t* input,
    uint16_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint16_t));
  assert(input_stride >= block_width * sizeof(uint16_t));

  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(uint16_t);
  const size_t tile_wbytes = tile_width * sizeof(uint16_t);
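  // During one pass down the block, i0 advances by input_stride for each of
  // the round_down_po2(block_height, tile_height) fully-tiled input rows
  // (the tail uses local pointers), and o0..o7 advance by a total of
  // round_down_po2(block_height, 2) elements. These reset offsets rewind
  // the pointers and step them to the next tile of 8 columns.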
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t);

  const uint16_t* i0 = input;
  uint16_t* o0 = (uint16_t*) output;
  uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);
  uint16_t* o2 = (uint16_t*) ((uintptr_t) o1 + output_stride);
  uint16_t* o3 = (uint16_t*) ((uintptr_t) o2 + output_stride);
  uint16_t* o4 = (uint16_t*) ((uintptr_t) o3 + output_stride);
  uint16_t* o5 = (uint16_t*) ((uintptr_t) o4 + output_stride);
  uint16_t* o6 = (uint16_t*) ((uintptr_t) o5 + output_stride);
  uint16_t* o7 = (uint16_t*) ((uintptr_t) o6 + output_stride);

  do {
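    // If fewer than 8 columns remain, redirect the output pointers for the
    // missing columns to o0. They still receive stores, but every store
    // sequence below writes o0 last, so the data at o0 ends up correct.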
    if XNN_UNPREDICTABLE(block_width < 2) {
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 2) {
      o2 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 4) {
      o3 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 4) {
      o4 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 6) {
      o5 = o0;
    }
    if XNN_UNPREDICTABLE(block_width <= 6) {
      o6 = o0;
    }
    if XNN_UNPREDICTABLE(block_width < 8) {
      o7 = o0;
    }
    size_t bh = block_height;
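    // Main loop: transpose one full 8x8 tile per iteration. Eight rows are
    // loaded, then permuted by a three-stage unpack network (16-, 32-, and
    // 64-bit interleaves) that yields the eight transposed columns.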
    for (; bh >= 8; bh -= 8) {
      const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const __m128i v3_7 = _mm_loadu_si128((const __m128i*) i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);

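      // Stage 1: interleave 16-bit lanes of adjacent rows, pairing elements
      // of rows (0,1), (2,3), (4,5), and (6,7) column by column.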
      const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
      const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
      const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
      const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
      const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
      const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
      const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
      const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);

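      // Stage 2: interleave 32-bit lanes, gathering each column's elements
      // from groups of four rows.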
      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
      const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
      const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
      const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
      const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);

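      // Stage 3: interleave 64-bit lanes; after this step v0_N holds column
      // N of the tile, i.e. one transposed output row.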
      const __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
      const __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
      const __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
      const __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
      const __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
      const __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
      const __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
      const __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);

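      // Store the transposed rows, o7 first and o0 last, so that any clamped
      // pointers aliasing o0 are overwritten by the correct column 0 data.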
      _mm_storeu_si128((__m128i*) o7, v0_7);
      o7 = (uint16_t*) ((uintptr_t) o7 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o6, v0_6);
      o6 = (uint16_t*) ((uintptr_t) o6 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o5, v0_5);
      o5 = (uint16_t*) ((uintptr_t) o5 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o4, v0_4);
      o4 = (uint16_t*) ((uintptr_t) o4 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o3, v0_3);
      o3 = (uint16_t*) ((uintptr_t) o3 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o2, v0_2);
      o2 = (uint16_t*) ((uintptr_t) o2 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o1, v0_1);
      o1 = (uint16_t*) ((uintptr_t) o1 + tile_hbytes);
      _mm_storeu_si128((__m128i*) o0, v0_0);
      o0 = (uint16_t*) ((uintptr_t) o0 + tile_hbytes);
    }
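    // Tail: 1 to 7 rows remain. Load them through per-row pointers that
    // clamp to the previous row once bh is exceeded, so no row below the
    // block is ever read.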
    if (bh != 0) {
      const __m128i v3_0 = _mm_loadu_si128((const __m128i*) i0);
      const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const __m128i v3_1 = _mm_loadu_si128((const __m128i*) i1);
      const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const __m128i v3_2 = _mm_loadu_si128((const __m128i*) i2);
      const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i2;
      }
      const __m128i v3_3 = _mm_loadu_si128((const __m128i*) i3);
      const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i3;
      }
      const __m128i v3_4 = _mm_loadu_si128((const __m128i*) i4);
      const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i4;
      }
      const __m128i v3_5 = _mm_loadu_si128((const __m128i*) i5);
      const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i5;
      }
      const __m128i v3_6 = _mm_loadu_si128((const __m128i*) i6);
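      // At most 7 rows remain here, so lane 7 of each column is never
      // stored and the eighth row vector can be left undefined. The same
      // three-stage unpack network as in the main loop then produces the
      // transposed columns.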
      const __m128i v3_7 = _mm_undefined_si128();

      const __m128i v2_0 = _mm_unpacklo_epi16(v3_0, v3_1);
      const __m128i v2_1 = _mm_unpackhi_epi16(v3_0, v3_1);
      const __m128i v2_2 = _mm_unpacklo_epi16(v3_2, v3_3);
      const __m128i v2_3 = _mm_unpackhi_epi16(v3_2, v3_3);
      const __m128i v2_4 = _mm_unpacklo_epi16(v3_4, v3_5);
      const __m128i v2_5 = _mm_unpackhi_epi16(v3_4, v3_5);
      const __m128i v2_6 = _mm_unpacklo_epi16(v3_6, v3_7);
      const __m128i v2_7 = _mm_unpackhi_epi16(v3_6, v3_7);

      const __m128i v1_0 = _mm_unpacklo_epi32(v2_0, v2_2);
      const __m128i v1_1 = _mm_unpackhi_epi32(v2_0, v2_2);
      const __m128i v1_2 = _mm_unpacklo_epi32(v2_1, v2_3);
      const __m128i v1_3 = _mm_unpackhi_epi32(v2_1, v2_3);
      const __m128i v1_4 = _mm_unpacklo_epi32(v2_4, v2_6);
      const __m128i v1_5 = _mm_unpackhi_epi32(v2_4, v2_6);
      const __m128i v1_6 = _mm_unpacklo_epi32(v2_5, v2_7);
      const __m128i v1_7 = _mm_unpackhi_epi32(v2_5, v2_7);

      __m128i v0_0 = _mm_unpacklo_epi64(v1_0, v1_4);
      __m128i v0_1 = _mm_unpackhi_epi64(v1_0, v1_4);
      __m128i v0_2 = _mm_unpacklo_epi64(v1_1, v1_5);
      __m128i v0_3 = _mm_unpackhi_epi64(v1_1, v1_5);
      __m128i v0_4 = _mm_unpacklo_epi64(v1_2, v1_6);
      __m128i v0_5 = _mm_unpackhi_epi64(v1_2, v1_6);
      __m128i v0_6 = _mm_unpacklo_epi64(v1_3, v1_7);
      __m128i v0_7 = _mm_unpackhi_epi64(v1_3, v1_7);

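      // Store the remaining bh elements of each column in halving steps:
      // 4 elements, then 2, then 1, moving the still-unstored lanes down
      // after each step. Stores again run from o7 down to o0 so clamped
      // pointers are overwritten by column 0.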
      if (bh & 4) {
        _mm_storel_epi64((__m128i*) o7, v0_7);
        o7 += 4;
        _mm_storel_epi64((__m128i*) o6, v0_6);
        o6 += 4;
        _mm_storel_epi64((__m128i*) o5, v0_5);
        o5 += 4;
        _mm_storel_epi64((__m128i*) o4, v0_4);
        o4 += 4;
        _mm_storel_epi64((__m128i*) o3, v0_3);
        o3 += 4;
        _mm_storel_epi64((__m128i*) o2, v0_2);
        o2 += 4;
        _mm_storel_epi64((__m128i*) o1, v0_1);
        o1 += 4;
        _mm_storel_epi64((__m128i*) o0, v0_0);
        o0 += 4;
        v0_0 = _mm_unpackhi_epi64(v0_0, v0_0);
        v0_1 = _mm_unpackhi_epi64(v0_1, v0_1);
        v0_2 = _mm_unpackhi_epi64(v0_2, v0_2);
        v0_3 = _mm_unpackhi_epi64(v0_3, v0_3);
        v0_4 = _mm_unpackhi_epi64(v0_4, v0_4);
        v0_5 = _mm_unpackhi_epi64(v0_5, v0_5);
        v0_6 = _mm_unpackhi_epi64(v0_6, v0_6);
        v0_7 = _mm_unpackhi_epi64(v0_7, v0_7);
      }

      if (bh & 2) {
        unaligned_store_u32(o7, (uint32_t) _mm_cvtsi128_si32(v0_7));
        o7 += 2;
        unaligned_store_u32(o6, (uint32_t) _mm_cvtsi128_si32(v0_6));
        o6 += 2;
        unaligned_store_u32(o5, (uint32_t) _mm_cvtsi128_si32(v0_5));
        o5 += 2;
        unaligned_store_u32(o4, (uint32_t) _mm_cvtsi128_si32(v0_4));
        o4 += 2;
        unaligned_store_u32(o3, (uint32_t) _mm_cvtsi128_si32(v0_3));
        o3 += 2;
        unaligned_store_u32(o2, (uint32_t) _mm_cvtsi128_si32(v0_2));
        o2 += 2;
        unaligned_store_u32(o1, (uint32_t) _mm_cvtsi128_si32(v0_1));
        o1 += 2;
        unaligned_store_u32(o0, (uint32_t) _mm_cvtsi128_si32(v0_0));
        o0 += 2;
        v0_0 = _mm_srli_epi64(v0_0, 32);
        v0_1 = _mm_srli_epi64(v0_1, 32);
        v0_2 = _mm_srli_epi64(v0_2, 32);
        v0_3 = _mm_srli_epi64(v0_3, 32);
        v0_4 = _mm_srli_epi64(v0_4, 32);
        v0_5 = _mm_srli_epi64(v0_5, 32);
        v0_6 = _mm_srli_epi64(v0_6, 32);
        v0_7 = _mm_srli_epi64(v0_7, 32);
      }
      if (bh & 1) {
        unaligned_store_u16(o7, (uint16_t) _mm_cvtsi128_si32(v0_7));
        unaligned_store_u16(o6, (uint16_t) _mm_cvtsi128_si32(v0_6));
        unaligned_store_u16(o5, (uint16_t) _mm_cvtsi128_si32(v0_5));
        unaligned_store_u16(o4, (uint16_t) _mm_cvtsi128_si32(v0_4));
        unaligned_store_u16(o3, (uint16_t) _mm_cvtsi128_si32(v0_3));
        unaligned_store_u16(o2, (uint16_t) _mm_cvtsi128_si32(v0_2));
        unaligned_store_u16(o1, (uint16_t) _mm_cvtsi128_si32(v0_1));
        unaligned_store_u16(o0, (uint16_t) _mm_cvtsi128_si32(v0_0));
      }
    }

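    // Rewind the pointers and advance to the next tile of 8 columns;
    // doz() is XNNPACK's saturating (difference-or-zero) subtraction.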
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
    o0 = (uint16_t*) ((uintptr_t) o0 + output_reset);
    o1 = (uint16_t*) ((uintptr_t) o1 + output_reset);
    o2 = (uint16_t*) ((uintptr_t) o2 + output_reset);
    o3 = (uint16_t*) ((uintptr_t) o3 + output_reset);
    o4 = (uint16_t*) ((uintptr_t) o4 + output_reset);
    o5 = (uint16_t*) ((uintptr_t) o5 + output_reset);
    o6 = (uint16_t*) ((uintptr_t) o6 + output_reset);
    o7 = (uint16_t*) ((uintptr_t) o7 + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}