// Auto-generated file. Do not edit!
//   Template: src/x32-transposec/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <wasm_simd128.h>

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>

void xnn_x16_transposec_ukernel__8x8_reuse_mov_wasmsimd(
    const uint16_t* input,
    uint16_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint16_t));
  assert(input_stride >= block_width * sizeof(uint16_t));

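  // The block is transposed in 8x8 tiles of uint16_t elements. After a column
  // of tiles has been consumed, input_reset rewinds the input pointer to the
  // top of the next tile column, and output_reset moves the output pointer to
  // the next group of up to 8 output rows.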
  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(uint16_t);
  const size_t tile_wbytes = tile_width * sizeof(uint16_t);
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint16_t) - tile_hbytes;

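  // "reuse-mov" output scheme: a single output pointer o is shared by all 8
  // rows of a tile. It is pre-biased by -tile_hbytes so that the per-tile
  // advance by oN_offset (= oN_stride + tile_hbytes) lands on the last valid
  // output row; it is then stepped backwards one output row per store. When
  // block_width is too small for a row, o is left in place, so the redundant
  // store is simply overwritten by the next, valid one.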
  const uint16_t* i0 = input;
  uint16_t* o = (uint16_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;

  do {
    const size_t rem = min(block_width - 1, 7);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
    for (; bh >= 8; bh -= 8) {
      const v128_t v3_0 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_1 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_2 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_3 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_4 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_5 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_6 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);
      const v128_t v3_7 = wasm_v128_load(i0);
      i0 = (uint16_t*) ((uintptr_t) i0 + input_stride);

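      // Transpose the 8x8 tile with three rounds of 16-bit interleaves; each
      // round combines vector K with vector K+4 of the previous stage, so
      // v0_K ends up holding column K of the loaded tile.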
      const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);

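      // Store the transposed rows in reverse order (v0_7 first), stepping o
      // back one output row per store. The conditional moves keep o in place
      // once the row index reaches block_width, so stores for the missing
      // columns are overwritten by the following, valid ones.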
      o = (uint16_t*) ((uintptr_t) o + oN_offset);
      wasm_v128_store(o, v0_7);
      uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 7) {
        o = oN;
      }
      wasm_v128_store(o, v0_6);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 7) {
        o = oN;
      }
      wasm_v128_store(o, v0_5);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 5) {
        o = oN;
      }
      wasm_v128_store(o, v0_4);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 5) {
        o = oN;
      }
      wasm_v128_store(o, v0_3);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = oN;
      }
      wasm_v128_store(o, v0_2);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = oN;
      }
      wasm_v128_store(o, v0_1);
      oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = oN;
      }
      wasm_v128_store(o, v0_0);
    }
    o = (uint16_t*) ((uintptr_t) o + tile_hbytes);

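    // Transpose the remaining 1-7 rows. Row pointers beyond bh are clamped to
    // the previous row, so the out-of-range loads only re-read valid rows.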
    if (bh != 0) {
      const v128_t v3_0 = wasm_v128_load(i0);
      const uint16_t *i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const v128_t v3_1 = wasm_v128_load(i1);
      const uint16_t *i2 = (const uint16_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const v128_t v3_2 = wasm_v128_load(i2);
      const uint16_t *i3 = (const uint16_t*) ((uintptr_t) i2 + input_stride);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i2;
      }
      const v128_t v3_3 = wasm_v128_load(i3);
      const uint16_t *i4 = (const uint16_t*) ((uintptr_t) i3 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i3;
      }
      const v128_t v3_4 = wasm_v128_load(i4);
      const uint16_t *i5 = (const uint16_t*) ((uintptr_t) i4 + input_stride);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i4;
      }
      const v128_t v3_5 = wasm_v128_load(i5);
      const uint16_t *i6 = (const uint16_t*) ((uintptr_t) i5 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i5;
      }
      const v128_t v3_6 = wasm_v128_load(i6);
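      // There is no 8th remainder row; substitute an all-zero vector.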
      const v128_t v3_7 = wasm_v128_xor(v3_0, v3_0);

      const v128_t v2_0 = wasm_v16x8_shuffle(v3_0, v3_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_1 = wasm_v16x8_shuffle(v3_0, v3_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_2 = wasm_v16x8_shuffle(v3_1, v3_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_3 = wasm_v16x8_shuffle(v3_1, v3_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_4 = wasm_v16x8_shuffle(v3_2, v3_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_5 = wasm_v16x8_shuffle(v3_2, v3_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v2_6 = wasm_v16x8_shuffle(v3_3, v3_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v2_7 = wasm_v16x8_shuffle(v3_3, v3_7, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_0 = wasm_v16x8_shuffle(v2_0, v2_4, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_1 = wasm_v16x8_shuffle(v2_0, v2_4, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_2 = wasm_v16x8_shuffle(v2_1, v2_5, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_3 = wasm_v16x8_shuffle(v2_1, v2_5, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_4 = wasm_v16x8_shuffle(v2_2, v2_6, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_5 = wasm_v16x8_shuffle(v2_2, v2_6, 4, 12, 5, 13, 6, 14, 7, 15);
      const v128_t v1_6 = wasm_v16x8_shuffle(v2_3, v2_7, 0, 8, 1, 9, 2, 10, 3, 11);
      const v128_t v1_7 = wasm_v16x8_shuffle(v2_3, v2_7, 4, 12, 5, 13, 6, 14, 7, 15);

      v128_t v0_0 = wasm_v16x8_shuffle(v1_0, v1_4, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_1 = wasm_v16x8_shuffle(v1_0, v1_4, 4, 12, 5, 13, 6, 14, 7, 15);
      v128_t v0_2 = wasm_v16x8_shuffle(v1_1, v1_5, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_3 = wasm_v16x8_shuffle(v1_1, v1_5, 4, 12, 5, 13, 6, 14, 7, 15);
      v128_t v0_4 = wasm_v16x8_shuffle(v1_2, v1_6, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_5 = wasm_v16x8_shuffle(v1_2, v1_6, 4, 12, 5, 13, 6, 14, 7, 15);
      v128_t v0_6 = wasm_v16x8_shuffle(v1_3, v1_7, 0, 8, 1, 9, 2, 10, 3, 11);
      v128_t v0_7 = wasm_v16x8_shuffle(v1_3, v1_7, 4, 12, 5, 13, 6, 14, 7, 15);

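      // Write the transposed remainder column by column: 4 elements at a time
      // (one 64-bit lane), then 2 (one 32-bit lane), then a single element,
      // shifting the already-written elements out of each vector in between.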
      if (bh & 4) {
        o = (uint16_t*) ((uintptr_t) o + oN_stride);
        *((double*) o) = wasm_f64x2_extract_lane(v0_7, 0);
        uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = oN;
        }
        *((double*) o) = wasm_f64x2_extract_lane(v0_6, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = oN;
        }
        *((double*) o) = wasm_f64x2_extract_lane(v0_5, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = oN;
        }
        *((double*) o) = wasm_f64x2_extract_lane(v0_4, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = oN;
        }
        *((double*) o) = wasm_f64x2_extract_lane(v0_3, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        *((double*) o) = wasm_f64x2_extract_lane(v0_2, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        *((double*) o) = wasm_f64x2_extract_lane(v0_1, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        *((double*) o) = wasm_f64x2_extract_lane(v0_0, 0);
        o += 4;
        v0_0 = wasm_v64x2_shuffle(v0_0, v0_0, 1, 1);
        v0_1 = wasm_v64x2_shuffle(v0_1, v0_1, 1, 1);
        v0_2 = wasm_v64x2_shuffle(v0_2, v0_2, 1, 1);
        v0_3 = wasm_v64x2_shuffle(v0_3, v0_3, 1, 1);
        v0_4 = wasm_v64x2_shuffle(v0_4, v0_4, 1, 1);
        v0_5 = wasm_v64x2_shuffle(v0_5, v0_5, 1, 1);
        v0_6 = wasm_v64x2_shuffle(v0_6, v0_6, 1, 1);
        v0_7 = wasm_v64x2_shuffle(v0_7, v0_7, 1, 1);
      }

      if (bh & 2) {
        o = (uint16_t*) ((uintptr_t) o + oN_stride);
        *((float*) o) = wasm_f32x4_extract_lane(v0_7, 0);
        uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = oN;
        }
        *((float*) o) = wasm_f32x4_extract_lane(v0_6, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = oN;
        }
        *((float*) o) = wasm_f32x4_extract_lane(v0_5, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = oN;
        }
        *((float*) o) = wasm_f32x4_extract_lane(v0_4, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = oN;
        }
        *((float*) o) = wasm_f32x4_extract_lane(v0_3, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        *((float*) o) = wasm_f32x4_extract_lane(v0_2, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        *((float*) o) = wasm_f32x4_extract_lane(v0_1, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        *((float*) o) = wasm_f32x4_extract_lane(v0_0, 0);
        o += 2;
        v0_0 = wasm_u64x2_shr(v0_0, 32);
        v0_1 = wasm_u64x2_shr(v0_1, 32);
        v0_2 = wasm_u64x2_shr(v0_2, 32);
        v0_3 = wasm_u64x2_shr(v0_3, 32);
        v0_4 = wasm_u64x2_shr(v0_4, 32);
        v0_5 = wasm_u64x2_shr(v0_5, 32);
        v0_6 = wasm_u64x2_shr(v0_6, 32);
        v0_7 = wasm_u64x2_shr(v0_7, 32);
      }
      if (bh & 1) {
        o = (uint16_t*) ((uintptr_t) o + oN_stride);
        *o = wasm_i16x8_extract_lane(v0_7, 0);
        uint16_t *oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = oN;
        }
        *o = wasm_i16x8_extract_lane(v0_6, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = oN;
        }
        *o = wasm_i16x8_extract_lane(v0_5, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = oN;
        }
        *o = wasm_i16x8_extract_lane(v0_4, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = oN;
        }
        *o = wasm_i16x8_extract_lane(v0_3, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = oN;
        }
        *o = wasm_i16x8_extract_lane(v0_2, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = oN;
        }
        *o = wasm_i16x8_extract_lane(v0_1, 0);
        oN = (uint16_t*) ((uintptr_t) o + minus_output_stride);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = oN;
        }
        *o = wasm_i16x8_extract_lane(v0_0, 0);
      }
    }

    i0 = (const uint16_t*) ((uintptr_t) i0 + input_reset);
    o = (uint16_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}