// Auto-generated file. Do not edit!
// Template: src/x32-transposec/neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <arm_neon.h>

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/transpose.h>

void xnn_x8_transposec_ukernel__8x8_reuse_dec_zip_neon(
    const uint8_t* input,
    uint8_t* output,
    size_t input_stride,
    size_t output_stride,
    size_t block_width,
    size_t block_height) XNN_OOB_READS
{
  assert(output_stride >= block_height * sizeof(uint8_t));
  assert(input_stride >= block_width * sizeof(uint8_t));

  const size_t tile_height = 8;
  const size_t tile_width = 8;
  const size_t tile_hbytes = tile_height * sizeof(uint8_t);
  const size_t tile_wbytes = tile_width * sizeof(uint8_t);
  const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride;
  const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint8_t) - tile_hbytes;

  const uint8_t* i0 = input;
  uint8_t* o = (uint8_t*) ((uintptr_t) output - tile_hbytes);
  const size_t minus_output_stride = -output_stride;

  do {
    const size_t rem = min(block_width - 1, 7);
    const size_t oN_stride = rem * output_stride;
    const size_t oN_offset = oN_stride + tile_hbytes;
    size_t bh = block_height;
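    // Full 8-row tiles: load 8 rows of 8 bytes each, transpose them with
    // three rounds of vzip_u8, then store the 8 transposed rows into
    // consecutive output columns.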
    for (; bh >= 8; bh -= 8) {
      const uint8x8_t v3_0 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
      const uint8x8_t v3_1 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
      const uint8x8_t v3_2 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
      const uint8x8_t v3_3 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
      const uint8x8_t v3_4 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
      const uint8x8_t v3_5 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
      const uint8x8_t v3_6 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);
      const uint8x8_t v3_7 = vld1_u8(i0); i0 = (uint8_t*) ((uintptr_t) i0 + input_stride);

      const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
      const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
      const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
      const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);

      const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
      const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
      const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
      const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
      const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
      const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
      const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
      const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);

      o = (uint8_t*) ((uintptr_t) o + oN_offset);
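      // Store the transposed rows into output columns, starting at the last
      // valid column and stepping backwards by output_stride; the block_width
      // checks skip the pointer decrement for columns beyond the block edge.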
      vst1_u8(o, v0_3.val[1]);
      if XNN_UNPREDICTABLE(block_width > 7) {
        o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1_u8(o, v0_3.val[0]);
      if XNN_UNPREDICTABLE(block_width >= 7) {
        o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1_u8(o, v0_2.val[1]);
      if XNN_UNPREDICTABLE(block_width > 5) {
        o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1_u8(o, v0_2.val[0]);
      if XNN_UNPREDICTABLE(block_width >= 5) {
        o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1_u8(o, v0_1.val[1]);
      if XNN_UNPREDICTABLE(block_width > 3) {
        o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1_u8(o, v0_1.val[0]);
      if XNN_UNPREDICTABLE(block_width >= 3) {
        o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1_u8(o, v0_0.val[1]);
      if XNN_UNPREDICTABLE(block_width > 1) {
        o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
      }
      vst1_u8(o, v0_0.val[0]);
    }
    o = (uint8_t*) ((uintptr_t) o + tile_hbytes);

    if (bh != 0) {
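      // Tail: fewer than 8 rows remain. Clamp the row pointers so missing
      // rows re-read the last valid row, and zero the eighth row before
      // transposing.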
      const uint8x8_t v3_0 = vld1_u8(i0);
      const uint8_t *i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
      if XNN_UNPREDICTABLE(bh < 2) {
        i1 = i0;
      }
      const uint8x8_t v3_1 = vld1_u8(i1);
      const uint8_t *i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 2) {
        i2 = i1;
      }
      const uint8x8_t v3_2 = vld1_u8(i2);
      const uint8_t *i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
      if XNN_UNPREDICTABLE(bh < 4) {
        i3 = i2;
      }
      const uint8x8_t v3_3 = vld1_u8(i3);
      const uint8_t *i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 4) {
        i4 = i3;
      }
      const uint8x8_t v3_4 = vld1_u8(i4);
      const uint8_t *i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
      if XNN_UNPREDICTABLE(bh < 6) {
        i5 = i4;
      }
      const uint8x8_t v3_5 = vld1_u8(i5);
      const uint8_t *i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
      if XNN_UNPREDICTABLE(bh <= 6) {
        i6 = i5;
      }
      const uint8x8_t v3_6 = vld1_u8(i6);
      const uint8x8_t v3_7 = vmov_n_u8(0);

      const uint8x8x2_t v2_0 = vzip_u8(v3_0, v3_4);
      const uint8x8x2_t v2_1 = vzip_u8(v3_1, v3_5);
      const uint8x8x2_t v2_2 = vzip_u8(v3_2, v3_6);
      const uint8x8x2_t v2_3 = vzip_u8(v3_3, v3_7);

      const uint8x8x2_t v1_0 = vzip_u8(v2_0.val[0], v2_2.val[0]);
      const uint8x8x2_t v1_1 = vzip_u8(v2_0.val[1], v2_2.val[1]);
      const uint8x8x2_t v1_2 = vzip_u8(v2_1.val[0], v2_3.val[0]);
      const uint8x8x2_t v1_3 = vzip_u8(v2_1.val[1], v2_3.val[1]);
      const uint8x8x2_t v0_0 = vzip_u8(v1_0.val[0], v1_2.val[0]);
      const uint8x8x2_t v0_1 = vzip_u8(v1_0.val[1], v1_2.val[1]);
      const uint8x8x2_t v0_2 = vzip_u8(v1_1.val[0], v1_3.val[0]);
      const uint8x8x2_t v0_3 = vzip_u8(v1_1.val[1], v1_3.val[1]);

      uint8x8_t v0_low = v0_0.val[0];
      uint8x8_t v1_low = v0_0.val[1];
      uint8x8_t v2_low = v0_1.val[0];
      uint8x8_t v3_low = v0_1.val[1];
      uint8x8_t v4_low = v0_2.val[0];
      uint8x8_t v5_low = v0_2.val[1];
      uint8x8_t v6_low = v0_3.val[0];
      uint8x8_t v7_low = v0_3.val[1];

      if (bh & 4) {
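        // 4 or more rows left: store the first 4 bytes of each column, then
        // rotate each vector down by 4 bytes with vext_u8 so the next pass
        // sees the remaining lanes.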
        o = (uint8_t*) ((uintptr_t) o + oN_stride);
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v7_low), 0);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v6_low), 0);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v5_low), 0);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v4_low), 0);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v3_low), 0);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v2_low), 0);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v1_low), 0);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u32((void*) o, vreinterpret_u32_u8(v0_low), 0); o += 4;
        v0_low = vext_u8(v0_low, v0_low, 4);
        v1_low = vext_u8(v1_low, v1_low, 4);
        v2_low = vext_u8(v2_low, v2_low, 4);
        v3_low = vext_u8(v3_low, v3_low, 4);
        v4_low = vext_u8(v4_low, v4_low, 4);
        v5_low = vext_u8(v5_low, v5_low, 4);
        v6_low = vext_u8(v6_low, v6_low, 4);
        v7_low = vext_u8(v7_low, v7_low, 4);
      }
      if (bh & 2) {
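        // 2 or more rows left: store 2 bytes per column, then rotate by 2 bytes.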
        o = (uint8_t*) ((uintptr_t) o + oN_stride);
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v7_low), 0);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v6_low), 0);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v5_low), 0);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v4_low), 0);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v3_low), 0);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v2_low), 0);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v1_low), 0);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u16((void*) o, vreinterpret_u16_u8(v0_low), 0); o += 2;
        v0_low = vext_u8(v0_low, v0_low, 2);
        v1_low = vext_u8(v1_low, v1_low, 2);
        v2_low = vext_u8(v2_low, v2_low, 2);
        v3_low = vext_u8(v3_low, v3_low, 2);
        v4_low = vext_u8(v4_low, v4_low, 2);
        v5_low = vext_u8(v5_low, v5_low, 2);
        v6_low = vext_u8(v6_low, v6_low, 2);
        v7_low = vext_u8(v7_low, v7_low, 2);
      }
      if (bh & 1) {
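        // Last odd row: store the final byte of each column.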
        o = (uint8_t*) ((uintptr_t) o + oN_stride);
        vst1_lane_u8(o, v7_low, 0);
        if XNN_UNPREDICTABLE(block_width > 7) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u8(o, v6_low, 0);
        if XNN_UNPREDICTABLE(block_width >= 7) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u8(o, v5_low, 0);
        if XNN_UNPREDICTABLE(block_width > 5) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u8(o, v4_low, 0);
        if XNN_UNPREDICTABLE(block_width >= 5) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u8(o, v3_low, 0);
        if XNN_UNPREDICTABLE(block_width > 3) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u8(o, v2_low, 0);
        if XNN_UNPREDICTABLE(block_width >= 3) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u8(o, v1_low, 0);
        if XNN_UNPREDICTABLE(block_width > 1) {
          o = (uint8_t*) ((uintptr_t) o + minus_output_stride);
        }
        vst1_lane_u8(o, v0_low, 0);
      }
    }

    i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
    o = (uint8_t*) ((uintptr_t) o + output_reset);
    block_width = doz(block_width, tile_width);
  } while (block_width != 0);
}