/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#ifdef __aarch64__

namespace {

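// Transpose-interleave kernel for 8-bit data: groups of four consecutive input
// rows are interleaved byte-wise (one byte from each row per column) and written
// out in blocks of 12 columns. Heights that are not a multiple of four are padded
// with a row of zeroes, so each output block occupies 12 * roundup(height, 4) bytes.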
void a64_transpose_interleave_12_1x4(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
{
    // Scratch row used to stand in for missing input rows when height is not a
    // multiple of 4; only zeroed when that padding is actually needed.
    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));

    if (height % 4) {
        memset(pad_row, 0, width * sizeof(uint8_t));
    }

    // Each output block is 12 columns wide, with the height rounded up to a multiple of 4.
    size_t out_stride = 12 * roundup<size_t>(height, 4) * sizeof(uint8_t);

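    // Main loop (labels 1-9) transposes eight input rows per pass; the tail loop
    // (labels 11-19) handles the remaining rows four at a time. Each pass walks
    // the width in steps of 48, 12, 4 and finally 1 column(s).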
    __asm__ __volatile__(
      "cmp %x[height], #0x8\n"
      "blt 10f\n"
      "1:" // Main row loop: Head
      "mov x28, %x[in]\n"
      "mov x27, %x[out]\n"
      "add x26, x28, %x[in_stride]\n"
      "add x25, x26, %x[in_stride]\n"
      "add x24, x25, %x[in_stride]\n"
      "add x23, x24, %x[in_stride]\n"
      "add x22, x23, %x[in_stride]\n"
      "add x21, x22, %x[in_stride]\n"
      "add x20, x21, %x[in_stride]\n"
      "add %x[in], x20, %x[in_stride]\n"
      "sub %x[height], %x[height], #0x8\n"
      "mov x19, %x[width]\n"
      "cmp x19, #0x30\n"
      "blt 3f\n"
59 "2:" // Main row loop: Unroll column loop
60 "ldr q18, [x28], #0x10\n"
61 "sub x19, x19, #0x30\n"
62 "ldr q23, [x26], #0x10\n"
63 "cmp x19, #0x30\n"
64 "ldr q16, [x25], #0x10\n"
65 "zip1 v19.16b, v18.16b, v16.16b\n"
66 "ldr q17, [x28], #0x10\n"
67 "zip2 v22.16b, v18.16b, v16.16b\n"
68 "ldr q11, [x26], #0x10\n"
69 "ldr q16, [x25], #0x10\n"
70 "zip1 v21.16b, v17.16b, v16.16b\n"
71 "ldr q18, [x28], #0x10\n"
72 "zip2 v10.16b, v17.16b, v16.16b\n"
73 "ldr q9, [x26], #0x10\n"
74 "ldr q17, [x25], #0x10\n"
75 "zip1 v8.16b, v18.16b, v17.16b\n"
76 "ldr q16, [x24], #0x10\n"
77 "zip2 v7.16b, v18.16b, v17.16b\n"
78 "ldr q20, [x23], #0x10\n"
79 "ldr q6, [x22], #0x10\n"
80 "zip1 v17.16b, v23.16b, v16.16b\n"
81 "ldr q5, [x24], #0x10\n"
82 "zip2 v16.16b, v23.16b, v16.16b\n"
83 "ldr q4, [x23], #0x10\n"
84 "zip1 v3.16b, v19.16b, v17.16b\n"
85 "ldr q2, [x22], #0x10\n"
86 "zip2 v1.16b, v19.16b, v17.16b\n"
87 "ldr q19, [x21], #0x10\n"
88 "zip1 v0.16b, v22.16b, v16.16b\n"
89 "ldr q31, [x24], #0x10\n"
90 "zip2 v30.16b, v22.16b, v16.16b\n"
91 "ldr q29, [x23], #0x10\n"
92 "zip1 v16.16b, v11.16b, v5.16b\n"
93 "ldr q28, [x22], #0x10\n"
94 "zip1 v27.16b, v21.16b, v16.16b\n"
95 "ldr q26, [x21], #0x10\n"
96 "zip1 v18.16b, v20.16b, v19.16b\n"
97 "ldr q17, [x20], #0x10\n"
98 "zip2 v20.16b, v20.16b, v19.16b\n"
99 "ldr q25, [x21], #0x10\n"
100 "zip2 v24.16b, v21.16b, v16.16b\n"
101 "zip1 v23.16b, v4.16b, v26.16b\n"
102 "ldr q22, [x20], #0x10\n"
103 "zip1 v16.16b, v6.16b, v17.16b\n"
104 "ldr q21, [x20], #0x10\n"
105 "zip1 v19.16b, v18.16b, v16.16b\n"
106 "zip2 v18.16b, v18.16b, v16.16b\n"
107 "str q3, [x27, #0x0]\n"
108 "zip2 v16.16b, v6.16b, v17.16b\n"
109 "str q1, [x27, #0x10]\n"
110 "zip1 v17.16b, v20.16b, v16.16b\n"
111 "str q0, [x27, #0x20]\n"
112 "zip2 v20.16b, v20.16b, v16.16b\n"
113 "str q19, [x27, #0x30]\n"
114 "zip1 v16.16b, v2.16b, v22.16b\n"
115 "str q18, [x27, #0x40]\n"
116 "zip1 v19.16b, v23.16b, v16.16b\n"
117 "str q17, [x27, #0x50]\n"
118 "add x27, x27, %x[out_stride]\n"
119 "zip2 v18.16b, v23.16b, v16.16b\n"
120 "str q30, [x27, #0x0]\n"
121 "zip2 v17.16b, v11.16b, v5.16b\n"
122 "str q27, [x27, #0x10]\n"
123 "zip1 v16.16b, v10.16b, v17.16b\n"
124 "str q24, [x27, #0x20]\n"
125 "zip2 v17.16b, v10.16b, v17.16b\n"
126 "str q20, [x27, #0x30]\n"
127 "zip1 v20.16b, v9.16b, v31.16b\n"
128 "str q19, [x27, #0x40]\n"
129 "zip1 v19.16b, v8.16b, v20.16b\n"
130 "str q18, [x27, #0x50]\n"
131 "add x27, x27, %x[out_stride]\n"
132 "zip2 v18.16b, v4.16b, v26.16b\n"
133 "str q16, [x27, #0x0]\n"
134 "zip2 v16.16b, v2.16b, v22.16b\n"
135 "str q17, [x27, #0x10]\n"
136 "zip1 v17.16b, v18.16b, v16.16b\n"
137 "str q19, [x27, #0x20]\n"
138 "zip2 v16.16b, v18.16b, v16.16b\n"
139 "str q17, [x27, #0x30]\n"
140 "zip1 v19.16b, v29.16b, v25.16b\n"
141 "str q16, [x27, #0x40]\n"
142 "zip1 v17.16b, v28.16b, v21.16b\n"
143 "zip1 v16.16b, v19.16b, v17.16b\n"
144 "str q16, [x27, #0x50]\n"
145 "add x27, x27, %x[out_stride]\n"
146 "zip2 v16.16b, v8.16b, v20.16b\n"
147 "str q16, [x27, #0x0]\n"
148 "zip2 v18.16b, v9.16b, v31.16b\n"
149 "zip2 v17.16b, v19.16b, v17.16b\n"
150 "zip1 v16.16b, v7.16b, v18.16b\n"
151 "str q16, [x27, #0x10]\n"
152 "zip2 v16.16b, v7.16b, v18.16b\n"
153 "str q16, [x27, #0x20]\n"
154 "zip2 v18.16b, v29.16b, v25.16b\n"
155 "str q17, [x27, #0x30]\n"
156 "zip2 v17.16b, v28.16b, v21.16b\n"
157 "zip1 v16.16b, v18.16b, v17.16b\n"
158 "str q16, [x27, #0x40]\n"
159 "zip2 v16.16b, v18.16b, v17.16b\n"
160 "str q16, [x27, #0x50]\n"
161 "add x27, x27, %x[out_stride]\n"
162 "bge 2b\n"
163 "3:" // Main row loop: Unroll column loop skip
164 "cmp x19, #0xc\n"
165 "blt 5f\n"
166 "4:" // Main row loop: Column loop
167 "ldr d19, [x28], #0x8\n"
168 "sub x19, x19, #0xc\n"
169 "ldr d18, [x26], #0x8\n"
170 "cmp x19, #0xc\n"
171 "ldr d17, [x25], #0x8\n"
172 "ldr d16, [x24], #0x8\n"
173 "ldr d24, [x23], #0x8\n"
174 "ld1 { v19.s }[2], [x28], #0x4\n"
175 "ld1 { v18.s }[2], [x26], #0x4\n"
176 "ld1 { v17.s }[2], [x25], #0x4\n"
177 "zip1 v23.16b, v19.16b, v17.16b\n"
178 "ld1 { v16.s }[2], [x24], #0x4\n"
179 "zip2 v20.16b, v19.16b, v17.16b\n"
180 "ld1 { v24.s }[2], [x23], #0x4\n"
181 "ldr d22, [x22], #0x8\n"
182 "zip1 v17.16b, v18.16b, v16.16b\n"
183 "ldr d19, [x21], #0x8\n"
184 "zip2 v16.16b, v18.16b, v16.16b\n"
185 "ld1 { v22.s }[2], [x22], #0x4\n"
186 "zip1 v18.16b, v23.16b, v17.16b\n"
187 "ldr d21, [x20], #0x8\n"
188 "zip2 v17.16b, v23.16b, v17.16b\n"
189 "ld1 { v19.s }[2], [x21], #0x4\n"
190 "zip1 v16.16b, v20.16b, v16.16b\n"
191 "ld1 { v21.s }[2], [x20], #0x4\n"
192 "zip1 v20.16b, v24.16b, v19.16b\n"
193 "str q18, [x27, #0x0]\n"
194 "zip2 v19.16b, v24.16b, v19.16b\n"
195 "str q17, [x27, #0x10]\n"
196 "str q16, [x27, #0x20]\n"
197 "zip1 v18.16b, v22.16b, v21.16b\n"
198 "zip2 v17.16b, v22.16b, v21.16b\n"
199 "zip1 v16.16b, v20.16b, v18.16b\n"
200 "str q16, [x27, #0x30]\n"
201 "zip2 v16.16b, v20.16b, v18.16b\n"
202 "str q16, [x27, #0x40]\n"
203 "zip1 v16.16b, v19.16b, v17.16b\n"
204 "str q16, [x27, #0x50]\n"
205 "add x27, x27, %x[out_stride]\n"
206 "bge 4b\n"
207 "5:" // Main row loop: Column loop skip
208 "cmp x19, #0x4\n"
209 "blt 7f\n"
210 "6:" // Main row loop: width 4 loop: loop
211 "ldr s18, [x28], #0x4\n"
212 "sub x19, x19, #0x4\n"
213 "ldr s17, [x26], #0x4\n"
214 "cmp x19, #0x4\n"
215 "ldr s16, [x25], #0x4\n"
216 "zip1 v18.16b, v18.16b, v16.16b\n"
217 "ldr s16, [x24], #0x4\n"
218 "ldr s20, [x23], #0x4\n"
219 "zip1 v16.16b, v17.16b, v16.16b\n"
220 "ldr s19, [x22], #0x4\n"
221 "ldr s17, [x21], #0x4\n"
222 "zip1 v18.16b, v18.16b, v16.16b\n"
223 "ldr s16, [x20], #0x4\n"
224 "zip1 v17.16b, v20.16b, v17.16b\n"
225 "str q18, [x27, #0x0]\n"
226 "zip1 v16.16b, v19.16b, v16.16b\n"
227 "zip1 v16.16b, v17.16b, v16.16b\n"
228 "str q16, [x27, #0x30]\n"
229 "add x27, x27, #0x10\n"
230 "bge 6b\n"
231 "7:" // Main row loop: width 4 loop: skip
232 "cmp x19, #0x1\n"
233 "blt 9f\n"
234 "8:" // Main row loop: width 1 loop: loop
235 "ldr b18, [x28], #0x1\n"
236 "sub x19, x19, #0x1\n"
237 "ldr b17, [x26], #0x1\n"
238 "cmp x19, #0x1\n"
239 "ldr b16, [x25], #0x1\n"
240 "zip1 v18.16b, v18.16b, v16.16b\n"
241 "ldr b16, [x24], #0x1\n"
242 "ldr b20, [x23], #0x1\n"
243 "zip1 v16.16b, v17.16b, v16.16b\n"
244 "ldr b19, [x22], #0x1\n"
245 "ldr b17, [x21], #0x1\n"
246 "zip1 v18.16b, v18.16b, v16.16b\n"
247 "ldr b16, [x20], #0x1\n"
248 "zip1 v17.16b, v20.16b, v17.16b\n"
249 "str s18, [x27, #0x0]\n"
250 "zip1 v16.16b, v19.16b, v16.16b\n"
251 "zip1 v16.16b, v17.16b, v16.16b\n"
252 "str s16, [x27, #0x30]\n"
253 "add x27, x27, #0x4\n"
254 "bge 8b\n"
255 "9:" // Main row loop: width 1 loop: skip
256 "add %x[out], %x[out], #0x60\n"
257 "cmp %x[height], #0x8\n"
258 "bge 1b\n"
259 "cbz %x[height], 20f\n"
260 "10:" // Main loop skip
261
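      // Tail rows: up to four rows remain; any missing row pointers are redirected
      // to the zeroed pad_row (csel below) so the byte interleaves stay well-defined.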
262 "11:" // Tail row loop: Head
263 "mov x28, %x[in]\n"
264 "mov x27, %x[out]\n"
265 "add x26, x28, %x[in_stride]\n"
266 "add x25, x26, %x[in_stride]\n"
267 "add x24, x25, %x[in_stride]\n"
268 "add %x[in], x24, %x[in_stride]\n"
269 "cmp %x[height], #0x3\n"
270 "csel x24, x24, %x[pad_row], GT\n"
271 "csel x25, x25, %x[pad_row], GE\n"
272 "cmp %x[height], #0x1\n"
273 "csel x26, x26, %x[pad_row], GT\n"
274 "sub %x[height], %x[height], #0x4\n"
275 "mov x19, %x[width]\n"
276 "cmp x19, #0x30\n"
277 "blt 13f\n"
278 "12:" // Tail row loop: Unroll column loop
279 "ldr q18, [x28], #0x10\n"
280 "sub x19, x19, #0x30\n"
281 "ldr q19, [x26], #0x10\n"
282 "cmp x19, #0x30\n"
283 "ldr q16, [x25], #0x10\n"
284 "zip1 v28.16b, v18.16b, v16.16b\n"
285 "ldr q17, [x28], #0x10\n"
286 "zip2 v27.16b, v18.16b, v16.16b\n"
287 "ldr q26, [x26], #0x10\n"
288 "ldr q16, [x25], #0x10\n"
289 "zip1 v25.16b, v17.16b, v16.16b\n"
290 "ldr q18, [x28], #0x10\n"
291 "zip2 v24.16b, v17.16b, v16.16b\n"
292 "ldr q23, [x26], #0x10\n"
293 "ldr q16, [x25], #0x10\n"
294 "zip1 v22.16b, v18.16b, v16.16b\n"
295 "ldr q17, [x24], #0x10\n"
296 "zip2 v21.16b, v18.16b, v16.16b\n"
297 "ldr q20, [x24], #0x10\n"
298 "zip1 v16.16b, v19.16b, v17.16b\n"
299 "zip2 v18.16b, v19.16b, v17.16b\n"
300 "ldr q19, [x24], #0x10\n"
301 "zip1 v17.16b, v28.16b, v16.16b\n"
302 "zip2 v16.16b, v28.16b, v16.16b\n"
303 "str q17, [x27, #0x0]\n"
304 "zip1 v17.16b, v27.16b, v18.16b\n"
305 "str q16, [x27, #0x10]\n"
306 "zip2 v16.16b, v27.16b, v18.16b\n"
307 "str q17, [x27, #0x20]\n"
308 "add x27, x27, %x[out_stride]\n"
309 "zip1 v18.16b, v26.16b, v20.16b\n"
310 "str q16, [x27, #0x0]\n"
311 "zip2 v17.16b, v26.16b, v20.16b\n"
312 "zip1 v16.16b, v25.16b, v18.16b\n"
313 "str q16, [x27, #0x10]\n"
314 "zip2 v16.16b, v25.16b, v18.16b\n"
315 "str q16, [x27, #0x20]\n"
316 "add x27, x27, %x[out_stride]\n"
317 "zip1 v16.16b, v24.16b, v17.16b\n"
318 "str q16, [x27, #0x0]\n"
319 "zip2 v16.16b, v24.16b, v17.16b\n"
320 "zip1 v17.16b, v23.16b, v19.16b\n"
321 "str q16, [x27, #0x10]\n"
322 "zip1 v16.16b, v22.16b, v17.16b\n"
323 "str q16, [x27, #0x20]\n"
324 "add x27, x27, %x[out_stride]\n"
325 "zip2 v16.16b, v22.16b, v17.16b\n"
326 "str q16, [x27, #0x0]\n"
327 "zip2 v17.16b, v23.16b, v19.16b\n"
328 "zip1 v16.16b, v21.16b, v17.16b\n"
329 "str q16, [x27, #0x10]\n"
330 "zip2 v16.16b, v21.16b, v17.16b\n"
331 "str q16, [x27, #0x20]\n"
332 "add x27, x27, %x[out_stride]\n"
333 "bge 12b\n"
334 "13:" // Tail row loop: Unroll column loop skip
335 "cmp x19, #0xc\n"
336 "blt 15f\n"
337 "14:" // Tail row loop: Column loop
338 "ldr d18, [x28], #0x8\n"
339 "sub x19, x19, #0xc\n"
340 "ldr d21, [x26], #0x8\n"
341 "cmp x19, #0xc\n"
342 "ldr d17, [x25], #0x8\n"
343 "ldr d16, [x24], #0x8\n"
344 "ld1 { v18.s }[2], [x28], #0x4\n"
345 "ld1 { v21.s }[2], [x26], #0x4\n"
346 "ld1 { v17.s }[2], [x25], #0x4\n"
347 "zip1 v20.16b, v18.16b, v17.16b\n"
348 "ld1 { v16.s }[2], [x24], #0x4\n"
349 "zip2 v19.16b, v18.16b, v17.16b\n"
350 "zip1 v18.16b, v21.16b, v16.16b\n"
351 "zip2 v17.16b, v21.16b, v16.16b\n"
352 "zip1 v16.16b, v20.16b, v18.16b\n"
353 "str q16, [x27, #0x0]\n"
354 "zip2 v16.16b, v20.16b, v18.16b\n"
355 "str q16, [x27, #0x10]\n"
356 "zip1 v16.16b, v19.16b, v17.16b\n"
357 "str q16, [x27, #0x20]\n"
358 "add x27, x27, %x[out_stride]\n"
359 "bge 14b\n"
360 "15:" // Tail row loop: Column loop skip
361 "cmp x19, #0x4\n"
362 "blt 17f\n"
363 "16:" // Tail row loop: width 4 loop: loop
364 "ldr s17, [x28], #0x4\n"
365 "sub x19, x19, #0x4\n"
366 "ldr s18, [x26], #0x4\n"
367 "cmp x19, #0x4\n"
368 "ldr s16, [x25], #0x4\n"
369 "zip1 v17.16b, v17.16b, v16.16b\n"
370 "ldr s16, [x24], #0x4\n"
371 "zip1 v16.16b, v18.16b, v16.16b\n"
372 "zip1 v16.16b, v17.16b, v16.16b\n"
373 "str q16, [x27, #0x0]\n"
374 "add x27, x27, #0x10\n"
375 "bge 16b\n"
376 "17:" // Tail row loop: width 4 loop: skip
377 "cmp x19, #0x1\n"
378 "blt 19f\n"
379 "18:" // Tail row loop: width 1 loop: loop
380 "ldr b17, [x28], #0x1\n"
381 "sub x19, x19, #0x1\n"
382 "ldr b18, [x26], #0x1\n"
383 "cmp x19, #0x1\n"
384 "ldr b16, [x25], #0x1\n"
385 "zip1 v17.16b, v17.16b, v16.16b\n"
386 "ldr b16, [x24], #0x1\n"
387 "zip1 v16.16b, v18.16b, v16.16b\n"
388 "zip1 v16.16b, v17.16b, v16.16b\n"
389 "str s16, [x27, #0x0]\n"
390 "add x27, x27, #0x4\n"
391 "bge 18b\n"
392 "19:" // Tail row loop: width 1 loop: skip
393 "add %x[out], %x[out], #0x30\n"
394 "cmp %x[height], #0x1\n"
395 "bge 11b\n"
396 "20:" // Done
397
398 : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
399 : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
400 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
401 );
}

} // anonymous namespace

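// Specializations of the generic Transform interface for the 12x4 interleaved
// layout; the signed and unsigned 8-bit variants both forward to the kernel above.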
template<>
void Transform<12, 4, true, VLType::None>(
    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    a64_transpose_interleave_12_1x4(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(uint8_t) / 1,
        stride * sizeof(uint8_t),
        (kmax-k0)
    );
}

template<>
void Transform<12, 4, true, VLType::None>(
    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    a64_transpose_interleave_12_1x4(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(int8_t) / 1,
        stride * sizeof(int8_t),
        (kmax-k0)
    );
}

#endif