/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#ifdef __aarch64__

namespace {

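// Widens u8 source data to u16 while repacking it for GEMM: rows are consumed
// four at a time and written into output blocks of 12 columns, with every
// input row contributing 12 consecutive u16 values per block. Narrower loops
// below handle the leftover columns (width % 12) and rows (height % 4).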
void a64_transpose_interleave_12_u8u16(uint16_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
{
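    // Byte distance between consecutive 12-column output blocks: each block
    // holds 12 u16 values for every one of the `height` input rows.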
    size_t out_stride = 12 * height * sizeof(uint16_t);

    __asm__ __volatile__(
      "cmp %x[height], #0x4\n"
      "blt 10f\n"
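      // Main row loop: consume four input rows per pass.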
      "1:" // Main row loop: Head
      "mov x24, %x[in]\n"
      "mov x23, %x[out]\n"
      "add x22, x24, %x[in_stride]\n"
      "add x21, x22, %x[in_stride]\n"
      "add x20, x21, %x[in_stride]\n"
      "add %x[in], x20, %x[in_stride]\n"
      "sub %x[height], %x[height], #0x4\n"
      "mov x19, %x[width]\n"
      "cmp x19, #0x18\n"
      "blt 3f\n"
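      // Unrolled column loop: 24 input columns (two 12-column output blocks)
      // per iteration.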
      "2:" // Main row loop: Unroll column loop
      "ldr q16, [x24], #0x10\n"
      "ushll v27.8h, v16.8b, #0x0\n"
      "ldr d17, [x24], #0x8\n"
      "sub x19, x19, #0x18\n"
      "ushll2 v16.8h, v16.16b, #0x0\n"
      "ldr q26, [x22], #0x10\n"
      "cmp x19, #0x18\n"
      "dup v20.2d, v16.d[0]\n"
      "ldr q25, [x21], #0x10\n"
      "dup v24.2d, v16.d[1]\n"
      "ldr q23, [x20], #0x10\n"
      "ushll v16.8h, v17.8b, #0x0\n"
      "ldr d19, [x22], #0x8\n"
      "mov v24.d[1], v16.d[0]\n"
      "dup v22.2d, v16.d[1]\n"
      "ldr d18, [x21], #0x8\n"
      "ushll v16.8h, v26.8b, #0x0\n"
      "ldr d21, [x20], #0x8\n"
      "mov v20.d[1], v16.d[0]\n"
      "str q27, [x23, #0x0]\n"
      "dup v17.2d, v16.d[1]\n"
      "str q20, [x23, #0x10]\n"
      "ushll2 v16.8h, v26.16b, #0x0\n"
      "mov v17.d[1], v16.d[0]\n"
      "str q17, [x23, #0x20]\n"
      "mov v22.d[1], v16.d[1]\n"
      "ushll v20.8h, v19.8b, #0x0\n"
      "ushll v16.8h, v25.8b, #0x0\n"
      "str q16, [x23, #0x30]\n"
      "ushll2 v16.8h, v25.16b, #0x0\n"
      "dup v17.2d, v16.d[0]\n"
      "dup v19.2d, v16.d[1]\n"
      "ushll v16.8h, v18.8b, #0x0\n"
      "mov v19.d[1], v16.d[0]\n"
      "dup v18.2d, v16.d[1]\n"
      "ushll v16.8h, v23.8b, #0x0\n"
      "mov v17.d[1], v16.d[0]\n"
      "str q17, [x23, #0x40]\n"
      "dup v17.2d, v16.d[1]\n"
      "ushll2 v16.8h, v23.16b, #0x0\n"
      "mov v17.d[1], v16.d[0]\n"
      "str q17, [x23, #0x50]\n"
      "add x23, x23, %x[out_stride]\n"
      "mov v18.d[1], v16.d[1]\n"
      "str q24, [x23, #0x0]\n"
      "ushll v16.8h, v21.8b, #0x0\n"
      "str q22, [x23, #0x10]\n"
      "str q20, [x23, #0x20]\n"
      "str q19, [x23, #0x30]\n"
      "str q18, [x23, #0x40]\n"
      "str q16, [x23, #0x50]\n"
      "add x23, x23, %x[out_stride]\n"
      "bge 2b\n"
      "3:" // Main row loop: Unroll column loop skip
      "cmp x19, #0xc\n"
      "blt 5f\n"
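      // Column loop: one full 12-column output block per iteration.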
      "4:" // Main row loop: Column loop
      "ldr d16, [x24], #0x8\n"
      "sub x19, x19, #0xc\n"
      "ldr d21, [x22], #0x8\n"
      "cmp x19, #0xc\n"
      "ldr d20, [x21], #0x8\n"
      "ldr d19, [x20], #0x8\n"
      "ld1 { v16.s }[2], [x24], #0x4\n"
      "ushll v17.8h, v16.8b, #0x0\n"
      "ld1 { v21.s }[2], [x22], #0x4\n"
      "ushll2 v18.8h, v16.16b, #0x0\n"
      "ld1 { v20.s }[2], [x21], #0x4\n"
      "ld1 { v19.s }[2], [x20], #0x4\n"
      "ushll v16.8h, v21.8b, #0x0\n"
      "str q17, [x23, #0x0]\n"
      "ushll2 v17.8h, v21.16b, #0x0\n"
      "mov v18.d[1], v16.d[0]\n"
      "str q18, [x23, #0x10]\n"
      "dup v16.2d, v16.d[1]\n"
      "mov v16.d[1], v17.d[0]\n"
      "str q16, [x23, #0x20]\n"
      "ushll v16.8h, v20.8b, #0x0\n"
      "str q16, [x23, #0x30]\n"
      "ushll2 v17.8h, v20.16b, #0x0\n"
      "ushll v16.8h, v19.8b, #0x0\n"
      "mov v17.d[1], v16.d[0]\n"
      "str q17, [x23, #0x40]\n"
      "dup v17.2d, v16.d[1]\n"
      "ushll2 v16.8h, v19.16b, #0x0\n"
      "mov v17.d[1], v16.d[0]\n"
      "str q17, [x23, #0x50]\n"
      "add x23, x23, %x[out_stride]\n"
      "bge 4b\n"
      "5:" // Main row loop: Column loop skip
      "cmp x19, #0x4\n"
      "blt 7f\n"
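      // Width-4 tail: four leftover columns at a time, written into the
      // current 12-column block without advancing to the next one.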
      "6:" // Main row loop: width 4 loop: loop
      "ldr s16, [x24], #0x4\n"
      "ushll v19.8h, v16.8b, #0x0\n"
      "ldr s16, [x22], #0x4\n"
      "sub x19, x19, #0x4\n"
      "ushll v18.8h, v16.8b, #0x0\n"
      "ldr s16, [x21], #0x4\n"
      "cmp x19, #0x4\n"
      "ushll v17.8h, v16.8b, #0x0\n"
      "ldr s16, [x20], #0x4\n"
      "str d19, [x23, #0x0]\n"
      "ushll v16.8h, v16.8b, #0x0\n"
      "str d18, [x23, #0x18]\n"
      "str d17, [x23, #0x30]\n"
      "str d16, [x23, #0x48]\n"
      "add x23, x23, #0x8\n"
      "bge 6b\n"
      "7:" // Main row loop: width 4 loop: skip
      "cmp x19, #0x1\n"
      "blt 9f\n"
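      // Width-1 tail: remaining columns one at a time.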
      "8:" // Main row loop: width 1 loop: loop
      "ldr b16, [x24], #0x1\n"
      "ushll v19.8h, v16.8b, #0x0\n"
      "ldr b16, [x22], #0x1\n"
      "sub x19, x19, #0x1\n"
      "ushll v18.8h, v16.8b, #0x0\n"
      "ldr b16, [x21], #0x1\n"
      "cmp x19, #0x1\n"
      "ushll v17.8h, v16.8b, #0x0\n"
      "ldr b16, [x20], #0x1\n"
      "str h19, [x23, #0x0]\n"
      "ushll v16.8h, v16.8b, #0x0\n"
      "str h18, [x23, #0x18]\n"
      "str h17, [x23, #0x30]\n"
      "str h16, [x23, #0x48]\n"
      "add x23, x23, #0x2\n"
      "bge 8b\n"
      "9:" // Main row loop: width 1 loop: skip
      "add %x[out], %x[out], #0x60\n"
      "cmp %x[height], #0x4\n"
      "bge 1b\n"
      "cbz %x[height], 20f\n"
      "10:" // Main loop skip

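      // Tail row loop: handles the remaining height % 4 rows one at a time,
      // following the same 24/12/4/1 column structure as above.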
      "11:" // Tail row loop: Head
      "mov x24, %x[in]\n"
      "mov x23, %x[out]\n"
      "add %x[in], x24, %x[in_stride]\n"
      "sub %x[height], %x[height], #0x1\n"
      "mov x19, %x[width]\n"
      "cmp x19, #0x18\n"
      "blt 13f\n"
      "12:" // Tail row loop: Unroll column loop
      "ldr q17, [x24], #0x10\n"
      "ushll v16.8h, v17.8b, #0x0\n"
      "ldr d18, [x24], #0x8\n"
      "sub x19, x19, #0x18\n"
      "ushll2 v17.8h, v17.16b, #0x0\n"
      "str q16, [x23, #0x0]\n"
      "cmp x19, #0x18\n"
      "dup v16.2d, v17.d[0]\n"
      "str d16, [x23, #0x10]\n"
      "dup v17.2d, v17.d[1]\n"
      "add x23, x23, %x[out_stride]\n"
      "ushll v16.8h, v18.8b, #0x0\n"
      "mov v17.d[1], v16.d[0]\n"
      "str q17, [x23, #0x0]\n"
      "dup v16.2d, v16.d[1]\n"
      "str d16, [x23, #0x10]\n"
      "add x23, x23, %x[out_stride]\n"
      "bge 12b\n"
      "13:" // Tail row loop: Unroll column loop skip
      "cmp x19, #0xc\n"
      "blt 15f\n"
      "14:" // Tail row loop: Column loop
      "ldr d17, [x24], #0x8\n"
      "sub x19, x19, #0xc\n"
      "cmp x19, #0xc\n"
      "ld1 { v17.s }[2], [x24], #0x4\n"
      "ushll v16.8h, v17.8b, #0x0\n"
      "str q16, [x23, #0x0]\n"
      "ushll2 v16.8h, v17.16b, #0x0\n"
      "str d16, [x23, #0x10]\n"
      "add x23, x23, %x[out_stride]\n"
      "bge 14b\n"
      "15:" // Tail row loop: Column loop skip
      "cmp x19, #0x4\n"
      "blt 17f\n"
      "16:" // Tail row loop: width 4 loop: loop
      "ldr s16, [x24], #0x4\n"
      "ushll v16.8h, v16.8b, #0x0\n"
      "str d16, [x23, #0x0]\n"
      "sub x19, x19, #0x4\n"
      "add x23, x23, #0x8\n"
      "cmp x19, #0x4\n"
      "bge 16b\n"
      "17:" // Tail row loop: width 4 loop: skip
      "cmp x19, #0x1\n"
      "blt 19f\n"
      "18:" // Tail row loop: width 1 loop: loop
      "ldr b16, [x24], #0x1\n"
      "ushll v16.8h, v16.8b, #0x0\n"
      "str h16, [x23, #0x0]\n"
      "sub x19, x19, #0x1\n"
      "add x23, x23, #0x2\n"
      "cmp x19, #0x1\n"
      "bge 18b\n"
      "19:" // Tail row loop: width 1 loop: skip
      "add %x[out], %x[out], #0x18\n"
      "cmp %x[height], #0x1\n"
      "bge 11b\n"
      "20:" // Done

      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
      : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24"
    );
}

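// For documentation only: a minimal scalar sketch of the packing the assembly
// above is understood to produce. The helper below is illustrative and is not
// called anywhere in the library. Each block of 12 input columns is emitted as
// `height` consecutive groups of 12 values widened from u8 to u16; trailing
// lanes of a partial block are left unwritten, matching the tail loops above.
inline void reference_transpose_interleave_12_u8u16(
    uint16_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
{
    for (size_t x = 0; x < width; x += 12) {
        for (size_t k = 0; k < height; k++) {
            for (size_t i = 0; i < 12 && (x + i) < width; i++) {
                // Widen and place into block (x / 12), row k, lane i.
                out[(x / 12) * height * 12 + k * 12 + i] =
                    static_cast<uint16_t>(in[k * in_stride + x + i]);
            }
        }
    }
}
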
} // anonymous namespace
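
// Dispatch entry point: packs rows [k0, kmax) and columns [x0, xmax) of the
// u8 source, widened to u16, using the 12-column kernel above.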
template<>
void Transform<12, 1, true, VLType::None>(
    uint16_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    a64_transpose_interleave_12_u8u16(
        out,
        in + k0 * stride + x0,
        (xmax-x0),
        stride * sizeof(uint8_t),
        (kmax-k0)
    );
}

#endif