/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

// Build on AArch64 where either FP16_KERNELS is set or FP16 is explicitly supported.
#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))

#include <arm_neon.h>

#include "../../asmlib.hpp"

// Kernel implementation.
//
// Assume that "Apanel" points to a chunk of A blocks (each size 8xK) in read-order.
// Assume that "Bpanel" points to a chunk of B blocks (each size 24xK) in read-order.
// Assume that "Cpanel" points to a chunk of C output blocks (each size
// 8x24), the chunks being arranged in a row major fashion.
//
// Note that the intent of this is that either ablocks or bblocks will be 1
// - this construction allows the output loop to proceed in either order.
namespace arm_gemm {

void a64_hgemm_asimd_8x24_x1(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16 *Cpanel, int ablocks, int bblocks, int K) {
    const __fp16 *a_ptr = Apanel;
    __fp16 *c_ptr = Cpanel;

    for (int yb=0; yb<ablocks; yb++) {
        const __fp16 *a_ptr0 = a_ptr;
        const __fp16 *b_ptr = Bpanel;

        for (int xb=0; xb<bblocks; xb++) {
            a_ptr = a_ptr0;
            // Fix up for odd lengths - set a flag if K is odd, but make
            // sure we round up the iteration count.
            int oddk = (K & 1);
            int k = ((K+1)/2) - 1;
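            // For example, K = 5 gives oddk = 1 and k = 2: the main loop runs
            // twice (two K values per iteration) and the odd tail handles the
            // final K value. K = 6 gives oddk = 0 and k = 2, with the last two
            // K values handled by the even tail.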

            register float16x8_t a0 asm("v0");
            register float16x8_t a0a asm("v1");
            register float16x8_t b0 asm("v2");
            register float16x8_t b1 asm("v3");
            register float16x8_t b2 asm("v4");

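            // Accumulator layout: v8-v15 hold C rows 0-7 for columns 0-7
            // (accumulated from b0), v16-v23 hold columns 8-15 (from b1) and
            // v24-v31 hold columns 16-23 (from b2).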
            __asm __volatile (
                // Initialize result registers, load initial operands, prime prefetches.
                "movi v8.8h, #0x0\n"
                "ldr %q[a0], [%[a_ptr]]\n"
                "movi v9.8h, #0x0\n"
                "ldr %q[b0], [%[b_ptr]]\n"
                "movi v10.8h, #0x0\n"
                "ldr %q[b1], [%[b_ptr], #16]\n"
                "movi v11.8h, #0x0\n"
                "movi v12.8h, #0x0\n"
                "movi v13.8h, #0x0\n"
                "movi v14.8h, #0x0\n"
                ASM_PREFETCH("[%[b_ptr], #64]")
                "movi v15.8h, #0x0\n"
                ASM_PREFETCH("[%[b_ptr], #128]")
                "movi v16.8h, #0x0\n"
                ASM_PREFETCH("[%[a_ptr], #64]")
                "movi v17.8h, #0x0\n"
                ASM_PREFETCH("[%[b_ptr], #192]")
                "movi v18.8h, #0x0\n"
                ASM_PREFETCH("[%[b_ptr], #256]")
                "movi v19.8h, #0x0\n"
                ASM_PREFETCH("[%[b_ptr], #320]")
                "movi v20.8h, #0x0\n"
                "movi v21.8h, #0x0\n"
                "movi v22.8h, #0x0\n"
                "movi v23.8h, #0x0\n"
                "movi v24.8h, #0x0\n"
                "movi v25.8h, #0x0\n"
                "movi v26.8h, #0x0\n"
                "movi v27.8h, #0x0\n"
                "movi v28.8h, #0x0\n"
                "movi v29.8h, #0x0\n"
                "movi v30.8h, #0x0\n"
                "movi v31.8h, #0x0\n"

                // Skip loop if we are doing zero iterations of it.
                "cbz %w[k], 4f\n"

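                // Main loop: each iteration processes two values of K (loaded
                // into a0 and a0a).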
104 "1:\n"
105 "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
106 "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
107 "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
108 "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
109 "ldr %q[b2], [%[b_ptr], #32]\n"
110 "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
111 "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
112 "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
113 "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
114 "ldr %q[b0], [%[b_ptr], #48]\n"
115
116 "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
117 "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
118 ASM_PREFETCH("[%[a_ptr], #128]")
119 "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
120 "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
121 "add %[b_ptr], %[b_ptr], #96\n"
122 "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
123 "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
124 "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
125 "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
126 "ldr %q[b1], [%[b_ptr], #-32]\n"
127
128 "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
129 "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
130 ASM_PREFETCH("[%[b_ptr], #288]")
131 "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
132 "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
133 "ldr %q[a0a], [%[a_ptr], #16]\n"
134 "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
135 "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
136 "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
137 "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
138 "ldr %q[b2], [%[b_ptr], #-16]\n"
139
140 "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
141 "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
142 "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
143 "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
144 "fmla v12.8h, %[b0].8h, %[a0a].h[4]\n"
145 "fmla v13.8h, %[b0].8h, %[a0a].h[5]\n"
146 "fmla v14.8h, %[b0].8h, %[a0a].h[6]\n"
147 "fmla v15.8h, %[b0].8h, %[a0a].h[7]\n"
148 "ldr %q[b0], [%[b_ptr]]\n"
149
150 "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
151 "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
152 ASM_PREFETCH("[%[b_ptr], #352]")
153 "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
154 "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
155 "fmla v20.8h, %[b1].8h, %[a0a].h[4]\n"
156 "fmla v21.8h, %[b1].8h, %[a0a].h[5]\n"
157 "fmla v22.8h, %[b1].8h, %[a0a].h[6]\n"
158 "fmla v23.8h, %[b1].8h, %[a0a].h[7]\n"
159 "ldr %q[b1], [%[b_ptr], #16]\n"
160
161 "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
162 "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
163 "add %[a_ptr], %[a_ptr], #32\n"
164 "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
165 "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
166 "ldr %q[a0], [%[a_ptr]]\n"
167 "fmla v28.8h, %[b2].8h, %[a0a].h[4]\n"
168 "fmla v29.8h, %[b2].8h, %[a0a].h[5]\n"
169 "subs %w[k], %w[k], #1\n"
170 "fmla v30.8h, %[b2].8h, %[a0a].h[6]\n"
171 "fmla v31.8h, %[b2].8h, %[a0a].h[7]\n"
172
173 "bne 1b\n"
174 "4:\n"
175
176 // Jump to odd tail if necessary.
177 "cbnz %w[oddk], 2f\n"
178
                // Even tail.
                "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
                "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
                "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
                "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
                "ldr %q[b2], [%[b_ptr], #32]\n"
                "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
                "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
                "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
                "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
                "ldr %q[b0], [%[b_ptr], #48]\n"

                "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
                "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
                "add %[b_ptr], %[b_ptr], #96\n"
                "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
                "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
                "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
                "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
                "add %[a_ptr], %[a_ptr], #32\n"
                "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
                "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
                "ldr %q[b1], [%[b_ptr], #-32]\n"

                "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
                "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
                "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
                "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
                "ldr %q[a0a], [%[a_ptr], #-16]\n"
                "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
                "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
                "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
                "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
                "ldr %q[b2], [%[b_ptr], #-16]\n"

                "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
                "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
                "str q8, [%[c_ptr]]\n"
                "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
                "str q16, [%[c_ptr], #16]\n"

                "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
                "str q24, [%[c_ptr], #32]\n"
                "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
                "str q9, [%[c_ptr], #48]\n"
                "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
                "str q17, [%[c_ptr], #64]\n"

                "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
                "str q25, [%[c_ptr], #80]\n"
                "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
                "str q10, [%[c_ptr], #96]\n"
                "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
                "str q18, [%[c_ptr], #112]\n"

                "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
                "str q26, [%[c_ptr], #128]\n"
                "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
                "str q11, [%[c_ptr], #144]\n"
                "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
                "str q19, [%[c_ptr], #160]\n"

                "fmla v12.8h, %[b0].8h, %[a0a].h[4]\n"
                "str q27, [%[c_ptr], #176]\n"
                "fmla v20.8h, %[b1].8h, %[a0a].h[4]\n"
                "str q12, [%[c_ptr], #192]\n"
                "fmla v28.8h, %[b2].8h, %[a0a].h[4]\n"
                "str q20, [%[c_ptr], #208]\n"

                "fmla v13.8h, %[b0].8h, %[a0a].h[5]\n"
                "str q28, [%[c_ptr], #224]\n"
                "fmla v21.8h, %[b1].8h, %[a0a].h[5]\n"
                "str q13, [%[c_ptr], #240]\n"
                "fmla v29.8h, %[b2].8h, %[a0a].h[5]\n"
                "str q21, [%[c_ptr], #256]\n"

                "fmla v14.8h, %[b0].8h, %[a0a].h[6]\n"
                "str q29, [%[c_ptr], #272]\n"
                "fmla v22.8h, %[b1].8h, %[a0a].h[6]\n"
                "str q14, [%[c_ptr], #288]\n"
                "fmla v30.8h, %[b2].8h, %[a0a].h[6]\n"
                "str q22, [%[c_ptr], #304]\n"

                "fmla v15.8h, %[b0].8h, %[a0a].h[7]\n"
                "str q30, [%[c_ptr], #320]\n"
                "fmla v23.8h, %[b1].8h, %[a0a].h[7]\n"
                "str q15, [%[c_ptr], #336]\n"
                "fmla v31.8h, %[b2].8h, %[a0a].h[7]\n"
                "b 3f\n"

                // Odd tail.
                "2:\n"
                "ldr %q[b2], [%[b_ptr], #32]\n"
                "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
                "add %[b_ptr], %[b_ptr], #48\n"
                "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
                "add %[a_ptr], %[a_ptr], #16\n"
                "str q8, [%[c_ptr]]\n"
                "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
                "str q16, [%[c_ptr], #16]\n"

                "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
                "str q24, [%[c_ptr], #32]\n"
                "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
                "str q9, [%[c_ptr], #48]\n"
                "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
                "str q17, [%[c_ptr], #64]\n"

                "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
                "str q25, [%[c_ptr], #80]\n"
                "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
                "str q10, [%[c_ptr], #96]\n"
                "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
                "str q18, [%[c_ptr], #112]\n"

                "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
                "str q26, [%[c_ptr], #128]\n"
                "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
                "str q11, [%[c_ptr], #144]\n"
                "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
                "str q19, [%[c_ptr], #160]\n"

                "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
                "str q27, [%[c_ptr], #176]\n"
                "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
                "str q12, [%[c_ptr], #192]\n"
                "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
                "str q20, [%[c_ptr], #208]\n"

                "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
                "str q28, [%[c_ptr], #224]\n"
                "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
                "str q13, [%[c_ptr], #240]\n"
                "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
                "str q21, [%[c_ptr], #256]\n"

                "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
                "str q29, [%[c_ptr], #272]\n"
                "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
                "str q14, [%[c_ptr], #288]\n"
                "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
                "str q22, [%[c_ptr], #304]\n"

                "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
                "str q30, [%[c_ptr], #320]\n"
                "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
                "str q15, [%[c_ptr], #336]\n"
                "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"

                "3:\n"
                "str q23, [%[c_ptr], #352]\n"
                "str q31, [%[c_ptr], #368]\n"
                "add %[c_ptr], %[c_ptr], #384\n"
                :
                [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
                [a0] "+w" (a0), [a0a] "+w" (a0a),
                [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
                : [oddk] "r" (oddk)
                : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
                  "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc"
            );
        }
    }
}

} // namespace arm_gemm

#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)