/*
 * Copyright (c) 2017-2018 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #ifdef __aarch64__
25
26 #include <arm_neon.h>
27
28 #include "../../asmlib.hpp"
29
30 namespace arm_gemm {
31
a64_sgemm_asimd_8x12_a55r1(const float * Apanel,const float * Bpanel,float * Cpanel,const int ablocks,const int bblocks,const int K)32 void a64_sgemm_asimd_8x12_a55r1(const float *Apanel, const float *Bpanel, float *Cpanel, const int ablocks, const int bblocks, const int K) {
33 const float *a_ptr = Apanel;
34 float *c_ptr = Cpanel;
35
36 // Fix up for odd lengths - set a flag if K is odd, but make
37 // sure we round up the iteration count.
38 int oddk = (K & 1);
39 int k_iters = ((K+1)/2) - 1;
40
41 for (int yb=0; yb<ablocks; yb++) {
42 const float *a_ptr0 = a_ptr;
43 const float *b_ptr = Bpanel;
44
45 for (int xb=0; xb<bblocks; xb++) {
46 a_ptr = a_ptr0;
47 int k = k_iters;
48
49 register float32x4_t a0 asm("v0");
50 register float32x4_t a1 asm("v1");
51 register float32x4_t b0 asm("v2");
52 register float32x4_t b1 asm("v3");
53 register float32x4_t b2 asm("v4");
54 register float32x4_t a0a asm("v5");
55 register float32x4_t a1a asm("v6");
56
57 __asm __volatile (
58 // Initialize result registers, load initial operands, prime prefetches.
59 "movi v8.4s, #0x0\n"
60 "ldr %q[a0], [%[a_ptr]]\n"
61 "movi v9.4s, #0x0\n"
62 "ldr %q[b0], [%[b_ptr]]\n"
63 "movi v10.4s, #0x0\n"
64 "ldr %q[a1], [%[a_ptr], #16]\n"
65 "movi v11.4s, #0x0\n"
66 "ldr %q[b1], [%[b_ptr], #16]\n"
67 "movi v12.4s, #0x0\n"
68 ASM_PREFETCH("[%[b_ptr], #64]")
69 "movi v13.4s, #0x0\n"
70 ASM_PREFETCH("[%[a_ptr], #64]")
71 "movi v14.4s, #0x0\n"
72 ASM_PREFETCH("[%[b_ptr], #128]")
73 "movi v15.4s, #0x0\n"
74 ASM_PREFETCH("[%[a_ptr], #128]")
75 "movi v16.4s, #0x0\n"
76 ASM_PREFETCH("[%[b_ptr], #192]")
77 "movi v17.4s, #0x0\n"
78 ASM_PREFETCH("[%[b_ptr], #256]")
79 "movi v18.4s, #0x0\n"
80 "movi v19.4s, #0x0\n"
81 ASM_PREFETCH("[%[a_ptr], #192]")
82 "movi v20.4s, #0x0\n"
83 "movi v21.4s, #0x0\n"
84 ASM_PREFETCH("[%[b_ptr], #320]")
85 "movi v22.4s, #0x0\n"
86 "movi v23.4s, #0x0\n"
87 ASM_PREFETCH("[%[a_ptr], #256]")
88 "movi v24.4s, #0x0\n"
89 "movi v25.4s, #0x0\n"
90 ASM_PREFETCH("[%[b_ptr], #384]")
91 "movi v26.4s, #0x0\n"
92 "movi v27.4s, #0x0\n"
93 ASM_PREFETCH("[%[b_ptr], #448]")
94 "movi v28.4s, #0x0\n"
95 "movi v29.4s, #0x0\n"
96 ASM_PREFETCH("[%[a_ptr], #384]")
97 "movi v30.4s, #0x0\n"
98 "movi v31.4s, #0x0\n"
99 ASM_PREFETCH("[%[b_ptr], #512]")
100
101 // The loop is offset by these two instructions which must
102 // always be executed.
103 "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
104 "ldr %d[b2], [%[b_ptr], #32]\n"
105
106 // Skip loop if we are doing zero iterations of it.
107 "cbz %w[k], 4f\n"
108
109 "1:\n"
110 // Unroll 0
111 "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
112 "ldr x20, [%[b_ptr], #40]\n"
113 "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
114 "subs %w[k], %w[k], #1\n"
115 "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
116 "ldr %d[a0a], [%[a_ptr], #32]\n"
117
118 "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
119 "ins %[b2].d[1], x20\n"
120 "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
121 "ldr x20, [%[a_ptr], #40]\n"
122 "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
123 "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
124 "ldr %d[a1a], [%[a_ptr], #48]\n"
125
126 "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
127 "ins %[a0a].d[1], x20\n"
128 "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
129 "ldr x20, [%[a_ptr], #56]\n"
130 "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
131 "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
132 "ldr %d[b0], [%[b_ptr], #48]\n"
133
134 "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
135 "ins %[a1a].d[1], x20\n"
136 "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
137 "ldr x20, [%[b_ptr], #56]\n"
138 "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
139 "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
140 "ldr %d[b1], [%[b_ptr], #64]\n"
141
142 "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
143 "ins %[b0].d[1], x20\n"
144 "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
145 "ldr x20, [%[b_ptr], #72]\n"
146 "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
147 "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
148 ASM_PREFETCH("[%[a_ptr], #448]")
149
150 "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
151 "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
152 ASM_PREFETCH("[%[b_ptr], #576]")
153 "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
154 "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
155
156 // Unroll 1
157 "ldr %d[b2], [%[b_ptr], #80]\n"
158
159 "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
160 "ins %[b1].d[1], x20\n"
161 "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
162 "ldr x20, [%[b_ptr], #88]\n"
163 "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
164 "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
165 "ldr %d[a0], [%[a_ptr], #64]\n"
166
167 "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
168 "ins %[b2].d[1], x20\n"
169 "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
170 "ldr x20, [%[a_ptr], #72]\n"
171 "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
172 "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
173 "ldr %d[a1], [%[a_ptr], #80]\n"
174
175 "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
176 "ins %[a0].d[1], x20\n"
177 "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
178 "ldr x20, [%[a_ptr], #88]\n"
179 "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
180 "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
181 "ldr %d[b0], [%[b_ptr], #96]\n"
182
183 "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
184 "ins %[a1].d[1], x20\n"
185 "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
186 "ldr x20, [%[b_ptr], #104]\n"
187 "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
188 "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
189 "ldr %d[b1], [%[b_ptr], #112]\n"
190
191 "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
192 "ins %[b0].d[1], x20\n"
193 "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
194 "ldr x20, [%[b_ptr], #120]\n"
195 "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
196
197 "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
198 "add %[a_ptr], %[a_ptr], #64\n"
199
200 "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
201 ASM_PREFETCH("[%[b_ptr], #640]")
202 "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
203 "add %[b_ptr], %[b_ptr], #96\n"
204 "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
205 "ins %[b1].d[1], x20\n"
206 "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
207 "ldr %d[b2], [%[b_ptr], #32]\n"
208
209 "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
210 "b.ne 1b\n"
211
212 // Branch here if K=1 or 2. Do the right thing for odd/even at the end.
213 "4:\n"
214
215 // Start final iteration - branch off to "odd" code before we load a0a.
216 "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
217 "ldr x20, [%[b_ptr], #40]\n"
218 "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
219 "cbnz %w[oddk], 2f\n"
220
221 // Even K continuation
222 "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
223 "ldr %d[a0a], [%[a_ptr], #32]\n"
224
225 "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
226 "ins %[b2].d[1], x20\n"
227 "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
228 "ldr x20, [%[a_ptr], #40]\n"
229 "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
230 ASM_PREFETCHW("[%[c_ptr]]")
231 "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
232 "ldr %d[a1a], [%[a_ptr], #48]\n"
233
234 "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
235 "ins %[a0a].d[1], x20\n"
236 "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
237 "ldr x20, [%[a_ptr], #56]\n"
238 "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
239 "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
240 "ldr %d[b0], [%[b_ptr], #48]\n"
241
242 "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
243 "ins %[a1a].d[1], x20\n"
244 "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
245 "ldr x20, [%[b_ptr], #56]\n"
246 "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
247 ASM_PREFETCHW("[%[c_ptr], #64]")
248 "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
249
250 "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
251 "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
252 ASM_PREFETCHW("[%[c_ptr], #128]")
253 "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
254 "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
255 "ldr %d[b1], [%[b_ptr], #64]\n"
256
257 "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
258 "ins %[b0].d[1], x20\n"
259 "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
260 "ldr x20, [%[b_ptr], #72]\n"
261 "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
262 ASM_PREFETCHW("[%[c_ptr], #192]")
263 "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
264 "ldr %d[b2], [%[b_ptr], #80]\n"
265
266 "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
267 "ins %[b1].d[1], x20\n"
268 "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
269 "ldr x20, [%[b_ptr], #88]\n"
270 "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
271 "ins %[b2].d[1], x20\n"
272
273 "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
274 ASM_PREFETCHW("[%[c_ptr], #256]")
275 "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
276 "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
277 "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
278 ASM_PREFETCHW("[%[c_ptr], #320]")
279 "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
280 "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
281 ASM_PREFETCHWL2("[%[c_ptr], #384]")
282 "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
283 "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
284 ASM_PREFETCHWL2("[%[c_ptr], #448]")
285 "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
286 "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
287 "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
288 ASM_PREFETCHWL2("[%[c_ptr], #512]")
289 "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
290 "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
291 ASM_PREFETCHWL2("[%[c_ptr], #576]")
292 "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
293 "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
294 "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
295 ASM_PREFETCHWL2("[%[c_ptr], #640]")
296 "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
297 "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
298 ASM_PREFETCHWL2("[%[c_ptr], #704]")
299 "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
300 "add %[a_ptr], %[a_ptr], #64\n"
301 "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
302 "add %[b_ptr], %[b_ptr], #96\n"
303 "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
304 "b 3f\n"
305
306 // Odd K continuation
307 "2:\n"
308 "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
309 ASM_PREFETCHW("[%[c_ptr]]")
310 "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
311 "ins %[b2].d[1], x20\n"
312 "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
313 ASM_PREFETCHW("[%[c_ptr], #64]")
314 "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
315 "add %[a_ptr], %[a_ptr], #32\n"
316 "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
317 ASM_PREFETCHW("[%[c_ptr], #128]")
318 "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
319 "add %[b_ptr], %[b_ptr], #48\n"
320 "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
321 ASM_PREFETCHW("[%[c_ptr], #192]")
322 "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
323 "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
324 ASM_PREFETCHW("[%[c_ptr], #256]")
325 "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
326 "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
327 ASM_PREFETCHW("[%[c_ptr], #320]")
328 "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
329 "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
330 ASM_PREFETCHWL2("[%[c_ptr], #384]")
331 "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
332 "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
333 ASM_PREFETCHWL2("[%[c_ptr], #448]")
334 "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
335 "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
336 ASM_PREFETCHWL2("[%[c_ptr], #512]")
337 "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
338 ASM_PREFETCHWL2("[%[c_ptr], #576]")
339 "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
340 ASM_PREFETCHWL2("[%[c_ptr], #640]")
341 "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
342 ASM_PREFETCHWL2("[%[c_ptr], #704]")
343 "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
344
345 // Common tail
346 "3:\n"
347 "str q8, [%[c_ptr]]\n"
348 "str q16, [%[c_ptr], #16]\n"
349 "str q24, [%[c_ptr], #32]\n"
350 "str q9, [%[c_ptr], #48]\n"
351 "str q17, [%[c_ptr], #64]\n"
352 "str q25, [%[c_ptr], #80]\n"
353 "str q10, [%[c_ptr], #96]\n"
354 "str q18, [%[c_ptr], #112]\n"
355 "str q26, [%[c_ptr], #128]\n"
356 "str q11, [%[c_ptr], #144]\n"
357 "str q19, [%[c_ptr], #160]\n"
358 "str q27, [%[c_ptr], #176]\n"
359 "str q12, [%[c_ptr], #192]\n"
360 "str q20, [%[c_ptr], #208]\n"
361 "str q28, [%[c_ptr], #224]\n"
362 "str q13, [%[c_ptr], #240]\n"
363 "str q21, [%[c_ptr], #256]\n"
364 "str q29, [%[c_ptr], #272]\n"
365 "str q14, [%[c_ptr], #288]\n"
366 "str q22, [%[c_ptr], #304]\n"
367 "str q30, [%[c_ptr], #320]\n"
368 "str q15, [%[c_ptr], #336]\n"
369 "str q23, [%[c_ptr], #352]\n"
370 "str q31, [%[c_ptr], #368]\n"
371 "add %[c_ptr], %[c_ptr], #384\n"
372 :
373 [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
374 [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
375 [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
376 : [oddk] "r" (oddk)
377 : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
378 "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc"
379 );
380 }
381 }
382 }
383
384 } // namespace arm_gemm
385
386 #endif
387