1// Auto-generated file. Do not edit!
2//   Template: src/qs8-igemm/1x8-aarch32-neon-mlal-lane-cortex-a7.S.in
3//   Generator: tools/xngen
4//
5// Copyright 2021 Google LLC
6//
7// This source code is licensed under the BSD-style license found in the
8// LICENSE file in the root directory of this source tree.
9
10
11#include <xnnpack/assembly.h>
12
13.syntax unified
14
15// void xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__aarch32_neon_mlal_lane_cortex_a7
16//     size_t mr,                                     (r0)
17//     size_t nc,                                      r1
18//     size_t kc,                                     (r2) -> sp + 56 -> r5
19//     size_t ks,                                     (r3) -> sp + 60 -> r14
20//     const uint8_t**restrict a,            sp + 88  -> r2
21//     const void*restrict w,              sp + 92  -> r9
22//     uint8_t*restrict c,                   sp + 96  -> r11
23//     size_t cm_stride,                   sp + 100  -> r6
24//     size_t cn_stride,                   sp + 104  -> r12
25//     size_t a_offset,                    sp + 108 -> (r5)
26//     const uint8_t* zero,                  sp + 112 -> r7
27//     xnn_qs8_conv_minmax_params*params); sp + 116 -> (r5)
28
29// d8-d15, r4-r11,r14(lr) need to be preserved if used. r13(sp),r15(pc) are reserved.
30
31// Based on cortex_a53 microkernel but with Neon loads
32
33// Register usage
34// A0   r3  d0-d1 q0
35
36// B    r9  d8-d9 q4 q5
37
38// C0  r11 d16-d17  q8  d18-d19  q9
39//         q2, q3 acc2
40
// Unused r4, r8, r10, d15, q1, q10, q11, q13, q14, q15
42
43// params structure is 20 bytes
44//  struct {
45//    uint8_t kernel_zero_point[4];  d14
46//    int32_t right_pre_shift;       d12[0]
47//    int32_t multiplier;            d12[1]
48//    int32_t right_post_shift;      d13[0]
49//    int16_t output_zero_point;     d13[2]
50//    uint8_t output_min;            d13[6]
51//    uint8_t output_max;            d13[7]
52//  } rndnu_neon;
53
BEGIN_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__aarch32_neon_mlal_lane_cortex_a7
        # Push 88 bytes
        # r2, r3 will be reloaded in outer loop.
        PUSH    {r2, r3, r5, r6, r7, r9, r11, lr}     // +32
        VPUSH   {d8-d14}                            // +56 = 88

        # Reload stack arguments; offsets are original sp offsets + 88 pushed bytes.
        LDR     r2,  [sp, 88]           // a
        LDR     r9,  [sp, 92]           // w
        LDR     r11, [sp, 96]           // c
        LDR     r6,  [sp, 100]          // cm_stride (loaded but never used: MR=1, only one output row)
        LDR     r12, [sp, 104]          // cn_stride
        LDR     r7,  [sp, 112]          // zero
        LDR     r5,  [sp, 116]          // params
        MOV     r14, r3                 // p = ks

        # Load params values
        VLD1.32 {d14[]}, [r5]!          // QU8 kernel_zero_point, splatted to all 8 bytes of d14
        VLDM    r5, {d12-d13}           // RNDNU params (pre-shift, multiplier, post-shift, zp, min, max)


        .p2align 3
0:                                      // Outer loop: one iteration per 8 output channels (nc)
        # Load initial bias from w into accumulators
        VLDM    r9!, {d16-d19}          // Bias -> q8, q9
        VMOV.I32 q2, 0                  // second set of C accumulators for pipelining VMLAL
        VMOV.I32 q3, 0

        .p2align 3
1:                                      // ks loop: one iteration per A pointer (indirection)
        # Load next A pointer
        LDR     r3, [r2,  0]

        # Add a_offset
        LDR     r5, [sp, 108]           // a_offset
        ADD     r2, r2, 4
        CMP     r3,  r7                 // if a0 == zero   (flags survive the non-S ADD below)
        ADD     r3,  r3, r5             // a0 += a_offset
        MOVEQ   r3,  r7                 //   a0 = zero, else a0 += a_offset

        LDR     r5, [sp, 56]            // kc (saved r2 slot on the stack)
        SUBS    r5, r5, 8               // kc - 8
        BLO     5f                      // kc < 8: remainder path only

        // Prologue - load A0 and B0
        // Note the in-place widening trick used throughout: VSUBL.U8 q4, d8, d14
        // writes q4 = d8:d9, i.e. d8 becomes the low half of the widened,
        // zero-point-subtracted B; likewise q5 = d10:d11 and q0 = d0:d1 for A.
        VLD1.8  {d0},  [r3]!            // A0
        SUBS    r5, r5, 8               // k = k - 8
        VLD1.8  {d8},  [r9]!            // B0
        BLO     3f                      // exactly one block of 8: skip main loop, go to epilogue

        // Main loop - 8 bytes of A per iteration
        // 64 bytes for weights.

        .p2align 3
2:
        // Extend
        VMOVL.U8 q0, d0                 // widen A0 to 16 bits (q0 = d0:d1)
        VSUBL.U8 q4, d8, d14            // widen B0 and subtract kernel_zero_point

        // BLOCK 0
        VLD1.8  {d10},  [r9]!           // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VSUBL.U8 q5, d10, d14

        // BLOCK 1
        VLD1.8  {d8},  [r9]!            // B2
        VMLAL.S16 q2, d10, d0[1]
        VMLAL.S16 q3, d11, d0[1]
        VSUBL.U8 q4, d8, d14

        // BLOCK 2
        VLD1.8  {d10},  [r9]!           // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VSUBL.U8 q5, d10, d14

        // BLOCK 3
        VLD1.8  {d8},  [r9]!            // B4
        VMLAL.S16 q2, d10, d0[3]
        VMLAL.S16 q3, d11, d0[3]
        VLD1.8  {d0},  [r3]!            // A0 for next iteration (d1 lanes below still hold widened A)
        VSUBL.U8 q4, d8, d14

        // BLOCK 4
        VLD1.8  {d10},  [r9]!           // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VSUBL.U8 q5, d10, d14

        // BLOCK 5
        VLD1.8  {d8},  [r9]!            // B6
        VMLAL.S16 q2, d10, d1[1]
        VMLAL.S16 q3, d11, d1[1]
        VSUBL.U8 q4, d8, d14

        // BLOCK 6
        VLD1.8  {d10},  [r9]!           // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VSUBL.U8 q5, d10, d14
        SUBS    r5, r5, 8

        // BLOCK 7
        VLD1.8  {d8},  [r9]!            // B0
        VMLAL.S16 q2, d10, d1[3]
        VMLAL.S16 q3, d11, d1[3]
        BHS     2b

        // Epilogue - same as one main-loop iteration, but without
        // prefetching the next A0/B0.

        .p2align 3
3:
        // Extend
        VMOVL.U8 q0, d0
        VSUBL.U8 q4, d8, d14

        // BLOCK 0
        VLD1.8  {d10},  [r9]!           // B1
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        VSUBL.U8 q5, d10, d14

        // BLOCK 1
        VLD1.8  {d8},  [r9]!            // B2
        VMLAL.S16 q2, d10, d0[1]
        VMLAL.S16 q3, d11, d0[1]
        VSUBL.U8 q4, d8, d14

        // BLOCK 2
        VLD1.8  {d10},  [r9]!           // B3
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        VSUBL.U8 q5, d10, d14

        // BLOCK 3
        VLD1.8  {d8},  [r9]!            // B4
        VMLAL.S16 q2, d10, d0[3]
        VMLAL.S16 q3, d11, d0[3]
        VSUBL.U8 q4, d8, d14

        // BLOCK 4
        VLD1.8  {d10},  [r9]!           // B5
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        VSUBL.U8 q5, d10, d14

        // BLOCK 5
        VLD1.8  {d8},  [r9]!            // B6
        VMLAL.S16 q2, d10, d1[1]
        VMLAL.S16 q3, d11, d1[1]
        VSUBL.U8 q4, d8, d14

        // BLOCK 6
        VLD1.8  {d10},  [r9]!           // B7
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        VSUBL.U8 q5, d10, d14
        ADDS    r5, r5, 8               // restore remainder count (kc mod 8); sets Z if none

        VMLAL.S16 q2, d10, d1[3]
        VMLAL.S16 q3, d11, d1[3]

        # Is there a remainder? 1-7 bytes of A
        BNE     6f

4:
        # ks loop
        SUBS    r14, r14, 4             // ks -= MR * sizeof(void*)
        BHI     1b

        LDR     r14, [sp, 60]           // p = ks (reload for next outer iteration)

        # Fold the second accumulator set into the first.
        VADD.S32 q8, q8, q2
        VADD.S32 q9, q9, q3

        # RNDNU quantization: pre-shift, saturating-doubling multiply,
        # rounding post-shift, add zero point, narrow, clamp.
        VDUP.32 q0, d12[0]              // right_pre_shift

        VQSHL.S32 q8,  q8, q0
        VQSHL.S32 q9,  q9, q0

        VDUP.32 q2, d13[0]              // right_post_shift

        VQDMULH.S32 q8,  q8, d12[1]     // multiplier
        VQDMULH.S32 q9,  q9, d12[1]

        VRSHL.S32 q8,  q8, q2
        VRSHL.S32 q9,  q9, q2

        VDUP.16 q0, d13[2]              // output_zero_point

        VQMOVN.S32 d16, q8              // saturating narrow 32 -> 16
        VQMOVN.S32 d17, q9

        VQADD.S16 q8,  q8, q0           // add output_zero_point with saturation

        VDUP.8  d24, d13[6]             // output_min

        VQMOVUN.S16 d0,  q8             // saturating narrow 16 -> u8

        VDUP.8  d25, d13[7]             // output_max

        VMAX.U8 d0, d0, d24

        SUBS    r1, r1, 8               // nc -= 8; flags consumed by BLO/BHI below

        VMIN.U8 d0, d0, d25

        # Store full 1 x 8
        BLO     7f                      // nc < 8: store odd width
        VST1.8  {d0}, [r11], r12
        SUB     r2, r2, r14             // a -= ks (rewind indirection pointers)
        BHI     0b                      // more channels remain

        VPOP    {d8-d14}
        ADD     sp, sp, 8               // skip r2, r3
        POP     {r5, r6, r7, r9, r11, pc}

        # Remainder- 1 to 7 bytes of A
        .p2align 3
5:
        AND     r5, r5, 7               // kc remainder 1 to 7
6:
        // NOTE(review): this loads a full 8 bytes of A even when fewer remain;
        // presumably the caller guarantees readable padding past A — confirm.
        VLD1.8  {d0},  [r3]
        VLD1.8  {d8},  [r9]!

        VMOVL.U8 q0, d0
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[0]
        VMLAL.S16 q9, d9, d0[0]
        CMP     r5, 2
        BLO     4b

        VLD1.8  {d8},  [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[1]
        VMLAL.S16 q9, d9, d0[1]
        BEQ     4b

        VLD1.8  {d8},  [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[2]
        VMLAL.S16 q9, d9, d0[2]
        CMP     r5, 4
        BLO     4b

        VLD1.8  {d8},  [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d0[3]
        VMLAL.S16 q9, d9, d0[3]
        BEQ     4b

        VLD1.8  {d8},  [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d1[0]
        VMLAL.S16 q9, d9, d1[0]
        CMP     r5, 6
        BLO     4b

        VLD1.8  {d8},  [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d1[1]
        VMLAL.S16 q9, d9, d1[1]
        BEQ     4b

        VLD1.8  {d8},  [r9]!
        VSUBL.U8 q4, d8, d14
        VMLAL.S16 q8, d8, d1[2]
        VMLAL.S16 q9, d9, d1[2]
        B       4b

        # Store odd width (1-7 remaining channels), widest pieces first
        .p2align 3
7:
        TST     r1, 4
        BEQ     8f
        VST1.32 {d0[0]}, [r11]!
        VEXT.8  q0, q0, q0, 4           // rotate stored bytes out of d0
8:
        TST     r1, 2
        BEQ     9f
        VST1.16 {d0[0]}, [r11]!
        VEXT.8  q0, q0, q0, 2

9:
        TST     r1, 1
        BEQ     10f
        VST1.8  {d0[0]}, [r11]

10:
        VPOP    {d8-d14}
        ADD     sp, sp, 8               // skip r2, r3
        POP     {r5, r6, r7, r9, r11, pc}

END_FUNCTION xnn_qu8_igemm_minmax_rndnu_ukernel_1x8__aarch32_neon_mlal_lane_cortex_a7
349
350#ifdef __ELF__
351.section ".note.GNU-stack","",%progbits
352#endif
353