// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
$assert REQUANTIZATION in ["FP32", "RNDNU"]
$assert not CHANNELWISE or REQUANTIZATION == "FP32"
$assert DUP in ["DUP", "LD1R", "LD2R", "LD4R"]
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
$if REQUANTIZATION == "FP32" and ARMV8:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>


$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("neonv8" if REQUANTIZATION == "FP32" and ARMV8 else "neon")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$ISA = "neonv8" if ARMV8 else "neon"
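// Quantized (int8) GEMM microkernel with "c4" weight packing: 4 K bytes per output
// column are multiplied with vmull_s8, accumulated into int32, requantized, clamped,
// and stored as an up-to-${MR}x${NR} output tile per iteration of the NC loop.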
void xnn_${DATATYPE}_gemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c4__${ISA}_${"mlal" if MLA else "mull"}_${DUP.lower()}(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

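  // Round KC up to a multiple of 4 bytes to match the "c4" packing of the weights.
  // When mr < ${MR}, the unused A/C row pointers are clamped to the previous row, so
  // those rows recompute and rewrite the last valid row instead of accessing memory
  // out of bounds.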
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  $for M in range(1, MR):
    const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride);
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  do {
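    // Initialize the accumulators for the ${MR}x${NR} tile from the packed bias values.
    // Each int32x4_t covers two adjacent output columns (two lanes per column); the lane
    // pairs are reduced with pairwise additions after the K loop.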
    $for N in range(0, NR, 2):
      int32x4_t vacc0x${ABC[N:N+2]} = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    $for M in range(1, MR):
      $for N in range(0, NR, 2):
        int32x4_t vacc${M}x${ABC[N:N+2]} = vacc0x${ABC[N:N+2]};

    size_t k = kc;

    $if MLA:
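      // Main loop of the MLAL variant: 16 K bytes per iteration. Pairs of vmull_s8 and
      // vmlal_s8 accumulate two groups of products in int16 before each widening
      // vpadalq_s16, halving the number of widening additions.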
      while (k >= 16 * sizeof(int8_t)) {
        $for M in range(MR):
          $if DUP == "LD2R":
            const int32x2x2_t va${M}x0 = vld2_dup_s32((const void*)a${M}); a${M} += 8;
            const int32x2x2_t va${M}x1 = vld2_dup_s32((const void*)a${M}); a${M} += 8;
          $elif DUP == "LD1R":
            const int32x2_t va${M}0x0 = vld1_dup_s32((const void*)a${M});
            const int32x2_t va${M}1x0 = vld1_dup_s32((const void*)(a${M} + 4)); a${M} += 8;
            const int32x2_t va${M}0x1 = vld1_dup_s32((const void*)a${M});
            const int32x2_t va${M}1x1 = vld1_dup_s32((const void*)(a${M} + 4)); a${M} += 8;
          $else:
            const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
            const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;

        $for K in range(2):
          $for N in range(0, NR, 2):
            const int8x8_t vb${ABC[N:N+2]}c${K}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        $for K in range(2):
          $for M in range(MR):
            $if DUP == "LD2R":
              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s32(va${M}x0.val[${K}]);
              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s32(va${M}x1.val[${K}]);
            $elif DUP == "LD1R":
              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s32(va${M}${K}x0);
              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s32(va${M}${K}x1);
            $else:
              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}x0), ${K}));
              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}x1), ${K}));

          $for N in range(0, NR, 2):
            $for M in range(MR):
              int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}x0, va${M}c${K}x0);
            const int8x8_t vb${ABC[N:N+2]}c${K}x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            $for M in range(MR):
              vprod${M}x${ABC[N:N+2]}c${K} = vmlal_s8(vprod${M}x${ABC[N:N+2]}c${K}, vb${ABC[N:N+2]}c${K}x1, va${M}c${K}x1);
            $for M in range(MR):
              vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});

        k -= 16 * sizeof(int8_t);
      }

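    // Process 8 K bytes at a time: this is the main loop of the MULL-only variant, or at
    // most one leftover 8-byte group after the 16-byte MLAL loop.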
    ${"if" if MLA else "while"} (k >= 8 * sizeof(int8_t)) {
      $for M in range(MR):
        $if DUP == "LD2R":
          const int32x2x2_t va${M} = vld2_dup_s32((const void*)a${M}); a${M} += 8;
        $elif DUP == "LD1R":
          const int32x2_t va${M}0 = vld1_dup_s32((const void*)a${M});
          const int32x2_t va${M}1 = vld1_dup_s32((const void*)(a${M} + 4)); a${M} += 8;
        $else:
          const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;

      $for K in range(2):
        $for N in range(0, NR, 2):
          const int8x8_t vb${ABC[N:N+2]}c${K} = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      $for K in range(2):
        $for M in range(MR):
          $if DUP == "LD2R":
            const int8x8_t va${M}c${K} = vreinterpret_s8_s32(va${M}.val[${K}]);
          $elif DUP == "LD1R":
            const int8x8_t va${M}c${K} = vreinterpret_s8_s32(va${M}${K});
          $else:
            const int8x8_t va${M}c${K} = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}), ${K}));

        $for N in range(0, NR, 2):
          $for M in range(MR):
            const int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}, va${M}c${K});
          $for M in range(MR):
            vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});

      k -= 8 * sizeof(int8_t);
    }

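    // Remainder: KC was rounded up to a multiple of 4, so at most one 4-byte K group is
    // left. The A rows may be over-read here (the kernel is declared XNN_OOB_READS); only
    // the first 4 bytes of each load are used.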
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const int8x8_t va${M} = vld1_s8(a${M}); a${M} = (const int8_t*) ((uintptr_t) a${M} + k);

      $for N in range(0, NR, 2):
        const int8x8_t vb${ABC[N:N+2]}c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      $for M in range(MR):
        const int8x8_t va${M}c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}), 0));
        $for N in range(0, NR, 2):
          const int16x8_t vprod${M}x${ABC[N:N+2]}c0 = vmull_s8(vb${ABC[N:N+2]}c0, va${M}c0);
          vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c0);
    }

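    // Reduce the accumulators: pairwise additions combine the lane pairs so that each
    // int32 lane holds the complete sum for one output column.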
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vacc${M}x${ABC[N:N+2]}, vacc${M}x${ABC[N+2:N+4]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N:N+2]}), vget_high_s32(vacc${M}x${ABC[N:N+2]}));
        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N+2:N+4]}), vget_high_s32(vacc${M}x${ABC[N+2:N+4]}));
        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#endif

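    // Requantize the int32 accumulators to the output scale: RNDNU applies a pre-shift,
    // a saturating doubling-high multiply and a rounding post-shift; FP32 scales in
    // floating point and rounds back to int32.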
    $if REQUANTIZATION == "RNDNU":
      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
    $elif REQUANTIZATION == "FP32":
      $for M in range(MR):
        $for N in range(0, NR, 4):
          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});

      $if CHANNELWISE:
        $for N in range(0, NR, 4):
          const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
          $for M in range(MR):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
      $else:
        const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);

      $if ARMV8:
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
      $else:
        const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));

        const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vqsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);

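    // Convert to the output type: narrow to int16, add the output zero point (on the
    // non-ARMv8 FP32 path it is already folded into the magic bias), then narrow to int8
    // with saturation.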
    $if REQUANTIZATION != "FP32" or ARMV8:
      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]});

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]}));

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif

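    // Clamp the int8 outputs to the [output_min, output_max] range.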
    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $if NR == 8 and MR == 1:
      const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $else:
      const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

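    // Store a full ${NR}-column block when it fits; otherwise take the partial-store path
    // below, which writes the remaining columns in progressively smaller pieces.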
    if (nc >= ${NR}) {
      $for M in range(MR):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

      $for M in range(MR):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

      $for M in range(MR):
        a${M} = (const int8_t*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Final case where not all of the ${NR} columns fit in the destination.
      $if NR == 16:
        $for M in range(MR):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in range(MR):
            $if M % 2 == 1:
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in range(MR):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
      if (nc & 4) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u32((void*) c${M-1}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
            vst1q_lane_u32((void*) c${M}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32((void*) c${M}, vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_u16((void*) c${M-1}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
            vst1q_lane_u16((void*) c${M}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16((void*) c${M}, vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in range(MR):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
      if (nc & 1) {
        $for M in range(MR):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
