// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
$assert REQUANTIZATION in ["FP32", "RNDNU"]
$assert not CHANNELWISE or REQUANTIZATION == "FP32"
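// Template parameters:
//   MR, NR         - output tile size (rows x columns)
//   MLA            - if set, the main loop uses VMULL+VMLAL pairs to consume 16 bytes of K
//   REQUANTIZATION - "RNDNU" (fixed-point) or "FP32" (floating-point scaling)
//   CHANNELWISE    - per-channel scales (qc8) instead of a single scale (qs8)
//   ARMV8          - use ARMv8 round-to-nearest conversion on the FP32 path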
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
$if REQUANTIZATION == "FP32" and ARMV8:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>


$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("neonv8" if REQUANTIZATION == "FP32" and ARMV8 else "neon")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$ISA = "neonv8" if ARMV8 else "neon"
void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c4s2__${ISA}_${"mlal" if MLA else "mull"}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

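  // Set up MR output row pointers; when mr < MR, excess pointers alias the
  // previous row, so stores for out-of-range rows land on rows that are
  // written anyway.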
  int8_t* c0 = c;
  $for M in range(1, MR):
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

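  // The kernel consumes A in 8-byte groups, so round kc up to a multiple of 8;
  // the packed weights cover the rounded-up K, and reads past the true kc are
  // permitted under XNN_OOB_READS.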
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
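    // Load the bias as zero-extended 32-bit pairs: each int32x4 accumulator
    // covers two output columns with lanes {bias[N], 0, bias[N+1], 0}; the two
    // lanes per column are combined by the pairwise reduction after the K loop.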
    $for N in range(0, NR, 2):
      int32x4_t vacc0x${ABC[N:N+2]} = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    $for M in range(1, MR):
      $for N in range(0, NR, 2):
        int32x4_t vacc${M}x${ABC[N:N+2]} = vacc0x${ABC[N:N+2]};

    size_t p = ks;
    do {
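      // Fetch the next MR row pointers from the indirection buffer; entries
      // equal to `zero` point at the zero buffer and must not be offset.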
      $for M in range(MR):
        const int8_t* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      $if MLA:
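        // Main loop: 16 bytes of K per row per iteration. Each 8-byte B vector
        // holds 4 weights for each of two adjacent columns; VEXT rotates the A
        // vectors by 4 bytes between the c0 and c1 steps so both halves of
        // every B load multiply the matching K group (the "s2" shuffle).
        // VMULL starts each product, VMLAL folds in the second A vector.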
        while (k >= 16 * sizeof(int8_t)) {
          $for M in range(MR):
            int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
            int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;

          $for K in range(2):
            $for N in range(0, NR, 2):
              const int8x8_t vb${ABC[N:N+2]}c${K}x0 = vld1_s8(w); w = (const int8_t*) w + 8;

          $for K in range(2):
            $for N in range(0, NR, 2):
              $for M in range(MR):
                int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}x0, va${M}x0);
              const int8x8_t vb${ABC[N:N+2]}c${K}x1 = vld1_s8(w); w = (const int8_t*) w + 8;
              $for M in range(MR):
                vprod${M}x${ABC[N:N+2]}c${K} = vmlal_s8(vprod${M}x${ABC[N:N+2]}c${K}, vb${ABC[N:N+2]}c${K}x1, va${M}x1);
              $for M in range(MR):
                vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});
            $if K + 1 != 2:
              $for M in range(MR):
                va${M}x0 = vext_s8(va${M}x0, va${M}x0, 4);
                va${M}x1 = vext_s8(va${M}x1, va${M}x1, 4);

          k -= 16 * sizeof(int8_t);
        }
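      // Remainder: one 8-byte group of K per row. With MLAL this runs at most
      // once (kc is a multiple of 8); without it, this loop handles all of K.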
      ${"if (k != 0)" if MLA else "do"} {
        $for M in range(MR):
          int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;

        $for K in range(2):
          $for N in range(0, NR, 2):
            const int8x8_t vb${ABC[N:N+2]}c${K}x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        $for K in range(2):
          $for N in range(0, NR, 2):
            $for M in range(MR):
              int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}x0, va${M}x0);
            $for M in range(MR):
              vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});
          $if K + 1 != 2:
            $for M in range(MR):
              va${M}x0 = vext_s8(va${M}x0, va${M}x0, 4);

        $if not MLA:
          k -= 8 * sizeof(int8_t);
      }${"" if MLA else " while (k != 0);"}

      p -= ${MR} * sizeof(void*);
    } while (p != 0);

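    // Reduce: pairwise-add lane pairs so each final int32 lane holds the
    // complete accumulator for one output column. AArch64 has VPADDQ; on
    // AArch32 the same reduction is built from VPADD on 64-bit halves.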
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 4):
        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vacc${M}x${ABC[N:N+2]}, vacc${M}x${ABC[N+2:N+4]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 4):
        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N:N+2]}), vget_high_s32(vacc${M}x${ABC[N:N+2]}));
        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N+2:N+4]}), vget_high_s32(vacc${M}x${ABC[N+2:N+4]}));
        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#endif

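    // Requantize: RNDNU applies a saturating pre-shift, a doubling
    // multiply-high, and a rounding post-shift; FP32 scales in floating point
    // (per channel for qc8) and converts back with round-to-nearest, either
    // via VCVTN (ARMv8) or the magic-bias trick.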
    $if REQUANTIZATION == "RNDNU":
      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);

      $for M in range(MR):
        $for N in range(0, NR, 4):
          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
    $elif REQUANTIZATION == "FP32":
      $for M in range(MR):
        $for N in range(0, NR, 4):
          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});

      $if CHANNELWISE:
        $for N in range(0, NR, 4):
          const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32(w); w = (const float*) w + 4;
          $for M in range(MR):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
      $else:
        const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);

      $if ARMV8:
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
      $else:
        const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));

        const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
        $for M in range(MR):
          $for N in range(0, NR, 4):
            vacc${M}x${ABC[N:N+4]} = vqsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);

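    // Add the output zero point and narrow to int8 with saturation; the
    // magic-bias FP32 path above has already folded the zero point into
    // magic_bias_less_output_zero_point.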
    $if REQUANTIZATION != "FP32" or ARMV8:
      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
#if XNN_ARCH_ARM64
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]});

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
    $for M in range(MR):
      $for N in range(0, NR, 8):
        int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]}));

    $if REQUANTIZATION != "FP32" or ARMV8:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);

    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
        $elif M % 2 == 1:
          int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
        $elif M + 1 == MR:
          int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif

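    // Clamp the results to the caller-specified [output_min, output_max] range.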
    $if NR == 8 and MR == 1:
      const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $else:
      const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
          $else:
            vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));

    $if NR == 8 and MR == 1:
      const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $else:
      const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
    $for M in range(MR):
      $for N in range(0, NR, 16):
        $if N + 8 < NR:
          vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
        $elif M % 2 == 1:
          vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
        $elif M + 1 == MR:
          $if NR == 8 and MR == 1:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
          $else:
            vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));

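    // Full tile: store all NR columns for each row, advance the output
    // pointers by cn_stride, and rewind the indirection pointer by ks so the
    // same A entries are reused for the next NR columns.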
    if (nc >= ${NR}) {
      $for M in reversed(range(MR)):
        $for N in range(0, NR, 16):
          $if N + 8 < NR:
            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
          $elif M % 2 == 1:
            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
          $elif M + 1 == MR:
            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});

      $for M in reversed(range(MR)):
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= ${NR};
    } else {
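      // Tail: fewer than NR columns remain. Store 8, 4, 2, then 1 element(s)
      // according to the bits of nc, using VEXT to shift already-stored lanes
      // out of the output vectors between steps.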
      $if NR == 16:
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
          $elif M + 1 == MR:
            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
        if (nc & 8) {
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
            $elif M + 1 == MR:
              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
          $for M in reversed(range(MR)):
            $if M % 2 == 1:
              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
            $elif M + 1 == MR:
              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
        }
      if (nc & 4) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u32((void*) c${M}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
            vst1q_lane_u32((void*) c${M-1}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
          $elif M + 1 == MR:
            vst1_lane_u32((void*) c${M}, vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
      }
      if (nc & 2) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_u16((void*) c${M}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
            vst1q_lane_u16((void*) c${M-1}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
          $elif M + 1 == MR:
            vst1_lane_u16((void*) c${M}, vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
          $elif M + 1 == MR:
            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
      }
      if (nc & 1) {
        $for M in reversed(range(MR)):
          $if M % 2 == 1:
            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
          $elif M + 1 == MR:
            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}