// Auto-generated file. Do not edit!
//   Template: src/qs8-vadd/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vadd.h>

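// QU8 element-wise addition with min/max clamping (NEON, 128-bit loads, 16 elements
// per main-loop iteration): widen both inputs, subtract their zero points, scale each
// by a per-input int32 multiplier, requantize with a rounding shift, add the output
// zero point, and clamp to [output_min, output_max].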
void xnn_qu8_vadd_minmax_ukernel__neon_ld128_x16(
    size_t n,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  #if XNN_ARCH_ARM64
    const uint8x16_t va_zero_point = vld1q_dup_u8(&params->neon.a_zero_point);
    const uint8x16_t vb_zero_point = vld1q_dup_u8(&params->neon.b_zero_point);
  #else
    const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
    const uint8x8_t vb_zero_point = vld1_dup_u8(&params->neon.b_zero_point);
  #endif
  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
  const int32x4_t vb_multiplier = vld1q_dup_s32(&params->neon.b_multiplier);
  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);

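  // Main loop: process 16 elements per iteration with full 128-bit vectors.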
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
    const uint8x16_t vb0123456789ABCDEF = vld1q_u8(input_b); input_b += 16;

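    // Widen both inputs to int16 and subtract the per-input zero points.
    // On AArch64 the high halves come straight from the q-registers via vsubl_high_u8.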
    #if XNN_ARCH_ARM64
      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
      const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vget_low_u8(vb_zero_point)));
      const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(vb0123456789ABCDEF, vb_zero_point));
    #else  // !XNN_ARCH_ARM64
      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
      const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vb_zero_point));
      const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb0123456789ABCDEF), vb_zero_point));
    #endif  // XNN_ARCH_ARM64

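    // Scale the zero-point-adjusted 'a' input by its int32 multiplier.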
    int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
    int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
    int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
    int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);

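    // Accumulate the scaled 'b' input.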
    vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
    vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
    vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
    vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);

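    // Requantize: vrshlq_s32 applies a rounding shift, so a negative right_shift
    // value shifts the accumulators right with rounding.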
    vacc0123 = vrshlq_s32(vacc0123, vright_shift);
    vacc4567 = vrshlq_s32(vacc4567, vright_shift);
    vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
    vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);

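    // Narrow to int16 with saturation and add the output zero point.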
    const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
    const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);

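    // Saturate to uint8.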
    uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));

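    // Clamp to the requested output range.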
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);

    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);

    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
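  // Remainder: handle the final 1-15 elements in 8-element chunks. The XNN_OOB_READS
  // annotation on this kernel signals that the full 8-byte loads below may read past
  // the end of the inputs when fewer than 8 elements remain.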
  if XNN_UNLIKELY(n != 0) {
    do {
      const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
      const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;

      #if XNN_ARCH_ARM64
        const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
        const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vget_low_u8(vb_zero_point)));
      #else  // !XNN_ARCH_ARM64
        const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
        const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
      #endif  // XNN_ARCH_ARM64

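      // Same scale/shift/zero-point pipeline as the main loop, applied to 8 elements.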
      int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
      int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);

      vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
      vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);

      vacc0123 = vrshlq_s32(vacc0123, vright_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_shift);

      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

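      // Store all 8 elements while at least 8 remain; otherwise store the last 1-7
      // elements in 4-, 2-, and 1-byte pieces selected by the low bits of n.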
      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
        vst1_u8(output, vout01234567); output += 8;
        n -= 8 * sizeof(uint8_t);
      } else {
        if (n & (4 * sizeof(uint8_t))) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (n & (2 * sizeof(uint8_t))) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (n & (1 * sizeof(uint8_t))) {
          vst1_lane_u8(output, vout01234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}