// Auto-generated file. Do not edit!
//   Template: src/qs8-vmul/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vmul.h>

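// Element-wise multiplication of two QU8 (unsigned 8-bit quantized) vectors
// with fp32 requantization and fused min/max clamping. Per the XNNPACK kernel
// naming convention, "ld128" refers to 128-bit vector loads and "x16" to the
// 16 elements processed per main-loop iteration.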
void xnn_qu8_vmul_minmax_fp32_ukernel__neon_ld128_x16(
    size_t n,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  #if XNN_ARCH_ARM64
    const uint8x16_t va_zero_point = vld1q_dup_u8(params->fp32_neon.a_zero_point);
    const uint8x16_t vb_zero_point = vld1q_dup_u8(params->fp32_neon.b_zero_point);
  #else
    const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neon.a_zero_point);
    const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neon.b_zero_point);
  #endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);

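  // Main loop: process 16 elements per iteration using full 128-bit vectors.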
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
    const uint8x16_t vb0123456789ABCDEF = vld1q_u8(input_b); input_b += 16;

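    // Widen the inputs to signed 16 bits while subtracting the zero points.
    // On AArch64, vsubl_high_u8 handles the upper halves without an explicit vget_high_u8.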
    #if XNN_ARCH_ARM64
      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
      const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vget_low_u8(vb_zero_point)));
      const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(vb0123456789ABCDEF, vb_zero_point));
    #else  // !XNN_ARCH_ARM64
      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
      const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vb_zero_point));
      const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb0123456789ABCDEF), vb_zero_point));
    #endif  // XNN_ARCH_ARM64

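    // Multiply the zero-point-adjusted values into signed 32-bit accumulators.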
    int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
    int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
    int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
    int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));

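    // Convert the accumulators to float for fp32 requantization.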
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);

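    // Apply the precomputed requantization scale (for a quantized multiply,
    // the product of the input scales divided by the output scale).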
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);

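    // Magic-bias rounding: adding a large power-of-two bias forces the float
    // to round to the nearest integer and leaves that integer in the low
    // mantissa bits, so the sum can be reinterpreted as an int32.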
    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
    vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));

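    // A saturating subtract of (magic bias bit pattern - output zero point)
    // recovers the rounded result with the output zero point added, clamping
    // inputs that fell outside the magic-bias range.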
    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);

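    // Saturating narrow from 32 to 16 bits; on AArch64, vqmovn_high_s32 packs
    // directly into the upper half and avoids a separate vcombine_s16.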
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif

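    // Saturating narrow from signed 16-bit to unsigned 8-bit.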
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    #else
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    #endif

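    // Clamp to the caller-specified output range.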
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);

    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
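  // Remainder loop: handles the final 1-15 elements in batches of up to 8,
  // repeating the vector path on half-width registers. The full 8-byte loads
  // may read past the end of the inputs; this is permitted by the
  // XNN_OOB_READS annotation on the kernel.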
  if XNN_UNLIKELY(n != 0) {
    do {
      const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
      const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;

      #if XNN_ARCH_ARM64
        const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
        const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vget_low_u8(vb_zero_point)));
      #else  // !XNN_ARCH_ARM64
        const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
        const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
      #endif  // XNN_ARCH_ARM64

      int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
      int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);

      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
        vst1_u8(output, vout01234567); output += 8;
        n -= 8 * sizeof(uint8_t);
      } else {
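        // Store the final 1-7 bytes in 4-, 2-, and 1-byte pieces, rotating the
        // already-stored lanes out of the vector after each partial store.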
        if (n & (4 * sizeof(uint8_t))) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (n & (2 * sizeof(uint8_t))) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (n & (1 * sizeof(uint8_t))) {
          vst1_lane_u8(output, vout01234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}