// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vmul.h>


void xnn_qu8_vmul_minmax_rndnu_ukernel__neon_ld64_x16(
    size_t n,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
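  // Load the requantization parameters and broadcast each one across a full NEON register:
  // the input zero points, the rndnu pre-shift/multiplier/post-shift, the output zero point,
  // and the output clamping bounds.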
  const uint8x8_t va_zero_point = vld1_dup_u8(params->rndnu_neon.a_zero_point);
  const uint8x8_t vb_zero_point = vld1_dup_u8(params->rndnu_neon.b_zero_point);
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

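  // Main loop: process 16 elements per iteration, loading each input as two 8-byte halves.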
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
    const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
    const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
    const uint8x8_t vb89ABCDEF = vld1_u8(input_b); input_b += 8;

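    // Subtract the zero points with a widening subtraction (vsubl_u8 -> uint16x8_t) and
    // reinterpret the result as signed 16-bit values; the differences fit in [-255, 255].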
    const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
    const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
    const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
    const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEF, vb_zero_point));

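    // Multiply the zero-point-adjusted inputs with a widening multiply (vmull_s16),
    // producing 32-bit products for the low and high halves of each 8-element group.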
    int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
    int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
    int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
    int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));

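    // Requantize with the rndnu scheme, step 1: saturating left shift by the pre-shift amount.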
    vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
    vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
    vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
    vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);

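    // Step 2: saturating doubling multiply-high with the fixed-point multiplier
    // (vqdmulhq_s32 keeps the high 32 bits of 2*a*b).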
    vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
    vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
    vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
    vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);

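    // Step 3: rounding shift by the post-shift amount; a negative shift count in vrshlq_s32
    // performs a rounding right shift.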
    vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
    vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
    vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
    vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);

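    // Narrow the 32-bit accumulators to 16 bits with signed saturation. AArch64 can narrow
    // directly into the upper half of a register (vqmovn_high_s32); AArch32 narrows each half
    // separately and combines.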
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif

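    // Add the output zero point with saturating 16-bit addition.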
    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

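    // Narrow to unsigned 8 bits with saturation (vqmovun_s16) and pack all 16 outputs.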
    #if XNN_ARCH_ARM64
      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
    #else
      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
    #endif

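    // Clamp to the [output_min, output_max] range and store all 16 bytes.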
    vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);

    vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);

    vst1q_u8(output, vout0123456789ABCDEF); output += 16;
  }
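  // Remainder: fewer than 16 elements are left. Each pass still loads full 8-byte vectors,
  // which may read past the logical end of the inputs; the XNN_OOB_READS annotation on the
  // kernel documents that this over-read is intentional.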
  if XNN_UNLIKELY(n != 0) {
    do {
      const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
      const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;

      const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
      const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));

      int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
      int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));

      vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
      vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);

      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

      vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);

      vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
      vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
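      // Store 8 outputs if at least 8 elements remain; otherwise store the final 1-7 outputs
      // in 4-, 2-, and 1-byte pieces, rotating the vector between the partial stores.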
      if XNN_LIKELY(n >= (8 * sizeof(uint8_t))) {
        vst1_u8(output, vout01234567); output += 8;
        n -= 8 * sizeof(uint8_t);
      } else {
        if (n & (4 * sizeof(uint8_t))) {
          vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
          vout01234567 = vext_u8(vout01234567, vout01234567, 4);
        }
        if (n & (2 * sizeof(uint8_t))) {
          vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
          vout01234567 = vext_u8(vout01234567, vout01234567, 2);
        }
        if (n & (1 * sizeof(uint8_t))) {
          vst1_lane_u8(output, vout01234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}