// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-neon-mul16.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>

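// Depthwise convolution microkernel for QS8 (signed 8-bit quantized) data:
// unipass ("up"), 8 channels per main-loop iteration, 9 kernel taps ("8x9"),
// using NEON 16-bit widening multiply-accumulate ("mul16") and fp32
// requantization with the magic-bias trick.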
void xnn_qs8_dwconv_minmax_fp32_ukernel_up8x9__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

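  // Requantization parameters for the fp32 "magic bias" path: a float scale,
  // a magic bias that forces float-to-integer rounding, the magic bias bit
  // pattern minus the output zero point, and the output clamping bounds.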
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
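  // Each outer-loop iteration produces one output pixel. input[] is an
  // indirection buffer of 9 row pointers; a pointer equal to the shared
  // `zero` buffer marks a padding row and must not be offset by input_offset.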
  do {
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
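    // Advance the indirection buffer to the pointers for the next output pixel.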
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
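    // Main loop: 8 channels per iteration. The packed weights interleave
    // 8 int32 bias values followed by 9 groups of 8 int8 kernel taps.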
    for (; c >= 8; c -= 8) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

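      // For each of the 9 taps: widen the input and kernel bytes to 16 bits
      // and multiply-accumulate into the two 32-bit accumulator halves.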
      const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

      const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

      const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

      const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

      const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

      const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

      const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

      const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

      const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

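      // Requantize: convert to float and scale, then add the magic bias so
      // the rounded result lands in the low mantissa bits; reinterpreting the
      // bits as int32 and saturating-subtracting (magic bias bits minus the
      // output zero point) recovers the zero-point-adjusted quantized value.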
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

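      // Saturating narrow 32 -> 16 -> 8 bits. AArch64 provides
      // vqmovn_high_s32; AArch32 combines two vqmovn_s32 halves instead.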
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

      vout01234567 = vmax_s8(vout01234567, voutput_min);

      vout01234567 = vmin_s8(vout01234567, voutput_max);

      vst1_s8(output, vout01234567); output += 8;
    }
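    // Remainder: 1-7 channels are left. Full 8-lane vectors are still loaded
    // (the kernel is annotated XNN_OOB_READS for this reason), but only `c`
    // bytes of output are stored below.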
    if XNN_UNLIKELY(c != 0) {
      {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 8)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 16)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 24)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 32)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 40)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 48)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 56)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 64)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
        vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

        vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
        vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);

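        // Store the remaining 1-7 bytes lane by lane: 4, then 2, then 1,
        // rotating the vector with vext_s8 after each partial store.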
        if (c & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (c & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
      }
    }

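    // output_increment bridges the gap between the last channel written for
    // this pixel and the start of the next output pixel.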
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}