// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>

void xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8(
    size_t elements,
    size_t channels,
    const void* input,
    void* output,
    const union xnn_f16_gavgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(elements != 0);
  assert(elements % sizeof(__fp16) == 0);
  assert(channels != 0);

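  // Each channel's data is `elements` bytes long and stored contiguously, so
  // the four row pointers below address four consecutive channels.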
  __fp16* o = (__fp16*) output;
  const __fp16* i0 = input;
  const __fp16* i1 = (const __fp16*) ((uintptr_t) i0 + elements);
  const __fp16* i2 = (const __fp16*) ((uintptr_t) i1 + elements);
  const __fp16* i3 = (const __fp16*) ((uintptr_t) i2 + elements);

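  // The mask zeroes lanes past the vector remainder, the multiplier is the
  // averaging scale (the reciprocal of the per-channel element count), and
  // the min/max clamp the output.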
  const uint16x8_t vmask = vld1q_u16(params->neonfp16arith.mask);
  const float16x4_t vmultiplier = vreinterpret_f16_u16(vld1_dup_u16(&params->neonfp16arith.multiplier));
  const float16x4_t voutput_min = vreinterpret_f16_u16(vld1_dup_u16(&params->neonfp16arith.output_min));
  const float16x4_t voutput_max = vreinterpret_f16_u16(vld1_dup_u16(&params->neonfp16arith.output_max));

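  // Main loop: sum 4 channels at a time, 8 half-precision elements per load,
  // keeping each channel's running sum in its own vector register.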
  while (channels >= 4) {
    float16x8_t vsum0 = vmovq_n_f16(0);
    float16x8_t vsum1 = vmovq_n_f16(0);
    float16x8_t vsum2 = vmovq_n_f16(0);
    float16x8_t vsum3 = vmovq_n_f16(0);
    size_t n = elements;
    while (n >= 8 * sizeof(__fp16)) {
      const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;
      const float16x8_t vi1 = vld1q_f16(i1); i1 += 8;
      const float16x8_t vi2 = vld1q_f16(i2); i2 += 8;
      const float16x8_t vi3 = vld1q_f16(i3); i3 += 8;

      vsum0 = vaddq_f16(vsum0, vi0);
      vsum1 = vaddq_f16(vsum1, vi1);
      vsum2 = vaddq_f16(vsum2, vi2);
      vsum3 = vaddq_f16(vsum3, vi3);
      n -= 8 * sizeof(__fp16);
    }

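    // Fewer than 8 elements remain: load a full vector anyway (XNN_OOB_READS
    // permits reading past the end of each row) and mask off the lanes beyond
    // the remainder so they do not contribute to the sums.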
    if XNN_UNLIKELY(n != 0) {
      float16x8_t vi0 = vld1q_f16(i0); i0 = (const __fp16*) ((uintptr_t) i0 + n);
      float16x8_t vi1 = vld1q_f16(i1); i1 = (const __fp16*) ((uintptr_t) i1 + n);
      float16x8_t vi2 = vld1q_f16(i2); i2 = (const __fp16*) ((uintptr_t) i2 + n);
      float16x8_t vi3 = vld1q_f16(i3); i3 = (const __fp16*) ((uintptr_t) i3 + n);

      vi0 = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0)));
      vi1 = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1)));
      vi2 = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2)));
      vi3 = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi3)));

      vsum0 = vaddq_f16(vsum0, vi0);
      vsum1 = vaddq_f16(vsum1, vi1);
      vsum2 = vaddq_f16(vsum2, vi2);
      vsum3 = vaddq_f16(vsum3, vi3);
    }

    // Having exactly 4 rows makes this work out nicely as we end up with
    // the 4 totals in 4 different lanes of the same vector.
    #if XNN_ARCH_ARM64
      const float16x8_t vsum01 = vpaddq_f16(vsum0, vsum1);
      const float16x8_t vsum23 = vpaddq_f16(vsum2, vsum3);
      const float16x8_t vsum0123 = vpaddq_f16(vsum01, vsum23);
      const float16x4_t vsum = vpadd_f16(vget_low_f16(vsum0123), vget_high_f16(vsum0123));
    #else
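      // AArch32 NEON has no 128-bit pairwise add for fp16 (vpaddq_f16 is
      // AArch64-only), so fold each sum to 64 bits before pairwise-adding.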
      const float16x4_t vsum0_lo = vadd_f16(vget_low_f16(vsum0), vget_high_f16(vsum0));
      const float16x4_t vsum1_lo = vadd_f16(vget_low_f16(vsum1), vget_high_f16(vsum1));
      const float16x4_t vsum2_lo = vadd_f16(vget_low_f16(vsum2), vget_high_f16(vsum2));
      const float16x4_t vsum3_lo = vadd_f16(vget_low_f16(vsum3), vget_high_f16(vsum3));
      const float16x4_t vsum01_lo = vpadd_f16(vsum0_lo, vsum1_lo);
      const float16x4_t vsum23_lo = vpadd_f16(vsum2_lo, vsum3_lo);
      const float16x4_t vsum = vpadd_f16(vsum01_lo, vsum23_lo);
    #endif

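    // Scale the sums by the averaging multiplier to get the means, then clamp
    // to the output range.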
    float16x4_t vout = vmul_f16(vsum, vmultiplier);

    vout = vmax_f16(vout, voutput_min);
    vout = vmin_f16(vout, voutput_max);

    vst1_f16(o, vout); o += 4;

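    // Advance the row pointers to the next group of 4 channels.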
    i0 = i3;
    i1 = (const __fp16*) ((uintptr_t) i0 + elements);
    i2 = (const __fp16*) ((uintptr_t) i1 + elements);
    i3 = (const __fp16*) ((uintptr_t) i2 + elements);
    channels -= 4;
  }

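  // Process the remaining (up to 3) channels one at a time.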
  while (channels != 0) {
    float16x8_t vsum0 = vmovq_n_f16(0);
    size_t n = elements;
    while (n >= 8 * sizeof(__fp16)) {
      const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;

      vsum0 = vaddq_f16(vsum0, vi0);
      n -= 8 * sizeof(__fp16);
    }

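    // Masked tail load, as in the 4-channel loop above.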
    if XNN_UNLIKELY(n != 0) {
      float16x8_t vi0 = vld1q_f16(i0); i0 = (const __fp16*) ((uintptr_t) i0 + n);

      vi0 = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0)));

      vsum0 = vaddq_f16(vsum0, vi0);
    }

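    // Reduce the 8 partial sums to a single total (replicated across all 4
    // lanes) via pairwise adds; only lane 0 is stored.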
    float16x4_t vsum = vadd_f16(vget_low_f16(vsum0), vget_high_f16(vsum0));
    vsum = vpadd_f16(vsum, vsum);
    vsum = vpadd_f16(vsum, vsum);

    float16x4_t vout = vmul_f16(vsum, vmultiplier);

    vout = vmax_f16(vout, voutput_min);
    vout = vmin_f16(vout, voutput_max);

    vst1_lane_f16(o, vout, 0); o += 1;
    channels -= 1;
  }
}