// File: /aosp_15_r20/external/XNNPACK/src/f16-gavgpool/gen/7x-minmax-neonfp16arith-c8.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/f16-gavgpool/unipass-neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>


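// Unipass global-average-pooling microkernel: sums up to 7 rows of f16 data,
// scales the sum, clamps it to [min, max], and writes one output row,
// processing 8 channels per loop iteration with NEON FP16 arithmetic.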
void xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c8(
    size_t rows,
    size_t channels,
    const void* input,
    size_t input_stride,
    const void* zero,
    void* output,
    const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

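  // One input pointer per pooled row. Pointers for rows beyond `rows` are
  // redirected to the `zero` buffer, so missing rows add nothing to the sum.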
  const __fp16* i0 = input;
  const __fp16* i1 = (const __fp16*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = (const __fp16*) zero;
  }
  const __fp16* i2 = (const __fp16*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = (const __fp16*) zero;
  }
  const __fp16* i3 = (const __fp16*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = (const __fp16*) zero;
  }
  const __fp16* i4 = (const __fp16*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = (const __fp16*) zero;
  }
  const __fp16* i5 = (const __fp16*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = (const __fp16*) zero;
  }
  const __fp16* i6 = (const __fp16*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = (const __fp16*) zero;
  }

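  // The scale factor and clamping bounds are stored in params as raw
  // half-precision bit patterns; load-and-duplicate them as u16 lanes and
  // reinterpret the result as f16 vectors.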
  const float16x8_t vscale = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.scale));
  const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
  const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
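  // Main loop: for each group of 8 channels, accumulate all 7 row vectors,
  // multiply by the scale, clamp to [vmin, vmax], and store.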
  for (; channels >= 8; channels -= 8) {
    const float16x8_t vi0x01234567 = vld1q_f16(i0); i0 += 8;
    const float16x8_t vi1x01234567 = vld1q_f16(i1); i1 += 8;

    const float16x8_t vi2x01234567 = vld1q_f16(i2); i2 += 8;
    float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);

    const float16x8_t vi3x01234567 = vld1q_f16(i3); i3 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
    const float16x8_t vi4x01234567 = vld1q_f16(i4); i4 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
    const float16x8_t vi5x01234567 = vld1q_f16(i5); i5 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
    const float16x8_t vi6x01234567 = vld1q_f16(i6); i6 += 8;
    vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
    vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);

    vacc01234567 = vmulq_f16(vacc01234567, vscale);

    vacc01234567 = vmaxq_f16(vacc01234567, vmin);

    vacc01234567 = vminq_f16(vacc01234567, vmax);

    vst1q_f16(output, vacc01234567); output = (__fp16*) output + 8;
  }
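  // Remainder: 1-7 channels left. Full 8-lane loads may read past the end of
  // each row, which the XNN_OOB_READS annotation permits; only the valid
  // lanes are stored below.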
  if XNN_UNLIKELY(channels != 0) {
    {
      const float16x8_t vi0x01234567 = vld1q_f16(i0); i0 += 8;
      const float16x8_t vi1x01234567 = vld1q_f16(i1); i1 += 8;

      const float16x8_t vi2x01234567 = vld1q_f16(i2); i2 += 8;
      float16x8_t vacc01234567 = vaddq_f16(vi0x01234567, vi1x01234567);

      const float16x8_t vi3x01234567 = vld1q_f16(i3); i3 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi2x01234567);
      const float16x8_t vi4x01234567 = vld1q_f16(i4); i4 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi3x01234567);
      const float16x8_t vi5x01234567 = vld1q_f16(i5); i5 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi4x01234567);
      const float16x8_t vi6x01234567 = vld1q_f16(i6); i6 += 8;
      vacc01234567 = vaddq_f16(vacc01234567, vi5x01234567);
      vacc01234567 = vaddq_f16(vacc01234567, vi6x01234567);

      vacc01234567 = vmulq_f16(vacc01234567, vscale);
      vacc01234567 = vmaxq_f16(vacc01234567, vmin);
      vacc01234567 = vminq_f16(vacc01234567, vmax);

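      // Write out the valid lanes in chunks of 4, 2, and 1, shifting
      // already-stored lanes out of vacc0123 after each partial store.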
      float16x4_t vacc0123 = vget_low_f16(vacc01234567);
      if (channels & 4) {
        vst1_f16(output, vacc0123); output = (__fp16*) output + 4;
        vacc0123 = vget_high_f16(vacc01234567);
      }
      if (channels & 2) {
        vst1_lane_u32(output, vreinterpret_u32_f16(vacc0123), 0); output = (__fp16*) output + 2;
        vacc0123 = vext_f16(vacc0123, vacc0123, 2);
      }
      if (channels & 1) {
        vst1_lane_f16(output, vacc0123, 0); output = (__fp16*) output + 1;
      }
    }
  }
}