// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/argmaxpool.h>

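// f32 argmax-pooling microkernel: a single ("9x") pass over up to 9 pooling
// elements, processing channels in groups of 4 ("c4") with NEON vectors.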
void xnn_f32_argmaxpool_ukernel_9x__neon_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(pooling_elements != 0);
  assert(pooling_elements <= 9);
  assert(channels != 0);

  do {
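    // Gather the 9 pooling-element pointers for this output pixel and bias
    // each one by input_offset (in bytes).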
    const float* i0 = input[0];
    const float* i1 = input[1];
    const float* i2 = input[2];
    const float* i3 = input[3];
    const float* i4 = input[4];
    const float* i5 = input[5];
    const float* i6 = input[6];
    const float* i7 = input[7];
    const float* i8 = input[8];
    i0 = (const float*) ((uintptr_t) i0 + input_offset);
    i1 = (const float*) ((uintptr_t) i1 + input_offset);
    i2 = (const float*) ((uintptr_t) i2 + input_offset);
    i3 = (const float*) ((uintptr_t) i3 + input_offset);
    i4 = (const float*) ((uintptr_t) i4 + input_offset);
    i5 = (const float*) ((uintptr_t) i5 + input_offset);
    i6 = (const float*) ((uintptr_t) i6 + input_offset);
    i7 = (const float*) ((uintptr_t) i7 + input_offset);
    i8 = (const float*) ((uintptr_t) i8 + input_offset);
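    // Redirect the pointers for unused pooling elements to i0: the comparison
    // below is strict (>), so duplicates of i0 can never replace the running
    // maximum or its index.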
    if (pooling_elements < 2) {
      i1 = i0;
    }
    if (pooling_elements <= 2) {
      i2 = i0;
    }
    if (pooling_elements < 4) {
      i3 = i0;
    }
    if (pooling_elements <= 4) {
      i4 = i0;
    }
    if (pooling_elements < 6) {
      i5 = i0;
    }
    if (pooling_elements <= 6) {
      i6 = i0;
    }
    if (pooling_elements < 8) {
      i7 = i0;
    }
    if (pooling_elements <= 8) {
      i8 = i0;
    }

    size_t c = channels;
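    // Main loop: process 4 channels per iteration with full 128-bit vectors.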
    for (; c >= 4; c -= 4) {
      const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vi7 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vi8 = vld1q_f32(i8); i8 += 4;

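      // vmax holds the per-lane running maximum and vidx the pooling-element
      // index (0-8) that produced it; lanes where vi_n > vmax take the new
      // value and index n.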
      float32x4_t vmax = vi0;
      uint32x4_t vidx = vmovq_n_u32(0);

      const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
      vmax = vbslq_f32(vm1, vi1, vmax);
      vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);

      const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
      vmax = vbslq_f32(vm2, vi2, vmax);
      vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);

      const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
      vmax = vbslq_f32(vm3, vi3, vmax);
      vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);

      const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
      vmax = vbslq_f32(vm4, vi4, vmax);
      vidx = vbslq_u32(vm4, vmovq_n_u32(4), vidx);

      const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
      vmax = vbslq_f32(vm5, vi5, vmax);
      vidx = vbslq_u32(vm5, vmovq_n_u32(5), vidx);

      const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
      vmax = vbslq_f32(vm6, vi6, vmax);
      vidx = vbslq_u32(vm6, vmovq_n_u32(6), vidx);

      const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
      vmax = vbslq_f32(vm7, vi7, vmax);
      vidx = vbslq_u32(vm7, vmovq_n_u32(7), vidx);

      const uint32x4_t vm8 = vcgtq_f32(vi8, vmax);
      vmax = vbslq_f32(vm8, vi8, vmax);
      vidx = vbslq_u32(vm8, vmovq_n_u32(8), vidx);

      vst1q_f32(output, vmax); output += 4;
      vst1q_u32(index, vidx); index += 4;
    }
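    // Remainder of 1-3 channels: load full vectors anyway (XNN_OOB_READS
    // marks the deliberate over-read) and store only the valid lanes below.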
    if (c != 0) {
      const float32x4_t vi0 = vld1q_f32(i0);
      const float32x4_t vi1 = vld1q_f32(i1);
      const float32x4_t vi2 = vld1q_f32(i2);
      const float32x4_t vi3 = vld1q_f32(i3);
      const float32x4_t vi4 = vld1q_f32(i4);
      const float32x4_t vi5 = vld1q_f32(i5);
      const float32x4_t vi6 = vld1q_f32(i6);
      const float32x4_t vi7 = vld1q_f32(i7);
      const float32x4_t vi8 = vld1q_f32(i8);

      float32x4_t vmax = vi0;
      uint32x4_t vidx = vmovq_n_u32(0);

      const uint32x4_t vm1 = vcgtq_f32(vi1, vmax);
      vmax = vbslq_f32(vm1, vi1, vmax);
      vidx = vbslq_u32(vm1, vmovq_n_u32(1), vidx);

      const uint32x4_t vm2 = vcgtq_f32(vi2, vmax);
      vmax = vbslq_f32(vm2, vi2, vmax);
      vidx = vbslq_u32(vm2, vmovq_n_u32(2), vidx);

      const uint32x4_t vm3 = vcgtq_f32(vi3, vmax);
      vmax = vbslq_f32(vm3, vi3, vmax);
      vidx = vbslq_u32(vm3, vmovq_n_u32(3), vidx);

      const uint32x4_t vm4 = vcgtq_f32(vi4, vmax);
      vmax = vbslq_f32(vm4, vi4, vmax);
      vidx = vbslq_u32(vm4, vmovq_n_u32(4), vidx);

      const uint32x4_t vm5 = vcgtq_f32(vi5, vmax);
      vmax = vbslq_f32(vm5, vi5, vmax);
      vidx = vbslq_u32(vm5, vmovq_n_u32(5), vidx);

      const uint32x4_t vm6 = vcgtq_f32(vi6, vmax);
      vmax = vbslq_f32(vm6, vi6, vmax);
      vidx = vbslq_u32(vm6, vmovq_n_u32(6), vidx);

      const uint32x4_t vm7 = vcgtq_f32(vi7, vmax);
      vmax = vbslq_f32(vm7, vi7, vmax);
      vidx = vbslq_u32(vm7, vmovq_n_u32(7), vidx);

      const uint32x4_t vm8 = vcgtq_f32(vi8, vmax);
      vmax = vbslq_f32(vm8, vi8, vmax);
      vidx = vbslq_u32(vm8, vmovq_n_u32(8), vidx);

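      // Store the valid lanes: the low pair if c >= 2 (then shift the high
      // half down), followed by a single lane if c is odd.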
      float32x2_t vmax_lo = vget_low_f32(vmax);
      uint32x2_t vidx_lo = vget_low_u32(vidx);
      if (c & 2) {
        vst1_f32(output, vmax_lo); output += 2;
        vst1_u32(index, vidx_lo); index += 2;
        vmax_lo = vget_high_f32(vmax);
        vidx_lo = vget_high_u32(vidx);
      }
      if (c & 1) {
        vst1_lane_f32(output, vmax_lo, 0); output += 1;
        vst1_lane_u32(index, vidx_lo, 0); index += 1;
      }
    }
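    // Advance the indirection-buffer pointer and the output pointer to the
    // next output pixel.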
    input = (const float**) ((uintptr_t) input + input_increment);
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}