// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


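// Depthwise-convolution minmax micro-kernel: 9 taps (e.g. a 3x3 window),
// processing up to 16 channels per loop iteration with NEON, and using two
// accumulator chains ("acc2") to shorten the multiply-add dependency chain.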
void xnn_f32_dwconv_minmax_ukernel_up16x9__neon_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  do {
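    // Set up the 9 input row pointers for this output pixel. Rows that point
    // at the shared zero (padding) buffer are left as-is; every other row is
    // shifted by input_offset.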
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }

    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
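    // Weights are packed per 16-channel group: 16 biases followed by 9 taps
    // of 16 kernel values each, consumed sequentially below.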
    for (; c >= 16; c -= 16) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
      float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
      float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
      float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
      const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
      vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
      vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
      vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);

      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
      const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
      float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
      float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
      float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);

      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
      const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
      vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
      vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
      vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);

      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
      const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
      vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
      vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi3x89AB, vk3x89AB);
      vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi3xCDEF, vk3xCDEF);

      const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vi4x4567 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vi4x89AB = vld1q_f32(i4); i4 += 4;
      const float32x4_t vi4xCDEF = vld1q_f32(i4); i4 += 4;
      const float32x4_t vk4x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk4x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk4x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk4xCDEF = vld1q_f32(w); w += 4;
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);
      vacc4567p0 = vmlaq_f32(vacc4567p0, vi4x4567, vk4x4567);
      vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi4x89AB, vk4x89AB);
      vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi4xCDEF, vk4xCDEF);

      const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vi5x4567 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vi5x89AB = vld1q_f32(i5); i5 += 4;
      const float32x4_t vi5xCDEF = vld1q_f32(i5); i5 += 4;
      const float32x4_t vk5x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk5x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk5x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk5xCDEF = vld1q_f32(w); w += 4;
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);
      vacc4567p1 = vmlaq_f32(vacc4567p1, vi5x4567, vk5x4567);
      vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi5x89AB, vk5x89AB);
      vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi5xCDEF, vk5xCDEF);

      const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vi6x4567 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vi6x89AB = vld1q_f32(i6); i6 += 4;
      const float32x4_t vi6xCDEF = vld1q_f32(i6); i6 += 4;
      const float32x4_t vk6x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk6x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk6x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk6xCDEF = vld1q_f32(w); w += 4;
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);
      vacc4567p0 = vmlaq_f32(vacc4567p0, vi6x4567, vk6x4567);
      vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi6x89AB, vk6x89AB);
      vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi6xCDEF, vk6xCDEF);

      const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vi7x4567 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vi7x89AB = vld1q_f32(i7); i7 += 4;
      const float32x4_t vi7xCDEF = vld1q_f32(i7); i7 += 4;
      const float32x4_t vk7x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk7x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk7x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk7xCDEF = vld1q_f32(w); w += 4;
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);
      vacc4567p1 = vmlaq_f32(vacc4567p1, vi7x4567, vk7x4567);
      vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi7x89AB, vk7x89AB);
      vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi7xCDEF, vk7xCDEF);

      const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
      const float32x4_t vi8x4567 = vld1q_f32(i8); i8 += 4;
      const float32x4_t vi8x89AB = vld1q_f32(i8); i8 += 4;
      const float32x4_t vi8xCDEF = vld1q_f32(i8); i8 += 4;
      const float32x4_t vk8x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk8x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk8x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk8xCDEF = vld1q_f32(w); w += 4;
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);
      vacc4567p0 = vmlaq_f32(vacc4567p0, vi8x4567, vk8x4567);
      vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi8x89AB, vk8x89AB);
      vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi8xCDEF, vk8xCDEF);

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
      vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
      vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
      vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
      float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
      float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);
      vacc4567 = vminq_f32(vacc4567, vmax);
      vacc89AB = vminq_f32(vacc89AB, vmax);
      vaccCDEF = vminq_f32(vaccCDEF, vmax);

      vst1q_f32(output, vacc0123); output += 4;
      vst1q_f32(output, vacc4567); output += 4;
      vst1q_f32(output, vacc89AB); output += 4;
      vst1q_f32(output, vaccCDEF); output += 4;
    }
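    // Handle 4 channels at a time from the final, partially used 16-channel
    // weight group. Only the bias load advances w, so tap t for the current
    // 4 channels sits at the fixed offset w + 16*t + 12.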
    for (; c >= 4; c -= 4) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vk0x0123 = vld1q_f32(w + 12);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vk1x0123 = vld1q_f32(w + 28);
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vk2x0123 = vld1q_f32(w + 44);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vk3x0123 = vld1q_f32(w + 60);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);

      const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4;
      const float32x4_t vk4x0123 = vld1q_f32(w + 76);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);

      const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4;
      const float32x4_t vk5x0123 = vld1q_f32(w + 92);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);

      const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4;
      const float32x4_t vk6x0123 = vld1q_f32(w + 108);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);

      const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4;
      const float32x4_t vk7x0123 = vld1q_f32(w + 124);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);

      const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4;
      const float32x4_t vk8x0123 = vld1q_f32(w + 140);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

      vst1q_f32(output, vacc0123); output += 4;
    }
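    // Process the last 1-3 channels. w was not advanced past the bias here,
    // so tap t is at w + 16*(t + 1). The full-vector loads may read up to
    // three floats past the last channel; the XNN_OOB_READS annotation on
    // this kernel covers that, and the extra lanes are never stored.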
    if XNN_UNLIKELY(c != 0) {
      float32x4_t vacc0123p0 = vld1q_f32(w);


      const float32x4_t vi0x0123 = vld1q_f32(i0);
      const float32x4_t vk0x0123 = vld1q_f32(w + 16);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1);
      const float32x4_t vk1x0123 = vld1q_f32(w + 32);
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2);
      const float32x4_t vk2x0123 = vld1q_f32(w + 48);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3);
      const float32x4_t vk3x0123 = vld1q_f32(w + 64);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);

      const float32x4_t vi4x0123 = vld1q_f32(i4);
      const float32x4_t vk4x0123 = vld1q_f32(w + 80);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi4x0123, vk4x0123);

      const float32x4_t vi5x0123 = vld1q_f32(i5);
      const float32x4_t vk5x0123 = vld1q_f32(w + 96);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi5x0123, vk5x0123);

      const float32x4_t vi6x0123 = vld1q_f32(i6);
      const float32x4_t vk6x0123 = vld1q_f32(w + 112);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi6x0123, vk6x0123);

      const float32x4_t vi7x0123 = vld1q_f32(i7);
      const float32x4_t vk7x0123 = vld1q_f32(w + 128);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi7x0123, vk7x0123);

      const float32x4_t vi8x0123 = vld1q_f32(i8);
      const float32x4_t vk8x0123 = vld1q_f32(w + 144);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi8x0123, vk8x0123);

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

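      // Store the remaining 1-3 lanes: a pair when bit 1 of c is set, then a
      // single lane when bit 0 is set.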
      float32x2_t vacc01 = vget_low_f32(vacc0123);
      if (c & 2) {
        vst1_f32(output, vacc01); output += 2;
        vacc01 = vget_high_f32(vacc0123);
      }
      if (c & 1) {
        vst1_lane_f32(output, vacc01, 0); output += 1;
      }
    }

    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}