// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/5x5p2-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

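// 5x5 depthwise convolution microkernel for the CHW layout: stride 1 over a
// zero-padded input (padding_top == 2 is asserted; the left, right, and bottom
// edges are zero-padded implicitly below). Each outer-loop pass produces 3
// output rows, one pixel per inner-loop step. Note that input_width is given
// in bytes, not elements (see the asserts below).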
void xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_3x1(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

  const float vmin = params->scalar.min;
  const float vmax = params->scalar.max;

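  // The packed weights start with the bias, followed by the 25 taps of the 5x5
  // filter in row-major order (vkRC = kernel row R, column C).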
  const float vbias = weights[0];
  const float vk00 = weights[1];
  const float vk01 = weights[2];
  const float vk02 = weights[3];
  const float vk03 = weights[4];
  const float vk04 = weights[5];
  const float vk10 = weights[6];
  const float vk11 = weights[7];
  const float vk12 = weights[8];
  const float vk13 = weights[9];
  const float vk14 = weights[10];
  const float vk20 = weights[11];
  const float vk21 = weights[12];
  const float vk22 = weights[13];
  const float vk23 = weights[14];
  const float vk24 = weights[15];
  const float vk30 = weights[16];
  const float vk31 = weights[17];
  const float vk32 = weights[18];
  const float vk33 = weights[19];
  const float vk34 = weights[20];
  const float vk40 = weights[21];
  const float vk41 = weights[22];
  const float vk42 = weights[23];
  const float vk43 = weights[24];
  const float vk44 = weights[25];

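  // Row pointers: with padding_top == 2 the first two rows of the receptive
  // field read from the caller-provided zero row, while i2..i6 cover the 7
  // input rows needed to produce the first 3 output rows (o0..o2). Strides are
  // expressed in bytes, hence the uintptr_t arithmetic.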
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);

  size_t output_height = input_height;
  do {
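    // Bottom-edge handling: when fewer than 3 output rows remain, the missing
    // input rows are redirected to the zero row and the surplus output pointers
    // are aliased so their (redundant) stores still land on valid memory.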
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i5 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i6 = zero;
    }

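    // Left-edge handling: window columns 0 and 1 lie in the left padding and
    // start out as zeros; window column 2 is primed with the first real input
    // column of each row.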
    float vi0x0 = 0.0f;
    float vi1x0 = 0.0f;
    float vi2x0 = 0.0f;
    float vi3x0 = 0.0f;
    float vi4x0 = 0.0f;
    float vi5x0 = 0.0f;
    float vi6x0 = 0.0f;

    float vi0x1 = 0.0f;
    float vi1x1 = 0.0f;
    float vi2x1 = 0.0f;
    float vi3x1 = 0.0f;
    float vi4x1 = 0.0f;
    float vi5x1 = 0.0f;
    float vi6x1 = 0.0f;

    float vi0x2 = *i0++;
    float vi1x2 = *i1++;
    float vi2x2 = *i2++;
    float vi3x2 = *i3++;
    float vi4x2 = *i4++;
    float vi5x2 = *i5++;
    float vi6x2 = *i6++;

    size_t w = input_width;
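    // If the row is wider than one pixel, prime window column 3 with the second
    // input column and run the main loop; otherwise fall through to the
    // single-pixel tail at the bottom.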
    if (w > 1 * sizeof(float)) {
      float vi0x3 = *i0++;
      float vi1x3 = *i1++;
      float vi2x3 = *i2++;
      float vi3x3 = *i3++;
      float vi4x3 = *i4++;
      float vi5x3 = *i5++;
      float vi6x3 = *i6++;

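      // Main loop: covers every output column whose 5-wide window stays inside
      // the row (the last two columns need right padding and are handled by the
      // tail blocks below). Each iteration reads one new input column into
      // window position 4 and produces one output pixel in each of the 3 rows.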
      for (; w > 2 * sizeof(float); w -= 1 * sizeof(float)) {
        const float vi0x4 = *i0++;
        const float vi1x4 = *i1++;
        const float vi2x4 = *i2++;
        const float vi3x4 = *i3++;
        const float vi4x4 = *i4++;
        const float vi5x4 = *i5++;
        const float vi6x4 = *i6++;

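        // Accumulate the 25 multiply-adds per output row, one window column at
        // a time, shifting each column register left once it has been consumed.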
        float vo0p0 = vbias + vi0x0 * vk00;
        float vo1p0 = vbias + vi1x0 * vk00;
        float vo2p0 = vbias + vi2x0 * vk00;
        vo0p0 += vi1x0 * vk10;
        vo1p0 += vi2x0 * vk10;
        vo2p0 += vi3x0 * vk10;
        vo0p0 += vi2x0 * vk20;
        vo1p0 += vi3x0 * vk20;
        vo2p0 += vi4x0 * vk20;
        vo0p0 += vi3x0 * vk30;
        vo1p0 += vi4x0 * vk30;
        vo2p0 += vi5x0 * vk30;
        vo0p0 += vi4x0 * vk40;
        vo1p0 += vi5x0 * vk40;
        vo2p0 += vi6x0 * vk40;

        vi0x0 = vi0x1;
        vi1x0 = vi1x1;
        vi2x0 = vi2x1;
        vi3x0 = vi3x1;
        vi4x0 = vi4x1;
        vi5x0 = vi5x1;
        vi6x0 = vi6x1;

        vo0p0 += vi0x1 * vk01;
        vo1p0 += vi1x1 * vk01;
        vo2p0 += vi2x1 * vk01;
        vo0p0 += vi1x1 * vk11;
        vo1p0 += vi2x1 * vk11;
        vo2p0 += vi3x1 * vk11;
        vo0p0 += vi2x1 * vk21;
        vo1p0 += vi3x1 * vk21;
        vo2p0 += vi4x1 * vk21;
        vo0p0 += vi3x1 * vk31;
        vo1p0 += vi4x1 * vk31;
        vo2p0 += vi5x1 * vk31;
        vo0p0 += vi4x1 * vk41;
        vo1p0 += vi5x1 * vk41;
        vo2p0 += vi6x1 * vk41;

        vi0x1 = vi0x2;
        vi1x1 = vi1x2;
        vi2x1 = vi2x2;
        vi3x1 = vi3x2;
        vi4x1 = vi4x2;
        vi5x1 = vi5x2;
        vi6x1 = vi6x2;

        vo0p0 += vi0x2 * vk02;
        vo1p0 += vi1x2 * vk02;
        vo2p0 += vi2x2 * vk02;
        vo0p0 += vi1x2 * vk12;
        vo1p0 += vi2x2 * vk12;
        vo2p0 += vi3x2 * vk12;
        vo0p0 += vi2x2 * vk22;
        vo1p0 += vi3x2 * vk22;
        vo2p0 += vi4x2 * vk22;
        vo0p0 += vi3x2 * vk32;
        vo1p0 += vi4x2 * vk32;
        vo2p0 += vi5x2 * vk32;
        vo0p0 += vi4x2 * vk42;
        vo1p0 += vi5x2 * vk42;
        vo2p0 += vi6x2 * vk42;

        vi0x2 = vi0x3;
        vi1x2 = vi1x3;
        vi2x2 = vi2x3;
        vi3x2 = vi3x3;
        vi4x2 = vi4x3;
        vi5x2 = vi5x3;
        vi6x2 = vi6x3;

        vo0p0 += vi0x3 * vk03;
        vo1p0 += vi1x3 * vk03;
        vo2p0 += vi2x3 * vk03;
        vo0p0 += vi1x3 * vk13;
        vo1p0 += vi2x3 * vk13;
        vo2p0 += vi3x3 * vk13;
        vo0p0 += vi2x3 * vk23;
        vo1p0 += vi3x3 * vk23;
        vo2p0 += vi4x3 * vk23;
        vo0p0 += vi3x3 * vk33;
        vo1p0 += vi4x3 * vk33;
        vo2p0 += vi5x3 * vk33;
        vo0p0 += vi4x3 * vk43;
        vo1p0 += vi5x3 * vk43;
        vo2p0 += vi6x3 * vk43;

        vi0x3 = vi0x4;
        vi1x3 = vi1x4;
        vi2x3 = vi2x4;
        vi3x3 = vi3x4;
        vi4x3 = vi4x4;
        vi5x3 = vi5x4;
        vi6x3 = vi6x4;

        vo0p0 += vi0x4 * vk04;
        vo1p0 += vi1x4 * vk04;
        vo2p0 += vi2x4 * vk04;
        vo0p0 += vi1x4 * vk14;
        vo1p0 += vi2x4 * vk14;
        vo2p0 += vi3x4 * vk14;
        vo0p0 += vi2x4 * vk24;
        vo1p0 += vi3x4 * vk24;
        vo2p0 += vi4x4 * vk24;
        vo0p0 += vi3x4 * vk34;
        vo1p0 += vi4x4 * vk34;
        vo2p0 += vi5x4 * vk34;
        vo0p0 += vi4x4 * vk44;
        vo1p0 += vi5x4 * vk44;
        vo2p0 += vi6x4 * vk44;

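        // Clamp the accumulators to [vmin, vmax] and store one pixel per row.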
        float vo0 = math_max_f32(vo0p0, vmin);
        float vo1 = math_max_f32(vo1p0, vmin);
        float vo2 = math_max_f32(vo2p0, vmin);

        vo0 = math_min_f32(vo0, vmax);
        vo1 = math_min_f32(vo1, vmax);
        vo2 = math_min_f32(vo2, vmax);

        *o2++ = vo2;
        *o1++ = vo1;
        *o0++ = vo0;
      }
      assert(w == 2 * sizeof(float));
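      // Second-to-last column: window column 4 falls in the right padding, so
      // the vk*4 taps are skipped (their inputs are implicit zeros).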
      {
        float vo0p0 = vbias + vi0x0 * vk00;
        float vo1p0 = vbias + vi1x0 * vk00;
        float vo2p0 = vbias + vi2x0 * vk00;
        vo0p0 += vi1x0 * vk10;
        vo1p0 += vi2x0 * vk10;
        vo2p0 += vi3x0 * vk10;
        vo0p0 += vi2x0 * vk20;
        vo1p0 += vi3x0 * vk20;
        vo2p0 += vi4x0 * vk20;
        vo0p0 += vi3x0 * vk30;
        vo1p0 += vi4x0 * vk30;
        vo2p0 += vi5x0 * vk30;
        vo0p0 += vi4x0 * vk40;
        vo1p0 += vi5x0 * vk40;
        vo2p0 += vi6x0 * vk40;

        vi0x0 = vi0x1;
        vi1x0 = vi1x1;
        vi2x0 = vi2x1;
        vi3x0 = vi3x1;
        vi4x0 = vi4x1;
        vi5x0 = vi5x1;
        vi6x0 = vi6x1;

        vo0p0 += vi0x1 * vk01;
        vo1p0 += vi1x1 * vk01;
        vo2p0 += vi2x1 * vk01;
        vo0p0 += vi1x1 * vk11;
        vo1p0 += vi2x1 * vk11;
        vo2p0 += vi3x1 * vk11;
        vo0p0 += vi2x1 * vk21;
        vo1p0 += vi3x1 * vk21;
        vo2p0 += vi4x1 * vk21;
        vo0p0 += vi3x1 * vk31;
        vo1p0 += vi4x1 * vk31;
        vo2p0 += vi5x1 * vk31;
        vo0p0 += vi4x1 * vk41;
        vo1p0 += vi5x1 * vk41;
        vo2p0 += vi6x1 * vk41;

        vi0x1 = vi0x2;
        vi1x1 = vi1x2;
        vi2x1 = vi2x2;
        vi3x1 = vi3x2;
        vi4x1 = vi4x2;
        vi5x1 = vi5x2;
        vi6x1 = vi6x2;

        vo0p0 += vi0x2 * vk02;
        vo1p0 += vi1x2 * vk02;
        vo2p0 += vi2x2 * vk02;
        vo0p0 += vi1x2 * vk12;
        vo1p0 += vi2x2 * vk12;
        vo2p0 += vi3x2 * vk12;
        vo0p0 += vi2x2 * vk22;
        vo1p0 += vi3x2 * vk22;
        vo2p0 += vi4x2 * vk22;
        vo0p0 += vi3x2 * vk32;
        vo1p0 += vi4x2 * vk32;
        vo2p0 += vi5x2 * vk32;
        vo0p0 += vi4x2 * vk42;
        vo1p0 += vi5x2 * vk42;
        vo2p0 += vi6x2 * vk42;

        vi0x2 = vi0x3;
        vi1x2 = vi1x3;
        vi2x2 = vi2x3;
        vi3x2 = vi3x3;
        vi4x2 = vi4x3;
        vi5x2 = vi5x3;
        vi6x2 = vi6x3;

        vo0p0 += vi0x3 * vk03;
        vo1p0 += vi1x3 * vk03;
        vo2p0 += vi2x3 * vk03;
        vo0p0 += vi1x3 * vk13;
        vo1p0 += vi2x3 * vk13;
        vo2p0 += vi3x3 * vk13;
        vo0p0 += vi2x3 * vk23;
        vo1p0 += vi3x3 * vk23;
        vo2p0 += vi4x3 * vk23;
        vo0p0 += vi3x3 * vk33;
        vo1p0 += vi4x3 * vk33;
        vo2p0 += vi5x3 * vk33;
        vo0p0 += vi4x3 * vk43;
        vo1p0 += vi5x3 * vk43;
        vo2p0 += vi6x3 * vk43;

        float vo0 = math_max_f32(vo0p0, vmin);
        float vo1 = math_max_f32(vo1p0, vmin);
        float vo2 = math_max_f32(vo2p0, vmin);

        vo0 = math_min_f32(vo0, vmax);
        vo1 = math_min_f32(vo1, vmax);
        vo2 = math_min_f32(vo2, vmax);

        *o2++ = vo2;
        *o1++ = vo1;
        *o0++ = vo0;
      }
      w -= 1 * sizeof(float);
    }
    assert(w == 1 * sizeof(float));
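    // Last column: window columns 3 and 4 fall in the right padding, so only
    // the vk*0..vk*2 taps contribute.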
    {
      float vo0p0 = vbias + vi0x0 * vk00;
      float vo1p0 = vbias + vi1x0 * vk00;
      float vo2p0 = vbias + vi2x0 * vk00;
      vo0p0 += vi1x0 * vk10;
      vo1p0 += vi2x0 * vk10;
      vo2p0 += vi3x0 * vk10;
      vo0p0 += vi2x0 * vk20;
      vo1p0 += vi3x0 * vk20;
      vo2p0 += vi4x0 * vk20;
      vo0p0 += vi3x0 * vk30;
      vo1p0 += vi4x0 * vk30;
      vo2p0 += vi5x0 * vk30;
      vo0p0 += vi4x0 * vk40;
      vo1p0 += vi5x0 * vk40;
      vo2p0 += vi6x0 * vk40;

      vo0p0 += vi0x1 * vk01;
      vo1p0 += vi1x1 * vk01;
      vo2p0 += vi2x1 * vk01;
      vo0p0 += vi1x1 * vk11;
      vo1p0 += vi2x1 * vk11;
      vo2p0 += vi3x1 * vk11;
      vo0p0 += vi2x1 * vk21;
      vo1p0 += vi3x1 * vk21;
      vo2p0 += vi4x1 * vk21;
      vo0p0 += vi3x1 * vk31;
      vo1p0 += vi4x1 * vk31;
      vo2p0 += vi5x1 * vk31;
      vo0p0 += vi4x1 * vk41;
      vo1p0 += vi5x1 * vk41;
      vo2p0 += vi6x1 * vk41;

      vo0p0 += vi0x2 * vk02;
      vo1p0 += vi1x2 * vk02;
      vo2p0 += vi2x2 * vk02;
      vo0p0 += vi1x2 * vk12;
      vo1p0 += vi2x2 * vk12;
      vo2p0 += vi3x2 * vk12;
      vo0p0 += vi2x2 * vk22;
      vo1p0 += vi3x2 * vk22;
      vo2p0 += vi4x2 * vk22;
      vo0p0 += vi3x2 * vk32;
      vo1p0 += vi4x2 * vk32;
      vo2p0 += vi5x2 * vk32;
      vo0p0 += vi4x2 * vk42;
      vo1p0 += vi5x2 * vk42;
      vo2p0 += vi6x2 * vk42;

      float vo0 = math_max_f32(vo0p0, vmin);
      float vo1 = math_max_f32(vo1p0, vmin);
      float vo2 = math_max_f32(vo2p0, vmin);

      vo0 = math_min_f32(vo0, vmax);
      vo1 = math_min_f32(vo1, vmax);
      vo2 = math_min_f32(vo2, vmax);

      *o2++ = vo2;
      *o1++ = vo1;
      *o0++ = vo0;
    }

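    // Advance the row pointers by 3 input/output rows for the next block; the
    // new top-of-window rows (i0, i1) are recomputed from i3 and i4, which now
    // point one row past where they started this pass.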
    i0 = (const float*) ((uintptr_t) i3 - input_width);
    i1 = (const float*) ((uintptr_t) i4 - input_width);
    i2 = i4;
    i3 = i5;
    i4 = i6;
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);

    o0 = o2;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);

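    // doz() is a saturating "difference or zero" subtraction, so the count of
    // remaining rows never wraps below zero.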
    output_height = doz(output_height, 3);
  } while (output_height != 0);
}