// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_f32_dwconv_minmax_ukernel_up16x4__neon_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
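  // Each iteration of the outer loop produces one output pixel: gather the four
  // input row pointers for this pixel, accumulate the depthwise products per
  // channel, clamp to [min, max], and store `channels` outputs. Rows that point
  // at the `zero` padding buffer are used as-is and are not offset.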
  do {
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }

    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
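    // Main loop: 16 channels per iteration. Weights are packed per group of
    // 16 channels as 16 bias values followed by 16 values for each of the
    // 4 kernel taps, so `w` is consumed sequentially here.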
    for (; c >= 16; c -= 16) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;
      float32x4_t vacc4567p0 = vld1q_f32(w); w += 4;
      float32x4_t vacc89ABp0 = vld1q_f32(w); w += 4;
      float32x4_t vaccCDEFp0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0x89AB = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0xCDEF = vld1q_f32(i0); i0 += 4;
      const float32x4_t vk0x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk0x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk0x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk0xCDEF = vld1q_f32(w); w += 4;
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);
      vacc4567p0 = vmlaq_f32(vacc4567p0, vi0x4567, vk0x4567);
      vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi0x89AB, vk0x89AB);
      vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi0xCDEF, vk0xCDEF);

      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1x89AB = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1xCDEF = vld1q_f32(i1); i1 += 4;
      const float32x4_t vk1x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk1x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk1x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk1xCDEF = vld1q_f32(w); w += 4;
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);
      float32x4_t vacc4567p1 = vmulq_f32(vi1x4567, vk1x4567);
      float32x4_t vacc89ABp1 = vmulq_f32(vi1x89AB, vk1x89AB);
      float32x4_t vaccCDEFp1 = vmulq_f32(vi1xCDEF, vk1xCDEF);

      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi2x4567 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi2xCDEF = vld1q_f32(i2); i2 += 4;
      const float32x4_t vk2x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk2x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk2x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk2xCDEF = vld1q_f32(w); w += 4;
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);
      vacc4567p0 = vmlaq_f32(vacc4567p0, vi2x4567, vk2x4567);
      vacc89ABp0 = vmlaq_f32(vacc89ABp0, vi2x89AB, vk2x89AB);
      vaccCDEFp0 = vmlaq_f32(vaccCDEFp0, vi2xCDEF, vk2xCDEF);

      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi3x4567 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi3x89AB = vld1q_f32(i3); i3 += 4;
      const float32x4_t vi3xCDEF = vld1q_f32(i3); i3 += 4;
      const float32x4_t vk3x0123 = vld1q_f32(w); w += 4;
      const float32x4_t vk3x4567 = vld1q_f32(w); w += 4;
      const float32x4_t vk3x89AB = vld1q_f32(w); w += 4;
      const float32x4_t vk3xCDEF = vld1q_f32(w); w += 4;
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);
      vacc4567p1 = vmlaq_f32(vacc4567p1, vi3x4567, vk3x4567);
      vacc89ABp1 = vmlaq_f32(vacc89ABp1, vi3x89AB, vk3x89AB);
      vaccCDEFp1 = vmlaq_f32(vaccCDEFp1, vi3xCDEF, vk3xCDEF);

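      // acc2: taps 0/2 accumulate into the p0 registers and taps 1/3 into the
      // p1 registers, shortening the dependency chain on each accumulator
      // before the two partial sums are combined below.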
      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);
      vacc4567p0 = vaddq_f32(vacc4567p0, vacc4567p1);
      vacc89ABp0 = vaddq_f32(vacc89ABp0, vacc89ABp1);
      vaccCDEFp0 = vaddq_f32(vaccCDEFp0, vaccCDEFp1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      float32x4_t vacc4567 = vmaxq_f32(vacc4567p0, vmin);
      float32x4_t vacc89AB = vmaxq_f32(vacc89ABp0, vmin);
      float32x4_t vaccCDEF = vmaxq_f32(vaccCDEFp0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);
      vacc4567 = vminq_f32(vacc4567, vmax);
      vacc89AB = vminq_f32(vacc89AB, vmax);
      vaccCDEF = vminq_f32(vaccCDEF, vmax);

      vst1q_f32(output, vacc0123); output += 4;
      vst1q_f32(output, vacc4567); output += 4;
      vst1q_f32(output, vacc89AB); output += 4;
      vst1q_f32(output, vaccCDEF); output += 4;
    }
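    // Remainder loop: process 4..15 leftover channels a vector at a time. Only
    // the bias part of `w` is consumed (w += 4 per iteration); the per-tap
    // weights for the current 4 channels remain at their packed positions in
    // the 16-channel group, hence the fixed offsets w + 12/28/44/60.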
    for (; c >= 4; c -= 4) {
      float32x4_t vacc0123p0 = vld1q_f32(w); w += 4;


      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vk0x0123 = vld1q_f32(w + 12);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vk1x0123 = vld1q_f32(w + 28);
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vk2x0123 = vld1q_f32(w + 44);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vk3x0123 = vld1q_f32(w + 60);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

      vst1q_f32(output, vacc0123); output += 4;
    }
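    // Tail: 1..3 channels remain. The vector loads still read a full group of 4
    // lanes (the kernel is annotated XNN_OOB_READS for this reason), but only
    // the valid lanes are written out via the partial stores below.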
    if XNN_UNLIKELY(c != 0) {
      float32x4_t vacc0123p0 = vld1q_f32(w);


      const float32x4_t vi0x0123 = vld1q_f32(i0);
      const float32x4_t vk0x0123 = vld1q_f32(w + 16);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi0x0123, vk0x0123);

      const float32x4_t vi1x0123 = vld1q_f32(i1);
      const float32x4_t vk1x0123 = vld1q_f32(w + 32);
      float32x4_t vacc0123p1 = vmulq_f32(vi1x0123, vk1x0123);

      const float32x4_t vi2x0123 = vld1q_f32(i2);
      const float32x4_t vk2x0123 = vld1q_f32(w + 48);
      vacc0123p0 = vmlaq_f32(vacc0123p0, vi2x0123, vk2x0123);

      const float32x4_t vi3x0123 = vld1q_f32(i3);
      const float32x4_t vk3x0123 = vld1q_f32(w + 64);
      vacc0123p1 = vmlaq_f32(vacc0123p1, vi3x0123, vk3x0123);

      // Add up all accumulators to vacc0123p0
      vacc0123p0 = vaddq_f32(vacc0123p0, vacc0123p1);

      float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin);
      vacc0123 = vminq_f32(vacc0123, vmax);

      float32x2_t vacc01 = vget_low_f32(vacc0123);
      if (c & 2) {
        vst1_f32(output, vacc01); output += 2;
        vacc01 = vget_high_f32(vacc0123);
      }
      if (c & 1) {
        vst1_lane_f32(output, vacc01, 0); output += 1;
      }
    }

    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}