// Auto-generated file. Do not edit!
//   Template: src/f16-ibilinear-chw/neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/ibilinear.h>


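// Bilinear interpolation for CHW-layout half-precision data using NEON FP16
// arithmetic. Each output pixel is described by two row pointers in `input`
// (top row and bottom row of its 2x2 neighborhood) and an (alpha_h, alpha_v)
// weight pair in `weights`; the main loop below produces 16 output pixels per
// iteration.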
void xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p16(
    size_t output_pixels,
    size_t channels,
    const void** restrict input,
    size_t input_offset,
    const void* restrict weights,
    void* restrict output,
    size_t input_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);
  assert(input_increment % sizeof(__fp16) == 0);

  __fp16* o = (__fp16*) output;
  do {
    const __fp16** i = (const __fp16**) input;
    const __fp16* w = weights;
    size_t p = output_pixels;

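    // Main loop: 16 output pixels per iteration. Each pixel contributes two
    // row pointers (top and bottom of its 2x2 neighborhood), adjusted by the
    // current per-channel byte offset.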
    for (; p >= 16; p -= 16) {
      const __fp16* itl0 = (const __fp16*) ((uintptr_t) i[0] + input_offset);
      const __fp16* ibl0 = (const __fp16*) ((uintptr_t) i[1] + input_offset);
      const __fp16* itl1 = (const __fp16*) ((uintptr_t) i[2] + input_offset);
      const __fp16* ibl1 = (const __fp16*) ((uintptr_t) i[3] + input_offset);
      const __fp16* itl2 = (const __fp16*) ((uintptr_t) i[4] + input_offset);
      const __fp16* ibl2 = (const __fp16*) ((uintptr_t) i[5] + input_offset);
      const __fp16* itl3 = (const __fp16*) ((uintptr_t) i[6] + input_offset);
      const __fp16* ibl3 = (const __fp16*) ((uintptr_t) i[7] + input_offset);
      const __fp16* itl4 = (const __fp16*) ((uintptr_t) i[8] + input_offset);
      const __fp16* ibl4 = (const __fp16*) ((uintptr_t) i[9] + input_offset);
      const __fp16* itl5 = (const __fp16*) ((uintptr_t) i[10] + input_offset);
      const __fp16* ibl5 = (const __fp16*) ((uintptr_t) i[11] + input_offset);
      const __fp16* itl6 = (const __fp16*) ((uintptr_t) i[12] + input_offset);
      const __fp16* ibl6 = (const __fp16*) ((uintptr_t) i[13] + input_offset);
      const __fp16* itl7 = (const __fp16*) ((uintptr_t) i[14] + input_offset);
      const __fp16* ibl7 = (const __fp16*) ((uintptr_t) i[15] + input_offset);
      const __fp16* itl8 = (const __fp16*) ((uintptr_t) i[16] + input_offset);
      const __fp16* ibl8 = (const __fp16*) ((uintptr_t) i[17] + input_offset);
      const __fp16* itl9 = (const __fp16*) ((uintptr_t) i[18] + input_offset);
      const __fp16* ibl9 = (const __fp16*) ((uintptr_t) i[19] + input_offset);
      const __fp16* itlA = (const __fp16*) ((uintptr_t) i[20] + input_offset);
      const __fp16* iblA = (const __fp16*) ((uintptr_t) i[21] + input_offset);
      const __fp16* itlB = (const __fp16*) ((uintptr_t) i[22] + input_offset);
      const __fp16* iblB = (const __fp16*) ((uintptr_t) i[23] + input_offset);
      const __fp16* itlC = (const __fp16*) ((uintptr_t) i[24] + input_offset);
      const __fp16* iblC = (const __fp16*) ((uintptr_t) i[25] + input_offset);
      const __fp16* itlD = (const __fp16*) ((uintptr_t) i[26] + input_offset);
      const __fp16* iblD = (const __fp16*) ((uintptr_t) i[27] + input_offset);
      const __fp16* itlE = (const __fp16*) ((uintptr_t) i[28] + input_offset);
      const __fp16* iblE = (const __fp16*) ((uintptr_t) i[29] + input_offset);
      const __fp16* itlF = (const __fp16*) ((uintptr_t) i[30] + input_offset);
      const __fp16* iblF = (const __fp16*) ((uintptr_t) i[31] + input_offset);
      i += 2 * 16;

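      // Weights are stored as interleaved (alpha_h, alpha_v) pairs, one pair
      // per output pixel; vld2_f16 de-interleaves four pairs at a time.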
      const float16x4x2_t vw0123 = vld2_f16(w + 0);
      const float16x4x2_t vw4567 = vld2_f16(w + 8);
      const float16x4x2_t vw89AB = vld2_f16(w + 16);
      const float16x4x2_t vwCDEF = vld2_f16(w + 24);
      w += 2 * 16;

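      // Each vld1q_lane_u32 pulls in one adjacent pair of __fp16 values as a
      // single 32-bit lane, so vtltr* holds interleaved {top-left, top-right}
      // values and vblbr* holds interleaved {bottom-left, bottom-right} values.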
      float16x8_t vtltr0123 = vmovq_n_f16(0);  // vmov for uninitialized var warning
      float16x8_t vblbr0123 = vmovq_n_f16(0);
      vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl0, vreinterpretq_u32_f16(vtltr0123), 0));
      vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl0, vreinterpretq_u32_f16(vblbr0123), 0));
      vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl1, vreinterpretq_u32_f16(vtltr0123), 1));
      vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl1, vreinterpretq_u32_f16(vblbr0123), 1));
      vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl2, vreinterpretq_u32_f16(vtltr0123), 2));
      vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl2, vreinterpretq_u32_f16(vblbr0123), 2));
      vtltr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl3, vreinterpretq_u32_f16(vtltr0123), 3));
      vblbr0123 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl3, vreinterpretq_u32_f16(vblbr0123), 3));
      float16x8_t vtltr4567 = vmovq_n_f16(0);  // vmov for uninitialized var warning
      float16x8_t vblbr4567 = vmovq_n_f16(0);
      vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl4, vreinterpretq_u32_f16(vtltr4567), 0));
      vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl4, vreinterpretq_u32_f16(vblbr4567), 0));
      vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl5, vreinterpretq_u32_f16(vtltr4567), 1));
      vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl5, vreinterpretq_u32_f16(vblbr4567), 1));
      vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl6, vreinterpretq_u32_f16(vtltr4567), 2));
      vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl6, vreinterpretq_u32_f16(vblbr4567), 2));
      vtltr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl7, vreinterpretq_u32_f16(vtltr4567), 3));
      vblbr4567 = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl7, vreinterpretq_u32_f16(vblbr4567), 3));
      float16x8_t vtltr89AB = vmovq_n_f16(0);  // vmov for uninitialized var warning
      float16x8_t vblbr89AB = vmovq_n_f16(0);
      vtltr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl8, vreinterpretq_u32_f16(vtltr89AB), 0));
      vblbr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl8, vreinterpretq_u32_f16(vblbr89AB), 0));
      vtltr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl9, vreinterpretq_u32_f16(vtltr89AB), 1));
      vblbr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl9, vreinterpretq_u32_f16(vblbr89AB), 1));
      vtltr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlA, vreinterpretq_u32_f16(vtltr89AB), 2));
      vblbr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblA, vreinterpretq_u32_f16(vblbr89AB), 2));
      vtltr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlB, vreinterpretq_u32_f16(vtltr89AB), 3));
      vblbr89AB = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblB, vreinterpretq_u32_f16(vblbr89AB), 3));
      float16x8_t vtltrCDEF = vmovq_n_f16(0);  // vmov for uninitialized var warning
      float16x8_t vblbrCDEF = vmovq_n_f16(0);
      vtltrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlC, vreinterpretq_u32_f16(vtltrCDEF), 0));
      vblbrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblC, vreinterpretq_u32_f16(vblbrCDEF), 0));
      vtltrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlD, vreinterpretq_u32_f16(vtltrCDEF), 1));
      vblbrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblD, vreinterpretq_u32_f16(vblbrCDEF), 1));
      vtltrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlE, vreinterpretq_u32_f16(vtltrCDEF), 2));
      vblbrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblE, vreinterpretq_u32_f16(vblbrCDEF), 2));
      vtltrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itlF, vreinterpretq_u32_f16(vtltrCDEF), 3));
      vblbrCDEF = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) iblF, vreinterpretq_u32_f16(vblbrCDEF), 3));

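      // Assemble the per-pixel weights: alpha_h selects between the left and
      // right columns, alpha_v between the top and bottom rows.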
      const float16x8_t valphah01234567 = vcombine_f16(vw0123.val[0], vw4567.val[0]);
      const float16x8_t valphav01234567 = vcombine_f16(vw0123.val[1], vw4567.val[1]);
      const float16x8_t valphah89ABCDEF = vcombine_f16(vw89AB.val[0], vwCDEF.val[0]);
      const float16x8_t valphav89ABCDEF = vcombine_f16(vw89AB.val[1], vwCDEF.val[1]);

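      // Vertical deltas: (bottom - top) for the interleaved left/right columns.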
      const float16x8_t vldrd0123 = vsubq_f16(vblbr0123, vtltr0123);
      const float16x8_t vldrd4567 = vsubq_f16(vblbr4567, vtltr4567);
      const float16x8_t vldrd89AB = vsubq_f16(vblbr89AB, vtltr89AB);
      const float16x8_t vldrdCDEF = vsubq_f16(vblbrCDEF, vtltrCDEF);

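      // De-interleave: even lanes hold the left-column values/deltas, odd lanes
      // the right-column ones.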
      const float16x8x2_t vld_t01234567 = vuzpq_f16(vldrd0123, vldrd4567);
      const float16x8_t vld01234567 = vld_t01234567.val[0];
      const float16x8_t vrd01234567 = vld_t01234567.val[1];
      const float16x8x2_t vld_t89ABCDEF = vuzpq_f16(vldrd89AB, vldrdCDEF);
      const float16x8_t vld89ABCDEF = vld_t89ABCDEF.val[0];
      const float16x8_t vrd89ABCDEF = vld_t89ABCDEF.val[1];

      const float16x8x2_t vtl_t01234567 = vuzpq_f16(vtltr0123, vtltr4567);
      const float16x8_t vtl01234567 = vtl_t01234567.val[0];
      const float16x8_t vtr01234567 = vtl_t01234567.val[1];
      const float16x8x2_t vtl_t89ABCDEF = vuzpq_f16(vtltr89AB, vtltrCDEF);
      const float16x8_t vtl89ABCDEF = vtl_t89ABCDEF.val[0];
      const float16x8_t vtr89ABCDEF = vtl_t89ABCDEF.val[1];

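      // Vertical interpolation: left = top_left + alpha_v * (bottom_left - top_left),
      // and likewise for the right column.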
      const float16x8_t vl01234567 = vfmaq_f16(vtl01234567, vld01234567, valphav01234567);
      const float16x8_t vr01234567 = vfmaq_f16(vtr01234567, vrd01234567, valphav01234567);
      const float16x8_t vl89ABCDEF = vfmaq_f16(vtl89ABCDEF, vld89ABCDEF, valphav89ABCDEF);
      const float16x8_t vr89ABCDEF = vfmaq_f16(vtr89ABCDEF, vrd89ABCDEF, valphav89ABCDEF);

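      // Horizontal interpolation: result = left + alpha_h * (right - left).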
      const float16x8_t vd01234567 = vsubq_f16(vr01234567, vl01234567);
      const float16x8_t vd89ABCDEF = vsubq_f16(vr89ABCDEF, vl89ABCDEF);
      const float16x8_t vo01234567 = vfmaq_f16(vl01234567, vd01234567, valphah01234567);
      const float16x8_t vo89ABCDEF = vfmaq_f16(vl89ABCDEF, vd89ABCDEF, valphah89ABCDEF);

      vst1q_f16(o + 0, vo01234567);
      vst1q_f16(o + 8, vo89ABCDEF);
      o += 16;
    }

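    // Tail loop: the same computation for the remaining pixels, 4 at a time.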
    for (; p >= 4; p -= 4) {
      const __fp16* itl0 = (const __fp16*) ((uintptr_t) i[0] + input_offset);
      const __fp16* ibl0 = (const __fp16*) ((uintptr_t) i[1] + input_offset);
      const __fp16* itl1 = (const __fp16*) ((uintptr_t) i[2] + input_offset);
      const __fp16* ibl1 = (const __fp16*) ((uintptr_t) i[3] + input_offset);
      const __fp16* itl2 = (const __fp16*) ((uintptr_t) i[4] + input_offset);
      const __fp16* ibl2 = (const __fp16*) ((uintptr_t) i[5] + input_offset);
      const __fp16* itl3 = (const __fp16*) ((uintptr_t) i[6] + input_offset);
      const __fp16* ibl3 = (const __fp16*) ((uintptr_t) i[7] + input_offset);
      i += 8;

      const float16x4x2_t vw = vld2_f16(w);
      w += 8;

      float16x8_t vtltr = vmovq_n_f16(0);  // vmov for uninitialized var warning
      float16x8_t vblbr = vmovq_n_f16(0);
      vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl0, vreinterpretq_u32_f16(vtltr), 0));
      vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl0, vreinterpretq_u32_f16(vblbr), 0));
      vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl1, vreinterpretq_u32_f16(vtltr), 1));
      vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl1, vreinterpretq_u32_f16(vblbr), 1));
      vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl2, vreinterpretq_u32_f16(vtltr), 2));
      vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl2, vreinterpretq_u32_f16(vblbr), 2));
      vtltr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) itl3, vreinterpretq_u32_f16(vtltr), 3));
      vblbr = vreinterpretq_f16_u32(vld1q_lane_u32((const void*) ibl3, vreinterpretq_u32_f16(vblbr), 3));

      const float16x4_t valphah = vw.val[0];
      const float16x4_t valphav = vw.val[1];

      const float16x8_t vldrd = vsubq_f16(vblbr, vtltr);

      const float16x4x2_t vld_t = vuzp_f16(vget_low_f16(vldrd), vget_high_f16(vldrd));
      const float16x4_t vld = vld_t.val[0];
      const float16x4_t vrd = vld_t.val[1];

      const float16x4x2_t vtl_t = vuzp_f16(vget_low_f16(vtltr), vget_high_f16(vtltr));
      const float16x4_t vtl = vtl_t.val[0];
      const float16x4_t vtr = vtl_t.val[1];

      const float16x4_t vl = vfma_f16(vtl, vld, valphav);
      const float16x4_t vr = vfma_f16(vtr, vrd, valphav);

      const float16x4_t vd = vsub_f16(vr, vl);
      const float16x4_t vo = vfma_f16(vl, vd, valphah);

      vst1_f16(o, vo);
      o += 4;
    }

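    // Handle the last 1-3 pixels: 2 at a time, then a final scalar pixel.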
    if XNN_UNLIKELY(p != 0) {
      if (p & 2) {
        const __fp16* itl0 = (const __fp16*) ((uintptr_t) i[0] + input_offset);
        const __fp16* ibl0 = (const __fp16*) ((uintptr_t) i[1] + input_offset);
        const __fp16* itl1 = (const __fp16*) ((uintptr_t) i[2] + input_offset);
        const __fp16* ibl1 = (const __fp16*) ((uintptr_t) i[3] + input_offset);
        i += 4;

        const float16x4_t vw = vld1_f16(w);
        w += 4;

        const float16x4x2_t vwhv = vuzp_f16(vw, vw);
        const float16x4_t valphah = vwhv.val[0];
        const float16x4_t valphav = vwhv.val[1];

        float16x4_t vtltr = vmov_n_f16(0);  // vmov for uninitialized var warning
        float16x4_t vblbr = vmov_n_f16(0);

        vtltr = vreinterpret_f16_u32(vld1_lane_u32((const void*) itl0, vreinterpret_u32_f16(vtltr), 0));
        vblbr = vreinterpret_f16_u32(vld1_lane_u32((const void*) ibl0, vreinterpret_u32_f16(vblbr), 0));
        vtltr = vreinterpret_f16_u32(vld1_lane_u32((const void*) itl1, vreinterpret_u32_f16(vtltr), 1));
        vblbr = vreinterpret_f16_u32(vld1_lane_u32((const void*) ibl1, vreinterpret_u32_f16(vblbr), 1));

        const float16x4_t vldrd = vsub_f16(vblbr, vtltr);

        const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
        const float16x4_t vld = vld_t.val[0];
        const float16x4_t vrd = vld_t.val[1];

        const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
        const float16x4_t vtl = vtl_t.val[0];
        const float16x4_t vtr = vtl_t.val[1];

        const float16x4_t vl = vfma_f16(vtl, vld, valphav);
        const float16x4_t vr = vfma_f16(vtr, vrd, valphav);

        const float16x4_t vd = vsub_f16(vr, vl);
        const float16x4_t vo = vfma_f16(vl, vd, valphah);

        vst1_lane_u32((void*) o, vreinterpret_u32_f16(vo), 0);
        o += 2;
      }

      if (p & 1) {
        // We are computing the following formula:
        //   result = (1 - alpha_h) * (1 - alpha_v) * top_left +
        //                 alpha_h  * (1 - alpha_v) * top_right +
        //            (1 - alpha_h) *      alpha_v  * bottom_left +
        //                 alpha_h  *      alpha_v  * bottom_right.
        //
        // Rearranging gives
        //   result = left + alpha_h * (right - left),
        // where
        //   left  = top_left  + alpha_v * (bottom_left  - top_left),
        //   right = top_right + alpha_v * (bottom_right - top_right).
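        //
        // Illustrative example (not taken from the kernel): with alpha_h = 0.25,
        // alpha_v = 0.5 and corners top_left = 0, top_right = 4, bottom_left = 8,
        // bottom_right = 12:
        //   left   = 0 + 0.5  * (8 - 0)  = 4
        //   right  = 4 + 0.5  * (12 - 4) = 8
        //   result = 4 + 0.25 * (8 - 4)  = 5,
        // which matches the direct four-term formula.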

        const __fp16* itl = (const __fp16*) ((uintptr_t) i[0] + input_offset);
        const __fp16* ibl = (const __fp16*) ((uintptr_t) i[1] + input_offset);
        i += 2;

        float16x4_t vw = vmov_n_f16(0);
        vw = vreinterpret_f16_u32(vld1_lane_u32((const void*) w, vreinterpret_u32_f16(vw), 0));
        w += 2;

        const float16x4x2_t vwhv = vuzp_f16(vw, vw);
        const float16x4_t valphah = vwhv.val[0];
        const float16x4_t valphav = vwhv.val[1];

        float16x4_t vtltr = vmov_n_f16(0);  // vmov for uninitialized var warning
        float16x4_t vblbr = vmov_n_f16(0);

        vtltr = vreinterpret_f16_u32(vld1_lane_u32((const void*) itl, vreinterpret_u32_f16(vtltr), 0));
        vblbr = vreinterpret_f16_u32(vld1_lane_u32((const void*) ibl, vreinterpret_u32_f16(vblbr), 0));

        const float16x4_t vldrd = vsub_f16(vblbr, vtltr);

        const float16x4x2_t vld_t = vuzp_f16(vldrd, vldrd);
        const float16x4_t vld = vld_t.val[0];
        const float16x4_t vrd = vld_t.val[1];

        const float16x4x2_t vtl_t = vuzp_f16(vtltr, vtltr);
        const float16x4_t vtl = vtl_t.val[0];
        const float16x4_t vtr = vtl_t.val[1];

        const float16x4_t vl = vfma_f16(vtl, vld, valphav);
        const float16x4_t vr = vfma_f16(vtr, vrd, valphav);

        const float16x4_t vd = vsub_f16(vr, vl);
        const float16x4_t vo = vfma_f16(vl, vd, valphah);

        vst1_lane_f16(o, vo, 0);
        o += 1;
      }
    }

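    // Advance to the next channel: the same row pointers are reused with a
    // larger byte offset into each row.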
    input_offset += input_increment;
  } while (--channels != 0);
}