// Auto-generated file. Do not edit!
//   Template: src/f16-dwconv2d-chw/3x3p1-neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f16_dwconv2d_chw_ukernel_3x3p1__neonfp16arith_4x8(
    size_t input_height,
    size_t input_width,
    const void* input,
    const void* weights,
    const void* zero,
    void* output,
    uint32_t padding_top,
    const union xnn_f16_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(__fp16) == 0);
  assert(padding_top == 1);

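  // Load the channelwise parameters: an 8-lane mask that zeroes lanes past the
  // end of a row in the final partial block, and the output clamping bounds.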
  const uint16x8_t vmask = vld1q_u16(params->neonfp16arith.maskx8);
  const float16x8_t vmax = vld1q_dup_f16(&params->neonfp16arith.max);
  const float16x8_t vmin = vld1q_dup_f16(&params->neonfp16arith.min);

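  // Weight layout: w[0] is the bias, w[1..9] are the 3x3 kernel taps in
  // row-major order. vw01234567 holds the bias and taps w1..w7; vw89 holds
  // taps w8..w9 in lanes 0 and 1.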
  const __fp16* w0 = (const __fp16*) weights;
  const float16x8_t vw01234567 = vld1q_f16(w0);
  const float16x4_t vw89 = vreinterpret_f16_u32(vld1_lane_u32((const void*) (w0 + 8), vmov_n_u32(0), 0));

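  // Each row is consumed in blocks of 8 pixels, so row pointers overshoot the
  // row end to the next multiple of 8; input_decrement undoes that overshoot
  // when re-basing the pointers for the next group of output rows.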
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(__fp16));

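  // A 3x3 kernel producing 4 output rows reads 6 input rows. i0 starts at the
  // zero row to implement the single row of top padding.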
  const __fp16* i0 = zero;
  const __fp16* i1 = input;
  const __fp16* i2 = (const __fp16*) ((uintptr_t) i1 + input_width);
  const __fp16* i3 = (const __fp16*) ((uintptr_t) i2 + input_width);
  const __fp16* i4 = (const __fp16*) ((uintptr_t) i3 + input_width);
  const __fp16* i5 = (const __fp16*) ((uintptr_t) i4 + input_width);

  __fp16* o0 = output;
  __fp16* o1 = (__fp16*) ((uintptr_t) o0 + input_width);
  __fp16* o2 = (__fp16*) ((uintptr_t) o1 + input_width);
  __fp16* o3 = (__fp16*) ((uintptr_t) o2 + input_width);

  size_t output_height = input_height;
  do {
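    // When fewer than 4 output rows remain, redirect surplus input rows to the
    // zero row and alias surplus output rows onto a valid one, so the loop
    // body stays uniform.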
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i3 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i4 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i5 = zero;
    }

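    // vi*x01234567 holds the previous 8 pixels of each row; starting it at
    // zero supplies the single pixel of left padding for the first block.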
    float16x8_t vi0x01234567 = vmovq_n_f16(0);
    float16x8_t vi1x01234567 = vmovq_n_f16(0);
    float16x8_t vi2x01234567 = vmovq_n_f16(0);
    float16x8_t vi3x01234567 = vmovq_n_f16(0);
    float16x8_t vi4x01234567 = vmovq_n_f16(0);
    float16x8_t vi5x01234567 = vmovq_n_f16(0);

    // Preload the first 8 pixels of each input row.
    float16x8_t vi0x89ABCDEF = vld1q_f16(i0); i0 += 8;
    float16x8_t vi1x89ABCDEF = vld1q_f16(i1); i1 += 8;
    float16x8_t vi2x89ABCDEF = vld1q_f16(i2); i2 += 8;
    float16x8_t vi3x89ABCDEF = vld1q_f16(i3); i3 += 8;
    float16x8_t vi4x89ABCDEF = vld1q_f16(i4); i4 += 8;
    float16x8_t vi5x89ABCDEF = vld1q_f16(i5); i5 += 8;

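    // Main loop: compute 8 output pixels per row per iteration, leaving the
    // final block of 1..8 pixels for the masked tail below.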
    size_t w = input_width;
    for (; w > 8 * sizeof(__fp16); w -= 8 * sizeof(__fp16)) {
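      // Seed the four row accumulators with the bias (weight lane 0).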
      float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);
      float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);
      float16x8_t vo2p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);
      float16x8_t vo3p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);

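      // Load the next 8 pixels of each row; lane 0 of this block feeds the
      // right-column taps of the current block.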
      const float16x8_t vi0xGHIJKLMN = vld1q_f16(i0); i0 += 8;
      const float16x8_t vi1xGHIJKLMN = vld1q_f16(i1); i1 += 8;
      const float16x8_t vi2xGHIJKLMN = vld1q_f16(i2); i2 += 8;
      const float16x8_t vi3xGHIJKLMN = vld1q_f16(i3); i3 += 8;
      const float16x8_t vi4xGHIJKLMN = vld1q_f16(i4); i4 += 8;
      const float16x8_t vi5xGHIJKLMN = vld1q_f16(i5); i5 += 8;

      // Center column: taps w2 (top), w5 (middle), w8 (bottom) applied to the
      // current 8 pixels of the three input rows feeding each output row.
      vo0p0 = vfmaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi2x89ABCDEF, vget_low_f16(vw01234567), 2);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi3x89ABCDEF, vget_low_f16(vw01234567), 2);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi2x89ABCDEF, vget_high_f16(vw01234567), 1);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi3x89ABCDEF, vget_high_f16(vw01234567), 1);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi4x89ABCDEF, vget_high_f16(vw01234567), 1);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi4x89ABCDEF, vw89, 0);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi5x89ABCDEF, vw89, 0);

      // Left column: vextq shifts each row right by one pixel (the last pixel
      // of the previous block enters at lane 0), pairing pixel x-1 with taps
      // w1 (top), w4 (middle), w7 (bottom).
      const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7);
      const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7);
      const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7);
      const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7);
      const float16x8_t vi4x789ABCDE = vextq_f16(vi4x01234567, vi4x89ABCDEF, 7);
      const float16x8_t vi5x789ABCDE = vextq_f16(vi5x01234567, vi5x89ABCDEF, 7);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi1x789ABCDE, vget_low_f16(vw01234567), 1);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi2x789ABCDE, vget_low_f16(vw01234567), 1);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi3x789ABCDE, vget_low_f16(vw01234567), 1);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi3x789ABCDE, vget_high_f16(vw01234567), 0);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi4x789ABCDE, vget_high_f16(vw01234567), 0);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi3x789ABCDE, vget_high_f16(vw01234567), 3);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi4x789ABCDE, vget_high_f16(vw01234567), 3);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi5x789ABCDE, vget_high_f16(vw01234567), 3);

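      // Slide the window: the current block becomes the previous block.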
      vi0x01234567 = vi0x89ABCDEF;
      vi1x01234567 = vi1x89ABCDEF;
      vi2x01234567 = vi2x89ABCDEF;
      vi3x01234567 = vi3x89ABCDEF;
      vi4x01234567 = vi4x89ABCDEF;
      vi5x01234567 = vi5x89ABCDEF;

      // Right column: shift in the first pixel of the next block, pairing
      // pixel x+1 with taps w3 (top), w6 (middle), w9 (bottom).
      const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vi0xGHIJKLMN, 1);
      const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vi1xGHIJKLMN, 1);
      const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vi2xGHIJKLMN, 1);
      const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vi3xGHIJKLMN, 1);
      const float16x8_t vi4x9ABCDEFG = vextq_f16(vi4x89ABCDEF, vi4xGHIJKLMN, 1);
      const float16x8_t vi5x9ABCDEFG = vextq_f16(vi5x89ABCDEF, vi5xGHIJKLMN, 1);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi2x9ABCDEFG, vget_low_f16(vw01234567), 3);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi3x9ABCDEFG, vget_low_f16(vw01234567), 3);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi3x9ABCDEFG, vget_high_f16(vw01234567), 2);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi4x9ABCDEFG, vget_high_f16(vw01234567), 2);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi4x9ABCDEFG, vw89, 1);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi5x9ABCDEFG, vw89, 1);

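      // The preloaded next block becomes the current block.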
      vi0x89ABCDEF = vi0xGHIJKLMN;
      vi1x89ABCDEF = vi1xGHIJKLMN;
      vi2x89ABCDEF = vi2xGHIJKLMN;
      vi3x89ABCDEF = vi3xGHIJKLMN;
      vi4x89ABCDEF = vi4xGHIJKLMN;
      vi5x89ABCDEF = vi5xGHIJKLMN;

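      // Clamp the accumulators to the [min, max] output range.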
      float16x8_t vo0 = vmaxq_f16(vo0p0, vmin);
      float16x8_t vo1 = vmaxq_f16(vo1p0, vmin);
      float16x8_t vo2 = vmaxq_f16(vo2p0, vmin);
      float16x8_t vo3 = vmaxq_f16(vo3p0, vmin);

      vo0 = vminq_f16(vo0, vmax);
      vo1 = vminq_f16(vo1, vmax);
      vo2 = vminq_f16(vo2, vmax);
      vo3 = vminq_f16(vo3, vmax);

      vst1q_f16(o3, vo3); o3 += 8;
      vst1q_f16(o2, vo2); o2 += 8;
      vst1q_f16(o1, vo1); o1 += 8;
      vst1q_f16(o0, vo0); o0 += 8;
    }
    // Always process the last block of 1..8 pixels.
    assert(w >= 1 * sizeof(__fp16));
    assert(w <= 8 * sizeof(__fp16));
    {
      float16x8_t vo0p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);
      float16x8_t vo1p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);
      float16x8_t vo2p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);
      float16x8_t vo3p0 = vdupq_lane_f16(vget_low_f16(vw01234567), 0);

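      // Zero the lanes that lie past the end of the row: XNN_OOB_READS permits
      // reading beyond the buffer, and the mask keeps that garbage out of the
      // results.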
      vi0x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi0x89ABCDEF)));
      vi1x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi1x89ABCDEF)));
      vi2x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi2x89ABCDEF)));
      vi3x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi3x89ABCDEF)));
      vi4x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi4x89ABCDEF)));
      vi5x89ABCDEF = vreinterpretq_f16_u16(vandq_u16(vmask, vreinterpretq_u16_f16(vi5x89ABCDEF)));

      // Center column
      vo0p0 = vfmaq_lane_f16(vo0p0, vi0x89ABCDEF, vget_low_f16(vw01234567), 2);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi1x89ABCDEF, vget_low_f16(vw01234567), 2);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi2x89ABCDEF, vget_low_f16(vw01234567), 2);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi3x89ABCDEF, vget_low_f16(vw01234567), 2);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi1x89ABCDEF, vget_high_f16(vw01234567), 1);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi2x89ABCDEF, vget_high_f16(vw01234567), 1);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi3x89ABCDEF, vget_high_f16(vw01234567), 1);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi4x89ABCDEF, vget_high_f16(vw01234567), 1);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi2x89ABCDEF, vw89, 0);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi3x89ABCDEF, vw89, 0);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi4x89ABCDEF, vw89, 0);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi5x89ABCDEF, vw89, 0);

      // Left column
      const float16x8_t vi0x789ABCDE = vextq_f16(vi0x01234567, vi0x89ABCDEF, 7);
      const float16x8_t vi1x789ABCDE = vextq_f16(vi1x01234567, vi1x89ABCDEF, 7);
      const float16x8_t vi2x789ABCDE = vextq_f16(vi2x01234567, vi2x89ABCDEF, 7);
      const float16x8_t vi3x789ABCDE = vextq_f16(vi3x01234567, vi3x89ABCDEF, 7);
      const float16x8_t vi4x789ABCDE = vextq_f16(vi4x01234567, vi4x89ABCDEF, 7);
      const float16x8_t vi5x789ABCDE = vextq_f16(vi5x01234567, vi5x89ABCDEF, 7);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi0x789ABCDE, vget_low_f16(vw01234567), 1);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi1x789ABCDE, vget_low_f16(vw01234567), 1);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi2x789ABCDE, vget_low_f16(vw01234567), 1);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi3x789ABCDE, vget_low_f16(vw01234567), 1);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi1x789ABCDE, vget_high_f16(vw01234567), 0);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi2x789ABCDE, vget_high_f16(vw01234567), 0);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi3x789ABCDE, vget_high_f16(vw01234567), 0);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi4x789ABCDE, vget_high_f16(vw01234567), 0);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi2x789ABCDE, vget_high_f16(vw01234567), 3);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi3x789ABCDE, vget_high_f16(vw01234567), 3);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi4x789ABCDE, vget_high_f16(vw01234567), 3);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi5x789ABCDE, vget_high_f16(vw01234567), 3);

      // Right column: there is no next block past the row end, so shift in
      // zeros, which also supplies the right-edge padding.
      const float16x8_t vzero = vmovq_n_f16(0);
      const float16x8_t vi0x9ABCDEFG = vextq_f16(vi0x89ABCDEF, vzero, 1);
      const float16x8_t vi1x9ABCDEFG = vextq_f16(vi1x89ABCDEF, vzero, 1);
      const float16x8_t vi2x9ABCDEFG = vextq_f16(vi2x89ABCDEF, vzero, 1);
      const float16x8_t vi3x9ABCDEFG = vextq_f16(vi3x89ABCDEF, vzero, 1);
      const float16x8_t vi4x9ABCDEFG = vextq_f16(vi4x89ABCDEF, vzero, 1);
      const float16x8_t vi5x9ABCDEFG = vextq_f16(vi5x89ABCDEF, vzero, 1);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi0x9ABCDEFG, vget_low_f16(vw01234567), 3);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi1x9ABCDEFG, vget_low_f16(vw01234567), 3);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi2x9ABCDEFG, vget_low_f16(vw01234567), 3);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi3x9ABCDEFG, vget_low_f16(vw01234567), 3);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi1x9ABCDEFG, vget_high_f16(vw01234567), 2);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi2x9ABCDEFG, vget_high_f16(vw01234567), 2);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi3x9ABCDEFG, vget_high_f16(vw01234567), 2);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi4x9ABCDEFG, vget_high_f16(vw01234567), 2);

      vo0p0 = vfmaq_lane_f16(vo0p0, vi2x9ABCDEFG, vw89, 1);
      vo1p0 = vfmaq_lane_f16(vo1p0, vi3x9ABCDEFG, vw89, 1);
      vo2p0 = vfmaq_lane_f16(vo2p0, vi4x9ABCDEFG, vw89, 1);
      vo3p0 = vfmaq_lane_f16(vo3p0, vi5x9ABCDEFG, vw89, 1);

      float16x8_t vo0 = vmaxq_f16(vo0p0, vmin);
      float16x8_t vo1 = vmaxq_f16(vo1p0, vmin);
      float16x8_t vo2 = vmaxq_f16(vo2p0, vmin);
      float16x8_t vo3 = vmaxq_f16(vo3p0, vmin);

      vo0 = vminq_f16(vo0, vmax);
      vo1 = vminq_f16(vo1, vmax);
      vo2 = vminq_f16(vo2, vmax);
      vo3 = vminq_f16(vo3, vmax);

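      // Store a full 8-pixel block, or store the remainder in 4/2/1-pixel
      // pieces according to the bits of w.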
      if XNN_LIKELY(w == 8 * sizeof(__fp16)) {
        vst1q_f16(o3, vo3); o3 += 8;
        vst1q_f16(o2, vo2); o2 += 8;
        vst1q_f16(o1, vo1); o1 += 8;
        vst1q_f16(o0, vo0); o0 += 8;
      } else {
        float16x4_t vo3_lo = vget_low_f16(vo3);
        float16x4_t vo2_lo = vget_low_f16(vo2);
        float16x4_t vo1_lo = vget_low_f16(vo1);
        float16x4_t vo0_lo = vget_low_f16(vo0);

        if (w & (4 * sizeof(__fp16))) {
          vst1_f16(o3, vo3_lo); o3 += 4;
          vst1_f16(o2, vo2_lo); o2 += 4;
          vst1_f16(o1, vo1_lo); o1 += 4;
          vst1_f16(o0, vo0_lo); o0 += 4;

          vo3_lo = vget_high_f16(vo3);
          vo2_lo = vget_high_f16(vo2);
          vo1_lo = vget_high_f16(vo1);
          vo0_lo = vget_high_f16(vo0);
        }
        if (w & (2 * sizeof(__fp16))) {
          vst1_lane_u32((void*) o3, vreinterpret_u32_f16(vo3_lo), 0); o3 += 2;
          vst1_lane_u32((void*) o2, vreinterpret_u32_f16(vo2_lo), 0); o2 += 2;
          vst1_lane_u32((void*) o1, vreinterpret_u32_f16(vo1_lo), 0); o1 += 2;
          vst1_lane_u32((void*) o0, vreinterpret_u32_f16(vo0_lo), 0); o0 += 2;

          vo0_lo = vext_f16(vo0_lo, vo0_lo, 2);
          vo1_lo = vext_f16(vo1_lo, vo1_lo, 2);
          vo2_lo = vext_f16(vo2_lo, vo2_lo, 2);
          vo3_lo = vext_f16(vo3_lo, vo3_lo, 2);
        }
        if (w & (1 * sizeof(__fp16))) {
          vst1_lane_f16(o3, vo3_lo, 0); o3 += 1;
          vst1_lane_f16(o2, vo2_lo, 0); o2 += 1;
          vst1_lane_f16(o1, vo1_lo, 0); o1 += 1;
          vst1_lane_f16(o0, vo0_lo, 0); o0 += 1;
        }
      }
    }

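    // Re-base pointers for the next group of 4 output rows: the new i0 is the
    // row previously addressed by i4, minus the 8-pixel overshoot; output rows
    // continue where o3 left off.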
    i0 = (const __fp16*) ((uintptr_t) i4 - input_decrement);
    i1 = (const __fp16*) ((uintptr_t) i5 - input_decrement);
    i2 = (const __fp16*) ((uintptr_t) i1 + input_width);
    i3 = (const __fp16*) ((uintptr_t) i2 + input_width);
    i4 = (const __fp16*) ((uintptr_t) i3 + input_width);
    i5 = (const __fp16*) ((uintptr_t) i4 + input_width);

    o0 = o3;
    o1 = (__fp16*) ((uintptr_t) o0 + input_width);
    o2 = (__fp16*) ((uintptr_t) o1 + input_width);
    o3 = (__fp16*) ((uintptr_t) o2 + input_width);

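    // doz() is a saturating difference-or-zero subtraction.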
    output_height = doz(output_height, 4);
  } while (output_height != 0);
}