// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv/up-avx.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>

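// Naming: "up16x25" = channel tile of 16 with 25 filter taps (e.g. a 5x5
// depthwise kernel), "fma3" = FMA3 intrinsics, "acc2" = two partial
// accumulators per register, summed before the min/max clamp.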
void xnn_f32_dwconv_minmax_ukernel_up16x25__fma3_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const __m256 vmax = _mm256_load_ps(params->avx.max);
  const __m256 vmin = _mm256_load_ps(params->avx.min);
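  // Each outer iteration computes one output pixel across all channels.
  // input[] supplies 25 row pointers; taps that fall in the zero padding
  // point at the shared `zero` buffer, which is deliberately not offset.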
  do {
    const float* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const float*) ((uintptr_t) i0 + input_offset);
    }
    const float* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const float*) ((uintptr_t) i1 + input_offset);
    }
    const float* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const float*) ((uintptr_t) i2 + input_offset);
    }
    const float* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const float*) ((uintptr_t) i3 + input_offset);
    }
    const float* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const float*) ((uintptr_t) i4 + input_offset);
    }
    const float* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const float*) ((uintptr_t) i5 + input_offset);
    }
    const float* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const float*) ((uintptr_t) i6 + input_offset);
    }
    const float* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const float*) ((uintptr_t) i7 + input_offset);
    }
    const float* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const float*) ((uintptr_t) i8 + input_offset);
    }
    const float* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const float*) ((uintptr_t) i9 + input_offset);
    }
    const float* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const float*) ((uintptr_t) i10 + input_offset);
    }
    const float* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const float*) ((uintptr_t) i11 + input_offset);
    }
    const float* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const float*) ((uintptr_t) i12 + input_offset);
    }
    const float* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const float*) ((uintptr_t) i13 + input_offset);
    }
    const float* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const float*) ((uintptr_t) i14 + input_offset);
    }
    const float* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const float*) ((uintptr_t) i15 + input_offset);
    }
    const float* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const float*) ((uintptr_t) i16 + input_offset);
    }
    const float* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const float*) ((uintptr_t) i17 + input_offset);
    }
    const float* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const float*) ((uintptr_t) i18 + input_offset);
    }
    const float* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const float*) ((uintptr_t) i19 + input_offset);
    }
    const float* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const float*) ((uintptr_t) i20 + input_offset);
    }
    const float* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const float*) ((uintptr_t) i21 + input_offset);
    }
    const float* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const float*) ((uintptr_t) i22 + input_offset);
    }
    const float* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const float*) ((uintptr_t) i23 + input_offset);
    }
    const float* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const float*) ((uintptr_t) i24 + input_offset);
    }
    input = (const float**) ((uintptr_t) input + input_stride);

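    // The packed weight layout implied by the offsets below: per 16-channel
    // block, 16 biases followed by 25 taps x 16 channels, 416 floats in all.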
    size_t c = channels;
    const float* w = weights;
    for (; c >= 16; c -= 16) {
      __m256 vacc01234567p0 = _mm256_load_ps(w);
      __m256 vacc89ABCDEFp0 = _mm256_load_ps(w + 8);


      const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
      const __m256 vi0x89ABCDEF = _mm256_loadu_ps(i0 + 8);
      i0 += 16;

      const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
      const __m256 vk0x89ABCDEF = _mm256_load_ps(w + 24);
      vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi0x89ABCDEF, vk0x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
      const __m256 vi1x89ABCDEF = _mm256_loadu_ps(i1 + 8);
      i1 += 16;

      const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
      const __m256 vk1x89ABCDEF = _mm256_load_ps(w + 40);
      __m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);
      __m256 vacc89ABCDEFp1 = _mm256_mul_ps(vi1x89ABCDEF, vk1x89ABCDEF);

      const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
      const __m256 vi2x89ABCDEF = _mm256_loadu_ps(i2 + 8);
      i2 += 16;

      const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
      const __m256 vk2x89ABCDEF = _mm256_load_ps(w + 56);
      vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi2x89ABCDEF, vk2x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
      const __m256 vi3x89ABCDEF = _mm256_loadu_ps(i3 + 8);
      i3 += 16;

      const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
      const __m256 vk3x89ABCDEF = _mm256_load_ps(w + 72);
      vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi3x89ABCDEF, vk3x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
      const __m256 vi4x89ABCDEF = _mm256_loadu_ps(i4 + 8);
      i4 += 16;

      const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
      const __m256 vk4x89ABCDEF = _mm256_load_ps(w + 88);
      vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi4x89ABCDEF, vk4x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
      const __m256 vi5x89ABCDEF = _mm256_loadu_ps(i5 + 8);
      i5 += 16;

      const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
      const __m256 vk5x89ABCDEF = _mm256_load_ps(w + 104);
      vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi5x89ABCDEF, vk5x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
      const __m256 vi6x89ABCDEF = _mm256_loadu_ps(i6 + 8);
      i6 += 16;

      const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
      const __m256 vk6x89ABCDEF = _mm256_load_ps(w + 120);
      vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi6x89ABCDEF, vk6x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
      const __m256 vi7x89ABCDEF = _mm256_loadu_ps(i7 + 8);
      i7 += 16;

      const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
      const __m256 vk7x89ABCDEF = _mm256_load_ps(w + 136);
      vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi7x89ABCDEF, vk7x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
      const __m256 vi8x89ABCDEF = _mm256_loadu_ps(i8 + 8);
      i8 += 16;

      const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
      const __m256 vk8x89ABCDEF = _mm256_load_ps(w + 152);
      vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi8x89ABCDEF, vk8x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi9x01234567 = _mm256_loadu_ps(i9);
      const __m256 vi9x89ABCDEF = _mm256_loadu_ps(i9 + 8);
      i9 += 16;

      const __m256 vk9x01234567 = _mm256_load_ps(w + 160);
      const __m256 vk9x89ABCDEF = _mm256_load_ps(w + 168);
      vacc01234567p1 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi9x89ABCDEF, vk9x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi10x01234567 = _mm256_loadu_ps(i10);
      const __m256 vi10x89ABCDEF = _mm256_loadu_ps(i10 + 8);
      i10 += 16;

      const __m256 vk10x01234567 = _mm256_load_ps(w + 176);
      const __m256 vk10x89ABCDEF = _mm256_load_ps(w + 184);
      vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi10x89ABCDEF, vk10x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi11x01234567 = _mm256_loadu_ps(i11);
      const __m256 vi11x89ABCDEF = _mm256_loadu_ps(i11 + 8);
      i11 += 16;

      const __m256 vk11x01234567 = _mm256_load_ps(w + 192);
      const __m256 vk11x89ABCDEF = _mm256_load_ps(w + 200);
      vacc01234567p1 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi11x89ABCDEF, vk11x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi12x01234567 = _mm256_loadu_ps(i12);
      const __m256 vi12x89ABCDEF = _mm256_loadu_ps(i12 + 8);
      i12 += 16;

      const __m256 vk12x01234567 = _mm256_load_ps(w + 208);
      const __m256 vk12x89ABCDEF = _mm256_load_ps(w + 216);
      vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi12x89ABCDEF, vk12x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi13x01234567 = _mm256_loadu_ps(i13);
      const __m256 vi13x89ABCDEF = _mm256_loadu_ps(i13 + 8);
      i13 += 16;

      const __m256 vk13x01234567 = _mm256_load_ps(w + 224);
      const __m256 vk13x89ABCDEF = _mm256_load_ps(w + 232);
      vacc01234567p1 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi13x89ABCDEF, vk13x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi14x01234567 = _mm256_loadu_ps(i14);
      const __m256 vi14x89ABCDEF = _mm256_loadu_ps(i14 + 8);
      i14 += 16;

      const __m256 vk14x01234567 = _mm256_load_ps(w + 240);
      const __m256 vk14x89ABCDEF = _mm256_load_ps(w + 248);
      vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi14x89ABCDEF, vk14x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi15x01234567 = _mm256_loadu_ps(i15);
      const __m256 vi15x89ABCDEF = _mm256_loadu_ps(i15 + 8);
      i15 += 16;

      const __m256 vk15x01234567 = _mm256_load_ps(w + 256);
      const __m256 vk15x89ABCDEF = _mm256_load_ps(w + 264);
      vacc01234567p1 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi15x89ABCDEF, vk15x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi16x01234567 = _mm256_loadu_ps(i16);
      const __m256 vi16x89ABCDEF = _mm256_loadu_ps(i16 + 8);
      i16 += 16;

      const __m256 vk16x01234567 = _mm256_load_ps(w + 272);
      const __m256 vk16x89ABCDEF = _mm256_load_ps(w + 280);
      vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi16x89ABCDEF, vk16x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi17x01234567 = _mm256_loadu_ps(i17);
      const __m256 vi17x89ABCDEF = _mm256_loadu_ps(i17 + 8);
      i17 += 16;

      const __m256 vk17x01234567 = _mm256_load_ps(w + 288);
      const __m256 vk17x89ABCDEF = _mm256_load_ps(w + 296);
      vacc01234567p1 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi17x89ABCDEF, vk17x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi18x01234567 = _mm256_loadu_ps(i18);
      const __m256 vi18x89ABCDEF = _mm256_loadu_ps(i18 + 8);
      i18 += 16;

      const __m256 vk18x01234567 = _mm256_load_ps(w + 304);
      const __m256 vk18x89ABCDEF = _mm256_load_ps(w + 312);
      vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi18x89ABCDEF, vk18x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi19x01234567 = _mm256_loadu_ps(i19);
      const __m256 vi19x89ABCDEF = _mm256_loadu_ps(i19 + 8);
      i19 += 16;

      const __m256 vk19x01234567 = _mm256_load_ps(w + 320);
      const __m256 vk19x89ABCDEF = _mm256_load_ps(w + 328);
      vacc01234567p1 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi19x89ABCDEF, vk19x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi20x01234567 = _mm256_loadu_ps(i20);
      const __m256 vi20x89ABCDEF = _mm256_loadu_ps(i20 + 8);
      i20 += 16;

      const __m256 vk20x01234567 = _mm256_load_ps(w + 336);
      const __m256 vk20x89ABCDEF = _mm256_load_ps(w + 344);
      vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi20x89ABCDEF, vk20x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi21x01234567 = _mm256_loadu_ps(i21);
      const __m256 vi21x89ABCDEF = _mm256_loadu_ps(i21 + 8);
      i21 += 16;

      const __m256 vk21x01234567 = _mm256_load_ps(w + 352);
      const __m256 vk21x89ABCDEF = _mm256_load_ps(w + 360);
      vacc01234567p1 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi21x89ABCDEF, vk21x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi22x01234567 = _mm256_loadu_ps(i22);
      const __m256 vi22x89ABCDEF = _mm256_loadu_ps(i22 + 8);
      i22 += 16;

      const __m256 vk22x01234567 = _mm256_load_ps(w + 368);
      const __m256 vk22x89ABCDEF = _mm256_load_ps(w + 376);
      vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi22x89ABCDEF, vk22x89ABCDEF, vacc89ABCDEFp0);

      const __m256 vi23x01234567 = _mm256_loadu_ps(i23);
      const __m256 vi23x89ABCDEF = _mm256_loadu_ps(i23 + 8);
      i23 += 16;

      const __m256 vk23x01234567 = _mm256_load_ps(w + 384);
      const __m256 vk23x89ABCDEF = _mm256_load_ps(w + 392);
      vacc01234567p1 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p1);
      vacc89ABCDEFp1 = _mm256_fmadd_ps(vi23x89ABCDEF, vk23x89ABCDEF, vacc89ABCDEFp1);

      const __m256 vi24x01234567 = _mm256_loadu_ps(i24);
      const __m256 vi24x89ABCDEF = _mm256_loadu_ps(i24 + 8);
      i24 += 16;

      const __m256 vk24x01234567 = _mm256_load_ps(w + 400);
      const __m256 vk24x89ABCDEF = _mm256_load_ps(w + 408);
      vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);
      vacc89ABCDEFp0 = _mm256_fmadd_ps(vi24x89ABCDEF, vk24x89ABCDEF, vacc89ABCDEFp0);

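      // Advance past one full weight block: 16 biases + 25*16 kernel values.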
      w += 416;

      // Add up the two partial accumulators into vacc01234567p0 and vacc89ABCDEFp0
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);
      vacc89ABCDEFp0 = _mm256_add_ps(vacc89ABCDEFp0, vacc89ABCDEFp1);

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      __m256 vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEFp0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);
      vacc89ABCDEF = _mm256_min_ps(vacc89ABCDEF, vmax);

      _mm256_storeu_ps(output, vacc01234567);
      _mm256_storeu_ps(output + 8, vacc89ABCDEF);
      output += 16;
    }
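    // Remainder of 8..15 channels: same 25-tap accumulation with one 8-lane
    // accumulator pair (p0/p1), reading from the last (partially used) block.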
    for (; c >= 8; c -= 8) {
      __m256 vacc01234567p0 = _mm256_load_ps(w);

      const __m256 vi0x01234567 = _mm256_loadu_ps(i0);
      i0 += 8;

      const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
      vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);

      const __m256 vi1x01234567 = _mm256_loadu_ps(i1);
      i1 += 8;

      const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
      __m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);

      const __m256 vi2x01234567 = _mm256_loadu_ps(i2);
      i2 += 8;

      const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
      vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);

      const __m256 vi3x01234567 = _mm256_loadu_ps(i3);
      i3 += 8;

      const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
      vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);

      const __m256 vi4x01234567 = _mm256_loadu_ps(i4);
      i4 += 8;

      const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
      vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);

      const __m256 vi5x01234567 = _mm256_loadu_ps(i5);
      i5 += 8;

      const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
      vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);

      const __m256 vi6x01234567 = _mm256_loadu_ps(i6);
      i6 += 8;

      const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
      vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);

      const __m256 vi7x01234567 = _mm256_loadu_ps(i7);
      i7 += 8;

      const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
      vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);

      const __m256 vi8x01234567 = _mm256_loadu_ps(i8);
      i8 += 8;

      const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
      vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);

      const __m256 vi9x01234567 = _mm256_loadu_ps(i9);
      i9 += 8;

      const __m256 vk9x01234567 = _mm256_load_ps(w + 160);
      vacc01234567p1 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p1);

      const __m256 vi10x01234567 = _mm256_loadu_ps(i10);
      i10 += 8;

      const __m256 vk10x01234567 = _mm256_load_ps(w + 176);
      vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);

      const __m256 vi11x01234567 = _mm256_loadu_ps(i11);
      i11 += 8;

      const __m256 vk11x01234567 = _mm256_load_ps(w + 192);
      vacc01234567p1 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p1);

      const __m256 vi12x01234567 = _mm256_loadu_ps(i12);
      i12 += 8;

      const __m256 vk12x01234567 = _mm256_load_ps(w + 208);
      vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);

      const __m256 vi13x01234567 = _mm256_loadu_ps(i13);
      i13 += 8;

      const __m256 vk13x01234567 = _mm256_load_ps(w + 224);
      vacc01234567p1 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p1);

      const __m256 vi14x01234567 = _mm256_loadu_ps(i14);
      i14 += 8;

      const __m256 vk14x01234567 = _mm256_load_ps(w + 240);
      vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);

      const __m256 vi15x01234567 = _mm256_loadu_ps(i15);
      i15 += 8;

      const __m256 vk15x01234567 = _mm256_load_ps(w + 256);
      vacc01234567p1 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p1);

      const __m256 vi16x01234567 = _mm256_loadu_ps(i16);
      i16 += 8;

      const __m256 vk16x01234567 = _mm256_load_ps(w + 272);
      vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);

      const __m256 vi17x01234567 = _mm256_loadu_ps(i17);
      i17 += 8;

      const __m256 vk17x01234567 = _mm256_load_ps(w + 288);
      vacc01234567p1 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p1);

      const __m256 vi18x01234567 = _mm256_loadu_ps(i18);
      i18 += 8;

      const __m256 vk18x01234567 = _mm256_load_ps(w + 304);
      vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);

      const __m256 vi19x01234567 = _mm256_loadu_ps(i19);
      i19 += 8;

      const __m256 vk19x01234567 = _mm256_load_ps(w + 320);
      vacc01234567p1 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p1);

      const __m256 vi20x01234567 = _mm256_loadu_ps(i20);
      i20 += 8;

      const __m256 vk20x01234567 = _mm256_load_ps(w + 336);
      vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);

      const __m256 vi21x01234567 = _mm256_loadu_ps(i21);
      i21 += 8;

      const __m256 vk21x01234567 = _mm256_load_ps(w + 352);
      vacc01234567p1 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p1);

      const __m256 vi22x01234567 = _mm256_loadu_ps(i22);
      i22 += 8;

      const __m256 vk22x01234567 = _mm256_load_ps(w + 368);
      vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);

      const __m256 vi23x01234567 = _mm256_loadu_ps(i23);
      i23 += 8;

      const __m256 vk23x01234567 = _mm256_load_ps(w + 384);
      vacc01234567p1 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p1);

      const __m256 vi24x01234567 = _mm256_loadu_ps(i24);
      i24 += 8;

      const __m256 vk24x01234567 = _mm256_load_ps(w + 400);
      vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);

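      // Note: w advances by only 8. The weights stay packed for a 16-channel
      // tile, so after consuming channels 0..7 of the final block, w + 8 lines
      // the same per-tap offsets up with channels 8..15 (inferred from the
      // packing; the per-tap loads keep their stride of 16 above and below).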
      w += 8;

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

      _mm256_storeu_ps(output, vacc01234567);
      output += 8;
    }
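    // Final 1..7 channels: a mask with c active lanes is loaded from the
    // shifted mask_table; _mm256_maskload_ps zero-fills the inactive lanes,
    // so the full-width FMAs below remain safe.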
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 7);
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) &params->avx.mask_table[7 - c]);

      __m256 vacc01234567p0 = _mm256_load_ps(w);

      const __m256 vi0x01234567 = _mm256_maskload_ps(i0, vmask);
      const __m256 vk0x01234567 = _mm256_load_ps(w + 16);
      vacc01234567p0 = _mm256_fmadd_ps(vi0x01234567, vk0x01234567, vacc01234567p0);

      const __m256 vi1x01234567 = _mm256_maskload_ps(i1, vmask);
      const __m256 vk1x01234567 = _mm256_load_ps(w + 32);
      __m256 vacc01234567p1 = _mm256_mul_ps(vi1x01234567, vk1x01234567);

      const __m256 vi2x01234567 = _mm256_maskload_ps(i2, vmask);
      const __m256 vk2x01234567 = _mm256_load_ps(w + 48);
      vacc01234567p0 = _mm256_fmadd_ps(vi2x01234567, vk2x01234567, vacc01234567p0);

      const __m256 vi3x01234567 = _mm256_maskload_ps(i3, vmask);
      const __m256 vk3x01234567 = _mm256_load_ps(w + 64);
      vacc01234567p1 = _mm256_fmadd_ps(vi3x01234567, vk3x01234567, vacc01234567p1);

      const __m256 vi4x01234567 = _mm256_maskload_ps(i4, vmask);
      const __m256 vk4x01234567 = _mm256_load_ps(w + 80);
      vacc01234567p0 = _mm256_fmadd_ps(vi4x01234567, vk4x01234567, vacc01234567p0);

      const __m256 vi5x01234567 = _mm256_maskload_ps(i5, vmask);
      const __m256 vk5x01234567 = _mm256_load_ps(w + 96);
      vacc01234567p1 = _mm256_fmadd_ps(vi5x01234567, vk5x01234567, vacc01234567p1);

      const __m256 vi6x01234567 = _mm256_maskload_ps(i6, vmask);
      const __m256 vk6x01234567 = _mm256_load_ps(w + 112);
      vacc01234567p0 = _mm256_fmadd_ps(vi6x01234567, vk6x01234567, vacc01234567p0);

      const __m256 vi7x01234567 = _mm256_maskload_ps(i7, vmask);
      const __m256 vk7x01234567 = _mm256_load_ps(w + 128);
      vacc01234567p1 = _mm256_fmadd_ps(vi7x01234567, vk7x01234567, vacc01234567p1);

      const __m256 vi8x01234567 = _mm256_maskload_ps(i8, vmask);
      const __m256 vk8x01234567 = _mm256_load_ps(w + 144);
      vacc01234567p0 = _mm256_fmadd_ps(vi8x01234567, vk8x01234567, vacc01234567p0);

      const __m256 vi9x01234567 = _mm256_maskload_ps(i9, vmask);
      const __m256 vk9x01234567 = _mm256_load_ps(w + 160);
      vacc01234567p1 = _mm256_fmadd_ps(vi9x01234567, vk9x01234567, vacc01234567p1);

      const __m256 vi10x01234567 = _mm256_maskload_ps(i10, vmask);
      const __m256 vk10x01234567 = _mm256_load_ps(w + 176);
      vacc01234567p0 = _mm256_fmadd_ps(vi10x01234567, vk10x01234567, vacc01234567p0);

      const __m256 vi11x01234567 = _mm256_maskload_ps(i11, vmask);
      const __m256 vk11x01234567 = _mm256_load_ps(w + 192);
      vacc01234567p1 = _mm256_fmadd_ps(vi11x01234567, vk11x01234567, vacc01234567p1);

      const __m256 vi12x01234567 = _mm256_maskload_ps(i12, vmask);
      const __m256 vk12x01234567 = _mm256_load_ps(w + 208);
      vacc01234567p0 = _mm256_fmadd_ps(vi12x01234567, vk12x01234567, vacc01234567p0);

      const __m256 vi13x01234567 = _mm256_maskload_ps(i13, vmask);
      const __m256 vk13x01234567 = _mm256_load_ps(w + 224);
      vacc01234567p1 = _mm256_fmadd_ps(vi13x01234567, vk13x01234567, vacc01234567p1);

      const __m256 vi14x01234567 = _mm256_maskload_ps(i14, vmask);
      const __m256 vk14x01234567 = _mm256_load_ps(w + 240);
      vacc01234567p0 = _mm256_fmadd_ps(vi14x01234567, vk14x01234567, vacc01234567p0);

      const __m256 vi15x01234567 = _mm256_maskload_ps(i15, vmask);
      const __m256 vk15x01234567 = _mm256_load_ps(w + 256);
      vacc01234567p1 = _mm256_fmadd_ps(vi15x01234567, vk15x01234567, vacc01234567p1);

      const __m256 vi16x01234567 = _mm256_maskload_ps(i16, vmask);
      const __m256 vk16x01234567 = _mm256_load_ps(w + 272);
      vacc01234567p0 = _mm256_fmadd_ps(vi16x01234567, vk16x01234567, vacc01234567p0);

      const __m256 vi17x01234567 = _mm256_maskload_ps(i17, vmask);
      const __m256 vk17x01234567 = _mm256_load_ps(w + 288);
      vacc01234567p1 = _mm256_fmadd_ps(vi17x01234567, vk17x01234567, vacc01234567p1);

      const __m256 vi18x01234567 = _mm256_maskload_ps(i18, vmask);
      const __m256 vk18x01234567 = _mm256_load_ps(w + 304);
      vacc01234567p0 = _mm256_fmadd_ps(vi18x01234567, vk18x01234567, vacc01234567p0);

      const __m256 vi19x01234567 = _mm256_maskload_ps(i19, vmask);
      const __m256 vk19x01234567 = _mm256_load_ps(w + 320);
      vacc01234567p1 = _mm256_fmadd_ps(vi19x01234567, vk19x01234567, vacc01234567p1);

      const __m256 vi20x01234567 = _mm256_maskload_ps(i20, vmask);
      const __m256 vk20x01234567 = _mm256_load_ps(w + 336);
      vacc01234567p0 = _mm256_fmadd_ps(vi20x01234567, vk20x01234567, vacc01234567p0);

      const __m256 vi21x01234567 = _mm256_maskload_ps(i21, vmask);
      const __m256 vk21x01234567 = _mm256_load_ps(w + 352);
      vacc01234567p1 = _mm256_fmadd_ps(vi21x01234567, vk21x01234567, vacc01234567p1);

      const __m256 vi22x01234567 = _mm256_maskload_ps(i22, vmask);
      const __m256 vk22x01234567 = _mm256_load_ps(w + 368);
      vacc01234567p0 = _mm256_fmadd_ps(vi22x01234567, vk22x01234567, vacc01234567p0);

      const __m256 vi23x01234567 = _mm256_maskload_ps(i23, vmask);
      const __m256 vk23x01234567 = _mm256_load_ps(w + 384);
      vacc01234567p1 = _mm256_fmadd_ps(vi23x01234567, vk23x01234567, vacc01234567p1);

      const __m256 vi24x01234567 = _mm256_maskload_ps(i24, vmask);
      const __m256 vk24x01234567 = _mm256_load_ps(w + 400);
      vacc01234567p0 = _mm256_fmadd_ps(vi24x01234567, vk24x01234567, vacc01234567p0);

      // Add up all accumulators to vacc01234567p0
      vacc01234567p0 = _mm256_add_ps(vacc01234567p0, vacc01234567p1);

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

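      // Write the last c outputs without overrunning the buffer: store 4, 2,
      // then 1 lanes as selected by the bits of c, shifting the live lanes
      // down after each partial store.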
      __m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
      if (c & 4) {
        _mm_storeu_ps(output, vacc0123);
        vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
        output += 4;
      }
      if (c & 2) {
        _mm_storel_pi((__m64*) output, vacc0123);
        vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
        output += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vacc0123);
        output += 1;
      }
    }

    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}