// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


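// 3x3 depthwise convolution over CHW data with stride 1 and unit padding on
// all four sides: top/bottom rows come from the caller-provided `zero` row,
// left/right columns are shifted-in zeros. Each outer iteration produces up
// to 6 output rows; each inner iteration produces 4 pixels per row. Note
// that `input_width` is measured in bytes, not pixels.
//
// A minimal invocation sketch (argument values are illustrative only, not
// taken from this file):
//
//   xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_6x4(
//       /*input_height=*/height,
//       /*input_width=*/width * sizeof(float),  // bytes
//       input, weights /* bias followed by 9 taps */, zero_row, output,
//       /*padding_top=*/1, &params);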
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_6x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

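  // Load the output clamping bounds and the column mask for the partial
  // right-edge block from the SSE-layout microkernel parameters.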
  const __m128 vmask = _mm_load_ps((const float*) params->sse.mask);
  const __m128 vmax = _mm_load_ps(params->sse.max);
  const __m128 vmin = _mm_load_ps(params->sse.min);

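  // The weights buffer packs 10 floats: the bias followed by the 3x3 kernel
  // taps in row-major order (k00..k02, k10..k12, k20..k22). Each value is
  // broadcast across all 4 lanes.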
  const __m128 vbias = _mm_load1_ps(weights);
  const __m128 vk00 = _mm_load1_ps(weights + 1);
  const __m128 vk01 = _mm_load1_ps(weights + 2);
  const __m128 vk02 = _mm_load1_ps(weights + 3);
  const __m128 vk10 = _mm_load1_ps(weights + 4);
  const __m128 vk11 = _mm_load1_ps(weights + 5);
  const __m128 vk12 = _mm_load1_ps(weights + 6);
  const __m128 vk20 = _mm_load1_ps(weights + 7);
  const __m128 vk21 = _mm_load1_ps(weights + 8);
  const __m128 vk22 = _mm_load1_ps(weights + 9);

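  // After a row is fully consumed, its pointer has advanced by input_width
  // rounded up to a whole 4-pixel block; subtracting input_decrement rewinds
  // it back to the start of that row.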
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

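  // 6 output rows of a 3x3 stride-1 convolution read 8 input rows. With
  // padding_top == 1, row i0 is the zero row and i1 is the first real row.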
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);
  float* o3 = (float*) ((uintptr_t) o2 + input_width);
  float* o4 = (float*) ((uintptr_t) o3 + input_width);
  float* o5 = (float*) ((uintptr_t) o4 + input_width);

  size_t output_height = input_height;
  do {
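    // For a final group shorter than 6 rows, substitute the zero row for
    // out-of-bounds inputs and alias the unused output pointers downward;
    // since rows are stored from o5 down to o0, the valid row's result is
    // stored last and wins.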
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i3 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i4 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i5 = zero;
      o4 = o3;
    }
    if XNN_UNPREDICTABLE(output_height < 6) {
      i6 = zero;
      o5 = o4;
    }
    if XNN_UNPREDICTABLE(output_height < 7) {
      i7 = zero;
    }

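    // Columns -4..-1 of every row start as zeros: this provides the single
    // column of left padding when the first window is shifted into place.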
    __m128 vi0x0123 = _mm_setzero_ps();
    __m128 vi1x0123 = _mm_setzero_ps();
    __m128 vi2x0123 = _mm_setzero_ps();
    __m128 vi3x0123 = _mm_setzero_ps();
    __m128 vi4x0123 = _mm_setzero_ps();
    __m128 vi5x0123 = _mm_setzero_ps();
    __m128 vi6x0123 = _mm_setzero_ps();
    __m128 vi7x0123 = _mm_setzero_ps();

    __m128 vi0x4567 = _mm_loadu_ps(i0);
    i0 += 4;
    __m128 vi1x4567 = _mm_loadu_ps(i1);
    i1 += 4;
    __m128 vi2x4567 = _mm_loadu_ps(i2);
    i2 += 4;
    __m128 vi3x4567 = _mm_loadu_ps(i3);
    i3 += 4;
    __m128 vi4x4567 = _mm_loadu_ps(i4);
    i4 += 4;
    __m128 vi5x4567 = _mm_loadu_ps(i5);
    i5 += 4;
    __m128 vi6x4567 = _mm_loadu_ps(i6);
    i6 += 4;
    __m128 vi7x4567 = _mm_loadu_ps(i7);
    i7 += 4;

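    // Main loop: while more than one full 4-pixel block remains, load the
    // next block and compute 4 output pixels for each of the 6 rows.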
    size_t w = input_width;
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
      const __m128 vi0x89AB = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1x89AB = _mm_loadu_ps(i1);
      i1 += 4;
      const __m128 vi2x89AB = _mm_loadu_ps(i2);
      i2 += 4;
      const __m128 vi3x89AB = _mm_loadu_ps(i3);
      i3 += 4;
      const __m128 vi4x89AB = _mm_loadu_ps(i4);
      i4 += 4;
      const __m128 vi5x89AB = _mm_loadu_ps(i5);
      i5 += 4;
      const __m128 vi6x89AB = _mm_loadu_ps(i6);
      i6 += 4;
      const __m128 vi7x89AB = _mm_loadu_ps(i7);
      i7 += 4;

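      // Accumulate the middle kernel column (k01, k11, k21) over the three
      // input rows contributing to each output row.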
      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
      __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
      __m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));

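      // PALIGNR by 12 bytes shifts in the last pixel of the previous block,
      // forming the window [x3 x4 x5 x6] used by the left kernel column.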
      const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
      const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
      const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
      const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
      const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
      const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
      const __m128 vi6x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x4567), _mm_castps_si128(vi6x0123), 12));
      const __m128 vi7x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi7x4567), _mm_castps_si128(vi7x0123), 12));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;
      vi3x0123 = vi3x4567;
      vi4x0123 = vi4x4567;
      vi5x0123 = vi5x4567;
      vi6x0123 = vi6x4567;
      vi7x0123 = vi7x4567;

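      // PALIGNR by 4 bytes shifts in the first pixel of the next block,
      // forming the window [x5 x6 x7 x8] used by the right kernel column.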
      const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x89AB), _mm_castps_si128(vi0x4567), 4));
      const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x89AB), _mm_castps_si128(vi1x4567), 4));
      const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x89AB), _mm_castps_si128(vi2x4567), 4));
      const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x89AB), _mm_castps_si128(vi3x4567), 4));
      const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x89AB), _mm_castps_si128(vi4x4567), 4));
      const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x89AB), _mm_castps_si128(vi5x4567), 4));
      const __m128 vi6x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x89AB), _mm_castps_si128(vi6x4567), 4));
      const __m128 vi7x5678 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi7x89AB), _mm_castps_si128(vi7x4567), 4));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;
      vi3x4567 = vi3x89AB;
      vi4x4567 = vi4x89AB;
      vi5x4567 = vi5x89AB;
      vi6x4567 = vi6x89AB;
      vi7x4567 = vi7x89AB;


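      // Apply the min/max activation clamp.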
      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
      __m128 vo4 = _mm_max_ps(vo4p0, vmin);
      __m128 vo5 = _mm_max_ps(vo5p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);
      vo1 = _mm_min_ps(vo1, vmax);
      vo2 = _mm_min_ps(vo2, vmax);
      vo3 = _mm_min_ps(vo3, vmax);
      vo4 = _mm_min_ps(vo4, vmax);
      vo5 = _mm_min_ps(vo5, vmax);

      _mm_storeu_ps(o5, vo5);
      o5 += 4;
      _mm_storeu_ps(o4, vo4);
      o4 += 4;
      _mm_storeu_ps(o3, vo3);
      o3 += 4;
      _mm_storeu_ps(o2, vo2);
      o2 += 4;
      _mm_storeu_ps(o1, vo1);
      o1 += 4;
      _mm_storeu_ps(o0, vo0);
      o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
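      // Zero the lanes past the end of the row; vmask was precomputed from
      // the row width, so the same code handles a 1..4 pixel tail.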
      vi0x4567 = _mm_and_ps(vmask, vi0x4567);
      vi1x4567 = _mm_and_ps(vmask, vi1x4567);
      vi2x4567 = _mm_and_ps(vmask, vi2x4567);
      vi3x4567 = _mm_and_ps(vmask, vi3x4567);
      vi4x4567 = _mm_and_ps(vmask, vi4x4567);
      vi5x4567 = _mm_and_ps(vmask, vi5x4567);
      vi6x4567 = _mm_and_ps(vmask, vi6x4567);
      vi7x4567 = _mm_and_ps(vmask, vi7x4567);

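      // Same accumulation pattern as the main loop: middle, then left, then
      // right kernel columns.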
      __m128 vo0p0 = _mm_add_ps(vbias, _mm_mul_ps(vi0x4567, vk01));
      __m128 vo1p0 = _mm_add_ps(vbias, _mm_mul_ps(vi1x4567, vk01));
      __m128 vo2p0 = _mm_add_ps(vbias, _mm_mul_ps(vi2x4567, vk01));
      __m128 vo3p0 = _mm_add_ps(vbias, _mm_mul_ps(vi3x4567, vk01));
      __m128 vo4p0 = _mm_add_ps(vbias, _mm_mul_ps(vi4x4567, vk01));
      __m128 vo5p0 = _mm_add_ps(vbias, _mm_mul_ps(vi5x4567, vk01));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x4567, vk11));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x4567, vk11));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x4567, vk11));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x4567, vk11));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x4567, vk11));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x4567, vk11));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x4567, vk21));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x4567, vk21));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x4567, vk21));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x4567, vk21));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x4567, vk21));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x4567, vk21));

      const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
      const __m128 vi1x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi1x4567), _mm_castps_si128(vi1x0123), 12));
      const __m128 vi2x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi2x4567), _mm_castps_si128(vi2x0123), 12));
      const __m128 vi3x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi3x4567), _mm_castps_si128(vi3x0123), 12));
      const __m128 vi4x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi4x4567), _mm_castps_si128(vi4x0123), 12));
      const __m128 vi5x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi5x4567), _mm_castps_si128(vi5x0123), 12));
      const __m128 vi6x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi6x4567), _mm_castps_si128(vi6x0123), 12));
      const __m128 vi7x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi7x4567), _mm_castps_si128(vi7x0123), 12));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x3456, vk00));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x3456, vk00));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x3456, vk00));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x3456, vk00));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x3456, vk00));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x3456, vk00));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x3456, vk10));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x3456, vk10));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x3456, vk10));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x3456, vk10));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x3456, vk10));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x3456, vk10));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x3456, vk20));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x3456, vk20));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x3456, vk20));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x3456, vk20));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x3456, vk20));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x3456, vk20));

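      // There is no block to the right of the tail: shift in zeros, which
      // also provides the single column of right padding.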
      const __m128i vzero = _mm_setzero_si128();
      const __m128 vi0x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi0x4567), 4));
      const __m128 vi1x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi1x4567), 4));
      const __m128 vi2x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi2x4567), 4));
      const __m128 vi3x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi3x4567), 4));
      const __m128 vi4x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi4x4567), 4));
      const __m128 vi5x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi5x4567), 4));
      const __m128 vi6x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi6x4567), 4));
      const __m128 vi7x5678 = _mm_castsi128_ps(_mm_alignr_epi8(vzero, _mm_castps_si128(vi7x4567), 4));

      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi0x5678, vk02));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi1x5678, vk02));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi2x5678, vk02));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi3x5678, vk02));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi4x5678, vk02));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi5x5678, vk02));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi1x5678, vk12));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi2x5678, vk12));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi3x5678, vk12));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi4x5678, vk12));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi5x5678, vk12));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi6x5678, vk12));
      vo0p0 = _mm_add_ps(vo0p0, _mm_mul_ps(vi2x5678, vk22));
      vo1p0 = _mm_add_ps(vo1p0, _mm_mul_ps(vi3x5678, vk22));
      vo2p0 = _mm_add_ps(vo2p0, _mm_mul_ps(vi4x5678, vk22));
      vo3p0 = _mm_add_ps(vo3p0, _mm_mul_ps(vi5x5678, vk22));
      vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi6x5678, vk22));
      vo5p0 = _mm_add_ps(vo5p0, _mm_mul_ps(vi7x5678, vk22));


      __m128 vo0 = _mm_max_ps(vo0p0, vmin);
      __m128 vo1 = _mm_max_ps(vo1p0, vmin);
      __m128 vo2 = _mm_max_ps(vo2p0, vmin);
      __m128 vo3 = _mm_max_ps(vo3p0, vmin);
      __m128 vo4 = _mm_max_ps(vo4p0, vmin);
      __m128 vo5 = _mm_max_ps(vo5p0, vmin);

      vo0 = _mm_min_ps(vo0, vmax);
      vo1 = _mm_min_ps(vo1, vmax);
      vo2 = _mm_min_ps(vo2, vmax);
      vo3 = _mm_min_ps(vo3, vmax);
      vo4 = _mm_min_ps(vo4, vmax);
      vo5 = _mm_min_ps(vo5, vmax);

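      // A full 4-pixel tail is stored directly; shorter tails store the low
      // 2 lanes and/or a single lane.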
      if XNN_LIKELY(w == 4 * sizeof(float)) {
        _mm_storeu_ps(o5, vo5);
        o5 += 4;
        _mm_storeu_ps(o4, vo4);
        o4 += 4;
        _mm_storeu_ps(o3, vo3);
        o3 += 4;
        _mm_storeu_ps(o2, vo2);
        o2 += 4;
        _mm_storeu_ps(o1, vo1);
        o1 += 4;
        _mm_storeu_ps(o0, vo0);
        o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          _mm_storel_pi((__m64*) o5, vo5);
          o5 += 2;
          _mm_storel_pi((__m64*) o4, vo4);
          o4 += 2;
          _mm_storel_pi((__m64*) o3, vo3);
          o3 += 2;
          _mm_storel_pi((__m64*) o2, vo2);
          o2 += 2;
          _mm_storel_pi((__m64*) o1, vo1);
          o1 += 2;
          _mm_storel_pi((__m64*) o0, vo0);
          o0 += 2;

          vo0 = _mm_movehl_ps(vo0, vo0);
          vo1 = _mm_movehl_ps(vo1, vo1);
          vo2 = _mm_movehl_ps(vo2, vo2);
          vo3 = _mm_movehl_ps(vo3, vo3);
          vo4 = _mm_movehl_ps(vo4, vo4);
          vo5 = _mm_movehl_ps(vo5, vo5);
        }
        if (w & (1 * sizeof(float))) {
          _mm_store_ss(o5, vo5);
          o5 += 1;
          _mm_store_ss(o4, vo4);
          o4 += 1;
          _mm_store_ss(o3, vo3);
          o3 += 1;
          _mm_store_ss(o2, vo2);
          o2 += 1;
          _mm_store_ss(o1, vo1);
          o1 += 1;
          _mm_store_ss(o0, vo0);
          o0 += 1;
        }
      }
    }

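    // Advance to the next group of 6 output rows: the rows that entered this
    // group as i6/i7 are re-read as i0/i1, rewound to their row starts.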
    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);
    i7 = (const float*) ((uintptr_t) i6 + input_width);

    o0 = o5;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);
    o3 = (float*) ((uintptr_t) o2 + input_width);
    o4 = (float*) ((uintptr_t) o3 + input_width);
    o5 = (float*) ((uintptr_t) o4 + input_width);

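    // doz() is a saturating ("difference or zero") subtraction, so the count
    // reaches exactly zero on the final, possibly partial, group of rows.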
    output_height = doz(output_height, 6);
  } while (output_height != 0);
}