// Auto-generated file. Do not edit!
//   Template: src/f16-igemm/avx2-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>

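// Indirect GEMM (IGEMM) microkernel: computes a 7x8 output tile, reading the
// A rows through an indirection buffer. AVX2 has no native fp16 arithmetic,
// so elements are widened to fp32 with F16C, accumulated with FMA, and
// rounded back to fp16 after every update, so the accumulator behaves like
// a native fp16 accumulator.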
void xnn_f16_igemm_minmax_ukernel_7x8__avx2_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const void**restrict a,
    const void*restrict w,
    void*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(ks != 0);
  assert(ks % (7 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

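  // Set up the 7 output row pointers. When mr < 7, pointers for the unused
  // rows alias the last valid row, so their stores stay in bounds and the
  // valid data is written last (stores below go from row 6 down to row 0).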
  uint16_t* c0 = c;
  uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  uint16_t* c4 = (uint16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  uint16_t* c5 = (uint16_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  uint16_t* c6 = (uint16_t*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }

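  // Main loop over output columns, 8 channels per iteration. The packed
  // weights w hold 8 bias values for this tile first (loaded as the initial
  // accumulators), followed by kc groups of 8 fp16 weights.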
  do {
    __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    w = (const uint16_t*) w + 8;

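    // Walk the indirection buffer: ks / (7 * sizeof(void*)) groups of 7
    // A-row pointers. An entry equal to `zero` points at a shared zero
    // vector (e.g. for padding) and must not be displaced by a_offset.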
    size_t p = ks;
    do {
      const uint16_t* restrict a0 = (const uint16_t*) a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const uint16_t*) ((uintptr_t) a0 + a_offset);
      }
      const uint16_t* restrict a1 = (const uint16_t*) a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const uint16_t*) ((uintptr_t) a1 + a_offset);
      }
      const uint16_t* restrict a2 = (const uint16_t*) a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const uint16_t*) ((uintptr_t) a2 + a_offset);
      }
      const uint16_t* restrict a3 = (const uint16_t*) a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const uint16_t*) ((uintptr_t) a3 + a_offset);
      }
      const uint16_t* restrict a4 = (const uint16_t*) a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const uint16_t*) ((uintptr_t) a4 + a_offset);
      }
      const uint16_t* restrict a5 = (const uint16_t*) a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const uint16_t*) ((uintptr_t) a5 + a_offset);
      }
      const uint16_t* restrict a6 = (const uint16_t*) a[6];
      assert(a6 != NULL);
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const uint16_t*) ((uintptr_t) a6 + a_offset);
      }
      a += 7;

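      // Inner reduction over kc: broadcast one fp16 element from each A row,
      // widen it and the shared 8 weights to fp32, fuse multiply-add, then
      // round each accumulator back to fp16 precision.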
      size_t k = kc;
      do {
        const __m256 vb01234567 = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) w));
        w = (const uint16_t*) w + 8;

        const __m256 va0 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a0));
        a0 += 1;
        const __m256 va1 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a1));
        a1 += 1;
        const __m256 va2 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a2));
        a2 += 1;
        const __m256 va3 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a3));
        a3 += 1;
        const __m256 va4 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a4));
        a4 += 1;
        const __m256 va5 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a5));
        a5 += 1;
        const __m256 va6 = _mm256_cvtph_ps(_mm_set1_epi16((short) *a6));
        a6 += 1;

        vacc0x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va0, vb01234567, vacc0x01234567), _MM_FROUND_NO_EXC));
        vacc1x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va1, vb01234567, vacc1x01234567), _MM_FROUND_NO_EXC));
        vacc2x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va2, vb01234567, vacc2x01234567), _MM_FROUND_NO_EXC));
        vacc3x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va3, vb01234567, vacc3x01234567), _MM_FROUND_NO_EXC));
        vacc4x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va4, vb01234567, vacc4x01234567), _MM_FROUND_NO_EXC));
        vacc5x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va5, vb01234567, vacc5x01234567), _MM_FROUND_NO_EXC));
        vacc6x01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_fmadd_ps(va6, vb01234567, vacc6x01234567), _MM_FROUND_NO_EXC));

        k -= sizeof(uint16_t);
      } while (k != 0);
      p -= 7 * sizeof(void*);
    } while (p != 0);

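    // Clamp the accumulators to the [min, max] output range supplied via
    // params.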
    const __m256 vmin = _mm256_load_ps(params->avx.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);

    const __m256 vmax = _mm256_load_ps(params->avx.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);

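    // Full tile: convert each row back to fp16 and store 8 values, highest
    // row first; then advance the output pointers by cn_stride and rewind
    // the indirection buffer by ks for the next 8 columns.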
    if XNN_LIKELY(nc >= 8) {
      _mm_storeu_si128((__m128i*) c6, _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_NO_EXC));
      c6 = (uint16_t*) ((uintptr_t) c6 + cn_stride);
      _mm_storeu_si128((__m128i*) c5, _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC));
      c5 = (uint16_t*) ((uintptr_t) c5 + cn_stride);
      _mm_storeu_si128((__m128i*) c4, _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC));
      c4 = (uint16_t*) ((uintptr_t) c4 + cn_stride);
      _mm_storeu_si128((__m128i*) c3, _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC));
      c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
      _mm_storeu_si128((__m128i*) c2, _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC));
      c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
      _mm_storeu_si128((__m128i*) c1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
      _mm_storeu_si128((__m128i*) c0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);

      a = (const void**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
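      // Partial tile (nc < 8): convert each row to fp16 once, then store 4,
      // 2, and 1 element(s) according to the set bits of nc, shifting stored
      // lanes out of the vectors between steps.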
      __m128i vh6x01234567 = _mm256_cvtps_ph(vacc6x01234567, _MM_FROUND_NO_EXC);
      __m128i vh5x01234567 = _mm256_cvtps_ph(vacc5x01234567, _MM_FROUND_NO_EXC);
      __m128i vh4x01234567 = _mm256_cvtps_ph(vacc4x01234567, _MM_FROUND_NO_EXC);
      __m128i vh3x01234567 = _mm256_cvtps_ph(vacc3x01234567, _MM_FROUND_NO_EXC);
      __m128i vh2x01234567 = _mm256_cvtps_ph(vacc2x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      if (nc & 4) {
        _mm_storel_epi64((__m128i*) c6, vh6x01234567);
        _mm_storel_epi64((__m128i*) c5, vh5x01234567);
        _mm_storel_epi64((__m128i*) c4, vh4x01234567);
        _mm_storel_epi64((__m128i*) c3, vh3x01234567);
        _mm_storel_epi64((__m128i*) c2, vh2x01234567);
        _mm_storel_epi64((__m128i*) c1, vh1x01234567);
        _mm_storel_epi64((__m128i*) c0, vh0x01234567);

        vh6x01234567 = _mm_unpackhi_epi64(vh6x01234567, vh6x01234567);
        vh5x01234567 = _mm_unpackhi_epi64(vh5x01234567, vh5x01234567);
        vh4x01234567 = _mm_unpackhi_epi64(vh4x01234567, vh4x01234567);
        vh3x01234567 = _mm_unpackhi_epi64(vh3x01234567, vh3x01234567);
        vh2x01234567 = _mm_unpackhi_epi64(vh2x01234567, vh2x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);
        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);

        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storeu_si32(c6, vh6x01234567);
        _mm_storeu_si32(c5, vh5x01234567);
        _mm_storeu_si32(c4, vh4x01234567);
        _mm_storeu_si32(c3, vh3x01234567);
        _mm_storeu_si32(c2, vh2x01234567);
        _mm_storeu_si32(c1, vh1x01234567);
        _mm_storeu_si32(c0, vh0x01234567);

        vh6x01234567 = _mm_srli_epi64(vh6x01234567, 32);
        vh5x01234567 = _mm_srli_epi64(vh5x01234567, 32);
        vh4x01234567 = _mm_srli_epi64(vh4x01234567, 32);
        vh3x01234567 = _mm_srli_epi64(vh3x01234567, 32);
        vh2x01234567 = _mm_srli_epi64(vh2x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);
        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);

        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        *c6 = _mm_extract_epi16(vh6x01234567, 0);
        *c5 = _mm_extract_epi16(vh5x01234567, 0);
        *c4 = _mm_extract_epi16(vh4x01234567, 0);
        *c3 = _mm_extract_epi16(vh3x01234567, 0);
        *c2 = _mm_extract_epi16(vh2x01234567, 0);
        *c1 = _mm_extract_epi16(vh1x01234567, 0);
        *c0 = _mm_extract_epi16(vh0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}