/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_X86_CONVOLVE_AVX2_H_
#define AOM_AOM_DSP_X86_CONVOLVE_AVX2_H_

#include <assert.h>
#include <immintrin.h>

#include "aom_ports/mem.h"

#include "av1/common/convolve.h"
#include "av1/common/filter.h"

// Byte-shuffle masks for the lowbd horizontal filters.
DECLARE_ALIGNED(32, static const uint8_t, filt_global_avx2[]) = {
  0,  1,  1,  2,  2, 3,  3,  4,  4,  5,  5,  6,  6,  7,  7,  8,  0,  1,  1,
  2,  2,  3,  3,  4, 4,  5,  5,  6,  6,  7,  7,  8,  2,  3,  3,  4,  4,  5,
  5,  6,  6,  7,  7, 8,  8,  9,  9,  10, 2,  3,  3,  4,  4,  5,  5,  6,  6,
  7,  7,  8,  8,  9, 9,  10, 4,  5,  5,  6,  6,  7,  7,  8,  8,  9,  9,  10,
  10, 11, 11, 12, 4, 5,  5,  6,  6,  7,  7,  8,  8,  9,  9,  10, 10, 11, 11,
  12, 6,  7,  7,  8, 8,  9,  9,  10, 10, 11, 11, 12, 12, 13, 13, 14, 6,  7,
  7,  8,  8,  9,  9, 10, 10, 11, 11, 12, 12, 13, 13, 14
};

DECLARE_ALIGNED(32, static const uint8_t, filt_d4_global_avx2[]) = {
  0, 1, 2, 3,  1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6, 0, 1, 2, 3,  1, 2,
  3, 4, 2, 3,  4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8, 6, 7,  8, 9,
  7, 8, 9, 10, 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10,
};

DECLARE_ALIGNED(32, static const uint8_t, filt4_d4_global_avx2[]) = {
  2, 3, 4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8,
  2, 3, 4, 5, 3, 4, 5, 6, 4, 5, 6, 7, 5, 6, 7, 8,
};

DECLARE_ALIGNED(32, static const uint8_t, filt_center_global_avx2[32]) = {
  3, 255, 4, 255, 5, 255, 6, 255, 7, 255, 8, 255, 9, 255, 10, 255,
  3, 255, 4, 255, 5, 255, 6, 255, 7, 255, 8, 255, 9, 255, 10, 255
};

DECLARE_ALIGNED(32, static const uint8_t,
                filt1_global_avx2[32]) = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5,
                                           6, 6, 7, 7, 8, 0, 1, 1, 2, 2, 3,
                                           3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };

DECLARE_ALIGNED(32, static const uint8_t,
                filt2_global_avx2[32]) = { 2, 3, 3, 4, 4,  5, 5, 6, 6, 7, 7,
                                           8, 8, 9, 9, 10, 2, 3, 3, 4, 4, 5,
                                           5, 6, 6, 7, 7,  8, 8, 9, 9, 10 };

DECLARE_ALIGNED(32, static const uint8_t, filt3_global_avx2[32]) = {
  4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12,
  4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
};

DECLARE_ALIGNED(32, static const uint8_t, filt4_global_avx2[32]) = {
  6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14,
  6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
};

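// The CONVOLVE_SR_* macros below expand inside the AVX2 convolve functions
// and rely on locals declared by the enclosing function: i, j, w, h, im_h,
// src_ptr/src_stride, im_block/im_stride, dst/dst_stride, coeffs_h/coeffs_v,
// filt, and the rounding constants (round_const_h/round_shift_h,
// sum_round_v/sum_shift_v, round_const_v/round_shift_v).
// 4-tap horizontal pass: filters two source rows per iteration with the
// middle taps (coeffs_h + 1) and stores rounded 16-bit intermediates to
// im_block.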
#define CONVOLVE_SR_HORIZONTAL_FILTER_4TAP                                     \
  for (i = 0; i < (im_h - 2); i += 2) {                                        \
    __m256i data = _mm256_castsi128_si256(                                     \
        _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));           \
    data = _mm256_inserti128_si256(                                            \
        data,                                                                  \
        _mm_loadu_si128(                                                       \
            (__m128i *)&src_ptr[(i * src_stride) + j + src_stride]),           \
        1);                                                                    \
    __m256i res = convolve_lowbd_x_4tap(data, coeffs_h + 1, filt);             \
    res =                                                                      \
        _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
    _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);              \
  }                                                                            \
  __m256i data_1 = _mm256_castsi128_si256(                                     \
      _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));             \
  __m256i res = convolve_lowbd_x_4tap(data_1, coeffs_h + 1, filt);             \
  res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
  _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);

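// 4-tap vertical pass: consumes the 16-bit intermediates in im_block two
// output rows at a time, applies coeffs_v + 1, rounds in two stages and
// stores 8-bit pixels to dst, narrowing the store for widths below 8.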
#define CONVOLVE_SR_VERTICAL_FILTER_4TAP                                      \
  __m256i s[6];                                                               \
  __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));  \
  __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));  \
  __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));  \
  __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));  \
                                                                              \
  s[0] = _mm256_unpacklo_epi16(src_0, src_1);                                 \
  s[1] = _mm256_unpacklo_epi16(src_2, src_3);                                 \
  s[3] = _mm256_unpackhi_epi16(src_0, src_1);                                 \
  s[4] = _mm256_unpackhi_epi16(src_2, src_3);                                 \
                                                                              \
  for (i = 0; i < h; i += 2) {                                                \
    const int16_t *data = &im_block[i * im_stride];                           \
    const __m256i s4 = _mm256_loadu_si256((__m256i *)(data + 4 * im_stride)); \
    const __m256i s5 = _mm256_loadu_si256((__m256i *)(data + 5 * im_stride)); \
    s[2] = _mm256_unpacklo_epi16(s4, s5);                                     \
    s[5] = _mm256_unpackhi_epi16(s4, s5);                                     \
                                                                              \
    __m256i res_a = convolve_4tap(s, coeffs_v + 1);                           \
    __m256i res_b = convolve_4tap(s + 3, coeffs_v + 1);                       \
                                                                              \
    res_a =                                                                   \
        _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v);  \
    res_b =                                                                   \
        _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v);  \
    const __m256i res_a_round = _mm256_sra_epi32(                             \
        _mm256_add_epi32(res_a, round_const_v), round_shift_v);               \
    const __m256i res_b_round = _mm256_sra_epi32(                             \
        _mm256_add_epi32(res_b, round_const_v), round_shift_v);               \
    const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);   \
    const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);         \
    const __m128i res_0 = _mm256_castsi256_si128(res_8b);                     \
    const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);                \
                                                                              \
    __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];                 \
    __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];    \
    if (w - j > 4) {                                                          \
      _mm_storel_epi64(p_0, res_0);                                           \
      _mm_storel_epi64(p_1, res_1);                                           \
    } else if (w == 4) {                                                      \
      xx_storel_32(p_0, res_0);                                               \
      xx_storel_32(p_1, res_1);                                               \
    } else {                                                                  \
      *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);                  \
      *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);                  \
    }                                                                         \
                                                                              \
    s[0] = s[1];                                                              \
    s[1] = s[2];                                                              \
    s[3] = s[4];                                                              \
    s[4] = s[5];                                                              \
  }

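// 6-tap horizontal pass: same structure as the 4-tap version, but uses
// convolve_lowbd_x_6tap with coeffs_h[0..2].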
#define CONVOLVE_SR_HORIZONTAL_FILTER_6TAP                                     \
  for (i = 0; i < (im_h - 2); i += 2) {                                        \
    __m256i data = _mm256_castsi128_si256(                                     \
        _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));           \
    data = _mm256_inserti128_si256(                                            \
        data,                                                                  \
        _mm_loadu_si128(                                                       \
            (__m128i *)&src_ptr[(i * src_stride) + j + src_stride]),           \
        1);                                                                    \
                                                                               \
    __m256i res = convolve_lowbd_x_6tap(data, coeffs_h, filt);                 \
    res =                                                                      \
        _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
    _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);              \
  }                                                                            \
                                                                               \
  __m256i data_1 = _mm256_castsi128_si256(                                     \
      _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));             \
                                                                               \
  __m256i res = convolve_lowbd_x_6tap(data_1, coeffs_h, filt);                 \
                                                                               \
  res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
                                                                               \
  _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);

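// 6-tap vertical pass: keeps a sliding window of unpacked im_block rows in
// s[] and produces two rows of 8-bit output per iteration.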
#define CONVOLVE_SR_VERTICAL_FILTER_6TAP                                      \
  __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));  \
  __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));  \
  __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));  \
  __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));  \
                                                                              \
  __m256i s[8];                                                               \
  s[0] = _mm256_unpacklo_epi16(src_0, src_1);                                 \
  s[1] = _mm256_unpacklo_epi16(src_2, src_3);                                 \
                                                                              \
  s[3] = _mm256_unpackhi_epi16(src_0, src_1);                                 \
  s[4] = _mm256_unpackhi_epi16(src_2, src_3);                                 \
                                                                              \
  for (i = 0; i < h; i += 2) {                                                \
    const int16_t *data = &im_block[i * im_stride];                           \
                                                                              \
    const __m256i s6 = _mm256_loadu_si256((__m256i *)(data + 4 * im_stride)); \
    const __m256i s7 = _mm256_loadu_si256((__m256i *)(data + 5 * im_stride)); \
                                                                              \
    s[2] = _mm256_unpacklo_epi16(s6, s7);                                     \
    s[5] = _mm256_unpackhi_epi16(s6, s7);                                     \
                                                                              \
    __m256i res_a = convolve_6tap(s, coeffs_v);                               \
    __m256i res_b = convolve_6tap(s + 3, coeffs_v);                           \
                                                                              \
    res_a =                                                                   \
        _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v);  \
    res_b =                                                                   \
        _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v);  \
                                                                              \
    const __m256i res_a_round = _mm256_sra_epi32(                             \
        _mm256_add_epi32(res_a, round_const_v), round_shift_v);               \
    const __m256i res_b_round = _mm256_sra_epi32(                             \
        _mm256_add_epi32(res_b, round_const_v), round_shift_v);               \
                                                                              \
    const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);   \
    const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);         \
                                                                              \
    const __m128i res_0 = _mm256_castsi256_si128(res_8b);                     \
    const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);                \
                                                                              \
    __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];                 \
    __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];    \
    if (w - j > 4) {                                                          \
      _mm_storel_epi64(p_0, res_0);                                           \
      _mm_storel_epi64(p_1, res_1);                                           \
    } else if (w == 4) {                                                      \
      xx_storel_32(p_0, res_0);                                               \
      xx_storel_32(p_1, res_1);                                               \
    } else {                                                                  \
      *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);                  \
      *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);                  \
    }                                                                         \
                                                                              \
    s[0] = s[1];                                                              \
    s[1] = s[2];                                                              \
                                                                              \
    s[3] = s[4];                                                              \
    s[4] = s[5];                                                              \
  }

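// 8-tap horizontal pass: filters two source rows per iteration with the full
// coeffs_h[0..3] set and stores rounded 16-bit intermediates to im_block.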
#define CONVOLVE_SR_HORIZONTAL_FILTER_8TAP                                     \
  for (i = 0; i < (im_h - 2); i += 2) {                                        \
    __m256i data = _mm256_castsi128_si256(                                     \
        _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));           \
    data = _mm256_inserti128_si256(                                            \
        data,                                                                  \
        _mm_loadu_si128(                                                       \
            (__m128i *)&src_ptr[(i * src_stride) + j + src_stride]),           \
        1);                                                                    \
                                                                               \
    __m256i res = convolve_lowbd_x(data, coeffs_h, filt);                      \
    res =                                                                      \
        _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
    _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);              \
  }                                                                            \
                                                                               \
  __m256i data_1 = _mm256_castsi128_si256(                                     \
      _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));             \
                                                                               \
  __m256i res = convolve_lowbd_x(data_1, coeffs_h, filt);                      \
                                                                               \
  res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h); \
                                                                               \
  _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);

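// 8-tap vertical pass: preloads six unpacked im_block rows into s[], loads
// two more per iteration and writes two rows of 8-bit output to dst.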
#define CONVOLVE_SR_VERTICAL_FILTER_8TAP                                      \
  __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));  \
  __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));  \
  __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));  \
  __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));  \
  __m256i src_4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride));  \
  __m256i src_5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride));  \
                                                                              \
  __m256i s[8];                                                               \
  s[0] = _mm256_unpacklo_epi16(src_0, src_1);                                 \
  s[1] = _mm256_unpacklo_epi16(src_2, src_3);                                 \
  s[2] = _mm256_unpacklo_epi16(src_4, src_5);                                 \
                                                                              \
  s[4] = _mm256_unpackhi_epi16(src_0, src_1);                                 \
  s[5] = _mm256_unpackhi_epi16(src_2, src_3);                                 \
  s[6] = _mm256_unpackhi_epi16(src_4, src_5);                                 \
                                                                              \
  for (i = 0; i < h; i += 2) {                                                \
    const int16_t *data = &im_block[i * im_stride];                           \
                                                                              \
    const __m256i s6 = _mm256_loadu_si256((__m256i *)(data + 6 * im_stride)); \
    const __m256i s7 = _mm256_loadu_si256((__m256i *)(data + 7 * im_stride)); \
                                                                              \
    s[3] = _mm256_unpacklo_epi16(s6, s7);                                     \
    s[7] = _mm256_unpackhi_epi16(s6, s7);                                     \
                                                                              \
    __m256i res_a = convolve(s, coeffs_v);                                    \
    __m256i res_b = convolve(s + 4, coeffs_v);                                \
                                                                              \
    res_a =                                                                   \
        _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v);  \
    res_b =                                                                   \
        _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v);  \
                                                                              \
    const __m256i res_a_round = _mm256_sra_epi32(                             \
        _mm256_add_epi32(res_a, round_const_v), round_shift_v);               \
    const __m256i res_b_round = _mm256_sra_epi32(                             \
        _mm256_add_epi32(res_b, round_const_v), round_shift_v);               \
                                                                              \
    const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);   \
    const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);         \
                                                                              \
    const __m128i res_0 = _mm256_castsi256_si128(res_8b);                     \
    const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);                \
                                                                              \
    __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];                 \
    __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];    \
    if (w - j > 4) {                                                          \
      _mm_storel_epi64(p_0, res_0);                                           \
      _mm_storel_epi64(p_1, res_1);                                           \
    } else if (w == 4) {                                                      \
      xx_storel_32(p_0, res_0);                                               \
      xx_storel_32(p_1, res_1);                                               \
    } else {                                                                  \
      *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);                  \
      *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);                  \
    }                                                                         \
                                                                              \
    s[0] = s[1];                                                              \
    s[1] = s[2];                                                              \
    s[2] = s[3];                                                              \
                                                                              \
    s[4] = s[5];                                                              \
    s[5] = s[6];                                                              \
    s[6] = s[7];                                                              \
  }

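// 12-tap horizontal pass: widens the source pixels to 16 bits and filters
// with 16-bit multiplies (convolve_12taps); separate paths handle blocks of
// width <= 4 and wider blocks.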
#define CONVOLVE_SR_HORIZONTAL_FILTER_12TAP                                    \
  const __m256i v_zero = _mm256_setzero_si256();                               \
  __m256i s[12];                                                               \
  if (w <= 4) {                                                                \
    for (i = 0; i < im_h; i += 2) {                                            \
      const __m256i data = _mm256_permute2x128_si256(                          \
          _mm256_castsi128_si256(                                              \
              _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride + j]))),     \
          _mm256_castsi128_si256(_mm_loadu_si128(                              \
              (__m128i *)(&src_ptr[i * src_stride + src_stride + j]))),        \
          0x20);                                                               \
      const __m256i s_16lo = _mm256_unpacklo_epi8(data, v_zero);               \
      const __m256i s_16hi = _mm256_unpackhi_epi8(data, v_zero);               \
      const __m256i s_lolo = _mm256_unpacklo_epi16(s_16lo, s_16lo);            \
      const __m256i s_lohi = _mm256_unpackhi_epi16(s_16lo, s_16lo);            \
                                                                               \
      const __m256i s_hilo = _mm256_unpacklo_epi16(s_16hi, s_16hi);            \
      const __m256i s_hihi = _mm256_unpackhi_epi16(s_16hi, s_16hi);            \
                                                                               \
      s[0] = _mm256_alignr_epi8(s_lohi, s_lolo, 2);                            \
      s[1] = _mm256_alignr_epi8(s_lohi, s_lolo, 10);                           \
      s[2] = _mm256_alignr_epi8(s_hilo, s_lohi, 2);                            \
      s[3] = _mm256_alignr_epi8(s_hilo, s_lohi, 10);                           \
      s[4] = _mm256_alignr_epi8(s_hihi, s_hilo, 2);                            \
      s[5] = _mm256_alignr_epi8(s_hihi, s_hilo, 10);                           \
                                                                               \
      const __m256i res_lo = convolve_12taps(s, coeffs_h);                     \
                                                                               \
      __m256i res_32b_lo = _mm256_sra_epi32(                                   \
          _mm256_add_epi32(res_lo, round_const_h12), round_shift_h12);         \
      __m256i res_16b_lo = _mm256_packs_epi32(res_32b_lo, res_32b_lo);         \
      const __m128i res_0 = _mm256_extracti128_si256(res_16b_lo, 0);           \
      const __m128i res_1 = _mm256_extracti128_si256(res_16b_lo, 1);           \
      if (w > 2) {                                                             \
        _mm_storel_epi64((__m128i *)&im_block[i * im_stride], res_0);          \
        _mm_storel_epi64((__m128i *)&im_block[i * im_stride + im_stride],      \
                         res_1);                                               \
      } else {                                                                 \
        uint32_t horiz_2;                                                      \
        horiz_2 = (uint32_t)_mm_cvtsi128_si32(res_0);                          \
        im_block[i * im_stride] = (uint16_t)horiz_2;                           \
        im_block[i * im_stride + 1] = (uint16_t)(horiz_2 >> 16);               \
        horiz_2 = (uint32_t)_mm_cvtsi128_si32(res_1);                          \
        im_block[i * im_stride + im_stride] = (uint16_t)horiz_2;               \
        im_block[i * im_stride + im_stride + 1] = (uint16_t)(horiz_2 >> 16);   \
      }                                                                        \
    }                                                                          \
  } else {                                                                     \
    for (i = 0; i < im_h; i++) {                                               \
      const __m256i data = _mm256_permute2x128_si256(                          \
          _mm256_castsi128_si256(                                              \
              _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride + j]))),     \
          _mm256_castsi128_si256(                                              \
              _mm_loadu_si128((__m128i *)(&src_ptr[i * src_stride + j + 4]))), \
          0x20);                                                               \
      const __m256i s_16lo = _mm256_unpacklo_epi8(data, v_zero);               \
      const __m256i s_16hi = _mm256_unpackhi_epi8(data, v_zero);               \
                                                                               \
      const __m256i s_lolo = _mm256_unpacklo_epi16(s_16lo, s_16lo);            \
      const __m256i s_lohi = _mm256_unpackhi_epi16(s_16lo, s_16lo);            \
                                                                               \
      const __m256i s_hilo = _mm256_unpacklo_epi16(s_16hi, s_16hi);            \
      const __m256i s_hihi = _mm256_unpackhi_epi16(s_16hi, s_16hi);            \
                                                                               \
      s[0] = _mm256_alignr_epi8(s_lohi, s_lolo, 2);                            \
      s[1] = _mm256_alignr_epi8(s_lohi, s_lolo, 10);                           \
      s[2] = _mm256_alignr_epi8(s_hilo, s_lohi, 2);                            \
      s[3] = _mm256_alignr_epi8(s_hilo, s_lohi, 10);                           \
      s[4] = _mm256_alignr_epi8(s_hihi, s_hilo, 2);                            \
      s[5] = _mm256_alignr_epi8(s_hihi, s_hilo, 10);                           \
                                                                               \
      const __m256i res_lo = convolve_12taps(s, coeffs_h);                     \
                                                                               \
      __m256i res_32b_lo = _mm256_sra_epi32(                                   \
          _mm256_add_epi32(res_lo, round_const_h12), round_shift_h12);         \
                                                                               \
      __m256i res_16b_lo = _mm256_packs_epi32(res_32b_lo, res_32b_lo);         \
      _mm_store_si128((__m128i *)&im_block[i * im_stride],                     \
                      _mm256_extracti128_si256(                                \
                          _mm256_permute4x64_epi64(res_16b_lo, 0x88), 0));     \
    }                                                                          \
  }

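// 12-tap vertical pass: same sliding-window scheme as the shorter filters,
// with ten unpacked im_block rows resident in s[] and two loaded per
// iteration.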
#define CONVOLVE_SR_VERTICAL_FILTER_12TAP                                      \
  __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));   \
  __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));   \
  __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));   \
  __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));   \
  __m256i src_4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride));   \
  __m256i src_5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride));   \
  __m256i src_6 = _mm256_loadu_si256((__m256i *)(im_block + 6 * im_stride));   \
  __m256i src_7 = _mm256_loadu_si256((__m256i *)(im_block + 7 * im_stride));   \
  __m256i src_8 = _mm256_loadu_si256((__m256i *)(im_block + 8 * im_stride));   \
  __m256i src_9 = _mm256_loadu_si256((__m256i *)(im_block + 9 * im_stride));   \
                                                                               \
  s[0] = _mm256_unpacklo_epi16(src_0, src_1);                                  \
  s[1] = _mm256_unpacklo_epi16(src_2, src_3);                                  \
  s[2] = _mm256_unpacklo_epi16(src_4, src_5);                                  \
  s[3] = _mm256_unpacklo_epi16(src_6, src_7);                                  \
  s[4] = _mm256_unpacklo_epi16(src_8, src_9);                                  \
                                                                               \
  s[6] = _mm256_unpackhi_epi16(src_0, src_1);                                  \
  s[7] = _mm256_unpackhi_epi16(src_2, src_3);                                  \
  s[8] = _mm256_unpackhi_epi16(src_4, src_5);                                  \
  s[9] = _mm256_unpackhi_epi16(src_6, src_7);                                  \
  s[10] = _mm256_unpackhi_epi16(src_8, src_9);                                 \
                                                                               \
  for (i = 0; i < h; i += 2) {                                                 \
    const int16_t *data = &im_block[i * im_stride];                            \
                                                                               \
    const __m256i s6 = _mm256_loadu_si256((__m256i *)(data + 10 * im_stride)); \
    const __m256i s7 = _mm256_loadu_si256((__m256i *)(data + 11 * im_stride)); \
                                                                               \
    s[5] = _mm256_unpacklo_epi16(s6, s7);                                      \
    s[11] = _mm256_unpackhi_epi16(s6, s7);                                     \
                                                                               \
    __m256i res_a = convolve_12taps(s, coeffs_v);                              \
    __m256i res_b = convolve_12taps(s + 6, coeffs_v);                          \
                                                                               \
    res_a =                                                                    \
        _mm256_sra_epi32(_mm256_add_epi32(res_a, sum_round_v), sum_shift_v);   \
    res_b =                                                                    \
        _mm256_sra_epi32(_mm256_add_epi32(res_b, sum_round_v), sum_shift_v);   \
                                                                               \
    const __m256i res_a_round = _mm256_sra_epi32(                              \
        _mm256_add_epi32(res_a, round_const_v), round_shift_v);                \
    const __m256i res_b_round = _mm256_sra_epi32(                              \
        _mm256_add_epi32(res_b, round_const_v), round_shift_v);                \
                                                                               \
    const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);    \
    const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);          \
                                                                               \
    const __m128i res_0 = _mm256_castsi256_si128(res_8b);                      \
    const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);                 \
                                                                               \
    __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];                  \
    __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];     \
    if (w - j > 4) {                                                           \
      _mm_storel_epi64(p_0, res_0);                                            \
      _mm_storel_epi64(p_1, res_1);                                            \
    } else if (w == 4) {                                                       \
      xx_storel_32(p_0, res_0);                                                \
      xx_storel_32(p_1, res_1);                                                \
    } else {                                                                   \
      *(uint16_t *)p_0 = (uint16_t)_mm_cvtsi128_si32(res_0);                   \
      *(uint16_t *)p_1 = (uint16_t)_mm_cvtsi128_si32(res_1);                   \
    }                                                                          \
                                                                               \
    s[0] = s[1];                                                               \
    s[1] = s[2];                                                               \
    s[2] = s[3];                                                               \
    s[3] = s[4];                                                               \
    s[4] = s[5];                                                               \
                                                                               \
    s[6] = s[7];                                                               \
    s[7] = s[8];                                                               \
    s[8] = s[9];                                                               \
    s[9] = s[10];                                                              \
    s[10] = s[11];                                                             \
  }

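// Distance-weighted compound, horizontal 8-tap pass: filters from src_h into
// the 16-bit im_block two rows at a time (the second row load is skipped on
// a trailing odd row).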
#define DIST_WTD_CONVOLVE_HORIZONTAL_FILTER_8TAP                        \
  do {                                                                  \
    for (i = 0; i < im_h; i += 2) {                                     \
      __m256i data =                                                    \
          _mm256_castsi128_si256(_mm_loadu_si128((__m128i *)src_h));    \
      if (i + 1 < im_h)                                                 \
        data = _mm256_inserti128_si256(                                 \
            data, _mm_loadu_si128((__m128i *)(src_h + src_stride)), 1); \
      src_h += (src_stride << 1);                                       \
      __m256i res = convolve_lowbd_x(data, coeffs_x, filt);             \
                                                                        \
      res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h),      \
                             round_shift_h);                            \
                                                                        \
      _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);     \
    }                                                                   \
  } while (0)

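// Distance-weighted compound, vertical 8-tap pass: when do_average is set
// the result is blended with the contents of dst via comp_avg, rounded and
// written to dst0 as 8-bit pixels; otherwise the offset 16-bit result is
// stored to the CONV_BUF_TYPE buffer dst.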
#define DIST_WTD_CONVOLVE_VERTICAL_FILTER_8TAP                                 \
  do {                                                                         \
    __m256i s[8];                                                              \
    __m256i s0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));    \
    __m256i s1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));    \
    __m256i s2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));    \
    __m256i s3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));    \
    __m256i s4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride));    \
    __m256i s5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride));    \
                                                                               \
    s[0] = _mm256_unpacklo_epi16(s0, s1);                                      \
    s[1] = _mm256_unpacklo_epi16(s2, s3);                                      \
    s[2] = _mm256_unpacklo_epi16(s4, s5);                                      \
                                                                               \
    s[4] = _mm256_unpackhi_epi16(s0, s1);                                      \
    s[5] = _mm256_unpackhi_epi16(s2, s3);                                      \
    s[6] = _mm256_unpackhi_epi16(s4, s5);                                      \
                                                                               \
    for (i = 0; i < h; i += 2) {                                               \
      const int16_t *data = &im_block[i * im_stride];                          \
                                                                               \
      const __m256i s6 =                                                       \
          _mm256_loadu_si256((__m256i *)(data + 6 * im_stride));               \
      const __m256i s7 =                                                       \
          _mm256_loadu_si256((__m256i *)(data + 7 * im_stride));               \
                                                                               \
      s[3] = _mm256_unpacklo_epi16(s6, s7);                                    \
      s[7] = _mm256_unpackhi_epi16(s6, s7);                                    \
                                                                               \
      const __m256i res_a = convolve(s, coeffs_y);                             \
      const __m256i res_a_round = _mm256_sra_epi32(                            \
          _mm256_add_epi32(res_a, round_const_v), round_shift_v);              \
                                                                               \
      if (w - j > 4) {                                                         \
        const __m256i res_b = convolve(s + 4, coeffs_y);                       \
        const __m256i res_b_round = _mm256_sra_epi32(                          \
            _mm256_add_epi32(res_b, round_const_v), round_shift_v);            \
        const __m256i res_16b = _mm256_packs_epi32(res_a_round, res_b_round);  \
        const __m256i res_unsigned = _mm256_add_epi16(res_16b, offset_const);  \
                                                                               \
        if (do_average) {                                                      \
          const __m256i data_ref_0 =                                           \
              load_line2_avx2(&dst[i * dst_stride + j],                        \
                              &dst[i * dst_stride + j + dst_stride]);          \
          const __m256i comp_avg_res = comp_avg(&data_ref_0, &res_unsigned,    \
                                                &wt, use_dist_wtd_comp_avg);   \
                                                                               \
          const __m256i round_result = convolve_rounding(                      \
              &comp_avg_res, &offset_const, &rounding_const, rounding_shift);  \
                                                                               \
          const __m256i res_8 =                                                \
              _mm256_packus_epi16(round_result, round_result);                 \
          const __m128i res_0 = _mm256_castsi256_si128(res_8);                 \
          const __m128i res_1 = _mm256_extracti128_si256(res_8, 1);            \
                                                                               \
          _mm_storel_epi64((__m128i *)(&dst0[i * dst_stride0 + j]), res_0);    \
          _mm_storel_epi64(                                                    \
              (__m128i *)((&dst0[i * dst_stride0 + j + dst_stride0])), res_1); \
        } else {                                                               \
          const __m128i res_0 = _mm256_castsi256_si128(res_unsigned);          \
          _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);       \
                                                                               \
          const __m128i res_1 = _mm256_extracti128_si256(res_unsigned, 1);     \
          _mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]),  \
                          res_1);                                              \
        }                                                                      \
      } else {                                                                 \
        const __m256i res_16b = _mm256_packs_epi32(res_a_round, res_a_round);  \
        const __m256i res_unsigned = _mm256_add_epi16(res_16b, offset_const);  \
                                                                               \
        if (do_average) {                                                      \
          const __m256i data_ref_0 =                                           \
              load_line2_avx2(&dst[i * dst_stride + j],                        \
                              &dst[i * dst_stride + j + dst_stride]);          \
                                                                               \
          const __m256i comp_avg_res = comp_avg(&data_ref_0, &res_unsigned,    \
                                                &wt, use_dist_wtd_comp_avg);   \
                                                                               \
          const __m256i round_result = convolve_rounding(                      \
              &comp_avg_res, &offset_const, &rounding_const, rounding_shift);  \
                                                                               \
          const __m256i res_8 =                                                \
              _mm256_packus_epi16(round_result, round_result);                 \
          const __m128i res_0 = _mm256_castsi256_si128(res_8);                 \
          const __m128i res_1 = _mm256_extracti128_si256(res_8, 1);            \
                                                                               \
          *(int *)(&dst0[i * dst_stride0 + j]) = _mm_cvtsi128_si32(res_0);     \
          *(int *)(&dst0[i * dst_stride0 + j + dst_stride0]) =                 \
              _mm_cvtsi128_si32(res_1);                                        \
                                                                               \
        } else {                                                               \
          const __m128i res_0 = _mm256_castsi256_si128(res_unsigned);          \
          _mm_store_si128((__m128i *)(&dst[i * dst_stride + j]), res_0);       \
                                                                               \
          const __m128i res_1 = _mm256_extracti128_si256(res_unsigned, 1);     \
          _mm_store_si128((__m128i *)(&dst[i * dst_stride + j + dst_stride]),  \
                          res_1);                                              \
        }                                                                      \
      }                                                                        \
                                                                               \
      s[0] = s[1];                                                             \
      s[1] = s[2];                                                             \
      s[2] = s[3];                                                             \
                                                                               \
      s[4] = s[5];                                                             \
      s[5] = s[6];                                                             \
      s[6] = s[7];                                                             \
    }                                                                          \
  } while (0)

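// Loads the 8-tap kernel selected by subpel_q4 and packs the low byte of
// each coefficient into broadcast pairs suitable for _mm256_maddubs_epi16.
// The coefficients are halved first (see the assert below) so the 16-bit
// intermediate sums stay in range.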
static inline void prepare_coeffs_lowbd(
    const InterpFilterParams *const filter_params, const int subpel_q4,
    __m256i *const coeffs /* [4] */) {
  const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
      filter_params, subpel_q4 & SUBPEL_MASK);
  const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
  const __m256i filter_coeffs = _mm256_broadcastsi128_si256(coeffs_8);

  // Right shift all filter coefficients by 1 to reduce the bits required.
  // This extra right shift will be taken care of at the end while rounding
  // the result.
  // Since all filter coefficients are even, this change will not affect the
  // end result.
  assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
                            _mm_set1_epi16((short)0xffff)));

  const __m256i coeffs_1 = _mm256_srai_epi16(filter_coeffs, 1);

  // coeffs 0 1 0 1 0 1 0 1
  coeffs[0] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0200u));
  // coeffs 2 3 2 3 2 3 2 3
  coeffs[1] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0604u));
  // coeffs 4 5 4 5 4 5 4 5
  coeffs[2] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0a08u));
  // coeffs 6 7 6 7 6 7 6 7
  coeffs[3] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0e0cu));
}

static inline void prepare_coeffs_6t_lowbd(
    const InterpFilterParams *const filter_params, const int subpel_q4,
    __m256i *const coeffs /* [4] */) {
  const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
      filter_params, subpel_q4 & SUBPEL_MASK);
  const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
  const __m256i filter_coeffs = _mm256_broadcastsi128_si256(coeffs_8);

  // Right shift all filter coefficients by 1 to reduce the bits required.
  // This extra right shift will be taken care of at the end while rounding
  // the result.
  // Since all filter coefficients are even, this change will not affect the
  // end result.
  assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
                            _mm_set1_epi16((int16_t)0xffff)));

  const __m256i coeffs_1 = _mm256_srai_epi16(filter_coeffs, 1);

  // coeffs 1 2 1 2 1 2 1 2
  coeffs[0] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0402u));
  // coeffs 3 4 3 4 3 4 3 4
  coeffs[1] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0806u));
  // coeffs 5 6 5 6 5 6 5 6
  coeffs[2] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0c0au));
}

static inline void prepare_coeffs_6t(
    const InterpFilterParams *const filter_params, const int subpel_q4,
    __m256i *const coeffs /* [4] */) {
  const int16_t *filter = av1_get_interp_filter_subpel_kernel(
      filter_params, subpel_q4 & SUBPEL_MASK);

  const __m128i coeff_8 = _mm_loadu_si128((__m128i *)(filter + 1));
  const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);

  // coeffs 1 2 1 2 1 2 1 2
  coeffs[0] = _mm256_shuffle_epi32(coeff, 0x00);
  // coeffs 3 4 3 4 3 4 3 4
  coeffs[1] = _mm256_shuffle_epi32(coeff, 0x55);
  // coeffs 5 6 5 6 5 6 5 6
  coeffs[2] = _mm256_shuffle_epi32(coeff, 0xaa);
}

static inline void prepare_coeffs(const InterpFilterParams *const filter_params,
                                  const int subpel_q4,
                                  __m256i *const coeffs /* [4] */) {
  const int16_t *filter = av1_get_interp_filter_subpel_kernel(
      filter_params, subpel_q4 & SUBPEL_MASK);

  const __m128i coeff_8 = _mm_loadu_si128((__m128i *)filter);
  const __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);

  // coeffs 0 1 0 1 0 1 0 1
  coeffs[0] = _mm256_shuffle_epi32(coeff, 0x00);
  // coeffs 2 3 2 3 2 3 2 3
  coeffs[1] = _mm256_shuffle_epi32(coeff, 0x55);
  // coeffs 4 5 4 5 4 5 4 5
  coeffs[2] = _mm256_shuffle_epi32(coeff, 0xaa);
  // coeffs 6 7 6 7 6 7 6 7
  coeffs[3] = _mm256_shuffle_epi32(coeff, 0xff);
}

static inline void prepare_coeffs_12taps(
    const InterpFilterParams *const filter_params, const int subpel_q4,
    __m256i *const coeffs /* [6] */) {
  const int16_t *filter = av1_get_interp_filter_subpel_kernel(
      filter_params, subpel_q4 & SUBPEL_MASK);

  __m128i coeff_8 = _mm_loadu_si128((__m128i *)filter);
  __m256i coeff = _mm256_broadcastsi128_si256(coeff_8);

  // coeffs 0 1 0 1 0 1 0 1
  coeffs[0] = _mm256_shuffle_epi32(coeff, 0x00);
  // coeffs 2 3 2 3 2 3 2 3
  coeffs[1] = _mm256_shuffle_epi32(coeff, 0x55);
  // coeffs 4 5 4 5 4 5 4 5
  coeffs[2] = _mm256_shuffle_epi32(coeff, 0xaa);
  // coeffs 6 7 6 7 6 7 6 7
  coeffs[3] = _mm256_shuffle_epi32(coeff, 0xff);
  // coeffs 8 9 10 11 0 0 0 0
  coeff_8 = _mm_loadl_epi64((__m128i *)(filter + 8));
  coeff = _mm256_broadcastq_epi64(coeff_8);
  coeffs[4] = _mm256_shuffle_epi32(coeff, 0x00);  // coeffs 8 9 8 9 8 9 8 9
  coeffs[5] = _mm256_shuffle_epi32(coeff, 0x55);  // coeffs 10 11 10 11.. 10 11
}

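// Low bit-depth multiply-accumulate helpers: each s[k] holds interleaved
// 8-bit pixel pairs and each coeffs[k] the matching coefficient pair;
// _mm256_maddubs_epi16 multiplies and sums them into 16-bit results.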
static inline __m256i convolve_lowbd(const __m256i *const s,
                                     const __m256i *const coeffs) {
  const __m256i res_01 = _mm256_maddubs_epi16(s[0], coeffs[0]);
  const __m256i res_23 = _mm256_maddubs_epi16(s[1], coeffs[1]);
  const __m256i res_45 = _mm256_maddubs_epi16(s[2], coeffs[2]);
  const __m256i res_67 = _mm256_maddubs_epi16(s[3], coeffs[3]);

  // order: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  const __m256i res = _mm256_add_epi16(_mm256_add_epi16(res_01, res_45),
                                       _mm256_add_epi16(res_23, res_67));

  return res;
}

static inline __m256i convolve_lowbd_6tap(const __m256i *const s,
                                          const __m256i *const coeffs) {
  const __m256i res_01 = _mm256_maddubs_epi16(s[0], coeffs[0]);
  const __m256i res_23 = _mm256_maddubs_epi16(s[1], coeffs[1]);
  const __m256i res_45 = _mm256_maddubs_epi16(s[2], coeffs[2]);

  // order: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  const __m256i res =
      _mm256_add_epi16(_mm256_add_epi16(res_01, res_45), res_23);

  return res;
}

static inline __m256i convolve_lowbd_4tap(const __m256i *const s,
                                          const __m256i *const coeffs) {
  const __m256i res_23 = _mm256_maddubs_epi16(s[0], coeffs[0]);
  const __m256i res_45 = _mm256_maddubs_epi16(s[1], coeffs[1]);

  // order: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  const __m256i res = _mm256_add_epi16(res_45, res_23);

  return res;
}

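// 16-bit multiply-accumulate helpers: each s[k] holds interleaved 16-bit
// sample pairs and each coeffs[k] the matching coefficient pair;
// _mm256_madd_epi16 accumulates the products in 32 bits.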
static inline __m256i convolve_6tap(const __m256i *const s,
                                    const __m256i *const coeffs) {
  const __m256i res_0 = _mm256_madd_epi16(s[0], coeffs[0]);
  const __m256i res_1 = _mm256_madd_epi16(s[1], coeffs[1]);
  const __m256i res_2 = _mm256_madd_epi16(s[2], coeffs[2]);

  const __m256i res = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1), res_2);

  return res;
}

static inline __m256i convolve_12taps(const __m256i *const s,
                                      const __m256i *const coeffs) {
  const __m256i res_0 = _mm256_madd_epi16(s[0], coeffs[0]);
  const __m256i res_1 = _mm256_madd_epi16(s[1], coeffs[1]);
  const __m256i res_2 = _mm256_madd_epi16(s[2], coeffs[2]);
  const __m256i res_3 = _mm256_madd_epi16(s[3], coeffs[3]);
  const __m256i res_4 = _mm256_madd_epi16(s[4], coeffs[4]);
  const __m256i res_5 = _mm256_madd_epi16(s[5], coeffs[5]);

  const __m256i res1 = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1),
                                        _mm256_add_epi32(res_2, res_3));
  const __m256i res = _mm256_add_epi32(_mm256_add_epi32(res_4, res_5), res1);

  return res;
}

static inline __m256i convolve(const __m256i *const s,
                               const __m256i *const coeffs) {
  const __m256i res_0 = _mm256_madd_epi16(s[0], coeffs[0]);
  const __m256i res_1 = _mm256_madd_epi16(s[1], coeffs[1]);
  const __m256i res_2 = _mm256_madd_epi16(s[2], coeffs[2]);
  const __m256i res_3 = _mm256_madd_epi16(s[3], coeffs[3]);

  const __m256i res = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1),
                                       _mm256_add_epi32(res_2, res_3));

  return res;
}

static inline __m256i convolve_4tap(const __m256i *const s,
                                    const __m256i *const coeffs) {
  const __m256i res_1 = _mm256_madd_epi16(s[0], coeffs[0]);
  const __m256i res_2 = _mm256_madd_epi16(s[1], coeffs[1]);

  const __m256i res = _mm256_add_epi32(res_1, res_2);
  return res;
}

static inline __m256i convolve_lowbd_x(const __m256i data,
                                       const __m256i *const coeffs,
                                       const __m256i *const filt) {
  __m256i s[4];

  s[0] = _mm256_shuffle_epi8(data, filt[0]);
  s[1] = _mm256_shuffle_epi8(data, filt[1]);
  s[2] = _mm256_shuffle_epi8(data, filt[2]);
  s[3] = _mm256_shuffle_epi8(data, filt[3]);

  return convolve_lowbd(s, coeffs);
}

static inline __m256i convolve_lowbd_x_6tap(const __m256i data,
                                            const __m256i *const coeffs,
                                            const __m256i *const filt) {
  __m256i s[4];

  s[0] = _mm256_shuffle_epi8(data, filt[0]);
  s[1] = _mm256_shuffle_epi8(data, filt[1]);
  s[2] = _mm256_shuffle_epi8(data, filt[2]);

  return convolve_lowbd_6tap(s, coeffs);
}

static inline __m256i convolve_lowbd_x_4tap(const __m256i data,
                                            const __m256i *const coeffs,
                                            const __m256i *const filt) {
  __m256i s[2];

  s[0] = _mm256_shuffle_epi8(data, filt[0]);
  s[1] = _mm256_shuffle_epi8(data, filt[1]);

  return convolve_lowbd_4tap(s, coeffs);
}

static inline void add_store_aligned_256(CONV_BUF_TYPE *const dst,
                                         const __m256i *const res,
                                         const int do_average) {
  __m256i d;
  if (do_average) {
    d = _mm256_load_si256((__m256i *)dst);
    d = _mm256_add_epi32(d, *res);
    d = _mm256_srai_epi32(d, 1);
  } else {
    d = *res;
  }
  _mm256_store_si256((__m256i *)dst, d);
}

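// Averages the reference samples (data_ref_0) with the new result: a
// weighted average shifted by DIST_PRECISION_BITS when use_dist_wtd_comp_avg
// is set, otherwise a simple (a + b) >> 1 average.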
static inline __m256i comp_avg(const __m256i *const data_ref_0,
                               const __m256i *const res_unsigned,
                               const __m256i *const wt,
                               const int use_dist_wtd_comp_avg) {
  __m256i res;
  if (use_dist_wtd_comp_avg) {
    const __m256i data_lo = _mm256_unpacklo_epi16(*data_ref_0, *res_unsigned);
    const __m256i data_hi = _mm256_unpackhi_epi16(*data_ref_0, *res_unsigned);

    const __m256i wt_res_lo = _mm256_madd_epi16(data_lo, *wt);
    const __m256i wt_res_hi = _mm256_madd_epi16(data_hi, *wt);

    const __m256i res_lo = _mm256_srai_epi32(wt_res_lo, DIST_PRECISION_BITS);
    const __m256i res_hi = _mm256_srai_epi32(wt_res_hi, DIST_PRECISION_BITS);

    res = _mm256_packs_epi32(res_lo, res_hi);
  } else {
    const __m256i wt_res = _mm256_add_epi16(*data_ref_0, *res_unsigned);
    res = _mm256_srai_epi16(wt_res, 1);
  }
  return res;
}

static inline __m256i convolve_rounding(const __m256i *const res_unsigned,
                                        const __m256i *const offset_const,
                                        const __m256i *const round_const,
                                        const int round_shift) {
  const __m256i res_signed = _mm256_sub_epi16(*res_unsigned, *offset_const);
  const __m256i res_round = _mm256_srai_epi16(
      _mm256_add_epi16(res_signed, *round_const), round_shift);
  return res_round;
}

static inline __m256i highbd_comp_avg(const __m256i *const data_ref_0,
                                      const __m256i *const res_unsigned,
                                      const __m256i *const wt0,
                                      const __m256i *const wt1,
                                      const int use_dist_wtd_comp_avg) {
  __m256i res;
  if (use_dist_wtd_comp_avg) {
    const __m256i wt0_res = _mm256_mullo_epi32(*data_ref_0, *wt0);
    const __m256i wt1_res = _mm256_mullo_epi32(*res_unsigned, *wt1);
    const __m256i wt_res = _mm256_add_epi32(wt0_res, wt1_res);
    res = _mm256_srai_epi32(wt_res, DIST_PRECISION_BITS);
  } else {
    const __m256i wt_res = _mm256_add_epi32(*data_ref_0, *res_unsigned);
    res = _mm256_srai_epi32(wt_res, 1);
  }
  return res;
}

static inline __m256i highbd_convolve_rounding(
    const __m256i *const res_unsigned, const __m256i *const offset_const,
    const __m256i *const round_const, const int round_shift) {
  const __m256i res_signed = _mm256_sub_epi32(*res_unsigned, *offset_const);
  const __m256i res_round = _mm256_srai_epi32(
      _mm256_add_epi32(res_signed, *round_const), round_shift);

  return res_round;
}

#endif  // AOM_AOM_DSP_X86_CONVOLVE_AVX2_H_