/*
 *  Copyright (c) 2023 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/txfm_common.h"

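// Build a vector of repeated (a, b) 16-bit pairs, with a in the low half of
// each 32-bit lane; this matches the pairwise operand layout consumed by
// _mm256_madd_epi16.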
#define PAIR256_SET_EPI16(a, b)                                            \
  _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
                   (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a))

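// Load a 16x16 block of coefficients, one row per __m256i register. With
// CONFIG_VP9_HIGHBITDEPTH, tran_low_t is 32 bits wide and each row is packed
// down to 16 int16 values with signed saturation.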
static INLINE void idct_load16x16(const tran_low_t *input, __m256i *in,
                                  int stride) {
  int i;
  // Load 16x16 values
  for (i = 0; i < 16; i++) {
#if CONFIG_VP9_HIGHBITDEPTH
    const __m128i in0 = _mm_loadu_si128((const __m128i *)(input + i * stride));
    const __m128i in1 =
        _mm_loadu_si128((const __m128i *)((input + i * stride) + 4));
    const __m128i in2 =
        _mm_loadu_si128((const __m128i *)((input + i * stride) + 8));
    const __m128i in3 =
        _mm_loadu_si128((const __m128i *)((input + i * stride) + 12));
    const __m128i ls = _mm_packs_epi32(in0, in1);
    const __m128i rs = _mm_packs_epi32(in2, in3);
    in[i] = _mm256_inserti128_si256(_mm256_castsi128_si256(ls), rs, 1);
#else
    in[i] = _mm256_load_si256((const __m256i *)(input + i * stride));
#endif
  }
}

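// Add DCT_CONST_ROUNDING and arithmetic-shift right by DCT_CONST_BITS in each
// 32-bit lane.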
static INLINE __m256i dct_round_shift_avx2(__m256i in) {
  const __m256i t = _mm256_add_epi32(in, _mm256_set1_epi32(DCT_CONST_ROUNDING));
  return _mm256_srai_epi32(t, DCT_CONST_BITS);
}

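// Multiply 16-bit pairs and accumulate adjacent products into 32-bit sums
// (_mm256_madd_epi16), then apply the DCT rounding shift.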
static INLINE __m256i idct_madd_round_shift_avx2(__m256i *in, __m256i *cospi) {
  const __m256i t = _mm256_madd_epi16(*in, *cospi);
  return dct_round_shift_avx2(t);
}

// Calculate the dot product between in0/1 and x and wrap to short.
static INLINE __m256i idct_calc_wraplow_avx2(__m256i *in0, __m256i *in1,
                                             __m256i *x) {
  const __m256i t0 = idct_madd_round_shift_avx2(in0, x);
  const __m256i t1 = idct_madd_round_shift_avx2(in1, x);
  return _mm256_packs_epi32(t0, t1);
}

// Multiply elements by constants and add them together:
//   out0 = dct_round_shift(in0 * c0 - in1 * c1)
//   out1 = dct_round_shift(in0 * c1 + in1 * c0)
static INLINE void butterfly16(__m256i in0, __m256i in1, int c0, int c1,
                               __m256i *out0, __m256i *out1) {
  __m256i cst0 = PAIR256_SET_EPI16(c0, -c1);
  __m256i cst1 = PAIR256_SET_EPI16(c1, c0);
  __m256i lo = _mm256_unpacklo_epi16(in0, in1);
  __m256i hi = _mm256_unpackhi_epi16(in0, in1);
  *out0 = idct_calc_wraplow_avx2(&lo, &hi, &cst0);
  *out1 = idct_calc_wraplow_avx2(&lo, &hi, &cst1);
}

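// Apply the 16-point inverse DCT across in[0..15]; each 16-bit lane carries an
// independent transform, so 16 columns are processed at once.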
static INLINE void idct16_16col(__m256i *in, __m256i *out) {
  __m256i step1[16], step2[16];

  // stage 2
  butterfly16(in[1], in[15], cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
  butterfly16(in[9], in[7], cospi_14_64, cospi_18_64, &step2[9], &step2[14]);
  butterfly16(in[5], in[11], cospi_22_64, cospi_10_64, &step2[10], &step2[13]);
  butterfly16(in[13], in[3], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);

  // stage 3
  butterfly16(in[2], in[14], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  butterfly16(in[10], in[6], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);
  step1[8] = _mm256_add_epi16(step2[8], step2[9]);
  step1[9] = _mm256_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm256_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm256_add_epi16(step2[10], step2[11]);
  step1[12] = _mm256_add_epi16(step2[12], step2[13]);
  step1[13] = _mm256_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm256_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm256_add_epi16(step2[14], step2[15]);

  // stage 4
  butterfly16(in[0], in[8], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
  butterfly16(in[4], in[12], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  butterfly16(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
              &step2[14]);
  butterfly16(step1[10], step1[13], -cospi_8_64, -cospi_24_64, &step2[13],
              &step2[10]);
  step2[5] = _mm256_sub_epi16(step1[4], step1[5]);
  step1[4] = _mm256_add_epi16(step1[4], step1[5]);
  step2[6] = _mm256_sub_epi16(step1[7], step1[6]);
  step1[7] = _mm256_add_epi16(step1[6], step1[7]);
  step2[8] = step1[8];
  step2[11] = step1[11];
  step2[12] = step1[12];
  step2[15] = step1[15];

  // stage 5
  step1[0] = _mm256_add_epi16(step2[0], step2[3]);
  step1[1] = _mm256_add_epi16(step2[1], step2[2]);
  step1[2] = _mm256_sub_epi16(step2[1], step2[2]);
  step1[3] = _mm256_sub_epi16(step2[0], step2[3]);
  butterfly16(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
              &step1[6]);
  step1[8] = _mm256_add_epi16(step2[8], step2[11]);
  step1[9] = _mm256_add_epi16(step2[9], step2[10]);
  step1[10] = _mm256_sub_epi16(step2[9], step2[10]);
  step1[11] = _mm256_sub_epi16(step2[8], step2[11]);
  step1[12] = _mm256_sub_epi16(step2[15], step2[12]);
  step1[13] = _mm256_sub_epi16(step2[14], step2[13]);
  step1[14] = _mm256_add_epi16(step2[14], step2[13]);
  step1[15] = _mm256_add_epi16(step2[15], step2[12]);

  // stage 6
  step2[0] = _mm256_add_epi16(step1[0], step1[7]);
  step2[1] = _mm256_add_epi16(step1[1], step1[6]);
  step2[2] = _mm256_add_epi16(step1[2], step1[5]);
  step2[3] = _mm256_add_epi16(step1[3], step1[4]);
  step2[4] = _mm256_sub_epi16(step1[3], step1[4]);
  step2[5] = _mm256_sub_epi16(step1[2], step1[5]);
  step2[6] = _mm256_sub_epi16(step1[1], step1[6]);
  step2[7] = _mm256_sub_epi16(step1[0], step1[7]);
  butterfly16(step1[13], step1[10], cospi_16_64, cospi_16_64, &step2[10],
              &step2[13]);
  butterfly16(step1[12], step1[11], cospi_16_64, cospi_16_64, &step2[11],
              &step2[12]);

  // stage 7
  out[0] = _mm256_add_epi16(step2[0], step1[15]);
  out[1] = _mm256_add_epi16(step2[1], step1[14]);
  out[2] = _mm256_add_epi16(step2[2], step2[13]);
  out[3] = _mm256_add_epi16(step2[3], step2[12]);
  out[4] = _mm256_add_epi16(step2[4], step2[11]);
  out[5] = _mm256_add_epi16(step2[5], step2[10]);
  out[6] = _mm256_add_epi16(step2[6], step1[9]);
  out[7] = _mm256_add_epi16(step2[7], step1[8]);
  out[8] = _mm256_sub_epi16(step2[7], step1[8]);
  out[9] = _mm256_sub_epi16(step2[6], step1[9]);
  out[10] = _mm256_sub_epi16(step2[5], step2[10]);
  out[11] = _mm256_sub_epi16(step2[4], step2[11]);
  out[12] = _mm256_sub_epi16(step2[3], step2[12]);
  out[13] = _mm256_sub_epi16(step2[2], step2[13]);
  out[14] = _mm256_sub_epi16(step2[1], step1[14]);
  out[15] = _mm256_sub_epi16(step2[0], step1[15]);
}

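// Add one row of 16 residual values to 16 destination pixels and store the
// result, clamped to [0, 255].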
static INLINE void recon_and_store16(uint8_t *dest, __m256i in_x) {
  const __m256i zero = _mm256_setzero_si256();
  __m256i d0 = _mm256_castsi128_si256(_mm_loadu_si128((__m128i *)(dest)));
  d0 = _mm256_permute4x64_epi64(d0, 0xd8);
  d0 = _mm256_unpacklo_epi8(d0, zero);
  d0 = _mm256_add_epi16(in_x, d0);
  d0 = _mm256_packus_epi16(
      d0, _mm256_castsi128_si256(_mm256_extractf128_si256(d0, 1)));

  _mm_storeu_si128((__m128i *)dest, _mm256_castsi256_si128(d0));
}

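// Round one 16-pixel row (add 1 << 5, shift right by 6) and reconstruct it
// into dest.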
static INLINE void write_buffer_16x1(uint8_t *dest, __m256i in) {
  const __m256i final_rounding = _mm256_set1_epi16(1 << 5);
  __m256i out;
  out = _mm256_adds_epi16(in, final_rounding);
  out = _mm256_srai_epi16(out, 6);
  recon_and_store16(dest, out);
}

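// Round and reconstruct 32 rows of 16 pixels, two rows per iteration.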
static INLINE void store_buffer_16x32(__m256i *in, uint8_t *dst, int stride) {
  const __m256i final_rounding = _mm256_set1_epi16(1 << 5);
  int j = 0;
  while (j < 32) {
    in[j] = _mm256_adds_epi16(in[j], final_rounding);
    in[j + 1] = _mm256_adds_epi16(in[j + 1], final_rounding);

    in[j] = _mm256_srai_epi16(in[j], 6);
    in[j + 1] = _mm256_srai_epi16(in[j + 1], 6);

    recon_and_store16(dst, in[j]);
    dst += stride;
    recon_and_store16(dst, in[j + 1]);
    dst += stride;
    j += 2;
  }
}

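// Transpose two 8x8 blocks of 16-bit values at once, one block per 128-bit
// lane of in[0..7].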
static INLINE void transpose2_8x8_avx2(__m256i *in, __m256i *out) {
  int i;
  __m256i t[16], u[16];
  // (1st, 2nd) ==> (lo, hi)
  //   (0, 1)   ==>  (0, 1)
  //   (2, 3)   ==>  (2, 3)
  //   (4, 5)   ==>  (4, 5)
  //   (6, 7)   ==>  (6, 7)
  for (i = 0; i < 4; i++) {
    t[2 * i] = _mm256_unpacklo_epi16(in[2 * i], in[2 * i + 1]);
    t[2 * i + 1] = _mm256_unpackhi_epi16(in[2 * i], in[2 * i + 1]);
  }

  // (1st, 2nd) ==> (lo, hi)
  //   (0, 2)   ==>  (0, 2)
  //   (1, 3)   ==>  (1, 3)
  //   (4, 6)   ==>  (4, 6)
  //   (5, 7)   ==>  (5, 7)
  for (i = 0; i < 2; i++) {
    u[i] = _mm256_unpacklo_epi32(t[i], t[i + 2]);
    u[i + 2] = _mm256_unpackhi_epi32(t[i], t[i + 2]);

    u[i + 4] = _mm256_unpacklo_epi32(t[i + 4], t[i + 6]);
    u[i + 6] = _mm256_unpackhi_epi32(t[i + 4], t[i + 6]);
  }

  // (1st, 2nd) ==> (lo, hi)
  //   (0, 4)   ==>  (0, 1)
  //   (1, 5)   ==>  (4, 5)
  //   (2, 6)   ==>  (2, 3)
  //   (3, 7)   ==>  (6, 7)
  for (i = 0; i < 2; i++) {
    out[2 * i] = _mm256_unpacklo_epi64(u[2 * i], u[2 * i + 4]);
    out[2 * i + 1] = _mm256_unpackhi_epi64(u[2 * i], u[2 * i + 4]);

    out[2 * i + 4] = _mm256_unpacklo_epi64(u[2 * i + 1], u[2 * i + 5]);
    out[2 * i + 5] = _mm256_unpackhi_epi64(u[2 * i + 1], u[2 * i + 5]);
  }
}

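// Transpose a 16x16 block of 16-bit values. The input is copied into t[]
// first, so out may alias in.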
static INLINE void transpose_16bit_16x16_avx2(__m256i *in, __m256i *out) {
  __m256i t[16];

#define LOADL(idx)                                                            \
  t[idx] = _mm256_castsi128_si256(_mm_load_si128((__m128i const *)&in[idx])); \
  t[idx] = _mm256_inserti128_si256(                                           \
      t[idx], _mm_load_si128((__m128i const *)&in[(idx) + 8]), 1);

#define LOADR(idx)                                                           \
  t[8 + (idx)] =                                                             \
      _mm256_castsi128_si256(_mm_load_si128((__m128i const *)&in[idx] + 1)); \
  t[8 + (idx)] = _mm256_inserti128_si256(                                    \
      t[8 + (idx)], _mm_load_si128((__m128i const *)&in[(idx) + 8] + 1), 1);

  // load left 8x16
  LOADL(0)
  LOADL(1)
  LOADL(2)
  LOADL(3)
  LOADL(4)
  LOADL(5)
  LOADL(6)
  LOADL(7)

  // load right 8x16
  LOADR(0)
  LOADR(1)
  LOADR(2)
  LOADR(3)
  LOADR(4)
  LOADR(5)
  LOADR(6)
  LOADR(7)

  // get the top 16x8 result
  transpose2_8x8_avx2(t, out);
  // get the bottom 16x8 result
  transpose2_8x8_avx2(&t[8], &out[8]);
}

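// Full 16x16 inverse DCT: two passes of transpose + idct16_16col, followed by
// final rounding and adding the result to dest.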
void vpx_idct16x16_256_add_avx2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  int i;
  __m256i in[16];

  // Load 16x16 values
  idct_load16x16(input, in, 16);

  transpose_16bit_16x16_avx2(in, in);
  idct16_16col(in, in);

  transpose_16bit_16x16_avx2(in, in);
  idct16_16col(in, in);

  for (i = 0; i < 16; ++i) {
    write_buffer_16x1(dest + i * stride, in[i]);
  }
}

// Only do the addition and subtraction butterfly; size is 16 or 32.
//   out[i] = in[i] + in[size - 1 - i]
//   out[size - 1 - i] = in[i] - in[size - 1 - i]
static INLINE void add_sub_butterfly_avx2(__m256i *in, __m256i *out, int size) {
  int i = 0;
  const int num = size >> 1;
  const int bound = size - 1;
  while (i < num) {
    out[i] = _mm256_add_epi16(in[i], in[bound - i]);
    out[bound - i] = _mm256_sub_epi16(in[i], in[bound - i]);
    i++;
  }
}

// For each 16x32 block __m256i in[32],
// inputs with index 0, 4, 8, 12, 16, 20, 24, 28
// output pixels: 0-7 in __m256i out[32]
static INLINE void idct32_1024_16x32_quarter_1(__m256i *in, __m256i *out) {
  __m256i step1[8], step2[8];

  // stage 3
  butterfly16(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  butterfly16(in[20], in[12], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);

  // stage 4
  butterfly16(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
  butterfly16(in[8], in[24], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  step2[4] = _mm256_add_epi16(step1[4], step1[5]);
  step2[5] = _mm256_sub_epi16(step1[4], step1[5]);
  step2[6] = _mm256_sub_epi16(step1[7], step1[6]);
  step2[7] = _mm256_add_epi16(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm256_add_epi16(step2[0], step2[3]);
  step1[1] = _mm256_add_epi16(step2[1], step2[2]);
  step1[2] = _mm256_sub_epi16(step2[1], step2[2]);
  step1[3] = _mm256_sub_epi16(step2[0], step2[3]);
  step1[4] = step2[4];
  butterfly16(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
              &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm256_add_epi16(step1[0], step1[7]);
  out[1] = _mm256_add_epi16(step1[1], step1[6]);
  out[2] = _mm256_add_epi16(step1[2], step1[5]);
  out[3] = _mm256_add_epi16(step1[3], step1[4]);
  out[4] = _mm256_sub_epi16(step1[3], step1[4]);
  out[5] = _mm256_sub_epi16(step1[2], step1[5]);
  out[6] = _mm256_sub_epi16(step1[1], step1[6]);
  out[7] = _mm256_sub_epi16(step1[0], step1[7]);
}

static INLINE void idct32_16x32_quarter_2_stage_4_to_6(__m256i *step1,
                                                       __m256i *out) {
  __m256i step2[32];

  // stage 4
  step2[8] = step1[8];
  step2[15] = step1[15];
  butterfly16(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
              &step2[14]);
  butterfly16(step1[13], step1[10], -cospi_8_64, cospi_24_64, &step2[10],
              &step2[13]);
  step2[11] = step1[11];
  step2[12] = step1[12];

  // stage 5
  step1[8] = _mm256_add_epi16(step2[8], step2[11]);
  step1[9] = _mm256_add_epi16(step2[9], step2[10]);
  step1[10] = _mm256_sub_epi16(step2[9], step2[10]);
  step1[11] = _mm256_sub_epi16(step2[8], step2[11]);
  step1[12] = _mm256_sub_epi16(step2[15], step2[12]);
  step1[13] = _mm256_sub_epi16(step2[14], step2[13]);
  step1[14] = _mm256_add_epi16(step2[14], step2[13]);
  step1[15] = _mm256_add_epi16(step2[15], step2[12]);

  // stage 6
  out[8] = step1[8];
  out[9] = step1[9];
  butterfly16(step1[13], step1[10], cospi_16_64, cospi_16_64, &out[10],
              &out[13]);
  butterfly16(step1[12], step1[11], cospi_16_64, cospi_16_64, &out[11],
              &out[12]);
  out[14] = step1[14];
  out[15] = step1[15];
}

// For each 16x32 block __m256i in[32],
// inputs with index 2, 6, 10, 14, 18, 22, 26, 30
// output pixels: 8-15 in __m256i out[32]
static INLINE void idct32_1024_16x32_quarter_2(__m256i *in, __m256i *out) {
  __m256i step1[16], step2[16];

  // stage 2
  butterfly16(in[2], in[30], cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
  butterfly16(in[18], in[14], cospi_14_64, cospi_18_64, &step2[9], &step2[14]);
  butterfly16(in[10], in[22], cospi_22_64, cospi_10_64, &step2[10], &step2[13]);
  butterfly16(in[26], in[6], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);

  // stage 3
  step1[8] = _mm256_add_epi16(step2[8], step2[9]);
  step1[9] = _mm256_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm256_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm256_add_epi16(step2[11], step2[10]);
  step1[12] = _mm256_add_epi16(step2[12], step2[13]);
  step1[13] = _mm256_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm256_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm256_add_epi16(step2[15], step2[14]);

  idct32_16x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_16x32_quarter_3_4_stage_4_to_7(__m256i *step1,
                                                         __m256i *out) {
  __m256i step2[32];

  // stage 4
  step2[16] = _mm256_add_epi16(step1[16], step1[19]);
  step2[17] = _mm256_add_epi16(step1[17], step1[18]);
  step2[18] = _mm256_sub_epi16(step1[17], step1[18]);
  step2[19] = _mm256_sub_epi16(step1[16], step1[19]);
  step2[20] = _mm256_sub_epi16(step1[23], step1[20]);
  step2[21] = _mm256_sub_epi16(step1[22], step1[21]);
  step2[22] = _mm256_add_epi16(step1[22], step1[21]);
  step2[23] = _mm256_add_epi16(step1[23], step1[20]);

  step2[24] = _mm256_add_epi16(step1[24], step1[27]);
  step2[25] = _mm256_add_epi16(step1[25], step1[26]);
  step2[26] = _mm256_sub_epi16(step1[25], step1[26]);
  step2[27] = _mm256_sub_epi16(step1[24], step1[27]);
  step2[28] = _mm256_sub_epi16(step1[31], step1[28]);
  step2[29] = _mm256_sub_epi16(step1[30], step1[29]);
  step2[30] = _mm256_add_epi16(step1[29], step1[30]);
  step2[31] = _mm256_add_epi16(step1[28], step1[31]);

  // stage 5
  step1[16] = step2[16];
  step1[17] = step2[17];
  butterfly16(step2[29], step2[18], cospi_24_64, cospi_8_64, &step1[18],
              &step1[29]);
  butterfly16(step2[28], step2[19], cospi_24_64, cospi_8_64, &step1[19],
              &step1[28]);
  butterfly16(step2[27], step2[20], -cospi_8_64, cospi_24_64, &step1[20],
              &step1[27]);
  butterfly16(step2[26], step2[21], -cospi_8_64, cospi_24_64, &step1[21],
              &step1[26]);
  step1[22] = step2[22];
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[25] = step2[25];
  step1[30] = step2[30];
  step1[31] = step2[31];

  // stage 6
  out[16] = _mm256_add_epi16(step1[16], step1[23]);
  out[17] = _mm256_add_epi16(step1[17], step1[22]);
  out[18] = _mm256_add_epi16(step1[18], step1[21]);
  out[19] = _mm256_add_epi16(step1[19], step1[20]);
  step2[20] = _mm256_sub_epi16(step1[19], step1[20]);
  step2[21] = _mm256_sub_epi16(step1[18], step1[21]);
  step2[22] = _mm256_sub_epi16(step1[17], step1[22]);
  step2[23] = _mm256_sub_epi16(step1[16], step1[23]);

  step2[24] = _mm256_sub_epi16(step1[31], step1[24]);
  step2[25] = _mm256_sub_epi16(step1[30], step1[25]);
  step2[26] = _mm256_sub_epi16(step1[29], step1[26]);
  step2[27] = _mm256_sub_epi16(step1[28], step1[27]);
  out[28] = _mm256_add_epi16(step1[27], step1[28]);
  out[29] = _mm256_add_epi16(step1[26], step1[29]);
  out[30] = _mm256_add_epi16(step1[25], step1[30]);
  out[31] = _mm256_add_epi16(step1[24], step1[31]);

  // stage 7
  butterfly16(step2[27], step2[20], cospi_16_64, cospi_16_64, &out[20],
              &out[27]);
  butterfly16(step2[26], step2[21], cospi_16_64, cospi_16_64, &out[21],
              &out[26]);
  butterfly16(step2[25], step2[22], cospi_16_64, cospi_16_64, &out[22],
              &out[25]);
  butterfly16(step2[24], step2[23], cospi_16_64, cospi_16_64, &out[23],
              &out[24]);
}

static INLINE void idct32_1024_16x32_quarter_1_2(__m256i *in, __m256i *out) {
  __m256i temp[16];

  // For each 16x32 block __m256i in[32],
  // inputs with index 0, 4, 8, 12, 16, 20, 24, 28
  // output pixels: 0-7 in __m256i out[32]
  idct32_1024_16x32_quarter_1(in, temp);

  // inputs with index 2, 6, 10, 14, 18, 22, 26, 30
  // output pixels: 8-15 in __m256i out[32]
  idct32_1024_16x32_quarter_2(in, temp);

  // stage 7
  add_sub_butterfly_avx2(temp, out, 16);
}

// For each 16x32 block __m256i in[32],
// inputs with odd index:
// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
// output pixels: 16-23, 24-31 in __m256i out[32]
static INLINE void idct32_1024_16x32_quarter_3_4(__m256i *in, __m256i *out) {
  __m256i step1[32], step2[32];

  // stage 1
  butterfly16(in[1], in[31], cospi_31_64, cospi_1_64, &step1[16], &step1[31]);
  butterfly16(in[17], in[15], cospi_15_64, cospi_17_64, &step1[17], &step1[30]);
  butterfly16(in[9], in[23], cospi_23_64, cospi_9_64, &step1[18], &step1[29]);
  butterfly16(in[25], in[7], cospi_7_64, cospi_25_64, &step1[19], &step1[28]);

  butterfly16(in[5], in[27], cospi_27_64, cospi_5_64, &step1[20], &step1[27]);
  butterfly16(in[21], in[11], cospi_11_64, cospi_21_64, &step1[21], &step1[26]);

  butterfly16(in[13], in[19], cospi_19_64, cospi_13_64, &step1[22], &step1[25]);
  butterfly16(in[29], in[3], cospi_3_64, cospi_29_64, &step1[23], &step1[24]);

  // stage 2
  step2[16] = _mm256_add_epi16(step1[16], step1[17]);
  step2[17] = _mm256_sub_epi16(step1[16], step1[17]);
  step2[18] = _mm256_sub_epi16(step1[19], step1[18]);
  step2[19] = _mm256_add_epi16(step1[19], step1[18]);
  step2[20] = _mm256_add_epi16(step1[20], step1[21]);
  step2[21] = _mm256_sub_epi16(step1[20], step1[21]);
  step2[22] = _mm256_sub_epi16(step1[23], step1[22]);
  step2[23] = _mm256_add_epi16(step1[23], step1[22]);

  step2[24] = _mm256_add_epi16(step1[24], step1[25]);
  step2[25] = _mm256_sub_epi16(step1[24], step1[25]);
  step2[26] = _mm256_sub_epi16(step1[27], step1[26]);
  step2[27] = _mm256_add_epi16(step1[27], step1[26]);
  step2[28] = _mm256_add_epi16(step1[28], step1[29]);
  step2[29] = _mm256_sub_epi16(step1[28], step1[29]);
  step2[30] = _mm256_sub_epi16(step1[31], step1[30]);
  step2[31] = _mm256_add_epi16(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  butterfly16(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
              &step1[30]);
  butterfly16(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
              &step1[29]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  butterfly16(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
              &step1[26]);
  butterfly16(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
              &step1[25]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  idct32_16x32_quarter_3_4_stage_4_to_7(step1, out);
}

static INLINE void idct32_1024_16x32(__m256i *in, __m256i *out) {
  __m256i temp[32];

  // For each 16x32 block __m256i in[32],
  // inputs with index 0, 4, 8, 12, 16, 20, 24, 28
  // output pixels: 0-7 in __m256i out[32]
  // AND
  // inputs with index 2, 6, 10, 14, 18, 22, 26, 30
  // output pixels: 8-15 in __m256i out[32]
  idct32_1024_16x32_quarter_1_2(in, temp);

  // For each 16x32 block __m256i in[32],
  // inputs with odd index:
  // 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
  // output pixels: 16-23, 24-31 in __m256i out[32]
  idct32_1024_16x32_quarter_3_4(in, temp);

  // final stage
  add_sub_butterfly_avx2(temp, out, 32);
}

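// Full 32x32 inverse DCT: a row pass over the top and bottom 16x32 halves
// (results kept in l[] and r[]), then a column pass 16 columns at a time, with
// the reconstructed pixels added to dest.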
void vpx_idct32x32_1024_add_avx2(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  __m256i l[32], r[32], out[32], *in;
  int i;

  in = l;

  for (i = 0; i < 2; i++) {
    idct_load16x16(input, in, 32);
    transpose_16bit_16x16_avx2(in, in);

    idct_load16x16(input + 16, in + 16, 32);
    transpose_16bit_16x16_avx2(in + 16, in + 16);
    idct32_1024_16x32(in, in);

    in = r;
    input += 32 << 4;
  }

  for (i = 0; i < 32; i += 16) {
    transpose_16bit_16x16_avx2(l + i, out);
    transpose_16bit_16x16_avx2(r + i, out + 16);
    idct32_1024_16x32(out, out);

    store_buffer_16x32(out, dest, stride);
    dest += 16;
  }
}

// Case when only the upper-left 16x16 block has non-zero coefficients.
void vpx_idct32x32_135_add_avx2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  __m256i in[32], io[32], out[32];
  int i;

  for (i = 16; i < 32; i++) {
    in[i] = _mm256_setzero_si256();
  }

  // rows
  idct_load16x16(input, in, 32);
  transpose_16bit_16x16_avx2(in, in);
  idct32_1024_16x32(in, io);

  // columns
  for (i = 0; i < 32; i += 16) {
    transpose_16bit_16x16_avx2(io + i, in);
    idct32_1024_16x32(in, out);

    store_buffer_16x32(out, dest, stride);
    dest += 16;
  }
}