xref: /aosp_15_r20/external/libopenapv/src/avx/oapv_tq_avx.c (revision abb65b4b03b69e1d508d4d9a44dcf199df16e7c3)
/*
 * Copyright (c) 2022 Samsung Electronics Co., Ltd.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * - Neither the name of the copyright owner, nor the names of its contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "oapv_def.h"
#include "oapv_tq_avx.h"

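// Fallback definitions for toolchains whose <immintrin.h> does not provide
// the composite _mm256_set_m128i / _mm256_loadu2_m128i helpers (e.g. older
// GCC releases): build the 256-bit value by casting the low 128-bit half and
// inserting the high half.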
#ifndef _mm256_set_m128i
#define _mm256_set_m128i(/* __m128i */ hi, /* __m128i */ lo) \
    _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 0x1)
#endif // !_mm256_set_m128i

#ifndef _mm256_loadu2_m128i
#define _mm256_loadu2_m128i(/* __m128i const* */ hiaddr, \
                            /* __m128i const* */ loaddr) \
    _mm256_set_m128i(_mm_loadu_si128(hiaddr), _mm_loadu_si128(loaddr))
#endif // !_mm256_loadu2_m128i

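// Forward transform kernel for an 8x8 block of 16-bit residuals. coeff[0..7]
// below are the 8-point basis rows (values 64, +/-84, +/-35, +/-89, +/-75,
// +/-50, +/-18) replicated across both 128-bit lanes; CALCU_2x8 and
// CALCU_2x8_ADD_SHIFT (defined in oapv_tq_avx.h) are assumed to perform the
// madd-based dot products plus the rounding add and right shift. A rough
// scalar sketch of the result (not part of the build, c[][] hypothetical):
//
//   for (int n = 0; n < 8; n++)        // input line
//       for (int k = 0; k < 8; k++)    // basis row
//           dst[k * line + n] =
//               (s16)((c[k][0] * src[n * 8 + 0] + ... + c[k][7] * src[n * 8 + 7]
//                      + (1 << (shift - 1))) >> shift);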
static void oapv_tx_part_avx(s16 *src, s16 *dst, int shift, int line)
{
    __m256i v0, v1, v2, v3, v4, v5, v6, v7;
    __m256i d0, d1, d2, d3;
    __m256i coeff[8];
    coeff[0] = _mm256_set1_epi16(64);
    coeff[1] = _mm256_set_epi16(64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64, 64, -64, -64, 64);
    coeff[2] = _mm256_set_epi16(84, 35, -35, -84, -84, -35, 35, 84, 84, 35, -35, -84, -84, -35, 35, 84);
    coeff[3] = _mm256_set_epi16(35, -84, 84, -35, -35, 84, -84, 35, 35, -84, 84, -35, -35, 84, -84, 35);
    coeff[4] = _mm256_set_epi16(-89, -75, -50, -18, 18, 50, 75, 89, -89, -75, -50, -18, 18, 50, 75, 89);
    coeff[5] = _mm256_set_epi16(-75, 18, 89, 50, -50, -89, -18, 75, -75, 18, 89, 50, -50, -89, -18, 75);
    coeff[6] = _mm256_set_epi16(-50, 89, -18, -75, 75, 18, -89, 50, -50, 89, -18, -75, 75, 18, -89, 50);
    coeff[7] = _mm256_set_epi16(-18, 50, -75, 89, -89, 75, -50, 18, -18, 50, -75, 89, -89, 75, -50, 18);
    __m256i add = _mm256_set1_epi32(1 << (shift - 1));

    __m256i s0, s1, s2, s3;

    s0 = _mm256_loadu2_m128i((const __m128i *)&src[32], (const __m128i *)&src[0]);
    s1 = _mm256_loadu2_m128i((const __m128i *)&src[40], (const __m128i *)&src[8]);
    s2 = _mm256_loadu2_m128i((const __m128i *)&src[48], (const __m128i *)&src[16]);
    s3 = _mm256_loadu2_m128i((const __m128i *)&src[56], (const __m128i *)&src[24]);

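    // Each 256-bit register now holds two rows of the 8x8 s16 block: row n in
    // the low 128-bit lane and row n + 4 in the high lane (the block is laid
    // out as 8 consecutive s16 per row, so &src[32] is row 4).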
    CALCU_2x8(coeff[0], coeff[4], d0, d1);
    CALCU_2x8(coeff[2], coeff[5], d2, d3);
    CALCU_2x8_ADD_SHIFT(d0, d1, d2, d3, add, shift);

    d0 = _mm256_packs_epi32(d0, d1);
    d1 = _mm256_packs_epi32(d2, d3);

    d0 = _mm256_permute4x64_epi64(d0, 0xd8);
    d1 = _mm256_permute4x64_epi64(d1, 0xd8);

    _mm_store_si128((__m128i *)dst, _mm256_castsi256_si128(d0));
    _mm_store_si128((__m128i *)(dst + 1 * line), _mm256_extracti128_si256(d0, 1));
    _mm_store_si128((__m128i *)(dst + 2 * line), _mm256_castsi256_si128(d1));
    _mm_store_si128((__m128i *)(dst + 3 * line), _mm256_extracti128_si256(d1, 1));

    CALCU_2x8(coeff[1], coeff[6], d0, d1);
    CALCU_2x8(coeff[3], coeff[7], d2, d3);
    CALCU_2x8_ADD_SHIFT(d0, d1, d2, d3, add, shift);

    d0 = _mm256_packs_epi32(d0, d1);
    d1 = _mm256_packs_epi32(d2, d3);

    d0 = _mm256_permute4x64_epi64(d0, 0xd8);
    d1 = _mm256_permute4x64_epi64(d1, 0xd8);

    _mm_store_si128((__m128i *)(dst + 4 * line), _mm256_castsi256_si128(d0));
    _mm_store_si128((__m128i *)(dst + 5 * line), _mm256_extracti128_si256(d0, 1));
    _mm_store_si128((__m128i *)(dst + 6 * line), _mm256_castsi256_si128(d1));
    _mm_store_si128((__m128i *)(dst + 7 * line), _mm256_extracti128_si256(d1, 1));
}

const oapv_fn_tx_t oapv_tbl_fn_txb_avx[2] =
{
    oapv_tx_part_avx,
    NULL
};

///////////////////////////////////////////////////////////////////////////////
// end of encoder code
// ENABLE_ENCODER
///////////////////////////////////////////////////////////////////////////////

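// TRANSPOSE_8x4_16BIT: interleave-based transpose. It takes the low four
// 16-bit values of eight row registers (I0..I7) and produces four registers
// (O0..O3), each holding one column across all eight rows; the caller must
// declare the tr0_* / tr1_* __m128i temporaries it uses.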
#define TRANSPOSE_8x4_16BIT(I0, I1, I2, I3, I4, I5, I6, I7, O0, O1, O2, O3) \
    tr0_0 = _mm_unpacklo_epi16(I0, I1); \
    tr0_1 = _mm_unpacklo_epi16(I2, I3); \
    tr0_2 = _mm_unpacklo_epi16(I4, I5); \
    tr0_3 = _mm_unpacklo_epi16(I6, I7); \
    tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \
    tr1_1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
    tr1_2 = _mm_unpacklo_epi32(tr0_2, tr0_3); \
    tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \
    O0 = _mm_unpacklo_epi64(tr1_0, tr1_2); \
    O1 = _mm_unpackhi_epi64(tr1_0, tr1_2); \
    O2 = _mm_unpacklo_epi64(tr1_1, tr1_3); \
    O3 = _mm_unpackhi_epi64(tr1_1, tr1_3);

// transpose 8x8: 8 x 8(32bit) --> 8 x 8(16bit)
// O0: row0, row4
// O1: row1, row5
// O2: row2, row6
// O3: row3, row7
#define TRANSPOSE_8x8_32BIT_16BIT(I0, I1, I2, I3, I4, I5, I6, I7, O0, O1, O2, O3) \
    I0 = _mm256_packs_epi32(I0, I4);    \
    I1 = _mm256_packs_epi32(I1, I5);    \
    I2 = _mm256_packs_epi32(I2, I6);    \
    I3 = _mm256_packs_epi32(I3, I7);    \
    I4 = _mm256_unpacklo_epi16(I0, I2); \
    I5 = _mm256_unpackhi_epi16(I0, I2); \
    I6 = _mm256_unpacklo_epi16(I1, I3); \
    I7 = _mm256_unpackhi_epi16(I1, I3); \
    I0 = _mm256_unpacklo_epi16(I4, I6); \
    I1 = _mm256_unpackhi_epi16(I4, I6); \
    I2 = _mm256_unpacklo_epi16(I5, I7); \
    I3 = _mm256_unpackhi_epi16(I5, I7); \
    O0 = _mm256_unpacklo_epi64(I0, I2); \
    O1 = _mm256_unpackhi_epi64(I0, I2); \
    O2 = _mm256_unpacklo_epi64(I1, I3); \
    O3 = _mm256_unpackhi_epi64(I1, I3)

// transpose 16x8: 16 x 8(32bit) --> 8 x 16(16bit)
#define TRANSPOSE_16x8_32BIT_16BIT(I00, I01, I02, I03, I04, I05, I06, I07, I08, I09, I10, I11, I12, I13, I14, I15, O0, O1, O2, O3, O4, O5, O6, O7)\
    TRANSPOSE_8x8_32BIT_16BIT(I00, I01, I02, I03, I04, I05, I06, I07, I04, I05, I06, I07); \
    TRANSPOSE_8x8_32BIT_16BIT(I08, I09, I10, I11, I12, I13, I14, I15, I12, I13, I14, I15); \
    O0 = _mm256_insertf128_si256(I04, _mm256_castsi256_si128(I12), 1);      \
    O1 = _mm256_insertf128_si256(I05, _mm256_castsi256_si128(I13), 1);      \
    O2 = _mm256_insertf128_si256(I06, _mm256_castsi256_si128(I14), 1);      \
    O3 = _mm256_insertf128_si256(I07, _mm256_castsi256_si128(I15), 1);      \
    O4 = _mm256_insertf128_si256(I12, _mm256_extracti128_si256(I04, 1), 0); \
    O5 = _mm256_insertf128_si256(I13, _mm256_extracti128_si256(I05, 1), 0); \
    O6 = _mm256_insertf128_si256(I14, _mm256_extracti128_si256(I06, 1), 0); \
    O7 = _mm256_insertf128_si256(I15, _mm256_extracti128_si256(I07, 1), 0)

#define set_vals(a,b) b, a, b, a, b, a, b, a, b, a, b, a, b, a, b, a
#define set_vals1(a,b) b, a, b, a, b, a, b, a

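// Inverse transform pass using an even/odd (butterfly) decomposition of the
// 8-point transform: odd input rows (1, 3, 5, 7) feed the o0..o3 partial sums
// through the coeff_p89_p75 .. coeff_p75_n89 pairs, even rows (0, 2, 4, 6)
// build ee0/ee1 and eo0/eo1, and the outputs are formed as (e[k] + o[k]) and
// (e[k] - o[k]) with rounding and shift. The results are then transposed back
// to 16-bit and written 64 samples per loop iteration, so the same kernel can
// be run once per dimension (see oapv_itx_avx below).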
static void oapv_itx_part_avx(s16* src, s16* dst, int shift, int line)
{
    const __m256i coeff_p89_p75 = _mm256_setr_epi16(89, 75, 89, 75, 89, 75, 89, 75, 89, 75, 89, 75, 89, 75, 89, 75); // 89, 75
    const __m256i coeff_p50_p18 = _mm256_setr_epi16(50, 18, 50, 18, 50, 18, 50, 18, 50, 18, 50, 18, 50, 18, 50, 18); // 50, 18
    const __m256i coeff_p75_n18 = _mm256_setr_epi16(75, -18, 75, -18, 75, -18, 75, -18, 75, -18, 75, -18, 75, -18, 75, -18); // 75, -18
    const __m256i coeff_n89_n50 = _mm256_setr_epi16(-89, -50, -89, -50, -89, -50, -89, -50, -89, -50, -89, -50, -89, -50, -89, -50); // -89, -50
    const __m256i coeff_p50_n89 = _mm256_setr_epi16(50, -89, 50, -89, 50, -89, 50, -89, 50, -89, 50, -89, 50, -89, 50, -89); // 50, -89
    const __m256i coeff_p18_p75 = _mm256_setr_epi16(18, 75, 18, 75, 18, 75, 18, 75, 18, 75, 18, 75, 18, 75, 18, 75); // 18, 75
    const __m256i coeff_p18_n50 = _mm256_setr_epi16(18, -50, 18, -50, 18, -50, 18, -50, 18, -50, 18, -50, 18, -50, 18, -50); // 18, -50
    const __m256i coeff_p75_n89 = _mm256_setr_epi16(75, -89, 75, -89, 75, -89, 75, -89, 75, -89, 75, -89, 75, -89, 75, -89); // 75, -89
    const __m256i coeff_p64_p64 = _mm256_setr_epi16(64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64); // 64, 64
    const __m256i coeff_p64_n64 = _mm256_setr_epi16(64, -64, 64, -64, 64, -64, 64, -64, 64, -64, 64, -64, 64, -64, 64, -64); // 64, -64
    const __m256i coeff_p84_n35 = _mm256_setr_epi16(84, 35, 84, 35, 84, 35, 84, 35, 84, 35, 84, 35, 84, 35, 84, 35); // 84, 35
    const __m256i coeff_p35_n84 = _mm256_setr_epi16(35, -84, 35, -84, 35, -84, 35, -84, 35, -84, 35, -84, 35, -84, 35, -84); // 35, -84

    __m128i s0, s1, s2, s3, s4, s5, s6, s7;
    __m128i ss0, ss1, ss2, ss3;
    __m256i e0, e1, e2, e3, o0, o1, o2, o3, ee0, ee1, eo0, eo1;
    __m256i t0, t1, t2, t3;
    __m256i d0, d1, d2, d3, d4, d5, d6, d7;
    __m256i offset = _mm256_set1_epi32(1 << (shift - 1));
    int j;
    int i_src = line;
    int i_src2 = line << 1;
    int i_src3 = i_src + i_src2;
    int i_src4 = i_src << 2;
    int i_src5 = i_src2 + i_src3;
    int i_src6 = i_src3 << 1;
    int i_src7 = i_src3 + i_src4;
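    // i_srcN equals N * line, so src + i_srcN + j addresses transform row N;
    // each pass of the loop below handles 8 columns (j) of all 8 rows.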
    for (j = 0; j < line; j += 8)
    {
        // O[0] -- O[3]
        s1 = _mm_loadu_si128((__m128i*)(src + i_src + j));
        s3 = _mm_loadu_si128((__m128i*)(src + i_src3 + j));
        s5 = _mm_loadu_si128((__m128i*)(src + i_src5 + j));
        s7 = _mm_loadu_si128((__m128i*)(src + i_src7 + j));

        ss0 = _mm_unpacklo_epi16(s1, s3);
        ss1 = _mm_unpackhi_epi16(s1, s3);
        ss2 = _mm_unpacklo_epi16(s5, s7);
        ss3 = _mm_unpackhi_epi16(s5, s7);

        e0 = _mm256_set_m128i(ss1, ss0);
        e1 = _mm256_set_m128i(ss3, ss2);

        t0 = _mm256_madd_epi16(e0, coeff_p89_p75);
        t1 = _mm256_madd_epi16(e1, coeff_p50_p18);
        t2 = _mm256_madd_epi16(e0, coeff_p75_n18);
        t3 = _mm256_madd_epi16(e1, coeff_n89_n50);
        o0 = _mm256_add_epi32(t0, t1);
        o1 = _mm256_add_epi32(t2, t3);

        t0 = _mm256_madd_epi16(e0, coeff_p50_n89);
        t1 = _mm256_madd_epi16(e1, coeff_p18_p75);
        t2 = _mm256_madd_epi16(e0, coeff_p18_n50);
        t3 = _mm256_madd_epi16(e1, coeff_p75_n89);

        o2 = _mm256_add_epi32(t0, t1);
        o3 = _mm256_add_epi32(t2, t3);

        // E[0] - E[3]
        s0 = _mm_loadu_si128((__m128i*)(src + j));
        s2 = _mm_loadu_si128((__m128i*)(src + i_src2 + j));
        s4 = _mm_loadu_si128((__m128i*)(src + i_src4 + j));
        s6 = _mm_loadu_si128((__m128i*)(src + i_src6 + j));

        ss0 = _mm_unpacklo_epi16(s0, s4);
        ss1 = _mm_unpackhi_epi16(s0, s4);
        ss2 = _mm_unpacklo_epi16(s2, s6);
        ss3 = _mm_unpackhi_epi16(s2, s6);

        e0 = _mm256_set_m128i(ss1, ss0);
        e1 = _mm256_set_m128i(ss3, ss2);

        ee0 = _mm256_madd_epi16(e0, coeff_p64_p64);
        ee1 = _mm256_madd_epi16(e0, coeff_p64_n64);
        eo0 = _mm256_madd_epi16(e1, coeff_p84_n35);
        eo1 = _mm256_madd_epi16(e1, coeff_p35_n84);

        e0 = _mm256_add_epi32(ee0, eo0);
        e3 = _mm256_sub_epi32(ee0, eo0);
        e1 = _mm256_add_epi32(ee1, eo1);
        e2 = _mm256_sub_epi32(ee1, eo1);

        e0 = _mm256_add_epi32(e0, offset);
        e3 = _mm256_add_epi32(e3, offset);
        e1 = _mm256_add_epi32(e1, offset);
        e2 = _mm256_add_epi32(e2, offset);

        d0 = _mm256_add_epi32(e0, o0);
        d7 = _mm256_sub_epi32(e0, o0);
        d1 = _mm256_add_epi32(e1, o1);
        d6 = _mm256_sub_epi32(e1, o1);
        d2 = _mm256_add_epi32(e2, o2);
        d5 = _mm256_sub_epi32(e2, o2);
        d3 = _mm256_add_epi32(e3, o3);
        d4 = _mm256_sub_epi32(e3, o3);

        d0 = _mm256_srai_epi32(d0, shift);
        d7 = _mm256_srai_epi32(d7, shift);
        d1 = _mm256_srai_epi32(d1, shift);
        d6 = _mm256_srai_epi32(d6, shift);
        d2 = _mm256_srai_epi32(d2, shift);
        d5 = _mm256_srai_epi32(d5, shift);
        d3 = _mm256_srai_epi32(d3, shift);
        d4 = _mm256_srai_epi32(d4, shift);

        // transpose 8x8 : 8 x 8(32bit) --> 4 x 16(16bit)
        TRANSPOSE_8x8_32BIT_16BIT(d0, d1, d2, d3, d4, d5, d6, d7, d4, d5, d6, d7);
        d0 = _mm256_insertf128_si256(d4, _mm256_castsi256_si128(d5), 1);
        d1 = _mm256_insertf128_si256(d6, _mm256_castsi256_si128(d7), 1);
        d2 = _mm256_insertf128_si256(d5, _mm256_extracti128_si256(d4, 1), 0);
        d3 = _mm256_insertf128_si256(d7, _mm256_extracti128_si256(d6, 1), 0);
        // store line x 8
        _mm256_storeu_si256((__m256i*)dst, d0);
        _mm256_storeu_si256((__m256i*)(dst + 16), d1);
        _mm256_storeu_si256((__m256i*)(dst + 32), d2);
        _mm256_storeu_si256((__m256i*)(dst + 48), d3);
        dst += 64;
    }
}

const oapv_fn_itx_part_t oapv_tbl_fn_itx_part_avx[2] =
{
    oapv_itx_part_avx,
    NULL
};

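// Full 2-D inverse transform: the shared 1-D kernel above is run twice, first
// from src into a local aligned buffer with shift1, then back into src with
// shift2, so the finished samples end up in place in src.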
static void oapv_itx_avx(s16* src, int shift1, int shift2, int line)
{
    // To Do: Merge 2 passes and optimize AVX further
    ALIGNED_16(s16 dst[OAPV_BLK_D]);
    oapv_itx_part_avx(src, dst, shift1, line);
    oapv_itx_part_avx(dst, src, shift2, line);
}

const oapv_fn_itx_t oapv_tbl_fn_itx_avx[2] =
{
    oapv_itx_avx,
    NULL
};

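// Helper for the quantizer: sign-extend four 32-bit lanes of a and b to
// 64 bits, multiply them pairwise and add the rounding offset.
// _mm256_mul_epi32 uses only the low 32 bits of each 64-bit lane, which after
// _mm256_cvtepi32_epi64 are exactly the original values, so each lane holds
// the full 64-bit product a[i] * b[i] + offset without 32-bit overflow.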
__m256i mul_128i_to_256i_and_add(__m256i offset_vector, __m128i a, __m128i b)
{
    __m256i a_64 = _mm256_cvtepi32_epi64(a);
    __m256i b_64 = _mm256_cvtepi32_epi64(b);
    __m256i result = _mm256_mul_epi32(a_64, b_64);
    result = _mm256_add_epi64(result, offset_vector);
    return result;
}

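// Forward quantization. In scalar terms each coefficient becomes roughly
// (a sketch only; the SIMD path below repacks the wide products to s16):
//
//   lev = (abs(coef[i]) * (s64)q_matrix[i] + offset) >> shift;
//   coef[i] = (coef[i] < 0) ? (s16)-lev : (s16)lev;
//
// with shift = QUANT_SHIFT + tr_shift + qp / 6 and
// offset = (s64)deadzone_offset << (shift - 9), as computed below.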
static int oapv_quant_avx(s16* coef, u8 qp, int q_matrix[OAPV_BLK_D], int log2_w, int log2_h, int bit_depth, int deadzone_offset)
{
    s64 offset;
    int shift;
    int tr_shift;

    int log2_size = (log2_w + log2_h) >> 1;
    tr_shift = MAX_TX_DYNAMIC_RANGE - bit_depth - log2_size;
    shift = QUANT_SHIFT + tr_shift + (qp / 6);
    offset = (s64)deadzone_offset << (shift - 9);
    __m256i offset_vector = _mm256_set1_epi64x(offset);

    int pixels = (1 << (log2_w + log2_h));
    int i;
    __m256i shuffle0 = _mm256_setr_epi32(1, 3, 5, 7, 0, 2, 4, 6);
    __m256i shuffle1 = _mm256_setr_epi8(
        0, 1, 4, 5, 8, 9, 12, 13,
        -128, -128, -128, -128, -128, -128, -128, -128,
        -128, -128, -128, -128, -128, -128, -128, -128,
        -128, -128, -128, -128, -128, -128, -128, -128);
    __m256i shuffle2 = _mm256_setr_epi8(
        -128, -128, -128, -128, -128, -128, -128, -128,
        0, 1, 4, 5, 8, 9, 12, 13,
        -128, -128, -128, -128, -128, -128, -128, -128,
        -128, -128, -128, -128, -128, -128, -128, -128);

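    // The three shuffles repack eight 64-bit quantized levels into eight s16
    // values: shuffle0 gathers the two groups of four 32-bit levels into the
    // two 128-bit halves, then shuffle1/shuffle2 move the low 16 bits of each
    // 32-bit lane into bytes 0-7 and 8-15 respectively (an index of -128
    // makes _mm256_shuffle_epi8 write a zero byte).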
    for (i = 0; i < pixels; i += 8)
    {
        // Load first row
        __m256i quant_matrix = _mm256_lddqu_si256((__m256i*)(q_matrix + i));
        __m128i coef_row = _mm_lddqu_si128((__m128i*)(coef + i));

        // Extract sign
        __m256i coef_row_cast = _mm256_castsi128_si256(coef_row);
        __m256i sign_mask = _mm256_srai_epi16(coef_row_cast, 15);

        // Convert to 32 bits and take abs()
        __m256i coef_row_ext = _mm256_cvtepi16_epi32(coef_row);
        __m256i coef_row_abs = _mm256_abs_epi32(coef_row_ext);

        // Multiply coeff with quant values, add offset to result and shift
        __m256i lev1_low = mul_128i_to_256i_and_add(offset_vector, _mm256_castsi256_si128(coef_row_abs), _mm256_castsi256_si128(quant_matrix));
        __m256i lev1_high = mul_128i_to_256i_and_add(offset_vector, _mm256_extracti128_si256(coef_row_abs, 1), _mm256_extracti128_si256(quant_matrix, 1));
        __m256i lev2_low = _mm256_srli_epi64(lev1_low, shift);
        __m256i lev2_high = _mm256_srli_epi64(lev1_high, shift);

        // First level of combination
        lev2_low = _mm256_slli_epi64(lev2_low, 32);
        __m256i combined = _mm256_or_si256(lev2_low, lev2_high);

        // Second level of combination
        __m256i levx = _mm256_permutevar8x32_epi32(combined, shuffle0);
        __m128i levx_low = _mm256_castsi256_si128(levx);
        __m256i levx_low_ext = _mm256_castsi128_si256(levx_low);
        levx_low_ext = _mm256_shuffle_epi8(levx_low_ext, shuffle1);
        __m128i levx_high = _mm256_extracti128_si256(levx, 1);
        __m256i levx_high_ext = _mm256_castsi128_si256(levx_high);
        levx_high_ext = _mm256_shuffle_epi8(levx_high_ext, shuffle2);
        levx = _mm256_or_si256(levx_high_ext, levx_low_ext);

        // Apply sign
        levx = _mm256_sub_epi16(_mm256_xor_si256(levx, sign_mask), sign_mask);

        // Clip and store in coef
        __m128i lev4 = _mm256_castsi256_si128(levx);
        __m128i lev5 = _mm_max_epi16(lev4, _mm_set1_epi16(-32768));
        __m128i lev6 = _mm_min_epi16(lev5, _mm_set1_epi16(32767));
        _mm_storeu_si128((__m128i*)(coef + i), lev6);
    }
    return OAPV_OK;
}

const oapv_fn_quant_t oapv_tbl_fn_quant_avx[2] =
{
    oapv_quant_avx,
    NULL
};

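// Dequantization. Per coefficient this is roughly (sketch only; the vector
// path widens to 32 bits, repacks through the byte shuffle and clamps with
// min/max):
//
//   coef[i] = (shift > 0)
//       ? (coef[i] * q_matrix[i] + (1 << (shift - 1))) >> shift
//       : (coef[i] * q_matrix[i]) << -shift;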
static void oapv_dquant_avx(s16 *coef, s16 q_matrix[OAPV_BLK_D], int log2_w, int log2_h, s8 shift)
{
    int i;
    int pixels = (1 << (log2_w + log2_h));
    __m256i shuffle = _mm256_setr_epi8(
        0, 1, 4, 5, 8, 9, 12, 13,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        0, 1, 4, 5, 8, 9, 12, 13);
    if (shift > 0)
    {
        s32 offset = (1 << (shift - 1));
        __m256i offset_1 = _mm256_set1_epi32(offset);
        for (i = 0; i < pixels; i += 8)
        {
            __m256i cur_q_matrix = _mm256_cvtepi16_epi32(_mm_loadu_si128((__m128i*)(q_matrix + i)));
            __m256i coef_8_val_act = _mm256_cvtepi16_epi32(_mm_loadu_si128((__m128i*)(coef + i)));

            __m256i lev1 = _mm256_mullo_epi32(coef_8_val_act, cur_q_matrix);
            __m256i lev2 = _mm256_add_epi32(lev1, offset_1);
            __m256i lev3 = _mm256_srai_epi32(lev2, shift);

            lev3 = _mm256_shuffle_epi8(lev3, shuffle);
            __m128i low = _mm256_castsi256_si128(lev3);
            __m128i high = _mm256_extracti128_si256(lev3, 1);
            __m128i lev4 = _mm_or_si128(low, high);

            __m128i lev5 = _mm_max_epi16(lev4, _mm_set1_epi16(-32768));
            __m128i lev6 = _mm_min_epi16(lev5, _mm_set1_epi16(32767));

            _mm_storeu_si128((__m128i *)(coef + i), lev6);
        }
    }
    else
    {
        int left_shift = -shift;
        for (i = 0; i < pixels; i += 8)
        {
            __m256i cur_q_matrix = _mm256_cvtepi16_epi32(_mm_loadu_si128((__m128i*)(q_matrix + i)));
            __m256i coef_8_val_act = _mm256_cvtepi16_epi32(_mm_loadu_si128((__m128i*)(coef + i)));

            __m256i lev1 = _mm256_mullo_epi32(coef_8_val_act, cur_q_matrix);
            __m256i lev3 = _mm256_slli_epi32(lev1, left_shift);

            lev3 = _mm256_shuffle_epi8(lev3, shuffle);
            __m128i low = _mm256_castsi256_si128(lev3);
            __m128i high = _mm256_extracti128_si256(lev3, 1);
            __m128i lev4 = _mm_or_si128(low, high);

            __m128i lev5 = _mm_max_epi16(lev4, _mm_set1_epi16(-32768));
            __m128i lev6 = _mm_min_epi16(lev5, _mm_set1_epi16(32767));

            _mm_storeu_si128((__m128i *)(coef + i), lev6);
        }
    }
}

const oapv_fn_dquant_t oapv_tbl_fn_dquant_avx[2] =
{
    oapv_dquant_avx,
    NULL,
};

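// Inverse-transform adjustment: add a scaled correction from the
// oapv_itrans_diff table to all 64 samples of the block, i.e. per sample
//
//   dst[j] = src[j] + ((oapv_itrans_diff[itrans_diff_idx][j] * diff_step
//                       + (1 << (shift - 1))) >> shift);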
void oapv_adjust_itrans_avx(int* src, int* dst, int itrans_diff_idx, int diff_step, int shift)
{
    __m256i v0 = _mm256_set1_epi32(diff_step);
    __m256i v1 = _mm256_set1_epi32(1 << (shift - 1));
    __m256i s0, s1;

    for (int j = 0; j < 64; j += 8) {
        s0 = _mm256_loadu_si256((const __m256i*)(src + j));
        s1 = _mm256_loadu_si256((const __m256i*)(oapv_itrans_diff[itrans_diff_idx] + j));
        s1 = _mm256_mullo_epi32(s1, v0);
        s1 = _mm256_add_epi32(s1, v1);
        s1 = _mm256_srai_epi32(s1, shift);
        s1 = _mm256_add_epi32(s0, s1);
        _mm256_storeu_si256((__m256i*)(dst + j), s1);
    }
}

const oapv_fn_itx_adj_t oapv_tbl_fn_itx_adj_avx[2] =
{
    oapv_adjust_itrans_avx,
    NULL,
};