/******************************************************************************
 *
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *****************************************************************************
 * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
 */
/**
 *******************************************************************************
 * @file
 *  isvcd_iquant_itrans_residual_recon_sse42.c
 *
 * @brief
 *  Contains function definitions for iquant_itrans_residual_recon
 *
 * @author
 *  Kishore
 *
 * @par List of Functions:
 *  - isvcd_iquant_itrans_residual_recon_4x4_sse42()
 *  - isvcd_iquant_itrans_residual_recon_8x8_sse42()
 *  - isvcd_iquant_itrans_residual_recon_4x4_dc_sse42()
 *  - isvcd_iquant_itrans_residual_recon_8x8_dc_sse42()
 *  - isvcd_iquant_itrans_residual_recon_chroma_4x4_sse42()
 *  - isvcd_iquant_itrans_residual_recon_chroma_4x4_dc_sse42()
 *
 * @remarks
 *  None
 *
 *******************************************************************************
 */
/* User include files */
#include <immintrin.h>
#include "ih264_typedefs.h"
#include "ih264_defs.h"
#include "ih264_trans_macros.h"
#include "ih264_macros.h"
#include "ih264_platform_macros.h"
#include "ih264_trans_data.h"
#include "ih264_size_defs.h"
#include "ih264_structs.h"

/*****************************************************************************/
/*                                                                           */
/* Function Name : isvcd_iquant_itrans_residual_recon_4x4_sse42              */
/*                                                                           */
/* Description   : This function computes the reconstructed output from     */
/*                 inverse quant + inverse transform + residual addition    */
/*                                                                           */
/* Inputs        : quantized coefficients, prediction and residual buffers, */
/*                 their strides, scale/weight matrices and qp/6            */
/* Globals       : none                                                     */
/* Processing    :                                                          */
/*                                                                           */
/* Outputs       : reconstructed samples in pu1_out                         */
/* Returns       : i4_nnz                                                   */
/*                                                                           */
/* Issues        : none                                                     */
/*                                                                           */
/* Revision History:                                                        */
/*                                                                           */
/*       DD MM YYYY   Author(s)       Changes (Describe the changes made)   */
/*       25 11 2021   Kishore         creation                              */
/*                                                                           */
/*****************************************************************************/

WORD32 isvcd_iquant_itrans_residual_recon_4x4_sse42(
    WORD16 *pi2_src, UWORD8 *pu1_pred, WORD16 *pi2_rsd, UWORD8 *pu1_out, WORD32 pred_strd,
    WORD32 rsd_strd, WORD32 out_strd, const UWORD16 *pu2_iscal_mat, const UWORD16 *pu2_weigh_mat,
    UWORD32 u4_qp_div_6, WORD16 *pi2_tmp, WORD32 iq_start_idx, WORD16 *pi2_dc_ld_addr)
{
    WORD32 i4_nnz = 0;
    WORD32 row_0, row_1, row_2, row_3;
    UWORD32 *pu4_out = (UWORD32 *) pu1_out;
    __m128i src_r0_r1, src_r2_r3;
    __m128i src_r0, src_r1, src_r2, src_r3;
    __m128i scalemat_r0_r1, scalemat_r2_r3;
    __m128i pred_r0, pred_r1, pred_r2, pred_r3;
    __m128i rsd_r0, rsd_r1, rsd_r2, rsd_r3;
    __m128i sign_reg, dequant_r0_r1, dequant_r2_r3;
    __m128i zero_8x16b = _mm_setzero_si128();  // all bits reset to zero
    __m128i temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    __m128i resq_r0, resq_r1, resq_r2, resq_r3;
    __m128i add_rshift = _mm_set1_epi32((u4_qp_div_6 < 4) ? (1 << (3 - u4_qp_div_6)) : 0);
    __m128i value_32 = _mm_set1_epi32(32);
    __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
    __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);

    UNUSED(pi2_tmp);

    /*************************************************************/
    /* Dequantization of coefficients, done here with SSE4.2     */
    /* integer SIMD operations                                   */
    /*************************************************************/
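    /*
     * Scalar view of the SIMD dequantization below (an illustrative sketch,
     * not part of the build; resq[] stands in for the resq_r0..resq_r3 lanes):
     *
     *     WORD32 q = pi2_src[i] * pu2_iscal_mat[i] * pu2_weigh_mat[i];
     *     resq[i] = (u4_qp_div_6 >= 4)
     *                   ? (q << (u4_qp_div_6 - 4))
     *                   : ((q + (1 << (3 - u4_qp_div_6))) >> (4 - u4_qp_div_6));
     */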
    // a00 a01 a02 a03 a10 a11 a12 a13 -- the source matrix 0th,1st row
    src_r0_r1 = _mm_loadu_si128((__m128i *) (pi2_src));
    // a20 a21 a22 a23 a30 a31 a32 a33 -- the source matrix 2nd,3rd row
    src_r2_r3 = _mm_loadu_si128((__m128i *) (pi2_src + 8));
    // b00 b01 b02 b03 b10 b11 b12 b13 -- the scaling matrix 0th,1st row
    scalemat_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat));
    // b20 b21 b22 b23 b30 b31 b32 b33 -- the scaling matrix 2nd,3rd row
    scalemat_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat + 8));
    // q00 q01 q02 q03 q10 q11 q12 q13 -- all 16 bits
    dequant_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat));
    // q20 q21 q22 q23 q30 q31 q32 q33 -- all 16 bits
    dequant_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat + 8));
    // b00*q00 b01*q01 b02*q02 b03*q03 b10*q10 b11*q11 b12*q12 b13*q13 -- 16 bit result
    temp0 = _mm_mullo_epi16(scalemat_r0_r1, dequant_r0_r1);
    // b20*q20 b21*q21 b22*q22 b23*q23 b30*q30 b31*q31 b32*q32 b33*q33 -- 16 bit result
    temp1 = _mm_mullo_epi16(scalemat_r2_r3, dequant_r2_r3);
    // b00*q00 0 b01*q01 0 b02*q02 0 b03*q03 0 -- 16 bit long
    temp4 = _mm_unpacklo_epi16(temp0, zero_8x16b);
    // b10*q10 0 b11*q11 0 b12*q12 0 b13*q13 0 -- 16 bit long
    temp5 = _mm_unpackhi_epi16(temp0, zero_8x16b);
    // b20*q20 0 b21*q21 0 b22*q22 0 b23*q23 0 -- 16 bit long
    temp6 = _mm_unpacklo_epi16(temp1, zero_8x16b);
    // b30*q30 0 b31*q31 0 b32*q32 0 b33*q33 0 -- 16 bit long
    temp7 = _mm_unpackhi_epi16(temp1, zero_8x16b);
    src_r0 = _mm_unpacklo_epi16(src_r0_r1, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r1 = _mm_unpackhi_epi16(src_r0_r1, zero_8x16b);  // a10 0 a11 0 a12 0 a13 0 -- 16 bit long
    src_r2 = _mm_unpacklo_epi16(src_r2_r3, zero_8x16b);  // a20 0 a21 0 a22 0 a23 0 -- 16 bit long
    src_r3 = _mm_unpackhi_epi16(src_r2_r3, zero_8x16b);  // a30 0 a31 0 a32 0 a33 0 -- 16 bit long
    // a00*b00*q00 a10*b10*q10 a20*b20*q20 a30*b30*q30 -- 32 bits long
    temp4 = _mm_madd_epi16(src_r0, temp4);
    temp5 = _mm_madd_epi16(src_r1, temp5);
    temp6 = _mm_madd_epi16(src_r2, temp6);
    temp7 = _mm_madd_epi16(src_r3, temp7);

    if(u4_qp_div_6 >= 4)
    {
        resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);
        resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);
        resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);
        resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);
    }
    else
    {
        temp4 = _mm_add_epi32(temp4, add_rshift);
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp6 = _mm_add_epi32(temp6, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r0 = _mm_srai_epi32(temp4, 4 - u4_qp_div_6);
        resq_r1 = _mm_srai_epi32(temp5, 4 - u4_qp_div_6);
        resq_r2 = _mm_srai_epi32(temp6, 4 - u4_qp_div_6);
        resq_r3 = _mm_srai_epi32(temp7, 4 - u4_qp_div_6);
    }

    if(iq_start_idx == 1) resq_r0 = _mm_insert_epi32(resq_r0, (WORD32) pi2_dc_ld_addr[0], 0);
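    /* When iq_start_idx == 1 the DC coefficient has already been produced on
     * a separate path and is supplied through pi2_dc_ld_addr, so it is
     * re-inserted here instead of being dequantized with the AC coefficients */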
    /* Perform Inverse transform */
    /*-------------------------------------------------------------*/
    /* IDCT [ Horizontal transformation ]                          */
    /*-------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 a1 a2 a3
     *  b0 b1 b2 b3
     *  c0 c1 c2 c3
     *  d0 d1 d2 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);  // a0 b0 a1 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);  // c0 d0 c1 d1
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);  // a2 b2 a3 b3
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);  // c2 d2 c3 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);    // a0 b0 c0 d0
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);    // a1 b1 c1 d1
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);    // a2 b2 c2 d2
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);    // a3 b3 c3 d3
    // Transform starts -- horizontal transform
    /*------------------------------------------------------------------*/
    /* z0 = w0 + w2 */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1 = w0 - w2 */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2 = (w1 >> 1) - w3 */
    temp2 = _mm_srai_epi32(resq_r1, 1);     //(w1>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);  //(w1>>1) - w3
    /* z3 = w1 + (w3 >> 1) */
    temp3 = _mm_srai_epi32(resq_r3, 1);     //(w3>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);  // w1 + (w3>>1)
    /*----------------------------------------------------------*/
    /* x0 = z0 + z3 */
    resq_r0 = _mm_add_epi32(temp0, temp3);
    /* x1 = z1 + z2 */
    resq_r1 = _mm_add_epi32(temp1, temp2);
    /* x2 = z1 - z2 */
    resq_r2 = _mm_sub_epi32(temp1, temp2);
    /* x3 = z0 - z3 */
    resq_r3 = _mm_sub_epi32(temp0, temp3);
    // Matrix transpose
    /*
     *  a0 b0 c0 d0
     *  a1 b1 c1 d1
     *  a2 b2 c2 d2
     *  a3 b3 c3 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);  // a0 a1 b0 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);  // a2 a3 b2 b3
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);  // c0 c1 d0 d1
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);  // c2 c3 d2 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);    // a0 a1 a2 a3
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);    // b0 b1 b2 b3
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);    // c0 c1 c2 c3
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);    // d0 d1 d2 d3
    // Transform ends -- horizontal transform

    // Load pred buffer
    // p00 p01 p02 p03 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r0 = _mm_loadl_epi64((__m128i *) (&pu1_pred[0]));
    // p10 p11 p12 p13 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r1 = _mm_loadl_epi64((__m128i *) (&pu1_pred[pred_strd]));
    // p20 p21 p22 p23 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r2 = _mm_loadl_epi64((__m128i *) (&pu1_pred[2 * pred_strd]));
    // p30 p31 p32 p33 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r3 = _mm_loadl_epi64((__m128i *) (&pu1_pred[3 * pred_strd]));

    pred_r0 = _mm_cvtepu8_epi32(pred_r0);  // p00 p01 p02 p03 -- all 32 bits
    pred_r1 = _mm_cvtepu8_epi32(pred_r1);  // p10 p11 p12 p13 -- all 32 bits
    pred_r2 = _mm_cvtepu8_epi32(pred_r2);  // p20 p21 p22 p23 -- all 32 bits
    pred_r3 = _mm_cvtepu8_epi32(pred_r3);  // p30 p31 p32 p33 -- all 32 bits

    // Load resd buffer
    rsd_r0 = _mm_loadl_epi64((__m128i *) (&pi2_rsd[0]));
    rsd_r1 = _mm_loadl_epi64((__m128i *) (&pi2_rsd[rsd_strd]));
    rsd_r2 = _mm_loadl_epi64((__m128i *) (&pi2_rsd[2 * rsd_strd]));
    rsd_r3 = _mm_loadl_epi64((__m128i *) (&pi2_rsd[3 * rsd_strd]));

    rsd_r0 = _mm_cvtepi16_epi32(rsd_r0);
    rsd_r1 = _mm_cvtepi16_epi32(rsd_r1);
    rsd_r2 = _mm_cvtepi16_epi32(rsd_r2);
    rsd_r3 = _mm_cvtepi16_epi32(rsd_r3);

    /*--------------------------------------------------------------*/
    /* IDCT [ Vertical transformation ] and Xij = (xij + 32)>>6     */
    /*                                                              */
    /* Add the residual and the prediction, and store the result    */
    /* in the output buffer                                         */
    /*--------------------------------------------------------------*/
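    /*
     * Scalar view of the per-element reconstruction below (sketch only):
     *
     *     x   = (idct_out + 32) >> 6;              // rounded IDCT output
     *     r   = clip(x + pi2_rsd[..], RSD_MIN, RSD_MAX);
     *     out = r + pu1_pred[..];                  // clipped to 8 bit later
     *
     * The all-zero test on x (taken before the residual add) is what feeds
     * the i4_nnz return value.
     */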
    /* z0j = y0j + y2j */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1j = y0j - y2j */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2j = (y1j>>1) - y3j */
    temp2 = _mm_srai_epi32(resq_r1, 1);  //(y1j>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);
    /* z3j = y1j + (y3j>>1) */
    temp3 = _mm_srai_epi32(resq_r3, 1);  //(y3j>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);

    /* x0j = z0j + z3j */
    temp4 = _mm_add_epi32(temp0, temp3);
    temp4 = _mm_add_epi32(temp4, value_32);
    temp4 = _mm_srai_epi32(temp4, 6);

    row_0 = _mm_test_all_ones(_mm_cmpeq_epi32(temp4, zero_8x16b));  // return 1 if all zeros, else 0
    temp4 = _mm_add_epi32(temp4, rsd_r0);
    temp4 = _mm_min_epi16(dupmax_8x16b, temp4);
    temp4 = _mm_max_epi16(dupmin_8x16b, temp4);
    temp4 = _mm_add_epi32(temp4, pred_r0);
    /* x1j = z1j + z2j */
    temp5 = _mm_add_epi32(temp1, temp2);
    temp5 = _mm_add_epi32(temp5, value_32);
    temp5 = _mm_srai_epi32(temp5, 6);

    row_1 = _mm_test_all_ones(_mm_cmpeq_epi32(temp5, zero_8x16b));  // return 1 if all zeros, else 0
    temp5 = _mm_add_epi32(temp5, rsd_r1);
    temp5 = _mm_min_epi16(dupmax_8x16b, temp5);
    temp5 = _mm_max_epi16(dupmin_8x16b, temp5);
    temp5 = _mm_add_epi32(temp5, pred_r1);
    /* x2j = z1j - z2j */
    temp6 = _mm_sub_epi32(temp1, temp2);
    temp6 = _mm_add_epi32(temp6, value_32);
    temp6 = _mm_srai_epi32(temp6, 6);

    row_2 = _mm_test_all_ones(_mm_cmpeq_epi32(temp6, zero_8x16b));  // return 1 if all zeros, else 0
    temp6 = _mm_add_epi32(temp6, rsd_r2);
    temp6 = _mm_min_epi16(dupmax_8x16b, temp6);
    temp6 = _mm_max_epi16(dupmin_8x16b, temp6);
    temp6 = _mm_add_epi32(temp6, pred_r2);
    /* x3j = z0j - z3j */
    temp7 = _mm_sub_epi32(temp0, temp3);
    temp7 = _mm_add_epi32(temp7, value_32);
    temp7 = _mm_srai_epi32(temp7, 6);

    row_3 = _mm_test_all_ones(_mm_cmpeq_epi32(temp7, zero_8x16b));  // return 1 if all zeros, else 0
    temp7 = _mm_add_epi32(temp7, rsd_r3);
    temp7 = _mm_min_epi16(dupmax_8x16b, temp7);
    temp7 = _mm_max_epi16(dupmin_8x16b, temp7);
    temp7 = _mm_add_epi32(temp7, pred_r3);

    // 32-bit to 16-bit conversion
    temp0 = _mm_packs_epi32(temp4, temp5);
    temp1 = _mm_packs_epi32(temp6, temp7);
    /*------------------------------------------------------------------*/
    // Clipping the results to 8 bits
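    // The sign mask zeroes out negative samples; _mm_packus_epi16 below then
    // saturates the surviving values to the unsigned 8 bit range [0, 255]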
    sign_reg = _mm_cmpgt_epi16(temp0, zero_8x16b);  // sign check
    temp0 = _mm_and_si128(temp0, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp1, zero_8x16b);
    temp1 = _mm_and_si128(temp1, sign_reg);

    resq_r0 = _mm_packus_epi16(temp0, temp1);
    resq_r1 = _mm_srli_si128(resq_r0, 4);
    resq_r2 = _mm_srli_si128(resq_r1, 4);
    resq_r3 = _mm_srli_si128(resq_r2, 4);

    *pu4_out = _mm_cvtsi128_si32(resq_r0);
    pu1_out += out_strd;
    pu4_out = (UWORD32 *) (pu1_out);
    *(pu4_out) = _mm_cvtsi128_si32(resq_r1);
    pu1_out += out_strd;
    pu4_out = (UWORD32 *) (pu1_out);
    *(pu4_out) = _mm_cvtsi128_si32(resq_r2);
    pu1_out += out_strd;
    pu4_out = (UWORD32 *) (pu1_out);
    *(pu4_out) = _mm_cvtsi128_si32(resq_r3);

    i4_nnz = !(row_0 && row_1 && row_2 && row_3);
    return i4_nnz;
}

/*****************************************************************************/
/*                                                                           */
/* Function Name : isvcd_iquant_itrans_residual_recon_8x8_sse42              */
/*                                                                           */
/* Description   : This function computes the reconstructed output from     */
/*                 inverse quant + inverse transform + residual addition    */
/*                                                                           */
/* Inputs        : quantized coefficients, prediction and residual buffers, */
/*                 their strides, scale/weight matrices and qp/6            */
/* Globals       : none                                                     */
/* Processing    :                                                          */
/*                                                                           */
/* Outputs       : reconstructed samples in pu1_out                         */
/* Returns       : i4_nnz                                                   */
/*                                                                           */
/* Issues        : none                                                     */
/*                                                                           */
/* Revision History:                                                        */
/*                                                                           */
/*       DD MM YYYY   Author(s)       Changes (Describe the changes made)   */
/*       25 11 2021   Kishore         creation                              */
/*                                                                           */
/*****************************************************************************/

WORD32 isvcd_iquant_itrans_residual_recon_8x8_sse42(
    WORD16 *pi2_src, UWORD8 *pu1_pred, WORD16 *pi2_rsd, UWORD8 *pu1_out, WORD32 pred_strd,
    WORD32 rsd_strd, WORD32 out_strd, const UWORD16 *pu2_iscale_mat, const UWORD16 *pu2_weigh_mat,
    UWORD32 qp_div, WORD16 *pi2_tmp, WORD32 iq_start_idx, WORD16 *pi2_dc_ld_addr)
{
    __m128i rsd_r01_b0, rsd_r23_b0, rsd_r45_b2, rsd_r67_b2;
    __m128i rsd_r01_b1, rsd_r23_b1, rsd_r45_b3, rsd_r67_b3;

    WORD32 row_01_b0, row_23_b0, row_45_b2, row_67_b2;
    WORD32 row_01_b1, row_23_b1, row_45_b3, row_67_b3;
    WORD32 i4_nnz, i4_nnz_b0, i4_nnz_b1, i4_nnz_b2, i4_nnz_b3;
    __m128i src_r0;
    __m128i scalemat_r0;
    __m128i zero_8x16b = _mm_setzero_si128();  // all bits reset to zero
    __m128i value_32 = _mm_set1_epi32(32);
    __m128i add_rshift = _mm_set1_epi32((qp_div < 6) ? (1 << (5 - qp_div)) : 0);
    __m128i dequant_r0;
    __m128i predload_r;
    __m128i pred_r0_1, pred_r1_1, pred_r2_1, pred_r3_1, pred_r4_1, pred_r5_1, pred_r6_1, pred_r7_1;

    __m128i rsd_r0, rsd_r1, rsd_r2, rsd_r3, rsd_r4, rsd_r5, rsd_r6, rsd_r7;
    __m128i sign_reg;
    __m128i src_r0_1, src_r0_2;
    __m128i scalemat_r0_1, scalemat_r0_2;
    __m128i temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8;
    __m128i temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18, temp19, temp20;
    // To store dequantization results
    __m128i resq_r0_1, resq_r0_2, resq_r1_1, resq_r1_2, resq_r2_1, resq_r2_2, resq_r3_1, resq_r3_2,
        resq_r4_1, resq_r4_2, resq_r5_1, resq_r5_2, resq_r6_1, resq_r6_2, resq_r7_1, resq_r7_2;
    __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
    __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);

    UNUSED(pi2_tmp);
    UNUSED(iq_start_idx);
    UNUSED(pi2_dc_ld_addr);

    /*************************************************************/
    /* Dequantization of coefficients, done here with SSE4.2     */
    /* integer SIMD operations. Note : DC coeff is not scaled    */
    /*************************************************************/

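    /*
     * Scalar view of the per-row dequantization below (sketch only):
     *
     *     WORD32 q = pi2_src[i] * pu2_iscale_mat[i] * pu2_weigh_mat[i];
     *     q = (qp_div >= 6) ? (q << (qp_div - 6))
     *                       : ((q + (1 << (5 - qp_div))) >> (6 - qp_div));
     */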
    // Row 0 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 0th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 0th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[0]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);

    if(qp_div >= 6)
    {
        resq_r0_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r0_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r0_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r0_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    // -- 16 bit long
    resq_r0_1 = _mm_packs_epi32(resq_r0_1, resq_r0_2);
    // Row 1 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 1st row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 8));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 1st row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 8));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[8]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r1_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r1_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r1_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r1_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    // -- 16 bit long
    resq_r1_1 = _mm_packs_epi32(resq_r1_1, resq_r1_2);
    // Row 2 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 2nd row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 16));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 2nd row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 16));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[16]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r2_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r2_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r2_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r2_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 -- 16 bit long
    resq_r2_1 = _mm_packs_epi32(resq_r2_1, resq_r2_2);
    // Row 3 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 3rd row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 24));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 3rd row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 24));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[24]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r3_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r3_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r3_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r3_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 -- 16 bit long
    resq_r3_1 = _mm_packs_epi32(resq_r3_1, resq_r3_2);
    // Row 4 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 4th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 32));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 4th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 32));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[32]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r4_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r4_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r4_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r4_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 -- 16 bit long
    resq_r4_1 = _mm_packs_epi32(resq_r4_1, resq_r4_2);
    // Row 5 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 5th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 40));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 5th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 40));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[40]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r5_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r5_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r5_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r5_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 -- 16 bit long
    resq_r5_1 = _mm_packs_epi32(resq_r5_1, resq_r5_2);
    // Row 6 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 6th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 48));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 6th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 48));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[48]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r6_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r6_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r6_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r6_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 -- 16 bit long
    resq_r6_1 = _mm_packs_epi32(resq_r6_1, resq_r6_2);
    // Row 7 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 7th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 56));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 7th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 56));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[56]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r7_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r7_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r7_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r7_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 -- 16 bit long
    resq_r7_1 = _mm_packs_epi32(resq_r7_1, resq_r7_2);

    /* Perform Inverse transform */
    /*--------------------------------------------------------------------*/
    /* IDCT [ Horizontal transformation ]                                 */
    /*--------------------------------------------------------------------*/
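    /* What follows is the three-stage butterfly of the 8x8 inverse transform
     * (intermediate y- and z-stages, then the final x-stage); each step is
     * annotated inline with the equation it implements. The odd rows are
     * sign-extended to 32 bit first so the multi-term odd-stage sums cannot
     * overflow 16 bits. */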
    // Matrix transpose
    /*
     *  a0 a1 a2 a3 a4 a5 a6 a7
     *  b0 b1 b2 b3 b4 b5 b6 b7
     *  c0 c1 c2 c3 c4 c5 c6 c7
     *  d0 d1 d2 d3 d4 d5 d6 d7
     */
    temp1 = _mm_unpacklo_epi16(resq_r0_1, resq_r1_1);  // a0 b0 a1 b1 a2 b2 a3 b3
    temp3 = _mm_unpacklo_epi16(resq_r2_1, resq_r3_1);  // c0 d0 c1 d1 c2 d2 c3 d3
    temp2 = _mm_unpackhi_epi16(resq_r0_1, resq_r1_1);  // a4 b4 a5 b5 a6 b6 a7 b7
    temp4 = _mm_unpackhi_epi16(resq_r2_1, resq_r3_1);  // c4 d4 c5 d5 c6 d6 c7 d7
    resq_r0_1 = _mm_unpacklo_epi32(temp1, temp3);      // a0 b0 c0 d0 a1 b1 c1 d1
    resq_r1_1 = _mm_unpackhi_epi32(temp1, temp3);      // a2 b2 c2 d2 a3 b3 c3 d3
    resq_r2_1 = _mm_unpacklo_epi32(temp2, temp4);      // a4 b4 c4 d4 a5 b5 c5 d5
    resq_r3_1 = _mm_unpackhi_epi32(temp2, temp4);      // a6 b6 c6 d6 a7 b7 c7 d7
    /*
     *  e0 e1 e2 e3 e4 e5 e6 e7
     *  f0 f1 f2 f3 f4 f5 f6 f7
     *  g0 g1 g2 g3 g4 g5 g6 g7
     *  h0 h1 h2 h3 h4 h5 h6 h7
     */
    temp1 = _mm_unpacklo_epi16(resq_r4_1, resq_r5_1);  // e0 f0 e1 f1 e2 f2 e3 f3
    temp3 = _mm_unpacklo_epi16(resq_r6_1, resq_r7_1);  // g0 h0 g1 h1 g2 h2 g3 h3
    temp2 = _mm_unpackhi_epi16(resq_r4_1, resq_r5_1);  // e4 f4 e5 f5 e6 f6 e7 f7
    temp4 = _mm_unpackhi_epi16(resq_r6_1, resq_r7_1);  // g4 h4 g5 h5 g6 h6 g7 h7
    resq_r4_1 = _mm_unpacklo_epi32(temp1, temp3);      // e0 f0 g0 h0 e1 f1 g1 h1
    resq_r5_1 = _mm_unpackhi_epi32(temp1, temp3);      // e2 f2 g2 h2 e3 f3 g3 h3
    resq_r6_1 = _mm_unpacklo_epi32(temp2, temp4);      // e4 f4 g4 h4 e5 f5 g5 h5
    resq_r7_1 = _mm_unpackhi_epi32(temp2, temp4);      // e6 f6 g6 h6 e7 f7 g7 h7
    /*
     *  a0 b0 c0 d0 a1 b1 c1 d1
     *  a2 b2 c2 d2 a3 b3 c3 d3
     *  a4 b4 c4 d4 a5 b5 c5 d5
     *  a6 b6 c6 d6 a7 b7 c7 d7
     *  e0 f0 g0 h0 e1 f1 g1 h1
     *  e2 f2 g2 h2 e3 f3 g3 h3
     *  e4 f4 g4 h4 e5 f5 g5 h5
     *  e6 f6 g6 h6 e7 f7 g7 h7
     */
    resq_r0_2 = _mm_unpacklo_epi64(resq_r0_1, resq_r4_1);  // a0 b0 c0 d0 e0 f0 g0 h0
    resq_r1_2 = _mm_unpackhi_epi64(resq_r0_1, resq_r4_1);  // a1 b1 c1 d1 e1 f1 g1 h1
    resq_r2_2 = _mm_unpacklo_epi64(resq_r1_1, resq_r5_1);  // a2 b2 c2 d2 e2 f2 g2 h2
    resq_r3_2 = _mm_unpackhi_epi64(resq_r1_1, resq_r5_1);  // a3 b3 c3 d3 e3 f3 g3 h3
    resq_r4_2 = _mm_unpacklo_epi64(resq_r2_1, resq_r6_1);  // a4 b4 c4 d4 e4 f4 g4 h4
    resq_r5_2 = _mm_unpackhi_epi64(resq_r2_1, resq_r6_1);  // a5 b5 c5 d5 e5 f5 g5 h5
    resq_r6_2 = _mm_unpacklo_epi64(resq_r3_1, resq_r7_1);  // a6 b6 c6 d6 e6 f6 g6 h6
    resq_r7_2 = _mm_unpackhi_epi64(resq_r3_1, resq_r7_1);  // a7 b7 c7 d7 e7 f7 g7 h7

    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r1_2);
    resq_r1_1 = _mm_unpacklo_epi16(resq_r1_2, sign_reg);  // a1 b1 c1 d1 -- 32 bit
    resq_r1_2 = _mm_unpackhi_epi16(resq_r1_2, sign_reg);  // e1 f1 g1 h1 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r3_2);
    resq_r3_1 = _mm_unpacklo_epi16(resq_r3_2, sign_reg);  // a3 b3 c3 d3 -- 32 bit
    resq_r3_2 = _mm_unpackhi_epi16(resq_r3_2, sign_reg);  // e3 f3 g3 h3 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r5_2);
    resq_r5_1 = _mm_unpacklo_epi16(resq_r5_2, sign_reg);  // a5 b5 c5 d5 -- 32 bit
    resq_r5_2 = _mm_unpackhi_epi16(resq_r5_2, sign_reg);  // e5 f5 g5 h5 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r7_2);
    resq_r7_1 = _mm_unpacklo_epi16(resq_r7_2, sign_reg);  // a7 b7 c7 d7 -- 32 bit
    resq_r7_2 = _mm_unpackhi_epi16(resq_r7_2, sign_reg);  // e7 f7 g7 h7 -- 32 bit
    // Transform starts -- horizontal transform
    /*------------------------------------------------------------------*/
    /* y0 = w0 + w4 */
    temp1 = _mm_add_epi16(resq_r0_2, resq_r4_2);
    /* y2 = w0 - w4 */
    temp3 = _mm_sub_epi16(resq_r0_2, resq_r4_2);
    /* y1 = -w3 + w5 - w7 - (w7 >> 1) */
    temp2 = _mm_sub_epi32(resq_r5_1, resq_r3_1);  //-w3+w5
    temp10 = _mm_sub_epi32(resq_r5_2, resq_r3_2);
    temp4 = _mm_sub_epi32(temp2, resq_r7_1);  //-w3+w5-w7
    temp12 = _mm_sub_epi32(temp10, resq_r7_2);
    temp5 = _mm_srai_epi32(resq_r7_1, 1);  // w7>>1
    temp13 = _mm_srai_epi32(resq_r7_2, 1);
    temp2 = _mm_sub_epi32(temp4, temp5);  //-w3+w5-w7-(w7>>1)
    temp10 = _mm_sub_epi32(temp12, temp13);
    temp2 = _mm_packs_epi32(temp2, temp10);
    /* y3 = w1 + w7 - w3 - (w3 >> 1) */
    temp4 = _mm_add_epi32(resq_r1_1, resq_r7_1);  // w1+w7
    temp12 = _mm_add_epi32(resq_r1_2, resq_r7_2);
    temp4 = _mm_sub_epi32(temp4, resq_r3_1);  // w1+w7-w3
    temp12 = _mm_sub_epi32(temp12, resq_r3_2);
    temp5 = _mm_srai_epi32(resq_r3_1, 1);  // w3>>1
    temp13 = _mm_srai_epi32(resq_r3_2, 1);
    temp4 = _mm_sub_epi32(temp4, temp5);  // w1+w7-w3-(w3>>1)
    temp12 = _mm_sub_epi32(temp12, temp13);
    temp4 = _mm_packs_epi32(temp4, temp12);
    /* y4 = (w2 >> 1) - w6 */
    temp5 = _mm_srai_epi16(resq_r2_2, 1);     // w2>>1
    temp5 = _mm_sub_epi16(temp5, resq_r6_2);  //(w2>>1)-w6
    /* y5 = -w1 + w7 + w5 + (w5 >> 1) */
    temp6 = _mm_sub_epi32(resq_r7_1, resq_r1_1);  // w7-w1
    temp14 = _mm_sub_epi32(resq_r7_2, resq_r1_2);
    temp6 = _mm_add_epi32(temp6, resq_r5_1);  // w7-w1+w5
    temp14 = _mm_add_epi32(temp14, resq_r5_2);
    temp7 = _mm_srai_epi32(resq_r5_1, 1);  // w5>>1
    temp15 = _mm_srai_epi32(resq_r5_2, 1);
    temp6 = _mm_add_epi32(temp6, temp7);  // w7-w1+w5+(w5>>1)
    temp14 = _mm_add_epi32(temp14, temp15);
    temp6 = _mm_packs_epi32(temp6, temp14);
    /* y6 = w2 + (w6 >> 1) */
    temp7 = _mm_srai_epi16(resq_r6_2, 1);     // w6>>1
    temp7 = _mm_add_epi16(temp7, resq_r2_2);  //(w6>>1)+w2
    /* y7 = w3 + w5 + w1 + (w1 >> 1) */
    temp8 = _mm_add_epi32(resq_r3_1, resq_r5_1);  // w3+w5
    temp16 = _mm_add_epi32(resq_r3_2, resq_r5_2);
    temp8 = _mm_add_epi32(temp8, resq_r1_1);  // w3+w5+w1
    temp16 = _mm_add_epi32(temp16, resq_r1_2);
    temp17 = _mm_srai_epi32(resq_r1_1, 1);  // w1>>1
    temp18 = _mm_srai_epi32(resq_r1_2, 1);
    temp8 = _mm_add_epi32(temp8, temp17);  // w3+w5+w1+(w1>>1)
    temp16 = _mm_add_epi32(temp16, temp18);
    temp8 = _mm_packs_epi32(temp8, temp16);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* z0 = y0 + y6 */
    resq_r0_1 = _mm_add_epi16(temp1, temp7);
    /* z1 = y1 + (y7 >> 2) */
    resq_r1_1 = _mm_srai_epi16(temp8, 2);
    resq_r1_1 = _mm_add_epi16(resq_r1_1, temp2);
    /* z2 = y2 + y4 */
    resq_r2_1 = _mm_add_epi16(temp3, temp5);
    /* z3 = y3 + (y5 >> 2) */
    resq_r3_1 = _mm_srai_epi16(temp6, 2);
    resq_r3_1 = _mm_add_epi16(resq_r3_1, temp4);
    /* z4 = y2 - y4 */
    resq_r4_1 = _mm_sub_epi16(temp3, temp5);
    /* z5 = (y3 >> 2) - y5 */
    resq_r5_1 = _mm_srai_epi16(temp4, 2);
    resq_r5_1 = _mm_sub_epi16(resq_r5_1, temp6);
    /* z6 = y0 - y6 */
    resq_r6_1 = _mm_sub_epi16(temp1, temp7);
    /* z7 = y7 - (y1 >> 2) */
    resq_r7_1 = _mm_srai_epi16(temp2, 2);
    resq_r7_1 = _mm_sub_epi16(temp8, resq_r7_1);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* x0 = z0 + z7 */
    temp1 = _mm_add_epi16(resq_r0_1, resq_r7_1);
    /* x1 = z2 + z5 */
    temp2 = _mm_add_epi16(resq_r2_1, resq_r5_1);
    /* x2 = z4 + z3 */
    temp3 = _mm_add_epi16(resq_r4_1, resq_r3_1);
    /* x3 = z6 + z1 */
    temp4 = _mm_add_epi16(resq_r6_1, resq_r1_1);
    /* x4 = z6 - z1 */
    temp5 = _mm_sub_epi16(resq_r6_1, resq_r1_1);
    /* x5 = z4 - z3 */
    temp6 = _mm_sub_epi16(resq_r4_1, resq_r3_1);
    /* x6 = z2 - z5 */
    temp7 = _mm_sub_epi16(resq_r2_1, resq_r5_1);
    /* x7 = z0 - z7 */
    temp8 = _mm_sub_epi16(resq_r0_1, resq_r7_1);
    /*------------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 b0 c0 d0 e0 f0 g0 h0
     *  a1 b1 c1 d1 e1 f1 g1 h1
     *  a2 b2 c2 d2 e2 f2 g2 h2
     *  a3 b3 c3 d3 e3 f3 g3 h3
     */
    temp17 = _mm_unpacklo_epi16(temp1, temp2);  // a0 a1 b0 b1 c0 c1 d0 d1
    temp19 = _mm_unpacklo_epi16(temp3, temp4);  // a2 a3 b2 b3 c2 c3 d2 d3
    temp18 = _mm_unpackhi_epi16(temp1, temp2);  // e0 e1 f0 f1 g0 g1 h0 h1
    temp20 = _mm_unpackhi_epi16(temp3, temp4);  // e2 e3 f2 f3 g2 g3 h2 h3

    resq_r0_1 = _mm_unpacklo_epi32(temp17, temp19);  // a0 a1 a2 a3 b0 b1 b2 b3
    resq_r1_1 = _mm_unpackhi_epi32(temp17, temp19);  // c0 c1 c2 c3 d0 d1 d2 d3
    resq_r2_1 = _mm_unpacklo_epi32(temp18, temp20);  // e0 e1 e2 e3 f0 f1 f2 f3
    resq_r3_1 = _mm_unpackhi_epi32(temp18, temp20);  // g0 g1 g2 g3 h0 h1 h2 h3
    /*
     *  a4 b4 c4 d4 e4 f4 g4 h4
     *  a5 b5 c5 d5 e5 f5 g5 h5
     *  a6 b6 c6 d6 e6 f6 g6 h6
     *  a7 b7 c7 d7 e7 f7 g7 h7
     */
    temp17 = _mm_unpacklo_epi16(temp5, temp6);  // a4 a5 b4 b5 c4 c5 d4 d5
    temp19 = _mm_unpacklo_epi16(temp7, temp8);  // a6 a7 b6 b7 c6 c7 d6 d7
    temp18 = _mm_unpackhi_epi16(temp5, temp6);  // e4 e5 f4 f5 g4 g5 h4 h5
    temp20 = _mm_unpackhi_epi16(temp7, temp8);  // e6 e7 f6 f7 g6 g7 h6 h7

    resq_r4_1 = _mm_unpacklo_epi32(temp17, temp19);  // a4 a5 a6 a7 b4 b5 b6 b7
    resq_r5_1 = _mm_unpackhi_epi32(temp17, temp19);  // c4 c5 c6 c7 d4 d5 d6 d7
    resq_r6_1 = _mm_unpacklo_epi32(temp18, temp20);  // e4 e5 e6 e7 f4 f5 f6 f7
    resq_r7_1 = _mm_unpackhi_epi32(temp18, temp20);  // g4 g5 g6 g7 h4 h5 h6 h7
    /* a0 a1 a2 a3 b0 b1 b2 b3
     * c0 c1 c2 c3 d0 d1 d2 d3
     * e0 e1 e2 e3 f0 f1 f2 f3
     * g0 g1 g2 g3 h0 h1 h2 h3
     * a4 a5 a6 a7 b4 b5 b6 b7
     * c4 c5 c6 c7 d4 d5 d6 d7
     * e4 e5 e6 e7 f4 f5 f6 f7
     * g4 g5 g6 g7 h4 h5 h6 h7
     */
    resq_r0_2 = _mm_unpacklo_epi64(resq_r0_1, resq_r4_1);  // a0 a1 a2 a3 a4 a5 a6 a7
    resq_r1_2 = _mm_unpackhi_epi64(resq_r0_1, resq_r4_1);  // b0 b1 b2 b3 b4 b5 b6 b7
    resq_r2_2 = _mm_unpacklo_epi64(resq_r1_1, resq_r5_1);  // c0 c1 c2 c3 c4 c5 c6 c7
    resq_r3_2 = _mm_unpackhi_epi64(resq_r1_1, resq_r5_1);  // d0 d1 d2 d3 d4 d5 d6 d7
    resq_r4_2 = _mm_unpacklo_epi64(resq_r2_1, resq_r6_1);  // e0 e1 e2 e3 e4 e5 e6 e7
    resq_r5_2 = _mm_unpackhi_epi64(resq_r2_1, resq_r6_1);  // f0 f1 f2 f3 f4 f5 f6 f7
    resq_r6_2 = _mm_unpacklo_epi64(resq_r3_1, resq_r7_1);  // g0 g1 g2 g3 g4 g5 g6 g7
    resq_r7_2 = _mm_unpackhi_epi64(resq_r3_1, resq_r7_1);  // h0 h1 h2 h3 h4 h5 h6 h7

    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r1_2);
    resq_r1_1 = _mm_unpacklo_epi16(resq_r1_2, sign_reg);  // b0 b1 b2 b3 -- 32 bit
    resq_r1_2 = _mm_unpackhi_epi16(resq_r1_2, sign_reg);  // b4 b5 b6 b7 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r3_2);
    resq_r3_1 = _mm_unpacklo_epi16(resq_r3_2, sign_reg);  // d0 d1 d2 d3 -- 32 bit
    resq_r3_2 = _mm_unpackhi_epi16(resq_r3_2, sign_reg);  // d4 d5 d6 d7 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r5_2);
    resq_r5_1 = _mm_unpacklo_epi16(resq_r5_2, sign_reg);  // f0 f1 f2 f3 -- 32 bit
    resq_r5_2 = _mm_unpackhi_epi16(resq_r5_2, sign_reg);  // f4 f5 f6 f7 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r7_2);
    resq_r7_1 = _mm_unpacklo_epi16(resq_r7_2, sign_reg);  // h0 h1 h2 h3 -- 32 bit
    resq_r7_2 = _mm_unpackhi_epi16(resq_r7_2, sign_reg);  // h4 h5 h6 h7 -- 32 bit

    zero_8x16b = _mm_setzero_si128();  // all bits reset to zero
    // Load pred buffer row 0
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[0]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r0_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);
    // Load pred buffer row 1
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[pred_strd]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r1_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);
    // Load pred buffer row 2
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[2 * pred_strd]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r2_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);
    // Load pred buffer row 3
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[3 * pred_strd]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r3_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);
    // Load pred buffer row 4
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[4 * pred_strd]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r4_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);
    // Load pred buffer row 5
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[5 * pred_strd]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r5_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);
    // Load pred buffer row 6
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[6 * pred_strd]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r6_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);
    // Load pred buffer row 7
    // p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[7 * pred_strd]));
    // p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    pred_r7_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b);

    rsd_r0 = _mm_loadu_si128((__m128i *) (&pi2_rsd[0]));
    rsd_r1 = _mm_loadu_si128((__m128i *) (&pi2_rsd[1 * rsd_strd]));
    rsd_r2 = _mm_loadu_si128((__m128i *) (&pi2_rsd[2 * rsd_strd]));
    rsd_r3 = _mm_loadu_si128((__m128i *) (&pi2_rsd[3 * rsd_strd]));
    rsd_r4 = _mm_loadu_si128((__m128i *) (&pi2_rsd[4 * rsd_strd]));
    rsd_r5 = _mm_loadu_si128((__m128i *) (&pi2_rsd[5 * rsd_strd]));
    rsd_r6 = _mm_loadu_si128((__m128i *) (&pi2_rsd[6 * rsd_strd]));
    rsd_r7 = _mm_loadu_si128((__m128i *) (&pi2_rsd[7 * rsd_strd]));

    /*--------------------------------------------------------------------*/
    /* IDCT [ Vertical transformation ] and Xij = (xij + 32)>>6           */
    /*                                                                    */
    /* Add the residual and the prediction, and store the reconstructed   */
    /* samples in the output buffer                                       */
    /*--------------------------------------------------------------------*/

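    /*
     * Scalar view of the per-row reconstruction below (sketch only):
     *
     *     res = (x_row + 32) >> 6;                  // rounded IDCT output
     *     res = clip(res + pi2_rsd[row], RSD_MIN, RSD_MAX);
     *     out = res + pred[row];                    // clipped to 8 bit when packing
     *
     * The clamped residual (res) is also what the zero tests for i4_nnz
     * inspect further below.
     */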
    /* y0j = w0j + w4j */
    temp1 = _mm_add_epi16(resq_r0_2, resq_r4_2);
    /* y2j = w0j - w4j */
    temp3 = _mm_sub_epi16(resq_r0_2, resq_r4_2);
    /* y1j = -w3j + w5j - w7j - (w7j >> 1) */
    temp2 = _mm_sub_epi32(resq_r5_1, resq_r3_1);  //-w3+w5
    temp10 = _mm_sub_epi32(resq_r5_2, resq_r3_2);
    temp4 = _mm_sub_epi32(temp2, resq_r7_1);  //-w3+w5-w7
    temp12 = _mm_sub_epi32(temp10, resq_r7_2);
    temp5 = _mm_srai_epi32(resq_r7_1, 1);  // w7>>1
    temp13 = _mm_srai_epi32(resq_r7_2, 1);
    temp2 = _mm_sub_epi32(temp4, temp5);  //-w3+w5-w7-(w7>>1)
    temp10 = _mm_sub_epi32(temp12, temp13);
    temp2 = _mm_packs_epi32(temp2, temp10);
    /* y3j = w1j + w7j - w3j - (w3j >> 1) */
    temp4 = _mm_add_epi32(resq_r1_1, resq_r7_1);  // w1+w7
    temp12 = _mm_add_epi32(resq_r1_2, resq_r7_2);
    temp4 = _mm_sub_epi32(temp4, resq_r3_1);  // w1+w7-w3
    temp12 = _mm_sub_epi32(temp12, resq_r3_2);
    temp5 = _mm_srai_epi32(resq_r3_1, 1);  // w3>>1
    temp13 = _mm_srai_epi32(resq_r3_2, 1);
    temp4 = _mm_sub_epi32(temp4, temp5);  // w1+w7-w3-(w3>>1)
    temp12 = _mm_sub_epi32(temp12, temp13);
    temp4 = _mm_packs_epi32(temp4, temp12);
    /* y4j = (w2j >> 1) - w6j */
    temp5 = _mm_srai_epi16(resq_r2_2, 1);     // w2>>1
    temp5 = _mm_sub_epi16(temp5, resq_r6_2);  //(w2>>1)-w6
    /* y5j = -w1j + w7j + w5j + (w5j >> 1) */
    temp6 = _mm_sub_epi32(resq_r7_1, resq_r1_1);  // w7-w1
    temp14 = _mm_sub_epi32(resq_r7_2, resq_r1_2);
    temp6 = _mm_add_epi32(temp6, resq_r5_1);  // w7-w1+w5
    temp14 = _mm_add_epi32(temp14, resq_r5_2);
    temp7 = _mm_srai_epi32(resq_r5_1, 1);  // w5>>1
    temp15 = _mm_srai_epi32(resq_r5_2, 1);
    temp6 = _mm_add_epi32(temp6, temp7);  // w7-w1+w5+(w5>>1)
977 temp14 = _mm_add_epi32(temp14, temp15);
978 temp6 = _mm_packs_epi32(temp6, temp14);
979 /* y6j = w2j + (w6j >> 1) */
980 temp7 = _mm_srai_epi16(resq_r6_2, 1); // w6>>1
981 temp7 = _mm_add_epi16(temp7, resq_r2_2); //(w6>>1)+w2
982 /* y7j = w3j + w5j + w1j + (w1j >> 1) */
983 temp8 = _mm_add_epi32(resq_r3_1, resq_r5_1); // w3+w5
984 temp16 = _mm_add_epi32(resq_r3_2, resq_r5_2);
985 temp8 = _mm_add_epi32(temp8, resq_r1_1); // w3+w5+w1
986 temp16 = _mm_add_epi32(temp16, resq_r1_2);
987 temp17 = _mm_srai_epi32(resq_r1_1, 1); // w1>>1
988 temp18 = _mm_srai_epi32(resq_r1_2, 1);
989 temp8 = _mm_add_epi32(temp8, temp17); // w3+w5+w1+(w1>>1)
990 temp16 = _mm_add_epi32(temp16, temp18);
991 temp8 = _mm_packs_epi32(temp8, temp16);
992 /*------------------------------------------------------------------*/
993 /*------------------------------------------------------------------*/
994 /* z0j = y0j + y6j */
995 resq_r0_1 = _mm_add_epi16(temp1, temp7);
996 /* z1j = y1j + (y7j >> 2) */
997 resq_r1_1 = _mm_srai_epi16(temp8, 2);
998 resq_r1_1 = _mm_add_epi16(resq_r1_1, temp2);
999 /* z2j = y2j + y4j */
1000 resq_r2_1 = _mm_add_epi16(temp3, temp5);
1001 /* z3j = y3j + (y5j >> 2) */
1002 resq_r3_1 = _mm_srai_epi16(temp6, 2);
1003 resq_r3_1 = _mm_add_epi16(resq_r3_1, temp4);
1004 /* z4j = y2j - y4j */
1005 resq_r4_1 = _mm_sub_epi16(temp3, temp5);
1006 /* z5j = (y3j >> 2) - y5j */
1007 resq_r5_1 = _mm_srai_epi16(temp4, 2);
1008 resq_r5_1 = _mm_sub_epi16(resq_r5_1, temp6);
1009 /* z6j = y0j - y6j */
1010 resq_r6_1 = _mm_sub_epi16(temp1, temp7);
1011 /* z7j = y7j - (y1j >> 2) */
1012 resq_r7_1 = _mm_srai_epi16(temp2, 2);
1013 resq_r7_1 = _mm_sub_epi16(temp8, resq_r7_1);
1014 /*------------------------------------------------------------------*/
1015
1016 /*------------------------------------------------------------------*/
1017 /* x0j = z0j + z7j */
1018 temp1 = _mm_add_epi16(resq_r0_1, resq_r7_1);
1019 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp1);
1020 temp10 = _mm_unpacklo_epi16(temp1, sign_reg);
1021 temp11 = _mm_unpackhi_epi16(temp1, sign_reg);
1022 temp10 = _mm_add_epi32(temp10, value_32);
1023 temp11 = _mm_add_epi32(temp11, value_32);
1024 temp10 = _mm_srai_epi32(temp10, 6);
1025 temp11 = _mm_srai_epi32(temp11, 6);
1026 temp10 = _mm_packs_epi32(temp10, temp11);
1027 rsd_r0 = _mm_add_epi16(temp10, rsd_r0);
1028 rsd_r0 = _mm_min_epi16(dupmax_8x16b, rsd_r0);
1029 rsd_r0 = _mm_max_epi16(dupmin_8x16b, rsd_r0);
1030 temp1 = _mm_add_epi16(rsd_r0, pred_r0_1);
1031 /* x1j = z2j + z5j */
1032 temp2 = _mm_add_epi16(resq_r2_1, resq_r5_1);
1033 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp2);
1034 temp10 = _mm_unpacklo_epi16(temp2, sign_reg);
1035 temp11 = _mm_unpackhi_epi16(temp2, sign_reg);
1036 temp10 = _mm_add_epi32(temp10, value_32);
1037 temp11 = _mm_add_epi32(temp11, value_32);
1038 temp10 = _mm_srai_epi32(temp10, 6);
1039 temp11 = _mm_srai_epi32(temp11, 6);
1040 temp10 = _mm_packs_epi32(temp10, temp11);
1041 rsd_r1 = _mm_add_epi16(temp10, rsd_r1);
1042 rsd_r1 = _mm_min_epi16(dupmax_8x16b, rsd_r1);
1043 rsd_r1 = _mm_max_epi16(dupmin_8x16b, rsd_r1);
1044 temp2 = _mm_add_epi16(rsd_r1, pred_r1_1);
1045 /* x2j = z4j + z3j */
1046 temp3 = _mm_add_epi16(resq_r4_1, resq_r3_1);
1047 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp3);
1048 temp10 = _mm_unpacklo_epi16(temp3, sign_reg);
1049 temp11 = _mm_unpackhi_epi16(temp3, sign_reg);
1050 temp10 = _mm_add_epi32(temp10, value_32);
1051 temp11 = _mm_add_epi32(temp11, value_32);
1052 temp10 = _mm_srai_epi32(temp10, 6);
1053 temp11 = _mm_srai_epi32(temp11, 6);
1054 temp10 = _mm_packs_epi32(temp10, temp11);
1055 rsd_r2 = _mm_add_epi16(temp10, rsd_r2);
1056 rsd_r2 = _mm_min_epi16(dupmax_8x16b, rsd_r2);
1057 rsd_r2 = _mm_max_epi16(dupmin_8x16b, rsd_r2);
1058 temp3 = _mm_add_epi16(rsd_r2, pred_r2_1);
1059 /* x3j = z6j + z1j */
1060 temp4 = _mm_add_epi16(resq_r6_1, resq_r1_1);
1061 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp4);
1062 temp10 = _mm_unpacklo_epi16(temp4, sign_reg);
1063 temp11 = _mm_unpackhi_epi16(temp4, sign_reg);
1064 temp10 = _mm_add_epi32(temp10, value_32);
1065 temp11 = _mm_add_epi32(temp11, value_32);
1066 temp10 = _mm_srai_epi32(temp10, 6);
1067 temp11 = _mm_srai_epi32(temp11, 6);
1068 temp10 = _mm_packs_epi32(temp10, temp11);
1069 rsd_r3 = _mm_add_epi16(temp10, rsd_r3);
1070 rsd_r3 = _mm_min_epi16(dupmax_8x16b, rsd_r3);
1071 rsd_r3 = _mm_max_epi16(dupmin_8x16b, rsd_r3);
1072 temp4 = _mm_add_epi16(rsd_r3, pred_r3_1);
1073 /* x4j = z6j - z1j */
1074 temp5 = _mm_sub_epi16(resq_r6_1, resq_r1_1);
1075 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp5);
1076 temp10 = _mm_unpacklo_epi16(temp5, sign_reg);
1077 temp11 = _mm_unpackhi_epi16(temp5, sign_reg);
1078 temp10 = _mm_add_epi32(temp10, value_32);
1079 temp11 = _mm_add_epi32(temp11, value_32);
1080 temp10 = _mm_srai_epi32(temp10, 6);
1081 temp11 = _mm_srai_epi32(temp11, 6);
1082 temp10 = _mm_packs_epi32(temp10, temp11);
1083 rsd_r4 = _mm_add_epi16(temp10, rsd_r4);
1084 rsd_r4 = _mm_min_epi16(dupmax_8x16b, rsd_r4);
1085 rsd_r4 = _mm_max_epi16(dupmin_8x16b, rsd_r4);
1086 temp5 = _mm_add_epi16(rsd_r4, pred_r4_1);
1087 /* x5j = z4j - z3j */
1088 temp6 = _mm_sub_epi16(resq_r4_1, resq_r3_1);
1089 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp6);
1090 temp10 = _mm_unpacklo_epi16(temp6, sign_reg);
1091 temp11 = _mm_unpackhi_epi16(temp6, sign_reg);
1092 temp10 = _mm_add_epi32(temp10, value_32);
1093 temp11 = _mm_add_epi32(temp11, value_32);
1094 temp10 = _mm_srai_epi32(temp10, 6);
1095 temp11 = _mm_srai_epi32(temp11, 6);
1096 temp10 = _mm_packs_epi32(temp10, temp11);
1097 rsd_r5 = _mm_add_epi16(temp10, rsd_r5);
1098 rsd_r5 = _mm_min_epi16(dupmax_8x16b, rsd_r5);
1099 rsd_r5 = _mm_max_epi16(dupmin_8x16b, rsd_r5);
1100 temp6 = _mm_add_epi16(rsd_r5, pred_r5_1);
1101 /* x6j = z2j - z5j */
1102 temp7 = _mm_sub_epi16(resq_r2_1, resq_r5_1);
1103 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp7);
1104 temp10 = _mm_unpacklo_epi16(temp7, sign_reg);
1105 temp11 = _mm_unpackhi_epi16(temp7, sign_reg);
1106 temp10 = _mm_add_epi32(temp10, value_32);
1107 temp11 = _mm_add_epi32(temp11, value_32);
1108 temp10 = _mm_srai_epi32(temp10, 6);
1109 temp11 = _mm_srai_epi32(temp11, 6);
1110 temp10 = _mm_packs_epi32(temp10, temp11);
1111 rsd_r6 = _mm_add_epi16(temp10, rsd_r6);
1112 rsd_r6 = _mm_min_epi16(dupmax_8x16b, rsd_r6);
1113 rsd_r6 = _mm_max_epi16(dupmin_8x16b, rsd_r6);
1114 temp7 = _mm_add_epi16(rsd_r6, pred_r6_1);
1115 /* x7j = z0j - z7j */
1116 temp8 = _mm_sub_epi16(resq_r0_1, resq_r7_1);
1117 sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp8);
1118 temp10 = _mm_unpacklo_epi16(temp8, sign_reg);
1119 temp11 = _mm_unpackhi_epi16(temp8, sign_reg);
1120 temp10 = _mm_add_epi32(temp10, value_32);
1121 temp11 = _mm_add_epi32(temp11, value_32);
1122 temp10 = _mm_srai_epi32(temp10, 6);
1123 temp11 = _mm_srai_epi32(temp11, 6);
1124 temp10 = _mm_packs_epi32(temp10, temp11);
1125 rsd_r7 = _mm_add_epi16(temp10, rsd_r7);
1126 rsd_r7 = _mm_min_epi16(dupmax_8x16b, rsd_r7);
1127 rsd_r7 = _mm_max_epi16(dupmin_8x16b, rsd_r7);
1128 temp8 = _mm_add_epi16(rsd_r7, pred_r7_1);
1129
1130 rsd_r01_b0 = _mm_unpacklo_epi64(rsd_r0, rsd_r1);
1131 rsd_r23_b0 = _mm_unpacklo_epi64(rsd_r2, rsd_r3);
1132 rsd_r45_b2 = _mm_unpacklo_epi64(rsd_r4, rsd_r5);
1133 rsd_r67_b2 = _mm_unpacklo_epi64(rsd_r6, rsd_r7);
1134
1135 rsd_r01_b1 = _mm_unpackhi_epi64(rsd_r0, rsd_r1);
1136 rsd_r23_b1 = _mm_unpackhi_epi64(rsd_r2, rsd_r3);
1137 rsd_r45_b3 = _mm_unpackhi_epi64(rsd_r4, rsd_r5);
1138 rsd_r67_b3 = _mm_unpackhi_epi64(rsd_r6, rsd_r7);
1139
1140 row_01_b0 = _mm_test_all_ones(
1141 _mm_cmpeq_epi16(rsd_r01_b0, zero_8x16b)); // return 1 if all zeros, else 0
1142 row_23_b0 = _mm_test_all_ones(
1143 _mm_cmpeq_epi16(rsd_r23_b0, zero_8x16b)); // return 1 if all zeros, else 0
1144 row_45_b2 = _mm_test_all_ones(
1145 _mm_cmpeq_epi16(rsd_r45_b2, zero_8x16b)); // return 1 if all zeros, else 0
1146 row_67_b2 = _mm_test_all_ones(
1147 _mm_cmpeq_epi16(rsd_r67_b2, zero_8x16b)); // return 1 if all zeros, else 0
1148
1149 row_01_b1 = _mm_test_all_ones(
1150 _mm_cmpeq_epi16(rsd_r01_b1, zero_8x16b)); // return 1 if all zeros, else 0
1151 row_23_b1 = _mm_test_all_ones(
1152 _mm_cmpeq_epi16(rsd_r23_b1, zero_8x16b)); // return 1 if all zeros, else 0
1153 row_45_b3 = _mm_test_all_ones(
1154 _mm_cmpeq_epi16(rsd_r45_b3, zero_8x16b)); // return 1 if all zeros, else 0
1155 row_67_b3 = _mm_test_all_ones(
1156 _mm_cmpeq_epi16(rsd_r67_b3, zero_8x16b)); // return 1 if all zeros, else 0
1157
1158 /*------------------------------------------------------------------*/
1159 // Clipping the results to 8 bits
1160 sign_reg = _mm_cmpgt_epi16(temp1, zero_8x16b); // sign check
1161 temp1 = _mm_and_si128(temp1, sign_reg);
1162 sign_reg = _mm_cmpgt_epi16(temp2, zero_8x16b); // sign check
1163 temp2 = _mm_and_si128(temp2, sign_reg);
1164 sign_reg = _mm_cmpgt_epi16(temp3, zero_8x16b); // sign check
1165 temp3 = _mm_and_si128(temp3, sign_reg);
1166 sign_reg = _mm_cmpgt_epi16(temp4, zero_8x16b); // sign check
1167 temp4 = _mm_and_si128(temp4, sign_reg);
1168 sign_reg = _mm_cmpgt_epi16(temp5, zero_8x16b); // sign check
1169 temp5 = _mm_and_si128(temp5, sign_reg);
1170 sign_reg = _mm_cmpgt_epi16(temp6, zero_8x16b); // sign check
1171 temp6 = _mm_and_si128(temp6, sign_reg);
1172 sign_reg = _mm_cmpgt_epi16(temp7, zero_8x16b); // sign check
1173 temp7 = _mm_and_si128(temp7, sign_reg);
1174 sign_reg = _mm_cmpgt_epi16(temp8, zero_8x16b); // sign check
1175 temp8 = _mm_and_si128(temp8, sign_reg);
1176
1177 resq_r0_2 = _mm_packus_epi16(temp1, zero_8x16b);
1178 resq_r1_2 = _mm_packus_epi16(temp2, zero_8x16b);
1179 resq_r2_2 = _mm_packus_epi16(temp3, zero_8x16b);
1180 resq_r3_2 = _mm_packus_epi16(temp4, zero_8x16b);
1181 resq_r4_2 = _mm_packus_epi16(temp5, zero_8x16b);
1182 resq_r5_2 = _mm_packus_epi16(temp6, zero_8x16b);
1183 resq_r6_2 = _mm_packus_epi16(temp7, zero_8x16b);
1184 resq_r7_2 = _mm_packus_epi16(temp8, zero_8x16b);
1185
1186 _mm_storel_epi64((__m128i *) (&pu1_out[0]), resq_r0_2);
1187 _mm_storel_epi64((__m128i *) (&pu1_out[out_strd]), resq_r1_2);
1188 _mm_storel_epi64((__m128i *) (&pu1_out[2 * out_strd]), resq_r2_2);
1189 _mm_storel_epi64((__m128i *) (&pu1_out[3 * out_strd]), resq_r3_2);
1190 _mm_storel_epi64((__m128i *) (&pu1_out[4 * out_strd]), resq_r4_2);
1191 _mm_storel_epi64((__m128i *) (&pu1_out[5 * out_strd]), resq_r5_2);
1192 _mm_storel_epi64((__m128i *) (&pu1_out[6 * out_strd]), resq_r6_2);
1193 _mm_storel_epi64((__m128i *) (&pu1_out[7 * out_strd]), resq_r7_2);
1194
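/* nnz flags for the four 4x4 sub-blocks: b0/b1 are the top-left/top-right */
/* blocks and b2/b3 the bottom-left/bottom-right; the shifts to bits 0, 1, 4 */
/* and 5 appear to follow the raster positions of these 4x4 blocks within a */
/* 16x16 macroblock nnz map */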
1195 i4_nnz_b0 = (!(row_01_b0 && row_23_b0));
1196 i4_nnz_b1 = (!(row_01_b1 && row_23_b1)) << 1;
1197 i4_nnz_b2 = (!(row_45_b2 && row_67_b2)) << 4;
1198 i4_nnz_b3 = (!(row_45_b3 && row_67_b3)) << 5;
1199
1200 i4_nnz = (i4_nnz_b0 | i4_nnz_b1 | i4_nnz_b2 | i4_nnz_b3);
1201 return i4_nnz;
1202 }
1203
1204 /*****************************************************************************/
1205 /* */
1206 /* Function Name : isvcd_iquant_itrans_residual_recon_4x4_dc_sse42 */
1207 /* */
1208 /* Description : computes the recon output for a DC-only 4x4 block */
1209 /* (IQ + IT + residual addition) */
1210 /* */
1211 /* Inputs : */
1212 /* Globals : none */
1213 /* Processing : */
1214 /* */
1215 /* Outputs : i4_nnz */
1216 /* Returns : i4_nnz */
1217 /* */
1218 /* Issues : none */
1219 /* */
1220 /* Revision History: */
1221 /* */
1222 /* DD MM YYYY Author(s) Changes (Describe the changes made) */
1223 /* 25 11 2021 Kishore creation */
1224 /* */
1225 /*****************************************************************************/
1226
1227 WORD32 isvcd_iquant_itrans_residual_recon_4x4_dc_sse42(
1228 WORD16 *pi2_src, UWORD8 *pu1_pred, WORD16 *pi2_rsd, UWORD8 *pu1_out, WORD32 pred_strd,
1229 WORD32 rsd_strd, WORD32 out_strd, const UWORD16 *pu2_iscal_mat, const UWORD16 *pu2_weigh_mat,
1230 UWORD32 u4_qp_div_6, WORD16 *pi2_tmp, WORD32 iq_start_idx, WORD16 *pi2_dc_ld_addr)
1231 {
1232 __m128i pred_16x8b_0, pred_8x16b_0, rsd_8x16b_0, out_8x16b_0, out_16x8b_0;
1233 __m128i pred_16x8b_1, pred_8x16b_1, rsd_8x16b_1, out_8x16b_1, out_16x8b_1;
1234 __m128i pred_16x8b_2, pred_8x16b_2, rsd_8x16b_2, out_8x16b_2, out_16x8b_2;
1235 __m128i pred_16x8b_3, pred_8x16b_3, rsd_8x16b_3, out_8x16b_3, out_16x8b_3;
1236 __m128i rsd_8x16b_01, rsd_8x16b_23;
1237
1238 __m128i i_macro_8x16b, dupmax_8x16b, dupmin_8x16b;
1239 __m128i zero_8x16b = _mm_setzero_si128();
1240 WORD32 i4_nnz, row_01, row_23;
1241 WORD32 q0;
1242 WORD16 i_macro;
1243 WORD16 rnd_fact = (u4_qp_div_6 < 4) ? 1 << (3 - u4_qp_div_6) : 0;
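/* rnd_fact is half of the (4 - u4_qp_div_6) right-shift step applied inside */
/* INV_QUANT when u4_qp_div_6 < 4, so the dequantized DC value is rounded */
/* rather than truncated (mirrors the SIMD add_rshift path used elsewhere) */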
1244 UNUSED(pi2_tmp);
1245
1246 if(iq_start_idx == 0)
1247 {
1248 q0 = pi2_src[0];
1249 INV_QUANT(q0, pu2_iscal_mat[0], pu2_weigh_mat[0], u4_qp_div_6, rnd_fact, 4);
1250 }
1251 else
1252 {
1253 q0 = pi2_dc_ld_addr[0]; // Restoring dc value for the intra case
1254 }
1255 i_macro = ((q0 + 32) >> 6);
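/* with only the DC coefficient non-zero, the 4x4 inverse transform output is */
/* constant: every residual sample equals (q0 + 32) >> 6, so i_macro is simply */
/* broadcast instead of running the row/column butterflies */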
1256
1257 i_macro_8x16b = _mm_set1_epi16(i_macro);
1258 dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
1259 dupmin_8x16b = _mm_set1_epi16(RSD_MIN);
1260
1261 pred_16x8b_0 = _mm_loadu_si128((__m128i *) (pu1_pred));
1262 pred_16x8b_1 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd));
1263 pred_16x8b_2 = _mm_loadu_si128((__m128i *) (pu1_pred + (pred_strd << 1)));
1264 pred_16x8b_3 = _mm_loadu_si128((__m128i *) (pu1_pred + (pred_strd << 1) + pred_strd));
1265
1266 pred_8x16b_0 = _mm_cvtepu8_epi16(pred_16x8b_0);
1267 pred_8x16b_1 = _mm_cvtepu8_epi16(pred_16x8b_1);
1268 pred_8x16b_2 = _mm_cvtepu8_epi16(pred_16x8b_2);
1269 pred_8x16b_3 = _mm_cvtepu8_epi16(pred_16x8b_3);
1270
1271 rsd_8x16b_0 = _mm_loadu_si128((__m128i *) (pi2_rsd));
1272 rsd_8x16b_1 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd));
1273 rsd_8x16b_2 = _mm_loadu_si128((__m128i *) (pi2_rsd + (rsd_strd << 1)));
1274 rsd_8x16b_3 = _mm_loadu_si128((__m128i *) (pi2_rsd + (rsd_strd << 1) + rsd_strd));
1275
1276 rsd_8x16b_0 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_0);
1277 rsd_8x16b_1 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_1);
1278 rsd_8x16b_2 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_2);
1279 rsd_8x16b_3 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_3);
1280
1281 rsd_8x16b_0 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_0);
1282 rsd_8x16b_0 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_0);
1283 rsd_8x16b_1 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_1);
1284 rsd_8x16b_1 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_1);
1285 rsd_8x16b_2 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_2);
1286 rsd_8x16b_2 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_2);
1287 rsd_8x16b_3 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_3);
1288 rsd_8x16b_3 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_3);
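/* the residual (DC value + incoming residual) is saturated to */
/* [RSD_MIN, RSD_MAX] before the prediction is added, keeping the 16-bit */
/* arithmetic below from wrapping */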
1289
1290 rsd_8x16b_01 = _mm_unpacklo_epi64(rsd_8x16b_0, rsd_8x16b_1);
1291 rsd_8x16b_23 = _mm_unpacklo_epi64(rsd_8x16b_2, rsd_8x16b_3);
1292
1293 row_01 = _mm_test_all_ones(
1294 _mm_cmpeq_epi16(rsd_8x16b_01, zero_8x16b)); // return 1 if all zeros, else 0
1295 row_23 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_23, zero_8x16b));
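/* a 4x4 block carries a single nnz flag: non-zero iff any of its 16 residual */
/* samples is non-zero after the DC addition */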
1296
1297 out_8x16b_0 = _mm_add_epi16(pred_8x16b_0, rsd_8x16b_0);
1298 out_8x16b_1 = _mm_add_epi16(pred_8x16b_1, rsd_8x16b_1);
1299 out_8x16b_2 = _mm_add_epi16(pred_8x16b_2, rsd_8x16b_2);
1300 out_8x16b_3 = _mm_add_epi16(pred_8x16b_3, rsd_8x16b_3);
1301
1302 out_16x8b_0 = _mm_packus_epi16(out_8x16b_0, zero_8x16b);
1303 out_16x8b_1 = _mm_packus_epi16(out_8x16b_1, zero_8x16b);
1304 out_16x8b_2 = _mm_packus_epi16(out_8x16b_2, zero_8x16b);
1305 out_16x8b_3 = _mm_packus_epi16(out_8x16b_3, zero_8x16b);
1306
1307 *((WORD32 *) (pu1_out)) = _mm_cvtsi128_si32(out_16x8b_0);
1308 *((WORD32 *) (pu1_out + out_strd)) = _mm_cvtsi128_si32(out_16x8b_1);
1309 *((WORD32 *) (pu1_out + (out_strd << 1))) = _mm_cvtsi128_si32(out_16x8b_2);
1310 *((WORD32 *) (pu1_out + (out_strd * 3))) = _mm_cvtsi128_si32(out_16x8b_3);
1311
1312 i4_nnz = !(row_01 && row_23);
1313 return i4_nnz;
1314 }
1315
1316 /*****************************************************************************/
1317 /* */
1318 /* Function Name : isvcd_iquant_itrans_residual_recon_8x8_dc_sse42 */
1319 /* */
1320 /* Description : computes the recon output for a DC-only 8x8 block */
1321 /* (IQ + IT + residual addition) */
1322 /* */
1323 /* Inputs : */
1324 /* Globals : none */
1325 /* Processing : */
1326 /* */
1327 /* Outputs : i4_nnz */
1328 /* Returns : i4_nnz */
1329 /* */
1330 /* Issues : none */
1331 /* */
1332 /* Revision History: */
1333 /* */
1334 /* DD MM YYYY Author(s) Changes (Describe the changes made) */
1335 /* 25 11 2021 Kishore creation */
1336 /* */
1337 /*****************************************************************************/
1338
1339 WORD32 isvcd_iquant_itrans_residual_recon_8x8_dc_sse42(
1340 WORD16 *pi2_src, UWORD8 *pu1_pred, WORD16 *pi2_rsd, UWORD8 *pu1_out, WORD32 pred_strd,
1341 WORD32 rsd_strd, WORD32 out_strd, const UWORD16 *pu2_iscale_mat, const UWORD16 *pu2_weigh_mat,
1342 UWORD32 qp_div, WORD16 *pi2_tmp, WORD32 iq_start_idx, WORD16 *pi2_dc_ld_addr)
1343 {
1344 __m128i pred_16x8b_0, pred_8x16b_0, rsd_8x16b_0, out_8x16b_0, out_16x8b_0;
1345 __m128i pred_16x8b_1, pred_8x16b_1, rsd_8x16b_1, out_8x16b_1, out_16x8b_1;
1346 __m128i pred_16x8b_2, pred_8x16b_2, rsd_8x16b_2, out_8x16b_2, out_16x8b_2;
1347 __m128i pred_16x8b_3, pred_8x16b_3, rsd_8x16b_3, out_8x16b_3, out_16x8b_3;
1348 __m128i pred_16x8b_4, pred_8x16b_4, rsd_8x16b_4, out_8x16b_4, out_16x8b_4;
1349 __m128i pred_16x8b_5, pred_8x16b_5, rsd_8x16b_5, out_8x16b_5, out_16x8b_5;
1350 __m128i pred_16x8b_6, pred_8x16b_6, rsd_8x16b_6, out_8x16b_6, out_16x8b_6;
1351 __m128i pred_16x8b_7, pred_8x16b_7, rsd_8x16b_7, out_8x16b_7, out_16x8b_7;
1352 __m128i rsd_8x16b_01_b0, rsd_8x16b_23_b0, rsd_8x16b_45_b2, rsd_8x16b_67_b2;
1353 __m128i rsd_8x16b_01_b1, rsd_8x16b_23_b1, rsd_8x16b_45_b3, rsd_8x16b_67_b3;
1354
1355 WORD32 row_01_b0, row_23_b0, row_45_b2, row_67_b2;
1356 WORD32 row_01_b1, row_23_b1, row_45_b3, row_67_b3;
1357 WORD32 i4_nnz, i4_nnz_b0, i4_nnz_b1, i4_nnz_b2, i4_nnz_b3;
1358
1359 __m128i zero_8x16b = _mm_setzero_si128();
1360
1361 WORD32 pred_strd2 = (pred_strd << 1);
1362 WORD32 pred_strd4 = (pred_strd << 2);
1363 WORD32 rsd_strd2 = (rsd_strd << 1);
1364 WORD32 rsd_strd4 = (rsd_strd << 2);
1365 WORD32 out_strd2 = (out_strd << 1);
1366 WORD32 out_strd4 = (out_strd << 2);
1367
1368 __m128i i_macro_8x16b, dupmax_8x16b, dupmin_8x16b;
1369 WORD32 q;
1370 WORD16 i_macro;
1371 WORD32 rnd_fact = (qp_div < 6) ? (1 << (5 - qp_div)) : 0;
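/* the 8x8 dequant uses a shift of 6, so for qp_div < 6 rnd_fact is half of */
/* the (6 - qp_div) right-shift step, rounding the dequantized DC value */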
1372 UNUSED(pi2_tmp);
1373 UNUSED(iq_start_idx);
1374 UNUSED(pi2_dc_ld_addr);
1375 /*************************************************************/
1376 /* Dequantization of the DC coefficient (scalar); in this */
1377 /* DC-only path all other coefficients are zero */
1378 /*************************************************************/
1379 q = pi2_src[0];
1380 INV_QUANT(q, pu2_iscale_mat[0], pu2_weigh_mat[0], qp_div, rnd_fact, 6);
1381 i_macro = (q + 32) >> 6;
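/* same DC-only shortcut as the 4x4 case: a lone DC coefficient transforms to */
/* the constant (q + 32) >> 6 in all 64 positions, so i_macro is broadcast */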
1382
1383 i_macro_8x16b = _mm_set1_epi16(i_macro);
1384 dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
1385 dupmin_8x16b = _mm_set1_epi16(RSD_MIN);
1386
1387 pred_16x8b_0 = _mm_loadu_si128((__m128i *) (pu1_pred));
1388 pred_16x8b_1 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd));
1389 pred_16x8b_2 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd2));
1390 pred_16x8b_3 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd2 + pred_strd));
1391 pred_16x8b_4 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd4));
1392 pred_16x8b_5 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd4 + pred_strd));
1393 pred_16x8b_6 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd4 + pred_strd2));
1394 pred_16x8b_7 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd4 + pred_strd2 + pred_strd));
1395
1396 pred_8x16b_0 = _mm_cvtepu8_epi16(pred_16x8b_0);
1397 pred_8x16b_1 = _mm_cvtepu8_epi16(pred_16x8b_1);
1398 pred_8x16b_2 = _mm_cvtepu8_epi16(pred_16x8b_2);
1399 pred_8x16b_3 = _mm_cvtepu8_epi16(pred_16x8b_3);
1400 pred_8x16b_4 = _mm_cvtepu8_epi16(pred_16x8b_4);
1401 pred_8x16b_5 = _mm_cvtepu8_epi16(pred_16x8b_5);
1402 pred_8x16b_6 = _mm_cvtepu8_epi16(pred_16x8b_6);
1403 pred_8x16b_7 = _mm_cvtepu8_epi16(pred_16x8b_7);
1404
1405 rsd_8x16b_0 = _mm_loadu_si128((__m128i *) (pi2_rsd));
1406 rsd_8x16b_1 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd));
1407 rsd_8x16b_2 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd2));
1408 rsd_8x16b_3 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd2 + rsd_strd));
1409 rsd_8x16b_4 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd4));
1410 rsd_8x16b_5 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd4 + rsd_strd));
1411 rsd_8x16b_6 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd4 + rsd_strd2));
1412 rsd_8x16b_7 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd4 + rsd_strd2 + rsd_strd));
1413
1414 rsd_8x16b_0 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_0);
1415 rsd_8x16b_1 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_1);
1416 rsd_8x16b_2 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_2);
1417 rsd_8x16b_3 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_3);
1418 rsd_8x16b_4 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_4);
1419 rsd_8x16b_5 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_5);
1420 rsd_8x16b_6 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_6);
1421 rsd_8x16b_7 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_7);
1422
1423 rsd_8x16b_0 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_0);
1424 rsd_8x16b_0 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_0);
1425 rsd_8x16b_1 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_1);
1426 rsd_8x16b_1 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_1);
1427 rsd_8x16b_2 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_2);
1428 rsd_8x16b_2 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_2);
1429 rsd_8x16b_3 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_3);
1430 rsd_8x16b_3 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_3);
1431 rsd_8x16b_4 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_4);
1432 rsd_8x16b_4 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_4);
1433 rsd_8x16b_5 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_5);
1434 rsd_8x16b_5 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_5);
1435 rsd_8x16b_6 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_6);
1436 rsd_8x16b_6 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_6);
1437 rsd_8x16b_7 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_7);
1438 rsd_8x16b_7 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_7);
1439
1440 rsd_8x16b_01_b0 = _mm_unpacklo_epi64(rsd_8x16b_0, rsd_8x16b_1);
1441 rsd_8x16b_23_b0 = _mm_unpacklo_epi64(rsd_8x16b_2, rsd_8x16b_3);
1442 rsd_8x16b_01_b1 = _mm_unpackhi_epi64(rsd_8x16b_0, rsd_8x16b_1);
1443 rsd_8x16b_23_b1 = _mm_unpackhi_epi64(rsd_8x16b_2, rsd_8x16b_3);
1444
1445 rsd_8x16b_45_b2 = _mm_unpacklo_epi64(rsd_8x16b_4, rsd_8x16b_5);
1446 rsd_8x16b_67_b2 = _mm_unpacklo_epi64(rsd_8x16b_6, rsd_8x16b_7);
1447 rsd_8x16b_45_b3 = _mm_unpackhi_epi64(rsd_8x16b_4, rsd_8x16b_5);
1448 rsd_8x16b_67_b3 = _mm_unpackhi_epi64(rsd_8x16b_6, rsd_8x16b_7);
1449
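/* _mm_test_all_ones() returns 1 only when every bit of its argument is set, */
/* so each sub-block must first be compared against zero to turn "all samples */
/* zero" into an all-ones mask (as done in the other functions in this file) */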
1450 row_01_b0 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_01_b0, zero_8x16b)); // return 1 if all zeros, else 0
1451 row_23_b0 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_23_b0, zero_8x16b));
1452 row_01_b1 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_01_b1, zero_8x16b));
1453 row_23_b1 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_23_b1, zero_8x16b));
1454
1455 row_45_b2 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_45_b2, zero_8x16b));
1456 row_67_b2 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_67_b2, zero_8x16b));
1457 row_45_b3 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_45_b3, zero_8x16b));
1458 row_67_b3 = _mm_test_all_ones(_mm_cmpeq_epi16(rsd_8x16b_67_b3, zero_8x16b));
1459
1460 out_8x16b_0 = _mm_add_epi16(pred_8x16b_0, rsd_8x16b_0);
1461 out_8x16b_1 = _mm_add_epi16(pred_8x16b_1, rsd_8x16b_1);
1462 out_8x16b_2 = _mm_add_epi16(pred_8x16b_2, rsd_8x16b_2);
1463 out_8x16b_3 = _mm_add_epi16(pred_8x16b_3, rsd_8x16b_3);
1464 out_8x16b_4 = _mm_add_epi16(pred_8x16b_4, rsd_8x16b_4);
1465 out_8x16b_5 = _mm_add_epi16(pred_8x16b_5, rsd_8x16b_5);
1466 out_8x16b_6 = _mm_add_epi16(pred_8x16b_6, rsd_8x16b_6);
1467 out_8x16b_7 = _mm_add_epi16(pred_8x16b_7, rsd_8x16b_7);
1468
1469 out_16x8b_0 = _mm_packus_epi16(out_8x16b_0, zero_8x16b);
1470 out_16x8b_1 = _mm_packus_epi16(out_8x16b_1, zero_8x16b);
1471 out_16x8b_2 = _mm_packus_epi16(out_8x16b_2, zero_8x16b);
1472 out_16x8b_3 = _mm_packus_epi16(out_8x16b_3, zero_8x16b);
1473 out_16x8b_4 = _mm_packus_epi16(out_8x16b_4, zero_8x16b);
1474 out_16x8b_5 = _mm_packus_epi16(out_8x16b_5, zero_8x16b);
1475 out_16x8b_6 = _mm_packus_epi16(out_8x16b_6, zero_8x16b);
1476 out_16x8b_7 = _mm_packus_epi16(out_8x16b_7, zero_8x16b);
1477
1478 _mm_storel_epi64((__m128i *) (pu1_out), out_16x8b_0);
1479 _mm_storel_epi64((__m128i *) (pu1_out + out_strd), out_16x8b_1);
1480 _mm_storel_epi64((__m128i *) (pu1_out + out_strd2), out_16x8b_2);
1481 _mm_storel_epi64((__m128i *) (pu1_out + out_strd2 + out_strd), out_16x8b_3);
1482 _mm_storel_epi64((__m128i *) (pu1_out + out_strd4), out_16x8b_4);
1483 _mm_storel_epi64((__m128i *) (pu1_out + out_strd4 + out_strd), out_16x8b_5);
1484 _mm_storel_epi64((__m128i *) (pu1_out + out_strd4 + out_strd2), out_16x8b_6);
1485 _mm_storel_epi64((__m128i *) (pu1_out + out_strd4 + out_strd2 + out_strd), out_16x8b_7);
1486
1487 i4_nnz_b0 = (!(row_01_b0 && row_23_b0));
1488 i4_nnz_b1 = (!(row_01_b1 && row_23_b1)) << 1;
1489 i4_nnz_b2 = (!(row_45_b2 && row_67_b2)) << 4;
1490 i4_nnz_b3 = (!(row_45_b3 && row_67_b3)) << 5;
1491
1492 i4_nnz = (i4_nnz_b0 | i4_nnz_b1 | i4_nnz_b2 | i4_nnz_b3);
1493 return i4_nnz;
1494 }
1495
1496 /*****************************************************************************/
1497 /* */
1498 /* Function Name : isvcd_iquant_itrans_residual_recon_chroma_4x4_dc_sse42 */
1499 /* */
1500 /* Description : computes the recon output for a DC-only chroma 4x4 */
1501 /* block (IQ + IT + residual addition) */
1502 /* */
1503 /* Inputs : */
1504 /* Globals : none */
1505 /* Processing : */
1506 /* */
1507 /* Outputs : recon samples written to pu1_out */
1508 /* Returns : none */
1509 /* */
1510 /* Issues : none */
1511 /* */
1512 /* Revision History: */
1513 /* */
1514 /* DD MM YYYY Author(s) Changes (Describe the changes made) */
1515 /* 25 11 2021 Kishore creation */
1516 /* */
1517 /*****************************************************************************/
1518
1519 void isvcd_iquant_itrans_residual_recon_chroma_4x4_dc_sse42(
1520 WORD16 *pi2_src, UWORD8 *pu1_pred, WORD16 *pi2_rsd, UWORD8 *pu1_out, WORD32 pred_strd,
1521 WORD32 rsd_strd, WORD32 out_strd, const UWORD16 *pu2_iscal_mat, const UWORD16 *pu2_weigh_mat,
1522 UWORD32 u4_qp_div_6, WORD16 *pi2_tmp, WORD16 *pi2_dc_src)
1523 {
1524 __m128i pred_16x8b_0, pred_8x16b_0, rsd_8x16b_0, rsd_16x8b_0, out_16x8b_0;
1525 __m128i pred_16x8b_1, pred_8x16b_1, rsd_8x16b_1, rsd_16x8b_1, out_16x8b_1;
1526 __m128i pred_16x8b_2, pred_8x16b_2, rsd_8x16b_2, rsd_16x8b_2, out_16x8b_2;
1527 __m128i pred_16x8b_3, pred_8x16b_3, rsd_8x16b_3, rsd_16x8b_3, out_16x8b_3;
1528
1529 __m128i i_macro_8x16b, dupmax_8x16b, dupmin_8x16b;
1530 __m128i chroma_mask, chroma_mask2;
1531 __m128i zero_8x16b = _mm_setzero_si128();
1532 WORD32 q0;
1533 WORD16 i_macro;
1534 UNUSED(pi2_src);
1535 UNUSED(pu2_iscal_mat);
1536 UNUSED(pu2_weigh_mat);
1537 UNUSED(u4_qp_div_6);
1538 UNUSED(pi2_tmp);
1539
1540 q0 = pi2_dc_src[0]; // Restoring the dc value passed in via pi2_dc_src
1541 i_macro = ((q0 + 32) >> 6);
1542
1543 i_macro_8x16b = _mm_set1_epi16(i_macro);
1544 dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
1545 dupmin_8x16b = _mm_set1_epi16(RSD_MIN);
1546
1547 pred_16x8b_0 = _mm_loadu_si128((__m128i *) (pu1_pred));
1548 pred_16x8b_1 = _mm_loadu_si128((__m128i *) (pu1_pred + pred_strd));
1549 pred_16x8b_2 = _mm_loadu_si128((__m128i *) (pu1_pred + (pred_strd << 1)));
1550 pred_16x8b_3 = _mm_loadu_si128((__m128i *) (pu1_pred + (pred_strd << 1) + pred_strd));
1551
1552 pred_8x16b_0 = _mm_cvtepu8_epi16(pred_16x8b_0);
1553 pred_8x16b_1 = _mm_cvtepu8_epi16(pred_16x8b_1);
1554 pred_8x16b_2 = _mm_cvtepu8_epi16(pred_16x8b_2);
1555 pred_8x16b_3 = _mm_cvtepu8_epi16(pred_16x8b_3);
1556
1557 rsd_8x16b_0 = _mm_loadu_si128((__m128i *) (pi2_rsd));
1558 rsd_8x16b_1 = _mm_loadu_si128((__m128i *) (pi2_rsd + rsd_strd));
1559 rsd_8x16b_2 = _mm_loadu_si128((__m128i *) (pi2_rsd + (rsd_strd << 1)));
1560 rsd_8x16b_3 = _mm_loadu_si128((__m128i *) (pi2_rsd + (rsd_strd << 1) + rsd_strd));
1561
1562 rsd_8x16b_0 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_0);
1563 rsd_8x16b_1 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_1);
1564 rsd_8x16b_2 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_2);
1565 rsd_8x16b_3 = _mm_add_epi16(i_macro_8x16b, rsd_8x16b_3);
1566
1567 rsd_8x16b_0 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_0);
1568 rsd_8x16b_0 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_0);
1569 rsd_8x16b_1 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_1);
1570 rsd_8x16b_1 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_1);
1571 rsd_8x16b_2 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_2);
1572 rsd_8x16b_2 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_2);
1573 rsd_8x16b_3 = _mm_min_epi16(dupmax_8x16b, rsd_8x16b_3);
1574 rsd_8x16b_3 = _mm_max_epi16(dupmin_8x16b, rsd_8x16b_3);
1575
1576 rsd_8x16b_0 = _mm_add_epi16(pred_8x16b_0, rsd_8x16b_0);
1577 rsd_8x16b_1 = _mm_add_epi16(pred_8x16b_1, rsd_8x16b_1);
1578 rsd_8x16b_2 = _mm_add_epi16(pred_8x16b_2, rsd_8x16b_2);
1579 rsd_8x16b_3 = _mm_add_epi16(pred_8x16b_3, rsd_8x16b_3);
1580
1581 chroma_mask = _mm_set1_epi16(0xFF00);
1582 chroma_mask2 = _mm_set1_epi16(0x00FF);
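/* chroma samples are interleaved (alternating Cb/Cr bytes): the 0xFF00 mask */
/* preserves the other component's bytes already in pu1_out, while 0x00FF */
/* keeps only the freshly reconstructed component from the packed result */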
1583 out_16x8b_0 = _mm_loadu_si128((__m128i *) (&pu1_out[0]));
1584 out_16x8b_1 = _mm_loadu_si128((__m128i *) (&pu1_out[out_strd]));
1585 out_16x8b_2 = _mm_loadu_si128((__m128i *) (&pu1_out[(out_strd << 1)]));
1586 out_16x8b_3 = _mm_loadu_si128((__m128i *) (&pu1_out[(out_strd << 1) + out_strd]));
1587
1588 out_16x8b_0 = _mm_and_si128(out_16x8b_0, chroma_mask);
1589 out_16x8b_1 = _mm_and_si128(out_16x8b_1, chroma_mask);
1590 out_16x8b_2 = _mm_and_si128(out_16x8b_2, chroma_mask);
1591 out_16x8b_3 = _mm_and_si128(out_16x8b_3, chroma_mask);
1592
1593 rsd_16x8b_0 = _mm_packus_epi16(rsd_8x16b_0, zero_8x16b);
1594 rsd_16x8b_1 = _mm_packus_epi16(rsd_8x16b_1, zero_8x16b);
1595 rsd_16x8b_2 = _mm_packus_epi16(rsd_8x16b_2, zero_8x16b);
1596 rsd_16x8b_3 = _mm_packus_epi16(rsd_8x16b_3, zero_8x16b);
1597
1598 rsd_8x16b_0 = _mm_and_si128(rsd_16x8b_0, chroma_mask2);
1599 rsd_8x16b_1 = _mm_and_si128(rsd_16x8b_1, chroma_mask2);
1600 rsd_8x16b_2 = _mm_and_si128(rsd_16x8b_2, chroma_mask2);
1601 rsd_8x16b_3 = _mm_and_si128(rsd_16x8b_3, chroma_mask2);
1602
1603 out_16x8b_0 = _mm_add_epi8(rsd_8x16b_0, out_16x8b_0);
1604 out_16x8b_1 = _mm_add_epi8(rsd_8x16b_1, out_16x8b_1);
1605 out_16x8b_2 = _mm_add_epi8(rsd_8x16b_2, out_16x8b_2);
1606 out_16x8b_3 = _mm_add_epi8(rsd_8x16b_3, out_16x8b_3);
1607
1608 _mm_storel_epi64((__m128i *) (pu1_out), out_16x8b_0);
1609 _mm_storel_epi64((__m128i *) (pu1_out + out_strd), out_16x8b_1);
1610 _mm_storel_epi64((__m128i *) (pu1_out + (out_strd << 1)), out_16x8b_2);
1611 _mm_storel_epi64((__m128i *) (pu1_out + (out_strd * 3)), out_16x8b_3);
1612 }
1613
1614 /*****************************************************************************/
1615 /* */
1616 /* Function Name : isvcd_iquant_itrans_residual_recon_chroma_4x4_sse42 */
1617 /* */
1618 /* Description : computes the recon output for a chroma 4x4 block */
1619 /* (IQ + IT + residual addition) */
1620 /* */
1621 /* Inputs : */
1622 /* Globals : none */
1623 /* Processing : */
1624 /* */
1625 /* Outputs : recon samples written to pu1_out */
1626 /* Returns : none */
1627 /* */
1628 /* Issues : none */
1629 /* */
1630 /* Revision History: */
1631 /* */
1632 /* DD MM YYYY Author(s) Changes (Describe the changes made) */
1633 /* 25 11 2021 Kishore creation */
1634 /* */
1635 /*****************************************************************************/
1636
1637 void isvcd_iquant_itrans_residual_recon_chroma_4x4_sse42(
1638 WORD16 *pi2_src, UWORD8 *pu1_pred, WORD16 *pi2_rsd, UWORD8 *pu1_out, WORD32 pred_strd,
1639 WORD32 rsd_strd, WORD32 out_strd, const UWORD16 *pu2_iscal_mat, const UWORD16 *pu2_weigh_mat,
1640 UWORD32 u4_qp_div_6, WORD16 *pi2_tmp, WORD16 *pi2_dc_src)
1641 {
1642 __m128i src_r0_r1, src_r2_r3;
1643 __m128i src_r0, src_r1, src_r2, src_r3;
1644 __m128i scalemat_r0_r1, scalemat_r2_r3;
1645 __m128i pred_r0, pred_r1, pred_r2, pred_r3;
1646 __m128i rsd_r0, rsd_r1, rsd_r2, rsd_r3;
1647 __m128i dequant_r0_r1, dequant_r2_r3;
1648 __m128i zero_8x16b = _mm_setzero_si128(); // all bits reset to zero
1649 __m128i temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
1650 __m128i resq_r0, resq_r1, resq_r2, resq_r3;
1651 __m128i add_rshift = _mm_set1_epi32((u4_qp_div_6 < 4) ? (1 << (3 - u4_qp_div_6)) : 0);
1652 __m128i value_32 = _mm_set1_epi32(32);
1653 __m128i chroma_mask = _mm_set1_epi16(0xFF00);
1654 __m128i chroma_mask2 = _mm_set1_epi16(0x00FF);
1655 __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
1656 __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);
1657
1658 __m128i out_16x8b_0, out_16x8b_1, out_16x8b_2, out_16x8b_3;
1659
1660 UNUSED(pi2_tmp);
1661
1662 /*************************************************************/
1663 /* Dequantization of coefficients (SIMD); the DC coefficient */
1664 /* is overwritten below with the value from pi2_dc_src */
1665 /*************************************************************/
1666 // a00 a01 a02 a03 a10 a11 a12 a13 -- the source matrix 0th,1st row
1667 src_r0_r1 = _mm_loadu_si128((__m128i *) (pi2_src));
1668 // a20 a21 a22 a23 a30 a31 a32 a33 -- the source matrix 2nd,3rd row
1669 src_r2_r3 = _mm_loadu_si128((__m128i *) (pi2_src + 8));
1670 // b00 b01 b02 b03 b10 b11 b12 b13 -- the scaling matrix 0th,1st row
1671 scalemat_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat));
1672 // b20 b21 b22 b23 b30 b31 b32 b33 -- the scaling matrix 2nd,3rd row
1673 scalemat_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat + 8));
1674 // q00 q01 q02 q03 q10 q11 q12 q13 -- all 16 bits
1675 dequant_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat));
1676 // q20 q21 q22 q23 q30 q31 q32 q33 -- all 16 bits
1677 dequant_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat + 8));
1678
1679 // b00*q00 b01*q01 b02*q02 b03*q03 b10*q10 b11*q11 b12*q12 b13*q13 -- 16 bit result
1680 temp0 = _mm_mullo_epi16(scalemat_r0_r1, dequant_r0_r1);
1681 // b20*q20 b21*q21 b22*q22 b23*q23 b30*q30 b31*q31 b32*q32 b33*q33 -- 16 bit result
1682 temp1 = _mm_mullo_epi16(scalemat_r2_r3, dequant_r2_r3);
1683 // b00*q00 0 b01*q01 0 b02*q02 0 b03*q03 0 -- 16 bit long
1684 temp4 = _mm_unpacklo_epi16(temp0, zero_8x16b);
1685 // b10*q10 0 b11*q11 0 b12*q12 0 b13*q13 0 -- 16 bit long
1686 temp5 = _mm_unpackhi_epi16(temp0, zero_8x16b);
1687 // b20*q20 0 b21*q21 0 b22*q22 0 b23*q23 0 -- 16 bit long
1688 temp6 = _mm_unpacklo_epi16(temp1, zero_8x16b);
1689 // b30*q30 0 b31*q31 0 b32*q32 0 b33*q33 0 -- 16 bit long
1690 temp7 = _mm_unpackhi_epi16(temp1, zero_8x16b);
1691
1692 src_r0 = _mm_unpacklo_epi16(src_r0_r1, zero_8x16b); // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
1693 src_r1 = _mm_unpackhi_epi16(src_r0_r1, zero_8x16b); // a10 0 a11 0 a12 0 a13 0 -- 16 bit long
1694 src_r2 = _mm_unpacklo_epi16(src_r2_r3, zero_8x16b); // a20 0 a21 0 a22 0 a23 0 -- 16 bit long
1695 src_r3 = _mm_unpackhi_epi16(src_r2_r3, zero_8x16b); // a30 0 a31 0 a32 0 a33 0 -- 16 bit long
1696
1697 // a00*b00*q00 a01*b01*q01 a02*b02*q02 a03*b03*q03 (row 0) -- 32 bits long
1698 temp4 = _mm_madd_epi16(src_r0, temp4);
1699 temp5 = _mm_madd_epi16(src_r1, temp5);
1700 temp6 = _mm_madd_epi16(src_r2, temp6);
1701 temp7 = _mm_madd_epi16(src_r3, temp7);
1702
1703 if(u4_qp_div_6 >= 4)
1704 {
1705 resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);
1706 resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);
1707 resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);
1708 resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);
1709 }
1710 else
1711 {
1712 temp4 = _mm_add_epi32(temp4, add_rshift);
1713 temp5 = _mm_add_epi32(temp5, add_rshift);
1714 temp6 = _mm_add_epi32(temp6, add_rshift);
1715 temp7 = _mm_add_epi32(temp7, add_rshift);
1716 resq_r0 = _mm_srai_epi32(temp4, 4 - u4_qp_div_6);
1717 resq_r1 = _mm_srai_epi32(temp5, 4 - u4_qp_div_6);
1718 resq_r2 = _mm_srai_epi32(temp6, 4 - u4_qp_div_6);
1719 resq_r3 = _mm_srai_epi32(temp7, 4 - u4_qp_div_6);
1720 }
1721
1722 resq_r0 = _mm_insert_epi32(resq_r0, (WORD32) pi2_dc_src[0], 0);
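/* the chroma DC coefficient is dequantized separately (via the 2x2 Hadamard */
/* DC path), so the value handed in through pi2_dc_src overwrites lane 0 of */
/* the dequantized coefficients */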
1723 /* Perform Inverse transform */
1724 /*-------------------------------------------------------------*/
1725 /* IDCT [ Horizontal transformation ] */
1726 /*-------------------------------------------------------------*/
1727 // Matrix transpose
1728 /*
1729 * a0 a1 a2 a3
1730 * b0 b1 b2 b3
1731 * c0 c1 c2 c3
1732 * d0 d1 d2 d3
1733 */
1734 temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1); // a0 b0 a1 b1
1735 temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3); // c0 d0 c1 d1
1736 temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1); // a2 b2 a3 b3
1737 temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3); // c2 d2 c3 d3
1738 resq_r0 = _mm_unpacklo_epi64(temp1, temp3); // a0 b0 c0 d0
1739 resq_r1 = _mm_unpackhi_epi64(temp1, temp3); // a1 b1 c1 d1
1740 resq_r2 = _mm_unpacklo_epi64(temp2, temp4); // a2 b2 c2 d2
1741 resq_r3 = _mm_unpackhi_epi64(temp2, temp4); // a3 b3 c3 d3
1742 // Transform starts -- horizontal transform
1743 /*------------------------------------------------------------------*/
1744 /* z0 = w0 + w2 */
1745 temp0 = _mm_add_epi32(resq_r0, resq_r2);
1746 /* z1 = w0 - w2 */
1747 temp1 = _mm_sub_epi32(resq_r0, resq_r2);
1748 /* z2 = (w1 >> 1) - w3 */
1749 temp2 = _mm_srai_epi32(resq_r1, 1); //(w1>>1)
1750 temp2 = _mm_sub_epi32(temp2, resq_r3); //(w1>>1) - w3
1751 /* z3 = w1 + (w3 >> 1) */
1752 temp3 = _mm_srai_epi32(resq_r3, 1); //(w3>>1)
1753 temp3 = _mm_add_epi32(temp3, resq_r1);
1754 /*----------------------------------------------------------*/
1755 /* x0 = z0 + z3 */
1756 resq_r0 = _mm_add_epi32(temp0, temp3);
1757 /* x1 = z1 + z2 */
1758 resq_r1 = _mm_add_epi32(temp1, temp2);
1759 /* x2 = z1 - z2 */
1760 resq_r2 = _mm_sub_epi32(temp1, temp2);
1761 /* x3 = z0 - z3 */
1762 resq_r3 = _mm_sub_epi32(temp0, temp3);
1763 // Matrix transpose
1764 /*
1765 * a0 b0 c0 d0
1766 * a1 b1 c1 d1
1767 * a2 b2 c2 d2
1768 * a3 b3 c3 d3
1769 */
1770 temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1); // a0 a1 b0 b1
1771 temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3); // a2 a3 b2 b3
1772 temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1); // c0 c1 d0 d1
1773 temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3); // c2 c3 d2 d3
1774 resq_r0 = _mm_unpacklo_epi64(temp1, temp3); // a0 a1 a2 a3
1775 resq_r1 = _mm_unpackhi_epi64(temp1, temp3); // b0 b1 b2 b3
1776 resq_r2 = _mm_unpacklo_epi64(temp2, temp4); // c0 c1 c2 c3
1777 resq_r3 = _mm_unpackhi_epi64(temp2, temp4); // d0 d1 d2 d3
1778 // Transform ends -- horizontal transform
1779
1780 // Load pred buffer
1781 // p00 p01 p02 p03 0 0 0 0 0 0 0 0 -- all 8 bits
1782 pred_r0 = _mm_loadl_epi64((__m128i *) (&pu1_pred[0]));
1783 // p10 p11 p12 p13 0 0 0 0 0 0 0 0 -- all 8 bits
1784 pred_r1 = _mm_loadl_epi64((__m128i *) (&pu1_pred[pred_strd]));
1785 // p20 p21 p22 p23 0 0 0 0 0 0 0 0 -- all 8 bits
1786 pred_r2 = _mm_loadl_epi64((__m128i *) (&pu1_pred[2 * pred_strd]));
1787 // p30 p31 p32 p33 0 0 0 0 0 0 0 0 -- all 8 bits
1788 pred_r3 = _mm_loadl_epi64((__m128i *) (&pu1_pred[3 * pred_strd]));
1789
1790 pred_r0 = _mm_cvtepu8_epi16(pred_r0); // p00 p01 p02 p03 -- all 16 bits
1791 pred_r1 = _mm_cvtepu8_epi16(pred_r1); // p10 p11 p12 p13 -- all 16 bits
1792 pred_r2 = _mm_cvtepu8_epi16(pred_r2); // p20 p21 p22 p23 -- all 16 bits
1793 pred_r3 = _mm_cvtepu8_epi16(pred_r3); // p30 p31 p32 p33 -- all 16 bits
1794
1795 // Load resd buffer
1796 rsd_r0 = _mm_loadu_si128((__m128i *) (&pi2_rsd[0]));
1797 rsd_r1 = _mm_loadu_si128((__m128i *) (&pi2_rsd[rsd_strd]));
1798 rsd_r2 = _mm_loadu_si128((__m128i *) (&pi2_rsd[2 * rsd_strd]));
1799 rsd_r3 = _mm_loadu_si128((__m128i *) (&pi2_rsd[3 * rsd_strd]));
1800
1801 /*--------------------------------------------------------------*/
1802 /* IDCT [ Vertical transformation] and Xij = (xij + 32)>>6 */
1803 /* */
1804 /* Add the prediction and store it back to same buffer */
1805 /*--------------------------------------------------------------*/
1806 /* z0j = y0j + y2j */
1807 temp0 = _mm_add_epi32(resq_r0, resq_r2);
1808 /* z1j = y0j - y2j */
1809 temp1 = _mm_sub_epi32(resq_r0, resq_r2);
1810 /* z2j = (y1j>>1) - y3j */
1811 temp2 = _mm_srai_epi32(resq_r1, 1); //(y1j>>1)
1812 temp2 = _mm_sub_epi32(temp2, resq_r3);
1813 /* z3j = y1j + (y3j>>1) */
1814 temp3 = _mm_srai_epi32(resq_r3, 1); //(y3j>>1)
1815 temp3 = _mm_add_epi32(temp3, resq_r1);
1816
1817 /* x0j = z0j + z3j */
1818 temp4 = _mm_add_epi32(temp0, temp3);
1819 temp4 = _mm_add_epi32(temp4, value_32);
1820 temp4 = _mm_srai_epi32(temp4, 6);
1821 temp4 = _mm_add_epi16(temp4, rsd_r0);
1822 temp4 = _mm_min_epi16(dupmax_8x16b, temp4);
1823 temp4 = _mm_max_epi16(dupmin_8x16b, temp4);
1824 temp4 = _mm_add_epi16(temp4, pred_r0);
1825 /* x1j = z1j + z2j */
1826 temp5 = _mm_add_epi32(temp1, temp2);
1827 temp5 = _mm_add_epi32(temp5, value_32);
1828 temp5 = _mm_srai_epi32(temp5, 6);
1829 temp5 = _mm_add_epi16(temp5, rsd_r1);
1830 temp5 = _mm_min_epi16(dupmax_8x16b, temp5);
1831 temp5 = _mm_max_epi16(dupmin_8x16b, temp5);
1832 temp5 = _mm_add_epi16(temp5, pred_r1);
1833 /* x2j = z1j - z2j */
1834 temp6 = _mm_sub_epi32(temp1, temp2);
1835 temp6 = _mm_add_epi32(temp6, value_32);
1836 temp6 = _mm_srai_epi32(temp6, 6);
1837 temp6 = _mm_add_epi16(temp6, rsd_r2);
1838 temp6 = _mm_min_epi16(dupmax_8x16b, temp6);
1839 temp6 = _mm_max_epi16(dupmin_8x16b, temp6);
1840 temp6 = _mm_add_epi16(temp6, pred_r2);
1841 /* x3j = z0j - z3j */
1842 temp7 = _mm_sub_epi32(temp0, temp3);
1843 temp7 = _mm_add_epi32(temp7, value_32);
1844 temp7 = _mm_srai_epi32(temp7, 6);
1845 temp7 = _mm_add_epi16(temp7, rsd_r3);
1846 temp7 = _mm_min_epi16(dupmax_8x16b, temp7);
1847 temp7 = _mm_max_epi16(dupmin_8x16b, temp7);
1848 temp7 = _mm_add_epi16(temp7, pred_r3);
1849
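/* note: temp4..temp7 still hold 32-bit lanes, but the 16-bit adds above are */
/* correct for the even 16-bit lanes, which line up with the interleaved */
/* chroma residual/prediction samples; the odd lanes are discarded by the */
/* chroma masks below */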
1850 out_16x8b_0 = _mm_loadu_si128((__m128i *) (&pu1_out[0]));
1851 out_16x8b_1 = _mm_loadu_si128((__m128i *) (&pu1_out[out_strd]));
1852 out_16x8b_2 = _mm_loadu_si128((__m128i *) (&pu1_out[(out_strd << 1)]));
1853 out_16x8b_3 = _mm_loadu_si128((__m128i *) (&pu1_out[(out_strd << 1) + out_strd]));
1854
1855 out_16x8b_0 = _mm_and_si128(out_16x8b_0, chroma_mask);
1856 out_16x8b_1 = _mm_and_si128(out_16x8b_1, chroma_mask);
1857 out_16x8b_2 = _mm_and_si128(out_16x8b_2, chroma_mask);
1858 out_16x8b_3 = _mm_and_si128(out_16x8b_3, chroma_mask);
1859
1860 temp4 = _mm_packus_epi16(temp4, zero_8x16b);
1861 temp5 = _mm_packus_epi16(temp5, zero_8x16b);
1862 temp6 = _mm_packus_epi16(temp6, zero_8x16b);
1863 temp7 = _mm_packus_epi16(temp7, zero_8x16b);
1864
1865 temp4 = _mm_and_si128(temp4, chroma_mask2);
1866 temp5 = _mm_and_si128(temp5, chroma_mask2);
1867 temp6 = _mm_and_si128(temp6, chroma_mask2);
1868 temp7 = _mm_and_si128(temp7, chroma_mask2);
1869
1870 out_16x8b_0 = _mm_add_epi8(temp4, out_16x8b_0);
1871 out_16x8b_1 = _mm_add_epi8(temp5, out_16x8b_1);
1872 out_16x8b_2 = _mm_add_epi8(temp6, out_16x8b_2);
1873 out_16x8b_3 = _mm_add_epi8(temp7, out_16x8b_3);
1874
1875 _mm_storel_epi64((__m128i *) (pu1_out), out_16x8b_0);
1876 _mm_storel_epi64((__m128i *) (pu1_out + out_strd), out_16x8b_1);
1877 _mm_storel_epi64((__m128i *) (pu1_out + (out_strd << 1)), out_16x8b_2);
1878 _mm_storel_epi64((__m128i *) (pu1_out + (out_strd * 3)), out_16x8b_3);
1879 }
1880