/******************************************************************************
 *
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *****************************************************************************
 * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
 */
/**
 *******************************************************************************
 * @file
 *  isvcd_iquant_itrans_residual_sse42.c
 *
 * @brief
 *  Contains function definitions for inverse quantization, inverse transform
 *  and residual computation
 *
 * @author
 *  Kishore
 *
 * @par List of Functions:
 *  - isvcd_iquant_itrans_residual_4x4_sse42()
 *  - isvcd_iquant_itrans_residual_8x8_sse42()
 *  - isvcd_iquant_itrans_residual_4x4_dc_sse42()
 *  - isvcd_iquant_itrans_residual_8x8_dc_sse42()
 *  - isvcd_iquant_itrans_residual_chroma_4x4_sse42()
 *  - isvcd_iquant_itrans_residual_chroma_4x4_dc_sse42()
 *
 * @remarks
 *  None
 *
 *******************************************************************************
 */
#include <immintrin.h>
/* User include files */
#include "ih264_typedefs.h"
#include "ih264_defs.h"
#include "ih264_trans_macros.h"
#include "ih264_macros.h"
#include "ih264_platform_macros.h"
#include "ih264_trans_data.h"
#include "ih264_size_defs.h"
#include "ih264_structs.h"

/*****************************************************************************/
/*                                                                           */
/*  Function Name : isvcd_iquant_itrans_residual_4x4_sse42                   */
/*                                                                           */
/*  Description   : This function computes the residual output from         */
/*                  inverse quantization and inverse transform (IQ + IT)     */
/*                                                                           */
/*  Inputs        :                                                          */
/*  Globals       : none                                                     */
/*  Processing    :                                                          */
/*                                                                           */
/*  Outputs       : residual block written to pi2_out                        */
/*  Returns       : i4_nnz                                                   */
/*                                                                           */
/*  Issues        : none                                                     */
/*                                                                           */
/*  Revision History:                                                        */
/*                                                                           */
/*         DD MM YYYY   Author(s)       Changes (Describe the changes made)  */
/*         25 11 2021   Kishore         creation                             */
/*                                                                           */
/*****************************************************************************/

WORD32 isvcd_iquant_itrans_residual_4x4_sse42(WORD16 *pi2_src, WORD16 *pi2_pred, WORD16 *pi2_out,
                                              WORD32 pred_strd, WORD32 out_strd,
                                              const UWORD16 *pu2_iscal_mat,
                                              const UWORD16 *pu2_weigh_mat, UWORD32 u4_qp_div_6,
                                              WORD16 *pi2_tmp, WORD32 iq_start_idx,
                                              WORD16 *pi2_dc_ld_addr)
{
    WORD32 i4_nnz = 0;
    WORD32 row_0, row_1, row_2, row_3;
    __m128i src_r0_r1, src_r2_r3;
    __m128i src_r0, src_r1, src_r2, src_r3;
    __m128i scalemat_r0_r1, scalemat_r2_r3;
    __m128i pred_r0, pred_r1, pred_r2, pred_r3;
    __m128i dequant_r0_r1, dequant_r2_r3;
    __m128i zero_8x16b = _mm_setzero_si128();  // all bits reset to zero
    __m128i temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    __m128i resq_r0, resq_r1, resq_r2, resq_r3;
    __m128i add_rshift = _mm_set1_epi32((u4_qp_div_6 < 4) ? (1 << (3 - u4_qp_div_6)) : 0);
    __m128i value_32 = _mm_set1_epi32(32);
    __m128i dupmax_4x32b = _mm_set1_epi32(RSD_MAX);
    __m128i dupmin_4x32b = _mm_set1_epi32(RSD_MIN);
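    /* Residual output samples are clamped to [RSD_MIN, RSD_MAX] before store */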

    UNUSED(pi2_tmp);

    /*************************************************************/
    /* Dequantization of coefficients. Will be replaced by SIMD  */
    /* operations on platform                                    */
    /*************************************************************/
    // a00 a01 a02 a03 a10 a11 a12 a13 -- the source matrix 0th,1st row
    src_r0_r1 = _mm_loadu_si128((__m128i *) (pi2_src));
    // a20 a21 a22 a23 a30 a31 a32 a33 -- the source matrix 2nd,3rd row
    src_r2_r3 = _mm_loadu_si128((__m128i *) (pi2_src + 8));
    // b00 b01 b02 b03 b10 b11 b12 b13 -- the scaling matrix 0th,1st row
    scalemat_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat));
    // b20 b21 b22 b23 b30 b31 b32 b33 -- the scaling matrix 2nd,3rd row
    scalemat_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat + 8));
    // q00 q01 q02 q03 q10 q11 q12 q13 -- all 16 bits
    dequant_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat));
    // q20 q21 q22 q23 q30 q31 q32 q33 -- all 16 bits
    dequant_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat + 8));

    // b00*q00 b01*q01 b02*q02 b03*q03 b10*q10 b11*q11 b12*q12 b13*q13 -- 16 bit result
    temp0 = _mm_mullo_epi16(scalemat_r0_r1, dequant_r0_r1);
    // b20*q20 b21*q21 b22*q22 b23*q23 b30*q30 b31*q31 b32*q32 b33*q33 -- 16 bit result
    temp1 = _mm_mullo_epi16(scalemat_r2_r3, dequant_r2_r3);

    // b00*q00 0 b01*q01 0 b02*q02 0 b03*q03 0 -- 16 bit long
    temp4 = _mm_unpacklo_epi16(temp0, zero_8x16b);
    // b10*q10 0 b11*q11 0 b12*q12 0 b13*q13 0 -- 16 bit long
    temp5 = _mm_unpackhi_epi16(temp0, zero_8x16b);
    // b20*q20 0 b21*q21 0 b22*q22 0 b23*q23 0 -- 16 bit long
    temp6 = _mm_unpacklo_epi16(temp1, zero_8x16b);
    // b30*q30 0 b31*q31 0 b32*q32 0 b33*q33 0 -- 16 bit long
    temp7 = _mm_unpackhi_epi16(temp1, zero_8x16b);

    src_r0 = _mm_unpacklo_epi16(src_r0_r1, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r1 = _mm_unpackhi_epi16(src_r0_r1, zero_8x16b);  // a10 0 a11 0 a12 0 a13 0 -- 16 bit long
    src_r2 = _mm_unpacklo_epi16(src_r2_r3, zero_8x16b);  // a20 0 a21 0 a22 0 a23 0 -- 16 bit long
    src_r3 = _mm_unpackhi_epi16(src_r2_r3, zero_8x16b);  // a30 0 a31 0 a32 0 a33 0 -- 16 bit long

    // a00*b00*q00 a01*b01*q01 a02*b02*q02 a03*b03*q03 -- 32 bits long
    temp4 = _mm_madd_epi16(src_r0, temp4);
    temp5 = _mm_madd_epi16(src_r1, temp5);
    temp6 = _mm_madd_epi16(src_r2, temp6);
    temp7 = _mm_madd_epi16(src_r3, temp7);

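    /* H.264 inverse quantization: the weighted levels are scaled by
       2^(u4_qp_div_6 - 4); for u4_qp_div_6 < 4 this becomes a rounded
       right shift by (4 - u4_qp_div_6) using add_rshift */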
    if(u4_qp_div_6 >= 4)
    {
        resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);
        resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);
        resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);
        resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);
    }
    else
    {
        temp4 = _mm_add_epi32(temp4, add_rshift);
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp6 = _mm_add_epi32(temp6, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r0 = _mm_srai_epi32(temp4, 4 - u4_qp_div_6);
        resq_r1 = _mm_srai_epi32(temp5, 4 - u4_qp_div_6);
        resq_r2 = _mm_srai_epi32(temp6, 4 - u4_qp_div_6);
        resq_r3 = _mm_srai_epi32(temp7, 4 - u4_qp_div_6);
    }

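    /* When iq_start_idx == 1 the DC coefficient was decoded and inverse
       transformed separately (e.g. the intra 16x16 DC path), so it is
       reloaded from pi2_dc_ld_addr instead of the dequantized value */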
    if(iq_start_idx == 1) resq_r0 = _mm_insert_epi32(resq_r0, (WORD32) pi2_dc_ld_addr[0], 0);
    /* Perform Inverse transform */
    /*-------------------------------------------------------------*/
    /* IDCT [ Horizontal transformation ]                          */
    /*-------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 a1 a2 a3
     *  b0 b1 b2 b3
     *  c0 c1 c2 c3
     *  d0 d1 d2 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);  // a0 b0 a1 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);  // c0 d0 c1 d1
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);  // a2 b2 a3 b3
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);  // c2 d2 c3 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);    // a0 b0 c0 d0
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);    // a1 b1 c1 d1
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);    // a2 b2 c2 d2
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);    // a3 b3 c3 d3
    // Transform starts -- horizontal transform
    /*------------------------------------------------------------------*/
    /* z0 = w0 + w2 */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1 = w0 - w2 */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2 = (w1 >> 1) - w3 */
    temp2 = _mm_srai_epi32(resq_r1, 1);     // (w1>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);  // (w1>>1) - w3
    /* z3 = w1 + (w3 >> 1) */
    temp3 = _mm_srai_epi32(resq_r3, 1);     // (w3>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);  // (w3>>1) + w1
    /*----------------------------------------------------------*/
    /* x0 = z0 + z3 */
    resq_r0 = _mm_add_epi32(temp0, temp3);
    /* x1 = z1 + z2 */
    resq_r1 = _mm_add_epi32(temp1, temp2);
    /* x2 = z1 - z2 */
    resq_r2 = _mm_sub_epi32(temp1, temp2);
    /* x3 = z0 - z3 */
    resq_r3 = _mm_sub_epi32(temp0, temp3);
    // Matrix transpose
    /*
     *  a0 b0 c0 d0
     *  a1 b1 c1 d1
     *  a2 b2 c2 d2
     *  a3 b3 c3 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);  // a0 a1 b0 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);  // a2 a3 b2 b3
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);  // c0 c1 d0 d1
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);  // c2 c3 d2 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);    // a0 a1 a2 a3
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);    // b0 b1 b2 b3
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);    // c0 c1 c2 c3
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);    // d0 d1 d2 d3
    // Transform ends -- horizontal transform

    // Load pred buffer
    // p00 p01 p02 p03 0 0 0 0 -- all 16 bits
    pred_r0 = _mm_loadl_epi64((__m128i *) (&pi2_pred[0]));
    // p10 p11 p12 p13 0 0 0 0 -- all 16 bits
    pred_r1 = _mm_loadl_epi64((__m128i *) (&pi2_pred[pred_strd]));
    // p20 p21 p22 p23 0 0 0 0 -- all 16 bits
    pred_r2 = _mm_loadl_epi64((__m128i *) (&pi2_pred[2 * pred_strd]));
    // p30 p31 p32 p33 0 0 0 0 -- all 16 bits
    pred_r3 = _mm_loadl_epi64((__m128i *) (&pi2_pred[3 * pred_strd]));

    pred_r0 = _mm_cvtepi16_epi32(pred_r0);  // p00 p01 p02 p03 -- all 32 bits
    pred_r1 = _mm_cvtepi16_epi32(pred_r1);  // p10 p11 p12 p13 -- all 32 bits
    pred_r2 = _mm_cvtepi16_epi32(pred_r2);  // p20 p21 p22 p23 -- all 32 bits
    pred_r3 = _mm_cvtepi16_epi32(pred_r3);  // p30 p31 p32 p33 -- all 32 bits
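    /* _mm_cvtepi16_epi32 sign extends each 16 bit pred/residual sample to
       32 bits so the rounded add below runs at full precision before the
       clamp to the residual range */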

    /*--------------------------------------------------------------*/
    /* IDCT [ Vertical transformation] and Xij = (xij + 32)>>6      */
    /*                                                              */
    /* Add the prediction and store the result to the out buffer    */
    /*--------------------------------------------------------------*/
    /* z0j = y0j + y2j */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1j = y0j - y2j */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2j = (y1j>>1) - y3j */
    temp2 = _mm_srai_epi32(resq_r1, 1);     // (y1j>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);
    /* z3j = y1j + (y3j>>1) */
    temp3 = _mm_srai_epi32(resq_r3, 1);     // (y3j>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);

    /* x0j = z0j + z3j */
    temp4 = _mm_add_epi32(temp0, temp3);
    temp4 = _mm_add_epi32(temp4, value_32);
    temp4 = _mm_srai_epi32(temp4, 6);
    temp4 = _mm_add_epi32(temp4, pred_r0);
    temp4 = _mm_min_epi32(dupmax_4x32b, temp4);
    temp4 = _mm_max_epi32(dupmin_4x32b, temp4);

    row_0 = _mm_test_all_ones(_mm_cmpeq_epi32(temp4, zero_8x16b));  // return 1 if all zeros, else 0

    /* x1j = z1j + z2j */
    temp5 = _mm_add_epi32(temp1, temp2);
    temp5 = _mm_add_epi32(temp5, value_32);
    temp5 = _mm_srai_epi32(temp5, 6);
    temp5 = _mm_add_epi32(temp5, pred_r1);
    temp5 = _mm_min_epi32(dupmax_4x32b, temp5);
    temp5 = _mm_max_epi32(dupmin_4x32b, temp5);

    row_1 = _mm_test_all_ones(_mm_cmpeq_epi32(temp5, zero_8x16b));  // return 1 if all zeros, else 0

    /* x2j = z1j - z2j */
    temp6 = _mm_sub_epi32(temp1, temp2);
    temp6 = _mm_add_epi32(temp6, value_32);
    temp6 = _mm_srai_epi32(temp6, 6);
    temp6 = _mm_add_epi32(temp6, pred_r2);
    temp6 = _mm_min_epi32(dupmax_4x32b, temp6);
    temp6 = _mm_max_epi32(dupmin_4x32b, temp6);
    row_2 = _mm_test_all_ones(_mm_cmpeq_epi32(temp6, zero_8x16b));  // return 1 if all zeros, else 0

    /* x3j = z0j - z3j */
    temp7 = _mm_sub_epi32(temp0, temp3);
    temp7 = _mm_add_epi32(temp7, value_32);
    temp7 = _mm_srai_epi32(temp7, 6);
    temp7 = _mm_add_epi32(temp7, pred_r3);
    temp7 = _mm_min_epi32(dupmax_4x32b, temp7);
    temp7 = _mm_max_epi32(dupmin_4x32b, temp7);
    row_3 = _mm_test_all_ones(_mm_cmpeq_epi32(temp7, zero_8x16b));  // return 1 if all zeros, else 0

    // 32-bit to 16-bit conversion
    temp0 = _mm_packs_epi32(temp4, zero_8x16b);
    temp1 = _mm_packs_epi32(temp5, zero_8x16b);
    temp2 = _mm_packs_epi32(temp6, zero_8x16b);
    temp3 = _mm_packs_epi32(temp7, zero_8x16b);

    _mm_storel_epi64((__m128i *) (pi2_out), temp0);
    _mm_storel_epi64((__m128i *) (pi2_out + out_strd), temp1);
    _mm_storel_epi64((__m128i *) (pi2_out + 2 * out_strd), temp2);
    _mm_storel_epi64((__m128i *) (pi2_out + 3 * out_strd), temp3);
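    /* i4_nnz is 1 if any output residual sample of the 4x4 block is
       non-zero, 0 otherwise */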

    i4_nnz = !(row_0 && row_1 && row_2 && row_3);
    return i4_nnz;
}

/*****************************************************************************/
/*                                                                           */
/*  Function Name : isvcd_iquant_itrans_residual_8x8_sse42                   */
/*                                                                           */
/*  Description   : This function computes the residual output from         */
/*                  inverse quantization and inverse transform (IQ + IT)     */
/*                                                                           */
/*  Inputs        :                                                          */
/*  Globals       : none                                                     */
/*  Processing    :                                                          */
/*                                                                           */
/*  Outputs       : residual block written to pi2_out                        */
/*  Returns       : i4_nnz                                                   */
/*                                                                           */
/*  Issues        : none                                                     */
/*                                                                           */
/*  Revision History:                                                        */
/*                                                                           */
/*         DD MM YYYY   Author(s)       Changes (Describe the changes made)  */
/*         25 11 2021   Kishore         creation                             */
/*                                                                           */
/*****************************************************************************/

WORD32 isvcd_iquant_itrans_residual_8x8_sse42(WORD16 *pi2_src, WORD16 *pi2_pred, WORD16 *pi2_out,
                                              WORD32 pred_strd, WORD32 out_strd,
                                              const UWORD16 *pu2_iscale_mat,
                                              const UWORD16 *pu2_weigh_mat, UWORD32 qp_div,
                                              WORD16 *pi2_tmp, WORD32 iq_start_idx,
                                              WORD16 *pi2_dc_ld_addr)
{
    __m128i pred_r01_b0, pred_r23_b0, pred_r45_b2, pred_r67_b2;
    __m128i pred_r01_b1, pred_r23_b1, pred_r45_b3, pred_r67_b3;

    WORD32 row_01_b0, row_23_b0, row_45_b2, row_67_b2;
    WORD32 row_01_b1, row_23_b1, row_45_b3, row_67_b3;
    WORD32 i4_nnz, i4_nnz_b0, i4_nnz_b1, i4_nnz_b2, i4_nnz_b3;
    __m128i src_r0;
    __m128i scalemat_r0;
    __m128i zero_8x16b = _mm_setzero_si128();  // all bits reset to zero
    __m128i value_32 = _mm_set1_epi32(32);
    __m128i add_rshift = _mm_set1_epi32((qp_div < 6) ? (1 << (5 - qp_div)) : 0);
    __m128i dequant_r0;
    __m128i pred_r0, pred_r1, pred_r2, pred_r3, pred_r4, pred_r5, pred_r6, pred_r7;
    __m128i sign_reg;
    __m128i src_r0_1, src_r0_2;
    __m128i scalemat_r0_1, scalemat_r0_2;
    __m128i temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8;
    __m128i temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18, temp19, temp20;
    // To store dequantization results
    __m128i resq_r0_1, resq_r0_2, resq_r1_1, resq_r1_2, resq_r2_1, resq_r2_2, resq_r3_1, resq_r3_2,
        resq_r4_1, resq_r4_2, resq_r5_1, resq_r5_2, resq_r6_1, resq_r6_2, resq_r7_1, resq_r7_2;
    __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
    __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);

    UNUSED(pi2_tmp);
    UNUSED(iq_start_idx);
    UNUSED(pi2_dc_ld_addr);

    /*************************************************************/
    /* Dequantization of coefficients. Will be replaced by SIMD  */
    /* operations on platform. Note : DC coeff is not scaled     */
    /*************************************************************/

    // Row 0 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 0th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 0th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[0]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);

    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);

    if(qp_div >= 6)
    {
        resq_r0_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r0_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r0_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r0_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    // -- 16 bit long
    resq_r0_1 = _mm_packs_epi32(resq_r0_1, resq_r0_2);
    // Row 1 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 1st row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 8));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 1st row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 8));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[8]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);  // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);  // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);  // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);  // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    if(qp_div >= 6)
    {
        resq_r1_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r1_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r1_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r1_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    resq_r1_1 = _mm_packs_epi32(resq_r1_1, resq_r1_2);
    // Row 2 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 2nd row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 16));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 2nd row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 16));
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[16]));  // q0 q1 q2 q3 q4 q5 q6 q7
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);  // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);  // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);  // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);  // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    if(qp_div >= 6)
    {
        resq_r2_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r2_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r2_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r2_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    resq_r2_1 = _mm_packs_epi32(resq_r2_1, resq_r2_2);
    // Row 3 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 3rd row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 24));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 3rd row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 24));
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[24]));  // q0 q1 q2 q3 q4 q5 q6 q7
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);  // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);  // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);  // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);  // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    if(qp_div >= 6)
    {
        resq_r3_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r3_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r3_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r3_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    resq_r3_1 = _mm_packs_epi32(resq_r3_1, resq_r3_2);
    // Row 4 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 4th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 32));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 4th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 32));
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[32]));  // q0 q1 q2 q3 q4 q5 q6 q7
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);  // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);  // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);  // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);  // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    if(qp_div >= 6)
    {
        resq_r4_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r4_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r4_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r4_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    resq_r4_1 = _mm_packs_epi32(resq_r4_1, resq_r4_2);
    // Row 5 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 5th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 40));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 5th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 40));
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[40]));  // q0 q1 q2 q3 q4 q5 q6 q7
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);  // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);  // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);  // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);  // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    if(qp_div >= 6)
    {
        resq_r5_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r5_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r5_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r5_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    resq_r5_1 = _mm_packs_epi32(resq_r5_1, resq_r5_2);

    // Row 6 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 6th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 48));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 6th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 48));
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[48]));  // q0 q1 q2 q3 q4 q5 q6 q7
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r6_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r6_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r6_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r6_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    resq_r6_1 = _mm_packs_epi32(resq_r6_1, resq_r6_2);
    // Row 7 processing
    // a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 7th row
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 56));
    // b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 7th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 56));
    // q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[56]));
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b);  // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    // b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0);
    // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b);
    // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b);
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1);
    // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2);
    if(qp_div >= 6)
    {
        resq_r7_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r7_2 = _mm_slli_epi32(temp7, qp_div - 6);
    }
    else
    {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r7_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r7_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7
    resq_r7_1 = _mm_packs_epi32(resq_r7_1, resq_r7_2);
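    /* All eight rows are now dequantized; each resq_rX_1 holds row X packed
       back to eight signed 16 bit values */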
    /* Perform Inverse transform */
    /*--------------------------------------------------------------------*/
    /* IDCT [ Horizontal transformation ]                                  */
    /*--------------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 a1 a2 a3 a4 a5 a6 a7
     *  b0 b1 b2 b3 b4 b5 b6 b7
     *  c0 c1 c2 c3 c4 c5 c6 c7
     *  d0 d1 d2 d3 d4 d5 d6 d7
     */
    temp1 = _mm_unpacklo_epi16(resq_r0_1, resq_r1_1);  // a0 b0 a1 b1 a2 b2 a3 b3
    temp3 = _mm_unpacklo_epi16(resq_r2_1, resq_r3_1);  // c0 d0 c1 d1 c2 d2 c3 d3
    temp2 = _mm_unpackhi_epi16(resq_r0_1, resq_r1_1);  // a4 b4 a5 b5 a6 b6 a7 b7
    temp4 = _mm_unpackhi_epi16(resq_r2_1, resq_r3_1);  // c4 d4 c5 d5 c6 d6 c7 d7
    resq_r0_1 = _mm_unpacklo_epi32(temp1, temp3);      // a0 b0 c0 d0 a1 b1 c1 d1
    resq_r1_1 = _mm_unpackhi_epi32(temp1, temp3);      // a2 b2 c2 d2 a3 b3 c3 d3
    resq_r2_1 = _mm_unpacklo_epi32(temp2, temp4);      // a4 b4 c4 d4 a5 b5 c5 d5
    resq_r3_1 = _mm_unpackhi_epi32(temp2, temp4);      // a6 b6 c6 d6 a7 b7 c7 d7
    /*
     *  e0 e1 e2 e3 e4 e5 e6 e7
     *  f0 f1 f2 f3 f4 f5 f6 f7
     *  g0 g1 g2 g3 g4 g5 g6 g7
     *  h0 h1 h2 h3 h4 h5 h6 h7
     */
    temp1 = _mm_unpacklo_epi16(resq_r4_1, resq_r5_1);  // e0 f0 e1 f1 e2 f2 e3 f3
    temp3 = _mm_unpacklo_epi16(resq_r6_1, resq_r7_1);  // g0 h0 g1 h1 g2 h2 g3 h3
    temp2 = _mm_unpackhi_epi16(resq_r4_1, resq_r5_1);  // e4 f4 e5 f5 e6 f6 e7 f7
    temp4 = _mm_unpackhi_epi16(resq_r6_1, resq_r7_1);  // g4 h4 g5 h5 g6 h6 g7 h7
    resq_r4_1 = _mm_unpacklo_epi32(temp1, temp3);      // e0 f0 g0 h0 e1 f1 g1 h1
    resq_r5_1 = _mm_unpackhi_epi32(temp1, temp3);      // e2 f2 g2 h2 e3 f3 g3 h3
    resq_r6_1 = _mm_unpacklo_epi32(temp2, temp4);      // e4 f4 g4 h4 e5 f5 g5 h5
    resq_r7_1 = _mm_unpackhi_epi32(temp2, temp4);      // e6 f6 g6 h6 e7 f7 g7 h7
    /*
     *  a0 b0 c0 d0 a1 b1 c1 d1
     *  a2 b2 c2 d2 a3 b3 c3 d3
     *  a4 b4 c4 d4 a5 b5 c5 d5
     *  a6 b6 c6 d6 a7 b7 c7 d7
     *  e0 f0 g0 h0 e1 f1 g1 h1
     *  e2 f2 g2 h2 e3 f3 g3 h3
     *  e4 f4 g4 h4 e5 f5 g5 h5
     *  e6 f6 g6 h6 e7 f7 g7 h7
     */
    resq_r0_2 = _mm_unpacklo_epi64(resq_r0_1, resq_r4_1);  // a0 b0 c0 d0 e0 f0 g0 h0
    resq_r1_2 = _mm_unpackhi_epi64(resq_r0_1, resq_r4_1);  // a1 b1 c1 d1 e1 f1 g1 h1
    resq_r2_2 = _mm_unpacklo_epi64(resq_r1_1, resq_r5_1);  // a2 b2 c2 d2 e2 f2 g2 h2
    resq_r3_2 = _mm_unpackhi_epi64(resq_r1_1, resq_r5_1);  // a3 b3 c3 d3 e3 f3 g3 h3
    resq_r4_2 = _mm_unpacklo_epi64(resq_r2_1, resq_r6_1);  // a4 b4 c4 d4 e4 f4 g4 h4
    resq_r5_2 = _mm_unpackhi_epi64(resq_r2_1, resq_r6_1);  // a5 b5 c5 d5 e5 f5 g5 h5
    resq_r6_2 = _mm_unpacklo_epi64(resq_r3_1, resq_r7_1);  // a6 b6 c6 d6 e6 f6 g6 h6
    resq_r7_2 = _mm_unpackhi_epi64(resq_r3_1, resq_r7_1);  // a7 b7 c7 d7 e7 f7 g7 h7

    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r1_2);
    resq_r1_1 = _mm_unpacklo_epi16(resq_r1_2, sign_reg);  // a1 b1 c1 d1 -- 32 bit
    resq_r1_2 = _mm_unpackhi_epi16(resq_r1_2, sign_reg);  // e1 f1 g1 h1 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r3_2);
    resq_r3_1 = _mm_unpacklo_epi16(resq_r3_2, sign_reg);  // a3 b3 c3 d3 -- 32 bit
    resq_r3_2 = _mm_unpackhi_epi16(resq_r3_2, sign_reg);  // e3 f3 g3 h3 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r5_2);
    resq_r5_1 = _mm_unpacklo_epi16(resq_r5_2, sign_reg);  // a5 b5 c5 d5 -- 32 bit
    resq_r5_2 = _mm_unpackhi_epi16(resq_r5_2, sign_reg);  // e5 f5 g5 h5 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r7_2);
    resq_r7_1 = _mm_unpacklo_epi16(resq_r7_2, sign_reg);  // a7 b7 c7 d7 -- 32 bit
    resq_r7_2 = _mm_unpackhi_epi16(resq_r7_2, sign_reg);  // e7 f7 g7 h7 -- 32 bit
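    /* The odd rows (w1, w3, w5, w7) are sign extended to 32 bits before the
       odd part of the butterfly: their multi-term sums can exceed the 16 bit
       range, while the even part stays safe in 16 bit arithmetic */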
    // Transform starts -- horizontal transform
    /*------------------------------------------------------------------*/
    /* y0 = w0 + w4 */
    temp1 = _mm_add_epi16(resq_r0_2, resq_r4_2);
    /* y2 = w0 - w4 */
    temp3 = _mm_sub_epi16(resq_r0_2, resq_r4_2);
    /* y1 = -w3 + w5 - w7 - (w7 >> 1) */
    temp2 = _mm_sub_epi32(resq_r5_1, resq_r3_1);  // -w3+w5
    temp10 = _mm_sub_epi32(resq_r5_2, resq_r3_2);
    temp4 = _mm_sub_epi32(temp2, resq_r7_1);      // -w3+w5-w7
    temp12 = _mm_sub_epi32(temp10, resq_r7_2);
    temp5 = _mm_srai_epi32(resq_r7_1, 1);         // w7>>1
    temp13 = _mm_srai_epi32(resq_r7_2, 1);
    temp2 = _mm_sub_epi32(temp4, temp5);          // -w3+w5-w7-(w7>>1)
    temp10 = _mm_sub_epi32(temp12, temp13);
    temp2 = _mm_packs_epi32(temp2, temp10);
    /* y3 = w1 + w7 - w3 - (w3 >> 1) */
    temp4 = _mm_add_epi32(resq_r1_1, resq_r7_1);  // w1+w7
    temp12 = _mm_add_epi32(resq_r1_2, resq_r7_2);
    temp4 = _mm_sub_epi32(temp4, resq_r3_1);      // w1+w7-w3
    temp12 = _mm_sub_epi32(temp12, resq_r3_2);
    temp5 = _mm_srai_epi32(resq_r3_1, 1);         // w3>>1
    temp13 = _mm_srai_epi32(resq_r3_2, 1);
    temp4 = _mm_sub_epi32(temp4, temp5);          // w1+w7-w3-(w3>>1)
    temp12 = _mm_sub_epi32(temp12, temp13);
    temp4 = _mm_packs_epi32(temp4, temp12);
    /* y4 = (w2 >> 1) - w6 */
    temp5 = _mm_srai_epi16(resq_r2_2, 1);         // w2>>1
    temp5 = _mm_sub_epi16(temp5, resq_r6_2);      // (w2>>1)-w6
    /* y5 = -w1 + w7 + w5 + (w5 >> 1) */
    temp6 = _mm_sub_epi32(resq_r7_1, resq_r1_1);  // w7-w1
    temp14 = _mm_sub_epi32(resq_r7_2, resq_r1_2);
    temp6 = _mm_add_epi32(temp6, resq_r5_1);      // w7-w1+w5
    temp14 = _mm_add_epi32(temp14, resq_r5_2);
    temp7 = _mm_srai_epi32(resq_r5_1, 1);         // w5>>1
    temp15 = _mm_srai_epi32(resq_r5_2, 1);
    temp6 = _mm_add_epi32(temp6, temp7);          // w7-w1+w5+(w5>>1)
    temp14 = _mm_add_epi32(temp14, temp15);
    temp6 = _mm_packs_epi32(temp6, temp14);
    /* y6 = w2 + (w6 >> 1) */
    temp7 = _mm_srai_epi16(resq_r6_2, 1);         // w6>>1
    temp7 = _mm_add_epi16(temp7, resq_r2_2);      // (w6>>1)+w2
    /* y7 = w3 + w5 + w1 + (w1 >> 1) */
    temp8 = _mm_add_epi32(resq_r3_1, resq_r5_1);  // w3+w5
    temp16 = _mm_add_epi32(resq_r3_2, resq_r5_2);
    temp8 = _mm_add_epi32(temp8, resq_r1_1);      // w3+w5+w1
    temp16 = _mm_add_epi32(temp16, resq_r1_2);
    temp17 = _mm_srai_epi32(resq_r1_1, 1);        // w1>>1
    temp18 = _mm_srai_epi32(resq_r1_2, 1);
    temp8 = _mm_add_epi32(temp8, temp17);         // w3+w5+w1+(w1>>1)
    temp16 = _mm_add_epi32(temp16, temp18);
    temp8 = _mm_packs_epi32(temp8, temp16);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* z0 = y0 + y6 */
    resq_r0_1 = _mm_add_epi16(temp1, temp7);
    /* z1 = y1 + (y7 >> 2) */
    resq_r1_1 = _mm_srai_epi16(temp8, 2);
    resq_r1_1 = _mm_add_epi16(resq_r1_1, temp2);
    /* z2 = y2 + y4 */
    resq_r2_1 = _mm_add_epi16(temp3, temp5);
    /* z3 = y3 + (y5 >> 2) */
    resq_r3_1 = _mm_srai_epi16(temp6, 2);
    resq_r3_1 = _mm_add_epi16(resq_r3_1, temp4);
    /* z4 = y2 - y4 */
    resq_r4_1 = _mm_sub_epi16(temp3, temp5);
    /* z5 = (y3 >> 2) - y5 */
    resq_r5_1 = _mm_srai_epi16(temp4, 2);
    resq_r5_1 = _mm_sub_epi16(resq_r5_1, temp6);
    /* z6 = y0 - y6 */
    resq_r6_1 = _mm_sub_epi16(temp1, temp7);
    /* z7 = y7 - (y1 >> 2) */
    resq_r7_1 = _mm_srai_epi16(temp2, 2);
    resq_r7_1 = _mm_sub_epi16(temp8, resq_r7_1);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* x0 = z0 + z7 */
    temp1 = _mm_add_epi16(resq_r0_1, resq_r7_1);
    /* x1 = z2 + z5 */
    temp2 = _mm_add_epi16(resq_r2_1, resq_r5_1);
    /* x2 = z4 + z3 */
    temp3 = _mm_add_epi16(resq_r4_1, resq_r3_1);
    /* x3 = z6 + z1 */
    temp4 = _mm_add_epi16(resq_r6_1, resq_r1_1);
    /* x4 = z6 - z1 */
    temp5 = _mm_sub_epi16(resq_r6_1, resq_r1_1);
    /* x5 = z4 - z3 */
    temp6 = _mm_sub_epi16(resq_r4_1, resq_r3_1);
    /* x6 = z2 - z5 */
    temp7 = _mm_sub_epi16(resq_r2_1, resq_r5_1);
    /* x7 = z0 - z7 */
    temp8 = _mm_sub_epi16(resq_r0_1, resq_r7_1);
    /*------------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 b0 c0 d0 e0 f0 g0 h0
     *  a1 b1 c1 d1 e1 f1 g1 h1
     *  a2 b2 c2 d2 e2 f2 g2 h2
     *  a3 b3 c3 d3 e3 f3 g3 h3
     */
    temp17 = _mm_unpacklo_epi16(temp1, temp2);  // a0 a1 b0 b1 c0 c1 d0 d1
    temp19 = _mm_unpacklo_epi16(temp3, temp4);  // a2 a3 b2 b3 c2 c3 d2 d3
    temp18 = _mm_unpackhi_epi16(temp1, temp2);  // e0 e1 f0 f1 g0 g1 h0 h1
    temp20 = _mm_unpackhi_epi16(temp3, temp4);  // e2 e3 f2 f3 g2 g3 h2 h3

    resq_r0_1 = _mm_unpacklo_epi32(temp17, temp19);  // a0 a1 a2 a3 b0 b1 b2 b3
    resq_r1_1 = _mm_unpackhi_epi32(temp17, temp19);  // c0 c1 c2 c3 d0 d1 d2 d3
    resq_r2_1 = _mm_unpacklo_epi32(temp18, temp20);  // e0 e1 e2 e3 f0 f1 f2 f3
    resq_r3_1 = _mm_unpackhi_epi32(temp18, temp20);  // g0 g1 g2 g3 h0 h1 h2 h3
    /*
     *  a4 b4 c4 d4 e4 f4 g4 h4
     *  a5 b5 c5 d5 e5 f5 g5 h5
     *  a6 b6 c6 d6 e6 f6 g6 h6
     *  a7 b7 c7 d7 e7 f7 g7 h7
     */
    temp17 = _mm_unpacklo_epi16(temp5, temp6);  // a4 a5 b4 b5 c4 c5 d4 d5
    temp19 = _mm_unpacklo_epi16(temp7, temp8);  // a6 a7 b6 b7 c6 c7 d6 d7
    temp18 = _mm_unpackhi_epi16(temp5, temp6);  // e4 e5 f4 f5 g4 g5 h4 h5
    temp20 = _mm_unpackhi_epi16(temp7, temp8);  // e6 e7 f6 f7 g6 g7 h6 h7

    resq_r4_1 = _mm_unpacklo_epi32(temp17, temp19);  // a4 a5 a6 a7 b4 b5 b6 b7
    resq_r5_1 = _mm_unpackhi_epi32(temp17, temp19);  // c4 c5 c6 c7 d4 d5 d6 d7
    resq_r6_1 = _mm_unpacklo_epi32(temp18, temp20);  // e4 e5 e6 e7 f4 f5 f6 f7
    resq_r7_1 = _mm_unpackhi_epi32(temp18, temp20);  // g4 g5 g6 g7 h4 h5 h6 h7
    /*
     *  a0 a1 a2 a3 b0 b1 b2 b3
     *  c0 c1 c2 c3 d0 d1 d2 d3
     *  e0 e1 e2 e3 f0 f1 f2 f3
     *  g0 g1 g2 g3 h0 h1 h2 h3
     *  a4 a5 a6 a7 b4 b5 b6 b7
     *  c4 c5 c6 c7 d4 d5 d6 d7
     *  e4 e5 e6 e7 f4 f5 f6 f7
     *  g4 g5 g6 g7 h4 h5 h6 h7
     */
    resq_r0_2 = _mm_unpacklo_epi64(resq_r0_1, resq_r4_1);  // a0 a1 a2 a3 a4 a5 a6 a7
    resq_r1_2 = _mm_unpackhi_epi64(resq_r0_1, resq_r4_1);  // b0 b1 b2 b3 b4 b5 b6 b7
    resq_r2_2 = _mm_unpacklo_epi64(resq_r1_1, resq_r5_1);  // c0 c1 c2 c3 c4 c5 c6 c7
    resq_r3_2 = _mm_unpackhi_epi64(resq_r1_1, resq_r5_1);  // d0 d1 d2 d3 d4 d5 d6 d7
    resq_r4_2 = _mm_unpacklo_epi64(resq_r2_1, resq_r6_1);  // e0 e1 e2 e3 e4 e5 e6 e7
    resq_r5_2 = _mm_unpackhi_epi64(resq_r2_1, resq_r6_1);  // f0 f1 f2 f3 f4 f5 f6 f7
    resq_r6_2 = _mm_unpacklo_epi64(resq_r3_1, resq_r7_1);  // g0 g1 g2 g3 g4 g5 g6 g7
    resq_r7_2 = _mm_unpackhi_epi64(resq_r3_1, resq_r7_1);  // h0 h1 h2 h3 h4 h5 h6 h7

    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r1_2);
    resq_r1_1 = _mm_unpacklo_epi16(resq_r1_2, sign_reg);  // b0 b1 b2 b3 -- 32 bit
    resq_r1_2 = _mm_unpackhi_epi16(resq_r1_2, sign_reg);  // b4 b5 b6 b7 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r3_2);
    resq_r3_1 = _mm_unpacklo_epi16(resq_r3_2, sign_reg);  // d0 d1 d2 d3 -- 32 bit
    resq_r3_2 = _mm_unpackhi_epi16(resq_r3_2, sign_reg);  // d4 d5 d6 d7 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r5_2);
    resq_r5_1 = _mm_unpacklo_epi16(resq_r5_2, sign_reg);  // f0 f1 f2 f3 -- 32 bit
    resq_r5_2 = _mm_unpackhi_epi16(resq_r5_2, sign_reg);  // f4 f5 f6 f7 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r7_2);
    resq_r7_1 = _mm_unpacklo_epi16(resq_r7_2, sign_reg);  // h0 h1 h2 h3 -- 32 bit
    resq_r7_2 = _mm_unpackhi_epi16(resq_r7_2, sign_reg);  // h4 h5 h6 h7 -- 32 bit

    zero_8x16b = _mm_setzero_si128();  // all bits reset to zero
    // p0 p1 p2 p3 p4 p5 p6 p7 -- eight 16-bit pred/residual samples per row
    pred_r0 = _mm_loadu_si128((__m128i *) (&pi2_pred[0]));
    pred_r1 = _mm_loadu_si128((__m128i *) (&pi2_pred[pred_strd]));
    pred_r2 = _mm_loadu_si128((__m128i *) (&pi2_pred[2 * pred_strd]));
    pred_r3 = _mm_loadu_si128((__m128i *) (&pi2_pred[3 * pred_strd]));
    pred_r4 = _mm_loadu_si128((__m128i *) (&pi2_pred[4 * pred_strd]));
    pred_r5 = _mm_loadu_si128((__m128i *) (&pi2_pred[5 * pred_strd]));
    pred_r6 = _mm_loadu_si128((__m128i *) (&pi2_pred[6 * pred_strd]));
    pred_r7 = _mm_loadu_si128((__m128i *) (&pi2_pred[7 * pred_strd]));

    /*--------------------------------------------------------------------*/
    /* IDCT [ Vertical transformation] and Xij = (xij + 32)>>6             */
    /* Add the prediction and store the result to the output buffer        */
    /*--------------------------------------------------------------------*/

    /* y0j = w0j + w4j */
    temp1 = _mm_add_epi16(resq_r0_2, resq_r4_2);
    /* y2j = w0j - w4j */
    temp3 = _mm_sub_epi16(resq_r0_2, resq_r4_2);
    /* y1j = -w3j + w5j - w7j - (w7j >> 1) */
    temp2 = _mm_sub_epi32(resq_r5_1, resq_r3_1);  // -w3+w5
    temp10 = _mm_sub_epi32(resq_r5_2, resq_r3_2);
    temp4 = _mm_sub_epi32(temp2, resq_r7_1);      // -w3+w5-w7
    temp12 = _mm_sub_epi32(temp10, resq_r7_2);
    temp5 = _mm_srai_epi32(resq_r7_1, 1);         // w7>>1
    temp13 = _mm_srai_epi32(resq_r7_2, 1);
    temp2 = _mm_sub_epi32(temp4, temp5);          // -w3+w5-w7-(w7>>1)
    temp10 = _mm_sub_epi32(temp12, temp13);
    temp2 = _mm_packs_epi32(temp2, temp10);
    /* y3j = w1j + w7j - w3j - (w3j >> 1) */
    temp4 = _mm_add_epi32(resq_r1_1, resq_r7_1);  // w1+w7
    temp12 = _mm_add_epi32(resq_r1_2, resq_r7_2);
    temp4 = _mm_sub_epi32(temp4, resq_r3_1);      // w1+w7-w3
    temp12 = _mm_sub_epi32(temp12, resq_r3_2);
    temp5 = _mm_srai_epi32(resq_r3_1, 1);         // w3>>1
    temp13 = _mm_srai_epi32(resq_r3_2, 1);
    temp4 = _mm_sub_epi32(temp4, temp5);          // w1+w7-w3-(w3>>1)
    temp12 = _mm_sub_epi32(temp12, temp13);
    temp4 = _mm_packs_epi32(temp4, temp12);
    /* y4j = (w2j >> 1) - w6j */
    temp5 = _mm_srai_epi16(resq_r2_2, 1);         // w2>>1
    temp5 = _mm_sub_epi16(temp5, resq_r6_2);      // (w2>>1)-w6
    /* y5j = -w1j + w7j + w5j + (w5j >> 1) */
    temp6 = _mm_sub_epi32(resq_r7_1, resq_r1_1);  // w7-w1
    temp14 = _mm_sub_epi32(resq_r7_2, resq_r1_2);
    temp6 = _mm_add_epi32(temp6, resq_r5_1);      // w7-w1+w5
    temp14 = _mm_add_epi32(temp14, resq_r5_2);
    temp7 = _mm_srai_epi32(resq_r5_1, 1);         // w5>>1
    temp15 = _mm_srai_epi32(resq_r5_2, 1);
    temp6 = _mm_add_epi32(temp6, temp7);          // w7-w1+w5+(w5>>1)
    temp14 = _mm_add_epi32(temp14, temp15);
    temp6 = _mm_packs_epi32(temp6, temp14);
    /* y6j = w2j + (w6j >> 1) */
    temp7 = _mm_srai_epi16(resq_r6_2, 1);         // w6>>1
    temp7 = _mm_add_epi16(temp7, resq_r2_2);      // (w6>>1)+w2
    /* y7j = w3j + w5j + w1j + (w1j >> 1) */
    temp8 = _mm_add_epi32(resq_r3_1, resq_r5_1);  // w3+w5
    temp16 = _mm_add_epi32(resq_r3_2, resq_r5_2);
    temp8 = _mm_add_epi32(temp8, resq_r1_1);      // w3+w5+w1
    temp16 = _mm_add_epi32(temp16, resq_r1_2);
    temp17 = _mm_srai_epi32(resq_r1_1, 1);        // w1>>1
    temp18 = _mm_srai_epi32(resq_r1_2, 1);
    temp8 = _mm_add_epi32(temp8, temp17);         // w3+w5+w1+(w1>>1)
    temp16 = _mm_add_epi32(temp16, temp18);
    temp8 = _mm_packs_epi32(temp8, temp16);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* z0j = y0j + y6j */
    resq_r0_1 = _mm_add_epi16(temp1, temp7);
    /* z1j = y1j + (y7j >> 2) */
    resq_r1_1 = _mm_srai_epi16(temp8, 2);
    resq_r1_1 = _mm_add_epi16(resq_r1_1, temp2);
    /* z2j = y2j + y4j */
    resq_r2_1 = _mm_add_epi16(temp3, temp5);
    /* z3j = y3j + (y5j >> 2) */
    resq_r3_1 = _mm_srai_epi16(temp6, 2);
    resq_r3_1 = _mm_add_epi16(resq_r3_1, temp4);
    /* z4j = y2j - y4j */
    resq_r4_1 = _mm_sub_epi16(temp3, temp5);
    /* z5j = (y3j >> 2) - y5j */
    resq_r5_1 = _mm_srai_epi16(temp4, 2);
    resq_r5_1 = _mm_sub_epi16(resq_r5_1, temp6);
    /* z6j = y0j - y6j */
    resq_r6_1 = _mm_sub_epi16(temp1, temp7);
    /* z7j = y7j - (y1j >> 2) */
    resq_r7_1 = _mm_srai_epi16(temp2, 2);
    resq_r7_1 = _mm_sub_epi16(temp8, resq_r7_1);
    /*------------------------------------------------------------------*/

    /*------------------------------------------------------------------*/
    /* x0j = z0j + z7j */
    temp1 = _mm_add_epi16(resq_r0_1, resq_r7_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp1);
    temp10 = _mm_unpacklo_epi16(temp1, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp1, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r0 = _mm_add_epi16(temp10, pred_r0);
    pred_r0 = _mm_min_epi16(dupmax_8x16b, pred_r0);
    pred_r0 = _mm_max_epi16(dupmin_8x16b, pred_r0);

    /* x1j = z2j + z5j */
    temp2 = _mm_add_epi16(resq_r2_1, resq_r5_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp2);
    temp10 = _mm_unpacklo_epi16(temp2, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp2, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r1 = _mm_add_epi16(temp10, pred_r1);
    pred_r1 = _mm_min_epi16(dupmax_8x16b, pred_r1);
    pred_r1 = _mm_max_epi16(dupmin_8x16b, pred_r1);
    /* x2j = z4j + z3j */
    temp3 = _mm_add_epi16(resq_r4_1, resq_r3_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp3);
    temp10 = _mm_unpacklo_epi16(temp3, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp3, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r2 = _mm_add_epi16(temp10, pred_r2);
    pred_r2 = _mm_min_epi16(dupmax_8x16b, pred_r2);
    pred_r2 = _mm_max_epi16(dupmin_8x16b, pred_r2);
    /* x3j = z6j + z1j */
    temp4 = _mm_add_epi16(resq_r6_1, resq_r1_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp4);
    temp10 = _mm_unpacklo_epi16(temp4, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp4, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r3 = _mm_add_epi16(temp10, pred_r3);
    pred_r3 = _mm_min_epi16(dupmax_8x16b, pred_r3);
    pred_r3 = _mm_max_epi16(dupmin_8x16b, pred_r3);
    /* x4j = z6j - z1j */
    temp5 = _mm_sub_epi16(resq_r6_1, resq_r1_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp5);
    temp10 = _mm_unpacklo_epi16(temp5, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp5, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r4 = _mm_add_epi16(temp10, pred_r4);
    pred_r4 = _mm_min_epi16(dupmax_8x16b, pred_r4);
    pred_r4 = _mm_max_epi16(dupmin_8x16b, pred_r4);
    /* x5j = z4j - z3j */
    temp6 = _mm_sub_epi16(resq_r4_1, resq_r3_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp6);
    temp10 = _mm_unpacklo_epi16(temp6, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp6, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r5 = _mm_add_epi16(temp10, pred_r5);
    pred_r5 = _mm_min_epi16(dupmax_8x16b, pred_r5);
    pred_r5 = _mm_max_epi16(dupmin_8x16b, pred_r5);
    /* x6j = z2j - z5j */
    temp7 = _mm_sub_epi16(resq_r2_1, resq_r5_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp7);
    temp10 = _mm_unpacklo_epi16(temp7, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp7, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r6 = _mm_add_epi16(temp10, pred_r6);
    pred_r6 = _mm_min_epi16(dupmax_8x16b, pred_r6);
    pred_r6 = _mm_max_epi16(dupmin_8x16b, pred_r6);
    /* x7j = z0j - z7j */
    temp8 = _mm_sub_epi16(resq_r0_1, resq_r7_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp8);
    temp10 = _mm_unpacklo_epi16(temp8, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp8, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    pred_r7 = _mm_add_epi16(temp10, pred_r7);
    pred_r7 = _mm_min_epi16(dupmax_8x16b, pred_r7);
    pred_r7 = _mm_max_epi16(dupmin_8x16b, pred_r7);

    pred_r01_b0 = _mm_unpacklo_epi64(pred_r0, pred_r1);
    pred_r23_b0 = _mm_unpacklo_epi64(pred_r2, pred_r3);
    pred_r45_b2 = _mm_unpacklo_epi64(pred_r4, pred_r5);
    pred_r67_b2 = _mm_unpacklo_epi64(pred_r6, pred_r7);

    pred_r01_b1 = _mm_unpackhi_epi64(pred_r0, pred_r1);
    pred_r23_b1 = _mm_unpackhi_epi64(pred_r2, pred_r3);
    pred_r45_b3 = _mm_unpackhi_epi64(pred_r4, pred_r5);
    pred_r67_b3 = _mm_unpackhi_epi64(pred_r6, pred_r7);

    // return 1 if all zeros, else 0
    row_01_b0 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r01_b0, zero_8x16b));
    row_23_b0 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r23_b0, zero_8x16b));
    row_45_b2 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r45_b2, zero_8x16b));
    row_67_b2 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r67_b2, zero_8x16b));

    row_01_b1 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r01_b1, zero_8x16b));
    row_23_b1 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r23_b1, zero_8x16b));
    row_45_b3 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r45_b3, zero_8x16b));
    row_67_b3 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_r67_b3, zero_8x16b));

    _mm_storeu_si128((__m128i *) (&pi2_out[0]), pred_r0);
    _mm_storeu_si128((__m128i *) (&pi2_out[out_strd]), pred_r1);
    _mm_storeu_si128((__m128i *) (&pi2_out[2 * out_strd]), pred_r2);
    _mm_storeu_si128((__m128i *) (&pi2_out[3 * out_strd]), pred_r3);
    _mm_storeu_si128((__m128i *) (&pi2_out[4 * out_strd]), pred_r4);
    _mm_storeu_si128((__m128i *) (&pi2_out[5 * out_strd]), pred_r5);
    _mm_storeu_si128((__m128i *) (&pi2_out[6 * out_strd]), pred_r6);
    _mm_storeu_si128((__m128i *) (&pi2_out[7 * out_strd]), pred_r7);

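    /* One nnz flag per 4x4 sub-block, packed at the bit positions expected
       by the caller: b0 -> bit 0, b1 -> bit 1, b2 -> bit 4, b3 -> bit 5 */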
    i4_nnz_b0 = (!(row_01_b0 && row_23_b0));
    i4_nnz_b1 = (!(row_01_b1 && row_23_b1)) << 1;
    i4_nnz_b2 = (!(row_45_b2 && row_67_b2)) << 4;
    i4_nnz_b3 = (!(row_45_b3 && row_67_b3)) << 5;

    i4_nnz = (i4_nnz_b0 | i4_nnz_b1 | i4_nnz_b2 | i4_nnz_b3);
    return i4_nnz;
}

/*****************************************************************************/
/*                                                                           */
/*  Function Name : isvcd_iquant_itrans_residual_4x4_dc_sse42                */
/*                                                                           */
/*  Description   : This function computes the residual output from         */
/*                  inverse quantization and inverse transform (IQ + IT)     */
/*                  for a block with only a DC coefficient                   */
/*                                                                           */
/*  Inputs        :                                                          */
/*  Globals       : none                                                     */
/*  Processing    :                                                          */
/*                                                                           */
/*  Outputs       : residual block written to pi2_out                        */
/*  Returns       : i4_nnz                                                   */
/*                                                                           */
/*  Issues        : none                                                     */
/*                                                                           */
/*  Revision History:                                                        */
/*                                                                           */
/*         DD MM YYYY   Author(s)       Changes (Describe the changes made)  */
/*         25 11 2021   Kishore         creation                             */
/*                                                                           */
/*****************************************************************************/

WORD32 isvcd_iquant_itrans_residual_4x4_dc_sse42(WORD16 *pi2_src, WORD16 *pi2_pred, WORD16 *pi2_out,
                                                 WORD32 pred_strd, WORD32 out_strd,
                                                 const UWORD16 *pu2_iscal_mat,
                                                 const UWORD16 *pu2_weigh_mat, UWORD32 u4_qp_div_6,
                                                 WORD16 *pi2_tmp, WORD32 iq_start_idx,
                                                 WORD16 *pi2_dc_ld_addr)
{
    __m128i pred_8x16b_0;
    __m128i pred_8x16b_1;
    __m128i pred_8x16b_2;
    __m128i pred_8x16b_3;
    __m128i pred_8x16b_01, pred_8x16b_23;
    __m128i i_macro_8x16b;
    __m128i zero_8x16b = _mm_setzero_si128();
    __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
    __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);

    WORD32 i4_nnz, row_01, row_23;
    WORD32 q0;
    WORD16 i_macro;
    WORD16 rnd_fact = (u4_qp_div_6 < 4) ? 1 << (3 - u4_qp_div_6) : 0;

    UNUSED(pi2_tmp);

    if(iq_start_idx == 0)
    {
        q0 = pi2_src[0];
        INV_QUANT(q0, pu2_iscal_mat[0], pu2_weigh_mat[0], u4_qp_div_6, rnd_fact, 4);
    }
    else
    {
        q0 = pi2_dc_ld_addr[0];  // Restoring dc value for intra case
    }
    i_macro = ((q0 + 32) >> 6);
    i_macro_8x16b = _mm_set1_epi16(i_macro);
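    /* DC-only block: the inverse transform of a lone DC coefficient is the
       constant (q0 + 32) >> 6, broadcast above to all 16 samples */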

    pred_8x16b_0 = _mm_loadu_si128((__m128i *) (pi2_pred));
    pred_8x16b_1 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd));
    pred_8x16b_2 = _mm_loadu_si128((__m128i *) (pi2_pred + (pred_strd << 1)));
    pred_8x16b_3 = _mm_loadu_si128((__m128i *) (pi2_pred + (pred_strd << 1) + pred_strd));

    pred_8x16b_0 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_0);
    pred_8x16b_1 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_1);
    pred_8x16b_2 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_2);
    pred_8x16b_3 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_3);

    pred_8x16b_0 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_0);
    pred_8x16b_0 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_0);
    pred_8x16b_1 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_1);
    pred_8x16b_1 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_1);
    pred_8x16b_2 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_2);
    pred_8x16b_2 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_2);
    pred_8x16b_3 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_3);
    pred_8x16b_3 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_3);

    pred_8x16b_01 = _mm_unpacklo_epi64(pred_8x16b_0, pred_8x16b_1);
    pred_8x16b_23 = _mm_unpacklo_epi64(pred_8x16b_2, pred_8x16b_3);

    row_01 = _mm_test_all_ones(
        _mm_cmpeq_epi16(pred_8x16b_01, zero_8x16b));  // return 1 if all zeros, else 0
    row_23 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_23, zero_8x16b));

    _mm_storel_epi64((__m128i *) (pi2_out), pred_8x16b_0);
    _mm_storel_epi64((__m128i *) (pi2_out + out_strd), pred_8x16b_1);
    _mm_storel_epi64((__m128i *) (pi2_out + 2 * out_strd), pred_8x16b_2);
    _mm_storel_epi64((__m128i *) (pi2_out + 3 * out_strd), pred_8x16b_3);

    i4_nnz = !(row_01 && row_23);
    return i4_nnz;
}

/*****************************************************************************/
/*                                                                           */
/*  Function Name : isvcd_iquant_itrans_residual_8x8_dc_sse42                */
/*                                                                           */
/*  Description   : This function computes the residual output from         */
/*                  inverse quantization and inverse transform (IQ + IT)     */
/*                  for a block with only a DC coefficient                   */
/*                                                                           */
/*  Inputs        :                                                          */
/*  Globals       : none                                                     */
/*  Processing    :                                                          */
/*                                                                           */
/*  Outputs       : residual block written to pi2_out                        */
/*  Returns       : i4_nnz                                                   */
/*                                                                           */
/*  Issues        : none                                                     */
/*                                                                           */
/*  Revision History:                                                        */
/*                                                                           */
/*         DD MM YYYY   Author(s)       Changes (Describe the changes made)  */
/*         25 11 2021   Kishore         creation                             */
/*                                                                           */
/*****************************************************************************/

isvcd_iquant_itrans_residual_8x8_dc_sse42(WORD16 * pi2_src,WORD16 * pi2_pred,WORD16 * pi2_out,WORD32 pred_strd,WORD32 out_strd,const UWORD16 * pu2_iscale_mat,const UWORD16 * pu2_weigh_mat,UWORD32 qp_div,WORD16 * pi2_tmp,WORD32 iq_start_idx,WORD16 * pi2_dc_ld_addr)1186 WORD32 isvcd_iquant_itrans_residual_8x8_dc_sse42(WORD16 *pi2_src, WORD16 *pi2_pred, WORD16 *pi2_out,
                                                 WORD32 pred_strd, WORD32 out_strd,
                                                 const UWORD16 *pu2_iscale_mat,
                                                 const UWORD16 *pu2_weigh_mat, UWORD32 qp_div,
                                                 WORD16 *pi2_tmp, WORD32 iq_start_idx,
                                                 WORD16 *pi2_dc_ld_addr)
{
    __m128i pred_8x16b_0;
    __m128i pred_8x16b_1;
    __m128i pred_8x16b_2;
    __m128i pred_8x16b_3;
    __m128i pred_8x16b_4;
    __m128i pred_8x16b_5;
    __m128i pred_8x16b_6;
    __m128i pred_8x16b_7;
    __m128i pred_8x16b_01_b0, pred_8x16b_23_b0, pred_8x16b_45_b2, pred_8x16b_67_b2;
    __m128i pred_8x16b_01_b1, pred_8x16b_23_b1, pred_8x16b_45_b3, pred_8x16b_67_b3;

    WORD32 row_01_b0, row_23_b0, row_45_b2, row_67_b2;
    WORD32 row_01_b1, row_23_b1, row_45_b3, row_67_b3;
    WORD32 i4_nnz, i4_nnz_b0, i4_nnz_b1, i4_nnz_b2, i4_nnz_b3;

    __m128i zero_8x16b = _mm_setzero_si128();

    WORD32 pred_strd2 = (pred_strd << 1);
    WORD32 pred_strd4 = (pred_strd << 2);
    WORD32 out_strd2 = (out_strd << 1);
    WORD32 out_strd4 = (out_strd << 2);

    __m128i i_macro_8x16b;
    __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
    __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);

    WORD32 q;
    WORD16 i_macro;
    WORD32 rnd_fact = (qp_div < 6) ? (1 << (5 - qp_div)) : 0;

    UNUSED(pi2_tmp);
    UNUSED(iq_start_idx);
    UNUSED(pi2_dc_ld_addr);
    /*************************************************************/
    /* Dequantization of the DC coefficient (scalar); it is the  */
    /* only non-zero coefficient handled in this path            */
    /*************************************************************/
    q = pi2_src[0];
    INV_QUANT(q, pu2_iscale_mat[0], pu2_weigh_mat[0], qp_div, rnd_fact, 6);
    i_macro = (q + 32) >> 6;

    i_macro_8x16b = _mm_set1_epi16(i_macro);

    pred_8x16b_0 = _mm_loadu_si128((__m128i *) (pi2_pred));
    pred_8x16b_1 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd));
    pred_8x16b_2 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd2));
    pred_8x16b_3 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd2 + pred_strd));
    pred_8x16b_4 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd4));
    pred_8x16b_5 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd4 + pred_strd));
    pred_8x16b_6 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd4 + pred_strd2));
    pred_8x16b_7 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd4 + pred_strd2 + pred_strd));

    pred_8x16b_0 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_0);
    pred_8x16b_1 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_1);
    pred_8x16b_2 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_2);
    pred_8x16b_3 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_3);
    pred_8x16b_4 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_4);
    pred_8x16b_5 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_5);
    pred_8x16b_6 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_6);
    pred_8x16b_7 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_7);

    pred_8x16b_0 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_0);
    pred_8x16b_0 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_0);
    pred_8x16b_1 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_1);
    pred_8x16b_1 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_1);
    pred_8x16b_2 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_2);
    pred_8x16b_2 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_2);
    pred_8x16b_3 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_3);
    pred_8x16b_3 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_3);
    pred_8x16b_4 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_4);
    pred_8x16b_4 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_4);
    pred_8x16b_5 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_5);
    pred_8x16b_5 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_5);
    pred_8x16b_6 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_6);
    pred_8x16b_6 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_6);
    pred_8x16b_7 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_7);
    pred_8x16b_7 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_7);

    pred_8x16b_01_b0 = _mm_unpacklo_epi64(pred_8x16b_0, pred_8x16b_1);
    pred_8x16b_23_b0 = _mm_unpacklo_epi64(pred_8x16b_2, pred_8x16b_3);
    pred_8x16b_01_b1 = _mm_unpackhi_epi64(pred_8x16b_0, pred_8x16b_1);
    pred_8x16b_23_b1 = _mm_unpackhi_epi64(pred_8x16b_2, pred_8x16b_3);

    pred_8x16b_45_b2 = _mm_unpacklo_epi64(pred_8x16b_4, pred_8x16b_5);
    pred_8x16b_67_b2 = _mm_unpacklo_epi64(pred_8x16b_6, pred_8x16b_7);
    pred_8x16b_45_b3 = _mm_unpackhi_epi64(pred_8x16b_4, pred_8x16b_5);
    pred_8x16b_67_b3 = _mm_unpackhi_epi64(pred_8x16b_6, pred_8x16b_7);

    row_01_b0 = _mm_test_all_ones(
        _mm_cmpeq_epi16(pred_8x16b_01_b0, zero_8x16b));  // return 1 if all zeros, else 0
    row_23_b0 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_23_b0, zero_8x16b));
    row_01_b1 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_01_b1, zero_8x16b));
    row_23_b1 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_23_b1, zero_8x16b));
    row_45_b2 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_45_b2, zero_8x16b));
    row_67_b2 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_67_b2, zero_8x16b));
    row_45_b3 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_45_b3, zero_8x16b));
    row_67_b3 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_67_b3, zero_8x16b));

    _mm_storeu_si128((__m128i *) (pi2_out), pred_8x16b_0);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd), pred_8x16b_1);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd2), pred_8x16b_2);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd2 + out_strd), pred_8x16b_3);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd4), pred_8x16b_4);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd4 + out_strd), pred_8x16b_5);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd4 + out_strd2), pred_8x16b_6);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd4 + out_strd2 + out_strd), pred_8x16b_7);

    i4_nnz_b0 = (!(row_01_b0 && row_23_b0));
    i4_nnz_b1 = (!(row_01_b1 && row_23_b1)) << 1;
    i4_nnz_b2 = (!(row_45_b2 && row_67_b2)) << 4;
    i4_nnz_b3 = (!(row_45_b3 && row_67_b3)) << 5;

    i4_nnz = (i4_nnz_b0 | i4_nnz_b1 | i4_nnz_b2 | i4_nnz_b3);
    return i4_nnz;
}
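
/* A note on the return value above: the four 4x4 sub-blocks of the 8x8
 * block land at bits 0, 1, 4 and 5 (b0 top-left, b1 top-right, b2
 * bottom-left, b3 bottom-right), presumably so the caller can OR the result
 * into a 4-bits-per-row map of 4x4 blocks within the macroblock. A scalar
 * sketch of the packing, illustrative only (the helper name is
 * hypothetical):
 */
#if 0
static WORD32 pack_nnz_8x8(WORD32 b0, WORD32 b1, WORD32 b2, WORD32 b3)
{
    /* each argument is non-zero when the corresponding 4x4 sub-block
       contains at least one non-zero residual sample */
    return (b0 != 0) | ((b1 != 0) << 1) | ((b2 != 0) << 4) | ((b3 != 0) << 5);
}
#endif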

/*****************************************************************************/
/*                                                                           */
/*  Function Name : isvcd_iquant_itrans_residual_chroma_4x4_sse42            */
/*                                                                           */
/*  Description   : this function computes the residual output of the       */
/*                  IQ+IT for a 4x4 chroma block with interleaved Cb/Cr     */
/*                  data                                                     */
/*                                                                           */
/*  Inputs        : quantized coefficients, prediction residuals and        */
/*                  strides, inverse scale/weight matrices, qp/6, and the   */
/*                  pre-processed DC value in pi2_dc_src                    */
/*  Globals       : none                                                     */
/*  Processing    :                                                          */
/*                                                                           */
/*  Outputs       : residual block written to pi2_out                       */
/*  Returns       : i4_nnz                                                   */
/*                                                                           */
/*  Issues        : none                                                     */
/*                                                                           */
/*  Revision History:                                                        */
/*                                                                           */
/*         DD MM YYYY   Author(s)       Changes (Describe the changes made) */
/*         25 11 2021   Kishore         creation                            */
/*                                                                           */
/*****************************************************************************/

WORD32 isvcd_iquant_itrans_residual_chroma_4x4_sse42(WORD16 *pi2_src, WORD16 *pi2_pred,
                                                     WORD16 *pi2_out, WORD32 pred_strd,
                                                     WORD32 out_strd, const UWORD16 *pu2_iscal_mat,
                                                     const UWORD16 *pu2_weigh_mat,
                                                     UWORD32 u4_qp_div_6, WORD16 *pi2_tmp,
                                                     WORD16 *pi2_dc_src)
{
    WORD32 i4_nnz = 0;
    WORD32 row_0, row_1, row_2, row_3;
    __m128i src_r0_r1, src_r2_r3;
    __m128i src_r0, src_r1, src_r2, src_r3;
    __m128i scalemat_r0_r1, scalemat_r2_r3;
    __m128i pred_r0, pred_r1, pred_r2, pred_r3;
    __m128i dequant_r0_r1, dequant_r2_r3;
    __m128i zero_8x16b = _mm_setzero_si128();  // all bits reset to zero
    __m128i temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    __m128i resq_r0, resq_r1, resq_r2, resq_r3;
    __m128i add_rshift = _mm_set1_epi32((u4_qp_div_6 < 4) ? (1 << (3 - u4_qp_div_6)) : 0);
    __m128i value_32 = _mm_set1_epi32(32);

    __m128i out_8x16b_0, out_8x16b_1, out_8x16b_2, out_8x16b_3;
    __m128i chroma_mask = _mm_set1_epi32(0xFFFF0000);
    __m128i chroma_mask2 = _mm_set1_epi32(0x0000FFFF);
    __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
    __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);

    UNUSED(pi2_tmp);

    /*************************************************************/
    /* Dequantization of coefficients (SIMD)                     */
    /*************************************************************/
    // a00 a01 a02 a03 a10 a11 a12 a13 -- the source matrix 0th,1st row
    src_r0_r1 = _mm_loadu_si128((__m128i *) (pi2_src));
    // a20 a21 a22 a23 a30 a31 a32 a33 -- the source matrix 2nd,3rd row
    src_r2_r3 = _mm_loadu_si128((__m128i *) (pi2_src + 8));
    // b00 b01 b02 b03 b10 b11 b12 b13 -- the scaling matrix 0th,1st row
    scalemat_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat));
    // b20 b21 b22 b23 b30 b31 b32 b33 -- the scaling matrix 2nd,3rd row
    scalemat_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat + 8));
    // q00 q01 q02 q03 q10 q11 q12 q13 -- all 16 bits
    dequant_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat));
    // q20 q21 q22 q23 q30 q31 q32 q33 -- all 16 bits
    dequant_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat + 8));

    // b00*q00 b01*q01 b02*q02 b03*q03 b10*q10 b11*q11 b12*q12 b13*q13 -- 16 bit result
    temp0 = _mm_mullo_epi16(scalemat_r0_r1, dequant_r0_r1);
    // b20*q20 b21*q21 b22*q22 b23*q23 b30*q30 b31*q31 b32*q32 b33*q33 -- 16 bit result
    temp1 = _mm_mullo_epi16(scalemat_r2_r3, dequant_r2_r3);

    // b00*q00 0 b01*q01 0 b02*q02 0 b03*q03 0 -- zero extended to 32 bit lanes
    temp4 = _mm_unpacklo_epi16(temp0, zero_8x16b);
    // b10*q10 0 b11*q11 0 b12*q12 0 b13*q13 0 -- zero extended to 32 bit lanes
    temp5 = _mm_unpackhi_epi16(temp0, zero_8x16b);
    // b20*q20 0 b21*q21 0 b22*q22 0 b23*q23 0 -- zero extended to 32 bit lanes
    temp6 = _mm_unpacklo_epi16(temp1, zero_8x16b);
    // b30*q30 0 b31*q31 0 b32*q32 0 b33*q33 0 -- zero extended to 32 bit lanes
    temp7 = _mm_unpackhi_epi16(temp1, zero_8x16b);

    src_r0 = _mm_unpacklo_epi16(src_r0_r1, zero_8x16b);  // a00 0 a01 0 a02 0 a03 0 -- 32 bit lanes
    src_r1 = _mm_unpackhi_epi16(src_r0_r1, zero_8x16b);  // a10 0 a11 0 a12 0 a13 0 -- 32 bit lanes
    src_r2 = _mm_unpacklo_epi16(src_r2_r3, zero_8x16b);  // a20 0 a21 0 a22 0 a23 0 -- 32 bit lanes
    src_r3 = _mm_unpackhi_epi16(src_r2_r3, zero_8x16b);  // a30 0 a31 0 a32 0 a33 0 -- 32 bit lanes

    // a00*b00*q00 a01*b01*q01 a02*b02*q02 a03*b03*q03 -- 32 bits long
    temp4 = _mm_madd_epi16(src_r0, temp4);
    temp5 = _mm_madd_epi16(src_r1, temp5);
    temp6 = _mm_madd_epi16(src_r2, temp6);
    temp7 = _mm_madd_epi16(src_r3, temp7);

    if(u4_qp_div_6 >= 4)
    {
        resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);
        resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);
        resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);
        resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);
    }
    else
    {
        temp4 = _mm_add_epi32(temp4, add_rshift);
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp6 = _mm_add_epi32(temp6, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r0 = _mm_srai_epi32(temp4, 4 - u4_qp_div_6);
        resq_r1 = _mm_srai_epi32(temp5, 4 - u4_qp_div_6);
        resq_r2 = _mm_srai_epi32(temp6, 4 - u4_qp_div_6);
        resq_r3 = _mm_srai_epi32(temp7, 4 - u4_qp_div_6);
    }
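    /* The branch above is the vector form of INV_QUANT: the dequantized
       coefficient is (src * iscal * weight) << (u4_qp_div_6 - 4) when
       u4_qp_div_6 >= 4, else (src * iscal * weight + (1 << (3 - u4_qp_div_6)))
       >> (4 - u4_qp_div_6), i.e. a right shift with rounding. */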

    resq_r0 = _mm_insert_epi32(resq_r0, (WORD32) pi2_dc_src[0], 0);
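    /* The DC slot is overwritten from pi2_dc_src rather than dequantized
       here: the chroma DC coefficients are handled separately upstream
       (2x2 DC transform) and arrive at this function already processed. */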
    /* Perform Inverse transform */
    /*-------------------------------------------------------------*/
    /* IDCT [ Horizontal transformation ]                          */
    /*-------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 a1 a2 a3
     *  b0 b1 b2 b3
     *  c0 c1 c2 c3
     *  d0 d1 d2 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);  // a0 b0 a1 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);  // c0 d0 c1 d1
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);  // a2 b2 a3 b3
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);  // c2 d2 c3 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);    // a0 b0 c0 d0
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);    // a1 b1 c1 d1
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);    // a2 b2 c2 d2
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);    // a3 b3 c3 d3
    // Transform starts -- horizontal transform
    /*------------------------------------------------------------------*/
    /* z0 = w0 + w2                                                     */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1 = w0 - w2                                                     */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2 = (w1 >> 1) - w3                                              */
    temp2 = _mm_srai_epi32(resq_r1, 1);     //(w1>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);  //(w1>>1) - w3
    /* z3 = w1 + (w3 >> 1)                                              */
    temp3 = _mm_srai_epi32(resq_r3, 1);     //(w3>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);  //(w3>>1) + w1
    /*----------------------------------------------------------*/
    /* x0 = z0 + z3                                             */
    resq_r0 = _mm_add_epi32(temp0, temp3);
    /* x1 = z1 + z2                                             */
    resq_r1 = _mm_add_epi32(temp1, temp2);
    /* x2 = z1 - z2                                             */
    resq_r2 = _mm_sub_epi32(temp1, temp2);
    /* x3 = z0 - z3                                             */
    resq_r3 = _mm_sub_epi32(temp0, temp3);
    // Matrix transpose
    /*
     *  a0 b0 c0 d0
     *  a1 b1 c1 d1
     *  a2 b2 c2 d2
     *  a3 b3 c3 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);  // a0 a1 b0 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);  // a2 a3 b2 b3
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);  // c0 c1 d0 d1
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);  // c2 c3 d2 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);    // a0 a1 a2 a3
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);    // b0 b1 b2 b3
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);    // c0 c1 c2 c3
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);    // d0 d1 d2 d3
    // Transform ends -- horizontal transform

    // Load pred buffer
    // p00 x p01 x p02 x p03 x -- interleaved WORD16, this component in even lanes
    pred_r0 = _mm_loadu_si128((__m128i *) (&pi2_pred[0]));
    // p10 x p11 x p12 x p13 x -- interleaved WORD16
    pred_r1 = _mm_loadu_si128((__m128i *) (&pi2_pred[pred_strd]));
    // p20 x p21 x p22 x p23 x -- interleaved WORD16
    pred_r2 = _mm_loadu_si128((__m128i *) (&pi2_pred[2 * pred_strd]));
    // p30 x p31 x p32 x p33 x -- interleaved WORD16
    pred_r3 = _mm_loadu_si128((__m128i *) (&pi2_pred[3 * pred_strd]));

    /*--------------------------------------------------------------*/
    /* IDCT [ Vertical transformation] and Xij = (xij + 32)>>6      */
    /*                                                              */
    /* Add the prediction and store it back to same buffer          */
    /*--------------------------------------------------------------*/
    /* z0j = y0j + y2j                                              */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1j = y0j - y2j                                              */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2j = (y1j>>1) - y3j                                         */
    temp2 = _mm_srai_epi32(resq_r1, 1);  //(y1j>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);
    /* z3j = y1j + (y3j>>1)                                         */
    temp3 = _mm_srai_epi32(resq_r3, 1);  //(y3j>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);

    /* x0j = z0j + z3j                                              */
    temp4 = _mm_add_epi32(temp0, temp3);
    temp4 = _mm_add_epi32(temp4, value_32);
    temp4 = _mm_srai_epi32(temp4, 6);
    temp4 = _mm_add_epi16(temp4, pred_r0);
    temp4 = _mm_min_epi16(dupmax_8x16b, temp4);
    temp4 = _mm_max_epi16(dupmin_8x16b, temp4);

    temp4 = _mm_and_si128(temp4, chroma_mask2);
    row_0 = _mm_test_all_ones(_mm_cmpeq_epi16(temp4, zero_8x16b));  // return 1 if all zeros, else 0

    /* x1j = z1j + z2j                                              */
    temp5 = _mm_add_epi32(temp1, temp2);
    temp5 = _mm_add_epi32(temp5, value_32);
    temp5 = _mm_srai_epi32(temp5, 6);
    temp5 = _mm_add_epi16(temp5, pred_r1);
    temp5 = _mm_min_epi16(dupmax_8x16b, temp5);
    temp5 = _mm_max_epi16(dupmin_8x16b, temp5);
    temp5 = _mm_and_si128(temp5, chroma_mask2);
    row_1 = _mm_test_all_ones(_mm_cmpeq_epi16(temp5, zero_8x16b));  // return 1 if all zeros, else 0

    /* x2j = z1j - z2j                                              */
    temp6 = _mm_sub_epi32(temp1, temp2);
    temp6 = _mm_add_epi32(temp6, value_32);
    temp6 = _mm_srai_epi32(temp6, 6);
    temp6 = _mm_add_epi16(temp6, pred_r2);
    temp6 = _mm_min_epi16(dupmax_8x16b, temp6);
    temp6 = _mm_max_epi16(dupmin_8x16b, temp6);
    temp6 = _mm_and_si128(temp6, chroma_mask2);
    row_2 = _mm_test_all_ones(_mm_cmpeq_epi16(temp6, zero_8x16b));  // return 1 if all zeros, else 0

    /* x3j = z0j - z3j                                              */
    temp7 = _mm_sub_epi32(temp0, temp3);
    temp7 = _mm_add_epi32(temp7, value_32);
    temp7 = _mm_srai_epi32(temp7, 6);
    temp7 = _mm_add_epi16(temp7, pred_r3);
    temp7 = _mm_min_epi16(dupmax_8x16b, temp7);
    temp7 = _mm_max_epi16(dupmin_8x16b, temp7);
    temp7 = _mm_and_si128(temp7, chroma_mask2);
    row_3 = _mm_test_all_ones(_mm_cmpeq_epi16(temp7, zero_8x16b));  // return 1 if all zeros, else 0

    out_8x16b_0 = _mm_loadu_si128((__m128i *) (&pi2_out[0]));
    out_8x16b_1 = _mm_loadu_si128((__m128i *) (&pi2_out[out_strd]));
    out_8x16b_2 = _mm_loadu_si128((__m128i *) (&pi2_out[(out_strd << 1)]));
    out_8x16b_3 = _mm_loadu_si128((__m128i *) (&pi2_out[(out_strd << 1) + out_strd]));

    out_8x16b_0 = _mm_and_si128(out_8x16b_0, chroma_mask);
    out_8x16b_1 = _mm_and_si128(out_8x16b_1, chroma_mask);
    out_8x16b_2 = _mm_and_si128(out_8x16b_2, chroma_mask);
    out_8x16b_3 = _mm_and_si128(out_8x16b_3, chroma_mask);

    out_8x16b_0 = _mm_add_epi16(temp4, out_8x16b_0);
    out_8x16b_1 = _mm_add_epi16(temp5, out_8x16b_1);
    out_8x16b_2 = _mm_add_epi16(temp6, out_8x16b_2);
    out_8x16b_3 = _mm_add_epi16(temp7, out_8x16b_3);

    _mm_storeu_si128((__m128i *) (pi2_out), out_8x16b_0);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd), out_8x16b_1);
    _mm_storeu_si128((__m128i *) (pi2_out + (out_strd << 1)), out_8x16b_2);
    _mm_storeu_si128((__m128i *) (pi2_out + (out_strd * 3)), out_8x16b_3);

    i4_nnz = !(row_0 && row_1 && row_2 && row_3);
    return i4_nnz;
}
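
/* Chroma samples are interleaved (Cb/Cr alternating WORD16 lanes), so the
 * function above computes one component in the even lanes, masks it with
 * chroma_mask2 (0x0000FFFF), keeps the other component already present in
 * pi2_out with chroma_mask (0xFFFF0000), and merges the two with a single
 * add. A scalar sketch of that merge, illustrative only (the helper name is
 * hypothetical):
 */
#if 0
static void merge_chroma_row(WORD16 *pi2_out, const WORD16 *pi2_rsd)
{
    WORD32 j;
    for(j = 0; j < 4; j++)
    {
        pi2_out[2 * j] = pi2_rsd[2 * j];  /* even lanes: component just computed */
        /* odd lanes: the other chroma component in pi2_out stays untouched */
    }
}
#endif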

/*****************************************************************************/
/*                                                                           */
/*  Function Name : isvcd_iquant_itrans_residual_chroma_4x4_dc_sse42         */
/*                                                                           */
/*  Description   : this function computes the residual output of the       */
/*                  IQ+IT for a 4x4 chroma block with interleaved Cb/Cr     */
/*                  data whose only non-zero coefficient is the DC term     */
/*                                                                           */
/*  Inputs        : prediction residuals and strides, pre-processed DC      */
/*                  value in pi2_dc_src                                     */
/*  Globals       : none                                                     */
/*  Processing    :                                                          */
/*                                                                           */
/*  Outputs       : residual block written to pi2_out                       */
/*  Returns       : i4_nnz                                                   */
/*                                                                           */
/*  Issues        : none                                                     */
/*                                                                           */
/*  Revision History:                                                        */
/*                                                                           */
/*         DD MM YYYY   Author(s)       Changes (Describe the changes made) */
/*         25 11 2021   Kishore         creation                            */
/*                                                                           */
/*****************************************************************************/

WORD32 isvcd_iquant_itrans_residual_chroma_4x4_dc_sse42(
    WORD16 *pi2_src, WORD16 *pi2_pred, WORD16 *pi2_out, WORD32 pred_strd, WORD32 out_strd,
    const UWORD16 *pu2_iscal_mat, const UWORD16 *pu2_weigh_mat, UWORD32 u4_qp_div_6,
    WORD16 *pi2_tmp, WORD16 *pi2_dc_src)
{
    __m128i pred_8x16b_0, out_8x16b_0;
    __m128i pred_8x16b_1, out_8x16b_1;
    __m128i pred_8x16b_2, out_8x16b_2;
    __m128i pred_8x16b_3, out_8x16b_3;

    __m128i i_macro_8x16b, chroma_mask, chroma_mask2;
    __m128i zero_8x16b = _mm_setzero_si128();
    __m128i dupmax_8x16b = _mm_set1_epi16(RSD_MAX);
    __m128i dupmin_8x16b = _mm_set1_epi16(RSD_MIN);

    WORD32 i4_nnz, row_0, row_1, row_2, row_3;
    WORD32 q0;
    WORD16 i_macro;

    UNUSED(pi2_src);
    UNUSED(pu2_iscal_mat);
    UNUSED(pu2_weigh_mat);
    UNUSED(pi2_tmp);
    UNUSED(u4_qp_div_6);

    q0 = pi2_dc_src[0];  // Restoring dc value for the intra case
    i_macro = ((q0 + 32) >> 6);

    i_macro_8x16b = _mm_set1_epi16(i_macro);

    pred_8x16b_0 = _mm_loadu_si128((__m128i *) (pi2_pred));
    pred_8x16b_1 = _mm_loadu_si128((__m128i *) (pi2_pred + pred_strd));
    pred_8x16b_2 = _mm_loadu_si128((__m128i *) (pi2_pred + (pred_strd << 1)));
    pred_8x16b_3 = _mm_loadu_si128((__m128i *) (pi2_pred + (pred_strd << 1) + pred_strd));

    pred_8x16b_0 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_0);
    pred_8x16b_1 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_1);
    pred_8x16b_2 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_2);
    pred_8x16b_3 = _mm_add_epi16(i_macro_8x16b, pred_8x16b_3);

    pred_8x16b_0 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_0);
    pred_8x16b_0 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_0);
    pred_8x16b_1 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_1);
    pred_8x16b_1 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_1);
    pred_8x16b_2 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_2);
    pred_8x16b_2 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_2);
    pred_8x16b_3 = _mm_min_epi16(dupmax_8x16b, pred_8x16b_3);
    pred_8x16b_3 = _mm_max_epi16(dupmin_8x16b, pred_8x16b_3);

    chroma_mask = _mm_set1_epi32(0xFFFF0000);
    chroma_mask2 = _mm_set1_epi32(0x0000FFFF);
    out_8x16b_0 = _mm_loadu_si128((__m128i *) (&pi2_out[0]));
    out_8x16b_1 = _mm_loadu_si128((__m128i *) (&pi2_out[out_strd]));
    out_8x16b_2 = _mm_loadu_si128((__m128i *) (&pi2_out[(out_strd << 1)]));
    out_8x16b_3 = _mm_loadu_si128((__m128i *) (&pi2_out[(out_strd << 1) + out_strd]));

    out_8x16b_0 = _mm_and_si128(out_8x16b_0, chroma_mask);
    out_8x16b_1 = _mm_and_si128(out_8x16b_1, chroma_mask);
    out_8x16b_2 = _mm_and_si128(out_8x16b_2, chroma_mask);
    out_8x16b_3 = _mm_and_si128(out_8x16b_3, chroma_mask);

    pred_8x16b_0 = _mm_and_si128(pred_8x16b_0, chroma_mask2);
    pred_8x16b_1 = _mm_and_si128(pred_8x16b_1, chroma_mask2);
    pred_8x16b_2 = _mm_and_si128(pred_8x16b_2, chroma_mask2);
    pred_8x16b_3 = _mm_and_si128(pred_8x16b_3, chroma_mask2);

    // return 1 if all zeros, else 0
    row_0 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_0, zero_8x16b));
    row_1 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_1, zero_8x16b));
    row_2 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_2, zero_8x16b));
    row_3 = _mm_test_all_ones(_mm_cmpeq_epi16(pred_8x16b_3, zero_8x16b));

    out_8x16b_0 = _mm_add_epi16(pred_8x16b_0, out_8x16b_0);
    out_8x16b_1 = _mm_add_epi16(pred_8x16b_1, out_8x16b_1);
    out_8x16b_2 = _mm_add_epi16(pred_8x16b_2, out_8x16b_2);
    out_8x16b_3 = _mm_add_epi16(pred_8x16b_3, out_8x16b_3);

    _mm_storeu_si128((__m128i *) (pi2_out), out_8x16b_0);
    _mm_storeu_si128((__m128i *) (pi2_out + out_strd), out_8x16b_1);
    _mm_storeu_si128((__m128i *) (pi2_out + (out_strd << 1)), out_8x16b_2);
    _mm_storeu_si128((__m128i *) (pi2_out + (out_strd * 3)), out_8x16b_3);

    i4_nnz = !(row_0 && row_1 && row_2 && row_3);
    return i4_nnz;
}

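/* Minimal usage sketch for the DC chroma path above (illustrative only;
 * buffer names and sizes are hypothetical). Parameters that the function
 * marks UNUSED may be passed as NULL/0. The return value is 1 when any
 * residual sample of the 4x4 block is non-zero.
 */
#if 0
static WORD32 example_chroma_dc_call(void)
{
    WORD16 ai2_pred[4 * 8] = {0};  /* interleaved chroma prediction residuals */
    WORD16 ai2_out[4 * 8] = {0};   /* interleaved chroma residual output      */
    WORD16 i2_dc = 64;             /* DC value already processed upstream     */

    return isvcd_iquant_itrans_residual_chroma_4x4_dc_sse42(
        NULL, ai2_pred, ai2_out, 8, 8, NULL, NULL, 0, NULL, &i2_dc);
}
#endif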