/*
 * Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/loongarch/fwd_txfm_lsx.h"
#include "vpx_dsp/fwd_txfm.h"

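/* Note: the DUP*, LSX_BUTTERFLY_* and LSX_TRANSPOSE8x8_H vector helpers and
 * the DOTP_CONST_PAIR / FDCT*_POSTPROC_* macros used below are provided by
 * the LSX helper headers included above; the cospi_N_64 constants are the
 * usual fixed-point DCT cosines, round(16384 * cos(N * pi / 64)), shared
 * with the C reference transform. */

/* Sign-extend the eight int16 lanes of 'in' into two int32 vectors:
 * out0 receives the low four lanes, out1 the high four lanes. */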
#define UNPCK_SH_SW(in, out0, out1)    \
  do {                                 \
    out0 = __lsx_vsllwil_w_h(in, 0);   \
    out1 = __lsx_vexth_w_h(in);        \
  } while (0)

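/* First stage of the 32-point column DCT for an 8-column slice of the input
 * block: load all 32 rows, pre-scale each sample by 4 (<< 2) and butterfly
 * row r against row 31 - r.  The 16 sums go to the first half of temp_buff,
 * the 16 differences to the second half. */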
static void fdct8x32_1d_column_load_butterfly(const int16_t *input,
                                              int32_t src_stride,
                                              int16_t *temp_buff) {
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  __m128i step0, step1, step2, step3;
  __m128i in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;
  __m128i step0_1, step1_1, step2_1, step3_1;

  int32_t stride = src_stride << 1;
  int32_t stride2 = stride << 1;
  int32_t stride3 = stride2 + stride;
  const int16_t *input_tmp = (int16_t *)input;

  in0 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in1, in2);
  in3 = __lsx_vldx(input_tmp, stride3);

  input_tmp += stride2;
  in0_1 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in1_1, in2_1);
  in3_1 = __lsx_vldx(input_tmp, stride3);

  input_tmp = input + (src_stride * 24);
  in4_1 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in5_1, in6_1);
  in7_1 = __lsx_vldx(input_tmp, stride3);

  input_tmp += stride2;
  in4 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in5, in6);
  in7 = __lsx_vldx(input_tmp, stride3);

  DUP4_ARG2(__lsx_vslli_h, in0, 2, in1, 2, in2, 2, in3, 2, in0, in1, in2, in3);
  DUP4_ARG2(__lsx_vslli_h, in4, 2, in5, 2, in6, 2, in7, 2, in4, in5, in6, in7);
  DUP4_ARG2(__lsx_vslli_h, in0_1, 2, in1_1, 2, in2_1, 2, in3_1, 2, in0_1, in1_1,
            in2_1, in3_1);
  DUP4_ARG2(__lsx_vslli_h, in4_1, 2, in5_1, 2, in6_1, 2, in7_1, 2, in4_1, in5_1,
            in6_1, in7_1);
  LSX_BUTTERFLY_8_H(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,
                    step3, in4, in5, in6, in7);
  LSX_BUTTERFLY_8_H(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
                    step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1,
                    in7_1);

  __lsx_vst(step0, temp_buff, 0);
  __lsx_vst(step1, temp_buff, 16);
  __lsx_vst(step2, temp_buff, 32);
  __lsx_vst(step3, temp_buff, 48);

  __lsx_vst(in4, temp_buff, 448);
  __lsx_vst(in5, temp_buff, 464);
  __lsx_vst(in6, temp_buff, 480);
  __lsx_vst(in7, temp_buff, 496);

  __lsx_vst(step0_1, temp_buff, 64);
  __lsx_vst(step1_1, temp_buff, 80);
  __lsx_vst(step2_1, temp_buff, 96);
  __lsx_vst(step3_1, temp_buff, 112);

  __lsx_vst(in4_1, temp_buff, 384);
  __lsx_vst(in5_1, temp_buff, 400);
  __lsx_vst(in6_1, temp_buff, 416);
  __lsx_vst(in7_1, temp_buff, 432);

  /* 3rd and 4th set */
  input_tmp = input + (src_stride * 8);
  in0 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in1, in2);
  in3 = __lsx_vldx(input_tmp, stride3);

  input_tmp += stride2;
  in0_1 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in1_1, in2_1);
  in3_1 = __lsx_vldx(input_tmp, stride3);

  input_tmp += stride2;
  in4_1 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in5_1, in6_1);
  in7_1 = __lsx_vldx(input_tmp, stride3);

  input_tmp += stride2;
  in4 = __lsx_vld(input_tmp, 0);
  DUP2_ARG2(__lsx_vldx, input_tmp, stride, input_tmp, stride2, in5, in6);
  in7 = __lsx_vldx(input_tmp, stride3);
  DUP4_ARG2(__lsx_vslli_h, in0, 2, in1, 2, in2, 2, in3, 2, in0, in1, in2, in3);
  DUP4_ARG2(__lsx_vslli_h, in4, 2, in5, 2, in6, 2, in7, 2, in4, in5, in6, in7);
  DUP4_ARG2(__lsx_vslli_h, in0_1, 2, in1_1, 2, in2_1, 2, in3_1, 2, in0_1, in1_1,
            in2_1, in3_1);
  DUP4_ARG2(__lsx_vslli_h, in4_1, 2, in5_1, 2, in6_1, 2, in7_1, 2, in4_1, in5_1,
            in6_1, in7_1);

  LSX_BUTTERFLY_8_H(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,
                    step3, in4, in5, in6, in7);
  LSX_BUTTERFLY_8_H(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
                    step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1,
                    in7_1);

  __lsx_vst(step0, temp_buff, 128);
  __lsx_vst(step1, temp_buff, 144);
  __lsx_vst(step2, temp_buff, 160);
  __lsx_vst(step3, temp_buff, 176);

  __lsx_vst(in4, temp_buff, 320);
  __lsx_vst(in5, temp_buff, 336);
  __lsx_vst(in6, temp_buff, 352);
  __lsx_vst(in7, temp_buff, 368);

  __lsx_vst(step0_1, temp_buff, 192);
  __lsx_vst(step1_1, temp_buff, 208);
  __lsx_vst(step2_1, temp_buff, 224);
  __lsx_vst(step3_1, temp_buff, 240);

  __lsx_vst(in4_1, temp_buff, 256);
  __lsx_vst(in5_1, temp_buff, 272);
  __lsx_vst(in6_1, temp_buff, 288);
  __lsx_vst(in7_1, temp_buff, 304);
}

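/* Column-pass processing of the "sum" terms produced above: computes the 16
 * even-indexed coefficients of the 32-point transform.  A byte offset of 64
 * in 'temp' is one row (32 int16) of the 32x32 intermediate buffer, so the
 * stores below land in rows 0, 2, 4, ..., 30. */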
static void fdct8x32_1d_column_even_store(int16_t *input, int16_t *temp) {
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  __m128i in8, in9, in10, in11, in12, in13, in14, in15;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  __m128i temp0, temp1;

  /* fdct even */
  DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 32, input, 48, in0, in1, in2,
            in3);
  DUP4_ARG2(__lsx_vld, input, 192, input, 208, input, 224, input, 240, in12,
            in13, in14, in15);
  LSX_BUTTERFLY_8_H(in0, in1, in2, in3, in12, in13, in14, in15, vec0, vec1,
                    vec2, vec3, in12, in13, in14, in15);
  DUP4_ARG2(__lsx_vld, input, 64, input, 80, input, 96, input, 112, in4, in5,
            in6, in7);
  DUP4_ARG2(__lsx_vld, input, 128, input, 144, input, 160, input, 176, in8, in9,
            in10, in11);
  LSX_BUTTERFLY_8_H(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6,
                    vec7, in8, in9, in10, in11);

  /* Stage 3 */
  DUP4_ARG2(__lsx_vadd_h, vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0,
            in1, in2, in3);
  LSX_BUTTERFLY_4_H(in0, in1, in2, in3, temp0, in4, in1, in0);
  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 0);
  __lsx_vst(temp1, temp, 1024);

  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 512);
  __lsx_vst(temp1, temp, 1536);

  DUP4_ARG2(__lsx_vsub_h, vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7,
            vec6, vec5, vec4);
  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  DUP2_ARG2(__lsx_vadd_h, vec4, vec5, vec7, vec6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 256);
  __lsx_vst(temp1, temp, 1792);

  DUP2_ARG2(__lsx_vsub_h, vec4, vec5, vec7, vec6, vec4, vec7);
  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 1280);
  __lsx_vst(temp1, temp, 768);

  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  DUP4_ARG2(__lsx_vadd_h, in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0,
            vec1, vec6, in2);
  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  DUP2_ARG2(__lsx_vadd_h, in0, in1, in2, in3, vec0, vec7);
  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 128);
  __lsx_vst(temp1, temp, 1920);

  DUP2_ARG2(__lsx_vsub_h, in0, in1, in2, in3, in0, in2);
  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 1152);
  __lsx_vst(temp1, temp, 896);

  DUP2_ARG2(__lsx_vsub_h, in9, vec2, in14, vec5, vec2, vec5);
  temp0 = __lsx_vneg_h(vec2);
  DOTP_CONST_PAIR(temp0, vec5, cospi_24_64, cospi_8_64, in2, in1);
  DUP4_ARG2(__lsx_vsub_h, in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0,
            vec2, vec5);
  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 640);
  __lsx_vst(temp1, temp, 1408);

  DUP2_ARG2(__lsx_vadd_h, in3, in2, in0, in1, vec3, vec4);
  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
  FDCT32_POSTPROC_2V_POS_H(temp0, temp1);
  __lsx_vst(temp0, temp, 384);
  __lsx_vst(temp1, temp, 1664);
}

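/* Column-pass processing of the "difference" terms: computes the 16
 * odd-indexed coefficients.  The caller passes temp_ptr offset by one row,
 * so the stores land in rows 1, 3, ..., 31 of the 32x32 intermediate
 * buffer.  The input buffer is reused as scratch for partially rotated
 * terms. */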
static void fdct8x32_1d_column_odd_store(int16_t *input, int16_t *temp_ptr) {
  __m128i in16, in17, in18, in19, in20, in21, in22, in23;
  __m128i in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;
  __m128i tmp0, tmp1;

  DUP4_ARG2(__lsx_vld, input, 64, input, 80, input, 160, input, 176, in20, in21,
            in26, in27);

  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);

  DUP4_ARG2(__lsx_vld, input, 32, input, 48, input, 192, input, 208, in18, in19,
            in28, in29);

  vec4 = __lsx_vsub_h(in19, in20);
  __lsx_vst(vec4, input, 64);
  vec4 = __lsx_vsub_h(in18, in21);
  __lsx_vst(vec4, input, 80);
  vec4 = __lsx_vsub_h(in29, in26);
  __lsx_vst(vec4, input, 160);
  vec4 = __lsx_vsub_h(in28, in27);
  __lsx_vst(vec4, input, 176);

  in21 = __lsx_vadd_h(in18, in21);
  in20 = __lsx_vadd_h(in19, in20);
  in27 = __lsx_vadd_h(in28, in27);
  in26 = __lsx_vadd_h(in29, in26);

  DUP4_ARG2(__lsx_vld, input, 96, input, 112, input, 128, input, 144, in22,
            in23, in24, in25);
  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);

  DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 224, input, 240, in16, in17,
            in30, in31);

  vec4 = __lsx_vsub_h(in17, in22);
  __lsx_vst(vec4, input, 32);
  vec4 = __lsx_vsub_h(in16, in23);
  __lsx_vst(vec4, input, 48);
  vec4 = __lsx_vsub_h(in31, in24);
  __lsx_vst(vec4, input, 192);
  vec4 = __lsx_vsub_h(in30, in25);
  __lsx_vst(vec4, input, 208);

  DUP4_ARG2(__lsx_vadd_h, in16, in23, in17, in22, in30, in25, in31, in24, in16,
            in17, in30, in31);
  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
  DUP4_ARG2(__lsx_vadd_h, in16, in19, in17, in18, in30, in29, in31, in28, in27,
            in22, in21, in25);
  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
  DUP2_ARG2(__lsx_vadd_h, in27, in26, in25, in24, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec5, temp_ptr, 0);
  __lsx_vst(vec4, temp_ptr, 1920);

  DUP2_ARG2(__lsx_vsub_h, in27, in26, in25, in24, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec5, temp_ptr, 896);
  __lsx_vst(vec4, temp_ptr, 1024);

  DUP4_ARG2(__lsx_vsub_h, in17, in18, in16, in19, in31, in28, in30, in29, in23,
            in26, in24, in20);
  tmp0 = __lsx_vneg_h(in23);
  DOTP_CONST_PAIR(tmp0, in20, cospi_28_64, cospi_4_64, in27, in25);
  DUP2_ARG2(__lsx_vsub_h, in26, in27, in24, in25, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec4, temp_ptr, 1408);
  __lsx_vst(vec5, temp_ptr, 512);

  DUP2_ARG2(__lsx_vadd_h, in26, in27, in24, in25, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec4, temp_ptr, 384);
  __lsx_vst(vec5, temp_ptr, 1536);

  DUP4_ARG2(__lsx_vld, input, 32, input, 48, input, 64, input, 80, in22, in23,
            in20, in21);
  DUP4_ARG2(__lsx_vld, input, 160, input, 176, input, 192, input, 208, in26,
            in27, in24, in25);
  in16 = in20;
  in17 = in21;
  DUP2_ARG1(__lsx_vneg_h, in16, in17, tmp0, tmp1);
  DOTP_CONST_PAIR(tmp0, in27, cospi_24_64, cospi_8_64, in20, in27);
  DOTP_CONST_PAIR(tmp1, in26, cospi_24_64, cospi_8_64, in21, in26);
  DUP4_ARG2(__lsx_vsub_h, in23, in20, in22, in21, in25, in26, in24, in27, in28,
            in17, in18, in31);
  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
  DUP2_ARG2(__lsx_vadd_h, in28, in29, in31, in30, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec5, temp_ptr, 1664);
  __lsx_vst(vec4, temp_ptr, 256);

  DUP2_ARG2(__lsx_vsub_h, in28, in29, in31, in30, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec5, temp_ptr, 640);
  __lsx_vst(vec4, temp_ptr, 1280);

  DUP4_ARG2(__lsx_vadd_h, in22, in21, in23, in20, in24, in27, in25, in26, in16,
            in29, in30, in19);
  tmp0 = __lsx_vneg_h(in16);
  DOTP_CONST_PAIR(tmp0, in19, cospi_12_64, cospi_20_64, in28, in31);
  DUP2_ARG2(__lsx_vsub_h, in29, in28, in30, in31, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec5, temp_ptr, 1152);
  __lsx_vst(vec4, temp_ptr, 768);

  DUP2_ARG2(__lsx_vadd_h, in29, in28, in30, in31, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
  FDCT32_POSTPROC_2V_POS_H(vec5, vec4);
  __lsx_vst(vec5, temp_ptr, 128);
  __lsx_vst(vec4, temp_ptr, 1792);
}

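/* One column-pass call handles an 8-column slice: butterfly the 32 input
 * rows into tmp_buf (sums in the first 128 elements, differences in the
 * second 128), then emit the even and odd coefficient rows into the 32x32
 * intermediate buffer tmp_buf_big. */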
static void fdct8x32_1d_column(const int16_t *input, int32_t src_stride,
                               int16_t *tmp_buf, int16_t *tmp_buf_big) {
  fdct8x32_1d_column_load_butterfly(input, src_stride, tmp_buf);
  fdct8x32_1d_column_even_store(tmp_buf, tmp_buf_big);
  fdct8x32_1d_column_odd_store(tmp_buf + 128, (tmp_buf_big + 32));
}

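/* First stage of the row pass for one group of 8 rows: transpose the 8x8
 * tiles of the column-pass output and butterfly column c against column
 * 31 - c, writing the sums to the first half of 'output' and the
 * differences to the second half. */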
static void fdct8x32_1d_row_load_butterfly(int16_t *temp_buff,
                                           int16_t *output) {
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  __m128i in8, in9, in10, in11, in12, in13, in14, in15;
  __m128i step0, step1, step2, step3, step4, step5, step6, step7;

  DUP4_ARG2(__lsx_vld, temp_buff, 0, temp_buff, 64, temp_buff, 128, temp_buff,
            192, in0, in1, in2, in3);
  DUP4_ARG2(__lsx_vld, temp_buff, 256, temp_buff, 320, temp_buff, 384,
            temp_buff, 448, in4, in5, in6, in7);
  DUP4_ARG2(__lsx_vld, temp_buff, 48, temp_buff, 112, temp_buff, 176, temp_buff,
            240, in8, in9, in10, in11);
  DUP4_ARG2(__lsx_vld, temp_buff, 304, temp_buff, 368, temp_buff, 432,
            temp_buff, 496, in12, in13, in14, in15);
  LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  LSX_TRANSPOSE8x8_H(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
                     in10, in11, in12, in13, in14, in15);
  LSX_BUTTERFLY_16_H(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,
                     in11, in12, in13, in14, in15, step0, step1, step2, step3,
                     step4, step5, step6, step7, in8, in9, in10, in11, in12,
                     in13, in14, in15);

  __lsx_vst(step0, output, 0);
  __lsx_vst(step1, output, 16);
  __lsx_vst(step2, output, 32);
  __lsx_vst(step3, output, 48);
  __lsx_vst(step4, output, 64);
  __lsx_vst(step5, output, 80);
  __lsx_vst(step6, output, 96);
  __lsx_vst(step7, output, 112);

  __lsx_vst(in8, output, 384);
  __lsx_vst(in9, output, 400);
  __lsx_vst(in10, output, 416);
  __lsx_vst(in11, output, 432);
  __lsx_vst(in12, output, 448);
  __lsx_vst(in13, output, 464);
  __lsx_vst(in14, output, 480);
  __lsx_vst(in15, output, 496);

  /* 2nd set */
  DUP4_ARG2(__lsx_vld, temp_buff, 16, temp_buff, 80, temp_buff, 144, temp_buff,
            208, in0, in1, in2, in3);
  DUP4_ARG2(__lsx_vld, temp_buff, 272, temp_buff, 336, temp_buff, 400,
            temp_buff, 464, in4, in5, in6, in7);
  DUP4_ARG2(__lsx_vld, temp_buff, 32, temp_buff, 96, temp_buff, 160, temp_buff,
            224, in8, in9, in10, in11);
  DUP4_ARG2(__lsx_vld, temp_buff, 288, temp_buff, 352, temp_buff, 416,
            temp_buff, 480, in12, in13, in14, in15);
  LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  LSX_TRANSPOSE8x8_H(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
                     in10, in11, in12, in13, in14, in15);
  LSX_BUTTERFLY_16_H(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,
                     in11, in12, in13, in14, in15, step0, step1, step2, step3,
                     step4, step5, step6, step7, in8, in9, in10, in11, in12,
                     in13, in14, in15);

  __lsx_vst(step0, output, 128);
  __lsx_vst(step1, output, 144);
  __lsx_vst(step2, output, 160);
  __lsx_vst(step3, output, 176);
  __lsx_vst(step4, output, 192);
  __lsx_vst(step5, output, 208);
  __lsx_vst(step6, output, 224);
  __lsx_vst(step7, output, 240);

  __lsx_vst(in8, output, 256);
  __lsx_vst(in9, output, 272);
  __lsx_vst(in10, output, 288);
  __lsx_vst(in11, output, 304);
  __lsx_vst(in12, output, 320);
  __lsx_vst(in13, output, 336);
  __lsx_vst(in14, output, 352);
  __lsx_vst(in15, output, 368);
}

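/* Even half of the row pass for the first group of 8 rows.  The stage-3
 * butterfly and the first two coefficient pairs are computed in 32-bit
 * precision (UNPCK_SH_SW / DOTP_CONST_PAIR_W) before being packed back to
 * int16; the remaining outputs use the 16-bit path. */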
static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr,
                                    int16_t *out) {
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  __m128i in8, in9, in10, in11, in12, in13, in14, in15;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  __m128i vec0_l, vec1_l, vec2_l, vec3_l, vec4_l, vec5_l, vec6_l, vec7_l;
  __m128i vec0_r, vec1_r, vec2_r, vec3_r, vec4_r, vec5_r, vec6_r, vec7_r;
  __m128i tmp0_w, tmp1_w, tmp2_w, tmp3_w;

  /* fdct32 even */
  /* stage 2 */
  DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 32, input, 48, in0, in1, in2,
            in3);
  DUP4_ARG2(__lsx_vld, input, 64, input, 80, input, 96, input, 112, in4, in5,
            in6, in7);
  DUP4_ARG2(__lsx_vld, input, 128, input, 144, input, 160, input, 176, in8, in9,
            in10, in11);
  DUP4_ARG2(__lsx_vld, input, 192, input, 208, input, 224, input, 240, in12,
            in13, in14, in15);

  LSX_BUTTERFLY_16_H(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,
                     in11, in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4,
                     vec5, vec6, vec7, in8, in9, in10, in11, in12, in13, in14,
                     in15);

  __lsx_vst(vec0, interm_ptr, 0);
  __lsx_vst(vec1, interm_ptr, 16);
  __lsx_vst(vec2, interm_ptr, 32);
  __lsx_vst(vec3, interm_ptr, 48);
  __lsx_vst(vec4, interm_ptr, 64);
  __lsx_vst(vec5, interm_ptr, 80);
  __lsx_vst(vec6, interm_ptr, 96);
  __lsx_vst(vec7, interm_ptr, 112);

  __lsx_vst(in8, interm_ptr, 128);
  __lsx_vst(in9, interm_ptr, 144);
  __lsx_vst(in10, interm_ptr, 160);
  __lsx_vst(in11, interm_ptr, 176);
  __lsx_vst(in12, interm_ptr, 192);
  __lsx_vst(in13, interm_ptr, 208);
  __lsx_vst(in14, interm_ptr, 224);
  __lsx_vst(in15, interm_ptr, 240);

  /* Stage 3 */
  UNPCK_SH_SW(vec0, vec0_l, vec0_r);
  UNPCK_SH_SW(vec1, vec1_l, vec1_r);
  UNPCK_SH_SW(vec2, vec2_l, vec2_r);
  UNPCK_SH_SW(vec3, vec3_l, vec3_r);
  UNPCK_SH_SW(vec4, vec4_l, vec4_r);
  UNPCK_SH_SW(vec5, vec5_l, vec5_r);
  UNPCK_SH_SW(vec6, vec6_l, vec6_r);
  UNPCK_SH_SW(vec7, vec7_l, vec7_r);
  DUP4_ARG2(__lsx_vadd_w, vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r,
            vec3_r, vec4_r, tmp0_w, tmp1_w, tmp2_w, tmp3_w);
  LSX_BUTTERFLY_4_W(tmp0_w, tmp1_w, tmp2_w, tmp3_w, vec4_r, vec6_r, vec7_r,
                    vec5_r);
  DUP4_ARG2(__lsx_vadd_w, vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l,
            vec3_l, vec4_l, vec0_r, vec1_r, vec2_r, vec3_r);

  tmp3_w = __lsx_vadd_w(vec0_r, vec3_r);
  vec0_r = __lsx_vsub_w(vec0_r, vec3_r);
  vec3_r = __lsx_vadd_w(vec1_r, vec2_r);
  vec1_r = __lsx_vsub_w(vec1_r, vec2_r);

  DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64, cospi_16_64,
                    vec4_r, tmp3_w, vec6_r, vec3_r);
  FDCT32_POSTPROC_NEG_W(vec4_r);
  FDCT32_POSTPROC_NEG_W(tmp3_w);
  FDCT32_POSTPROC_NEG_W(vec6_r);
  FDCT32_POSTPROC_NEG_W(vec3_r);
  DUP2_ARG2(__lsx_vpickev_h, vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
  __lsx_vst(vec5, out, 0);
  __lsx_vst(vec4, out, 16);

  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64,
                    vec4_r, tmp3_w, vec6_r, vec3_r);
  FDCT32_POSTPROC_NEG_W(vec4_r);
  FDCT32_POSTPROC_NEG_W(tmp3_w);
  FDCT32_POSTPROC_NEG_W(vec6_r);
  FDCT32_POSTPROC_NEG_W(vec3_r);
  DUP2_ARG2(__lsx_vpickev_h, vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5);
  __lsx_vst(vec5, out, 32);
  __lsx_vst(vec4, out, 48);

  DUP4_ARG2(__lsx_vld, interm_ptr, 0, interm_ptr, 16, interm_ptr, 32,
            interm_ptr, 48, vec0, vec1, vec2, vec3);
  DUP4_ARG2(__lsx_vld, interm_ptr, 64, interm_ptr, 80, interm_ptr, 96,
            interm_ptr, 112, vec4, vec5, vec6, vec7);
  DUP4_ARG2(__lsx_vsub_h, vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4,
            vec5, vec6, vec7);
  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  DUP2_ARG2(__lsx_vadd_h, vec4, vec5, vec7, vec6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  __lsx_vst(in4, out, 64);
  __lsx_vst(in5, out, 112);

  DUP2_ARG2(__lsx_vsub_h, vec4, vec5, vec7, vec6, vec4, vec7);
  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  __lsx_vst(in4, out, 80);
  __lsx_vst(in5, out, 96);

  DUP4_ARG2(__lsx_vld, interm_ptr, 128, interm_ptr, 144, interm_ptr, 160,
            interm_ptr, 176, in8, in9, in10, in11);
  DUP4_ARG2(__lsx_vld, interm_ptr, 192, interm_ptr, 208, interm_ptr, 224,
            interm_ptr, 240, in12, in13, in14, in15);
  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  DUP4_ARG2(__lsx_vadd_h, in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0,
            vec1, vec6, in2);
  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  DUP2_ARG2(__lsx_vadd_h, in0, in1, in2, in3, vec0, vec7);
  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  __lsx_vst(in4, out, 128);
  __lsx_vst(in5, out, 240);

  DUP2_ARG2(__lsx_vsub_h, in0, in1, in2, in3, in0, in2);
  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  __lsx_vst(in4, out, 144);
  __lsx_vst(in5, out, 224);

  DUP2_ARG2(__lsx_vsub_h, in9, vec2, in14, vec5, vec2, vec5);
  tmp0_w = __lsx_vneg_h(vec2);
  DOTP_CONST_PAIR(tmp0_w, vec5, cospi_24_64, cospi_8_64, in2, in1);
  DUP4_ARG2(__lsx_vsub_h, in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0,
            vec2, vec5);
  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, in5, in4);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  __lsx_vst(in4, out, 160);
  __lsx_vst(in5, out, 208);

  DUP2_ARG2(__lsx_vadd_h, in3, in2, in0, in1, vec3, vec4);
  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, in4, in5);
  FDCT_POSTPROC_2V_NEG_H(in4, in5);
  __lsx_vst(in4, out, 192);
  __lsx_vst(in5, out, 176);
}

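/* Even half of the row pass, 16-bit only; used by fdct32x8_1d_row for the
 * remaining row groups (the first group goes through the higher-precision
 * _4x variant above). */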
static void fdct8x32_1d_row_even(int16_t *temp, int16_t *out) {
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  __m128i in8, in9, in10, in11, in12, in13, in14, in15;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;

  /* fdct32 even */
  /* stage 2 */
  DUP4_ARG2(__lsx_vld, temp, 0, temp, 16, temp, 32, temp, 48, in0, in1, in2,
            in3);
  DUP4_ARG2(__lsx_vld, temp, 64, temp, 80, temp, 96, temp, 112, in4, in5, in6,
            in7);
  DUP4_ARG2(__lsx_vld, temp, 128, temp, 144, temp, 160, temp, 176, in8, in9,
            in10, in11);
  DUP4_ARG2(__lsx_vld, temp, 192, temp, 208, temp, 224, temp, 240, in12, in13,
            in14, in15);

  LSX_BUTTERFLY_16_H(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,
                     in11, in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4,
                     vec5, vec6, vec7, in8, in9, in10, in11, in12, in13, in14,
                     in15);
  /* Stage 3 */
  DUP4_ARG2(__lsx_vadd_h, vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0,
            in1, in2, in3);
  LSX_BUTTERFLY_4_H(in0, in1, in2, in3, temp0, in4, in1, in0);
  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 0);
  __lsx_vst(temp1, out, 16);

  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 32);
  __lsx_vst(temp1, out, 48);

  DUP4_ARG2(__lsx_vsub_h, vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4,
            vec5, vec6, vec7);
  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  DUP2_ARG2(__lsx_vadd_h, vec4, vec5, vec7, vec6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 64);
  __lsx_vst(temp1, out, 112);

  DUP2_ARG2(__lsx_vsub_h, vec4, vec5, vec7, vec6, vec4, vec7);
  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 80);
  __lsx_vst(temp1, out, 96);

  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  DUP4_ARG2(__lsx_vadd_h, in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0,
            vec1, vec6, in2);
  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  DUP2_ARG2(__lsx_vadd_h, in0, in1, in2, in3, vec0, vec7);
  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 128);
  __lsx_vst(temp1, out, 240);

  DUP2_ARG2(__lsx_vsub_h, in0, in1, in2, in3, in0, in2);
  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 144);
  __lsx_vst(temp1, out, 224);

  DUP2_ARG2(__lsx_vsub_h, in9, vec2, in14, vec5, vec2, vec5);
  temp0 = __lsx_vneg_h(vec2);
  DOTP_CONST_PAIR(temp0, vec5, cospi_24_64, cospi_8_64, in2, in1);
  DUP4_ARG2(__lsx_vsub_h, in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0,
            vec2, vec5);
  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 160);
  __lsx_vst(temp1, out, 208);

  DUP2_ARG2(__lsx_vadd_h, in3, in2, in0, in1, vec3, vec4);
  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
  FDCT_POSTPROC_2V_NEG_H(temp0, temp1);
  __lsx_vst(temp0, out, 192);
  __lsx_vst(temp1, out, 176);
}

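/* Odd half of the row pass: consumes the difference terms produced by the
 * load butterfly (in 'temp') and uses interm_ptr as scratch for partially
 * rotated values before producing the 16 odd-indexed outputs. */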
static void fdct8x32_1d_row_odd(int16_t *temp, int16_t *interm_ptr,
                                int16_t *out) {
  __m128i in16, in17, in18, in19, in20, in21, in22, in23;
  __m128i in24, in25, in26, in27, in28, in29, in30, in31, vec4, vec5;
  __m128i tmp0, tmp1;

  in20 = __lsx_vld(temp, 64);
  in21 = __lsx_vld(temp, 80);
  in26 = __lsx_vld(temp, 160);
  in27 = __lsx_vld(temp, 176);

  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);

  in18 = __lsx_vld(temp, 32);
  in19 = __lsx_vld(temp, 48);
  in28 = __lsx_vld(temp, 192);
  in29 = __lsx_vld(temp, 208);

  vec4 = __lsx_vsub_h(in19, in20);
  __lsx_vst(vec4, interm_ptr, 64);
  vec4 = __lsx_vsub_h(in18, in21);
  __lsx_vst(vec4, interm_ptr, 176);
  vec4 = __lsx_vsub_h(in28, in27);
  __lsx_vst(vec4, interm_ptr, 112);
  vec4 = __lsx_vsub_h(in29, in26);
  __lsx_vst(vec4, interm_ptr, 128);

  DUP4_ARG2(__lsx_vadd_h, in18, in21, in19, in20, in28, in27, in29, in26, in21,
            in20, in27, in26);

  in22 = __lsx_vld(temp, 96);
  in23 = __lsx_vld(temp, 112);
  in24 = __lsx_vld(temp, 128);
  in25 = __lsx_vld(temp, 144);

  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);

  in16 = __lsx_vld(temp, 0);
  in17 = __lsx_vld(temp, 16);
  in30 = __lsx_vld(temp, 224);
  in31 = __lsx_vld(temp, 240);

  vec4 = __lsx_vsub_h(in17, in22);
  __lsx_vst(vec4, interm_ptr, 80);
  vec4 = __lsx_vsub_h(in30, in25);
  __lsx_vst(vec4, interm_ptr, 96);
  vec4 = __lsx_vsub_h(in31, in24);
  __lsx_vst(vec4, interm_ptr, 144);
  vec4 = __lsx_vsub_h(in16, in23);
  __lsx_vst(vec4, interm_ptr, 160);

  DUP4_ARG2(__lsx_vadd_h, in16, in23, in17, in22, in30, in25, in31, in24, in16,
            in17, in30, in31);
  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);

  DUP4_ARG2(__lsx_vadd_h, in16, in19, in17, in18, in30, in29, in31, in28, in27,
            in22, in21, in25);
  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
  DUP2_ARG2(__lsx_vadd_h, in27, in26, in25, in24, in23, in20);

  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec5, out, 0);
  __lsx_vst(vec4, out, 240);

  DUP2_ARG2(__lsx_vsub_h, in27, in26, in25, in24, in22, in21);

  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec5, out, 224);
  __lsx_vst(vec4, out, 16);

  DUP4_ARG2(__lsx_vsub_h, in17, in18, in16, in19, in31, in28, in30, in29, in23,
            in26, in24, in20);
  tmp0 = __lsx_vneg_h(in23);
  DOTP_CONST_PAIR(tmp0, in20, cospi_28_64, cospi_4_64, in27, in25);
  DUP2_ARG2(__lsx_vsub_h, in26, in27, in24, in25, in23, in20);

  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec4, out, 32);
  __lsx_vst(vec5, out, 208);

  DUP2_ARG2(__lsx_vadd_h, in26, in27, in24, in25, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec4, out, 48);
  __lsx_vst(vec5, out, 192);

  in20 = __lsx_vld(interm_ptr, 64);
  in21 = __lsx_vld(interm_ptr, 176);
  in27 = __lsx_vld(interm_ptr, 112);
  in26 = __lsx_vld(interm_ptr, 128);

  in16 = in20;
  in17 = in21;
  DUP2_ARG1(__lsx_vneg_h, in16, in17, tmp0, tmp1);
  DOTP_CONST_PAIR(tmp0, in27, cospi_24_64, cospi_8_64, in20, in27);
  DOTP_CONST_PAIR(tmp1, in26, cospi_24_64, cospi_8_64, in21, in26);

  in22 = __lsx_vld(interm_ptr, 80);
  in25 = __lsx_vld(interm_ptr, 96);
  in24 = __lsx_vld(interm_ptr, 144);
  in23 = __lsx_vld(interm_ptr, 160);

  DUP4_ARG2(__lsx_vsub_h, in23, in20, in22, in21, in25, in26, in24, in27, in28,
            in17, in18, in31);
  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
  DUP2_ARG2(__lsx_vadd_h, in28, in29, in31, in30, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec5, out, 64);
  __lsx_vst(vec4, out, 176);

  DUP2_ARG2(__lsx_vsub_h, in28, in29, in31, in30, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec5, out, 80);
  __lsx_vst(vec4, out, 160);

  DUP4_ARG2(__lsx_vadd_h, in22, in21, in23, in20, in24, in27, in25, in26, in16,
            in29, in30, in19);
  tmp0 = __lsx_vneg_h(in16);
  DOTP_CONST_PAIR(tmp0, in19, cospi_12_64, cospi_20_64, in28, in31);
  DUP2_ARG2(__lsx_vsub_h, in29, in28, in30, in31, in16, in19);

  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec5, out, 144);
  __lsx_vst(vec4, out, 96);

  DUP2_ARG2(__lsx_vadd_h, in29, in28, in30, in31, in17, in18);

  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
  FDCT_POSTPROC_2V_NEG_H(vec5, vec4);
  __lsx_vst(vec4, out, 112);
  __lsx_vst(vec5, out, 128);
}

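/* Final step of the row pass: transpose the finished 8x32 result back,
 * one 8x8 tile at a time, and store it to the output block. */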
static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) {
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  __m128i in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1;

  /* 1st set */
  in0 = __lsx_vld(temp, 0);
  in4 = __lsx_vld(temp, 64);
  in2 = __lsx_vld(temp, 128);
  in6 = __lsx_vld(temp, 192);
  in1 = __lsx_vld(temp, 256);
  in7 = __lsx_vld(temp, 304);
  in3 = __lsx_vld(temp, 384);
  in5 = __lsx_vld(temp, 432);

  LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);

  /* 2nd set */
  in0_1 = __lsx_vld(temp, 32);
  in1_1 = __lsx_vld(temp, 464);
  in2_1 = __lsx_vld(temp, 160);
  in3_1 = __lsx_vld(temp, 336);
  in4_1 = __lsx_vld(temp, 96);
  in5_1 = __lsx_vld(temp, 352);
  in6_1 = __lsx_vld(temp, 224);
  in7_1 = __lsx_vld(temp, 480);

  __lsx_vst(in0, output, 0);
  __lsx_vst(in1, output, 64);
  __lsx_vst(in2, output, 128);
  __lsx_vst(in3, output, 192);
  __lsx_vst(in4, output, 256);
  __lsx_vst(in5, output, 320);
  __lsx_vst(in6, output, 384);
  __lsx_vst(in7, output, 448);

  LSX_TRANSPOSE8x8_H(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);

  /* 3rd set */
  in0 = __lsx_vld(temp, 16);
  in1 = __lsx_vld(temp, 272);
  in2 = __lsx_vld(temp, 144);
  in3 = __lsx_vld(temp, 400);
  in4 = __lsx_vld(temp, 80);
  in5 = __lsx_vld(temp, 416);
  in6 = __lsx_vld(temp, 208);
  in7 = __lsx_vld(temp, 288);

  __lsx_vst(in0_1, output, 16);
  __lsx_vst(in1_1, output, 80);
  __lsx_vst(in2_1, output, 144);
  __lsx_vst(in3_1, output, 208);
  __lsx_vst(in4_1, output, 272);
  __lsx_vst(in5_1, output, 336);
  __lsx_vst(in6_1, output, 400);
  __lsx_vst(in7_1, output, 464);

  LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);

  __lsx_vst(in0, output, 32);
  __lsx_vst(in1, output, 96);
  __lsx_vst(in2, output, 160);
  __lsx_vst(in3, output, 224);
  __lsx_vst(in4, output, 288);
  __lsx_vst(in5, output, 352);
  __lsx_vst(in6, output, 416);
  __lsx_vst(in7, output, 480);

  /* 4th set */
  in0_1 = __lsx_vld(temp, 48);
  in1_1 = __lsx_vld(temp, 448);
  in2_1 = __lsx_vld(temp, 176);
  in3_1 = __lsx_vld(temp, 320);
  in4_1 = __lsx_vld(temp, 112);
  in5_1 = __lsx_vld(temp, 368);
  in6_1 = __lsx_vld(temp, 240);
  in7_1 = __lsx_vld(temp, 496);

  LSX_TRANSPOSE8x8_H(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,
                     in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1);

  __lsx_vst(in0_1, output, 48);
  __lsx_vst(in1_1, output, 112);
  __lsx_vst(in2_1, output, 176);
  __lsx_vst(in3_1, output, 240);
  __lsx_vst(in4_1, output, 304);
  __lsx_vst(in5_1, output, 368);
  __lsx_vst(in6_1, output, 432);
  __lsx_vst(in7_1, output, 496);
}

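/* Row pass for one group of 8 rows of the column-pass output: load and
 * butterfly, even and odd halves, then transpose and store.  The odd half
 * reuses 'temp' as scratch. */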
static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf, int16_t *output) {
  fdct8x32_1d_row_load_butterfly(temp, temp_buf);
  fdct8x32_1d_row_even(temp_buf, temp_buf);
  fdct8x32_1d_row_odd(temp_buf + 128, temp, temp_buf + 128);
  fdct8x32_1d_row_transpose_store(temp_buf, output);
}

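/* Same as fdct32x8_1d_row, but routes the even half through the
 * higher-precision fdct8x32_1d_row_even_4x path; used for the first row
 * group of vpx_fdct32x32_lsx. */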
static void fdct32x8_1d_row_4x(int16_t *tmp_buf_big, int16_t *tmp_buf,
                               int16_t *output) {
  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
  fdct8x32_1d_row_even_4x(tmp_buf, tmp_buf_big, tmp_buf);
  fdct8x32_1d_row_odd(tmp_buf + 128, tmp_buf_big, tmp_buf + 128);
  fdct8x32_1d_row_transpose_store(tmp_buf, output);
}

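/* 32x32 forward DCT: four column-pass calls (8 columns each) fill the 32x32
 * intermediate buffer, then four row-pass calls (8 rows each) produce the
 * final coefficients.  Only the first row group uses the 32-bit-precision
 * even path. */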
void vpx_fdct32x32_lsx(const int16_t *input, int16_t *output,
                       int32_t src_stride) {
  int i;
  DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
  DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);

  /* column transform */
  for (i = 0; i < 4; ++i) {
    fdct8x32_1d_column(input + (8 * i), src_stride, tmp_buf,
                       tmp_buf_big + (8 * i));
  }

  /* row transform for the first 8 rows (higher-precision even path) */
  fdct32x8_1d_row_4x(tmp_buf_big, tmp_buf, output);

  /* row transform for the remaining 24 rows */
  for (i = 1; i < 4; ++i) {
    fdct32x8_1d_row(tmp_buf_big + (i * 256), tmp_buf, output + (i * 256));
  }
}

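/* "rd" variant of the even row half, used by vpx_fdct32x32_rd_lsx: the
 * rounding post-processing is applied once to the stage-2 butterfly outputs
 * up front, and the per-coefficient post-processing of the non-rd path is
 * skipped. */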
static void fdct8x32_1d_row_even_rd(int16_t *temp, int16_t *out) {
  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
  __m128i in8, in9, in10, in11, in12, in13, in14, in15;
  __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, temp0, temp1;

  /* fdct32 even */
  /* stage 2 */
  DUP4_ARG2(__lsx_vld, temp, 0, temp, 16, temp, 32, temp, 48, in0, in1, in2,
            in3);
  DUP4_ARG2(__lsx_vld, temp, 64, temp, 80, temp, 96, temp, 112, in4, in5, in6,
            in7);
  DUP4_ARG2(__lsx_vld, temp, 128, temp, 144, temp, 160, temp, 176, in8, in9,
            in10, in11);
  DUP4_ARG2(__lsx_vld, temp, 192, temp, 208, temp, 224, temp, 240, in12, in13,
            in14, in15);
  LSX_BUTTERFLY_16_H(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,
                     in11, in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4,
                     vec5, vec6, vec7, in8, in9, in10, in11, in12, in13, in14,
                     in15);

  FDCT_POSTPROC_2V_NEG_H(vec0, vec1);
  FDCT_POSTPROC_2V_NEG_H(vec2, vec3);
  FDCT_POSTPROC_2V_NEG_H(vec4, vec5);
  FDCT_POSTPROC_2V_NEG_H(vec6, vec7);
  FDCT_POSTPROC_2V_NEG_H(in8, in9);
  FDCT_POSTPROC_2V_NEG_H(in10, in11);
  FDCT_POSTPROC_2V_NEG_H(in12, in13);
  FDCT_POSTPROC_2V_NEG_H(in14, in15);

  /* Stage 3 */
  DUP4_ARG2(__lsx_vadd_h, vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0,
            in1, in2, in3);

  temp0 = __lsx_vadd_h(in0, in3);
  in0 = __lsx_vsub_h(in0, in3);
  in3 = __lsx_vadd_h(in1, in2);
  in1 = __lsx_vsub_h(in1, in2);

  DOTP_CONST_PAIR(temp0, in3, cospi_16_64, cospi_16_64, temp1, temp0);
  __lsx_vst(temp0, out, 0);
  __lsx_vst(temp1, out, 16);

  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
  __lsx_vst(temp0, out, 32);
  __lsx_vst(temp1, out, 48);

  DUP4_ARG2(__lsx_vsub_h, vec3, vec4, vec2, vec5, vec1, vec6, vec0, vec7, vec4,
            vec5, vec6, vec7);
  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
  DUP2_ARG2(__lsx_vadd_h, vec4, vec5, vec7, vec6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  __lsx_vst(temp0, out, 64);
  __lsx_vst(temp1, out, 112);

  DUP2_ARG2(__lsx_vsub_h, vec4, vec5, vec7, vec6, vec4, vec7);
  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
  __lsx_vst(temp0, out, 80);
  __lsx_vst(temp1, out, 96);

  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
  DUP4_ARG2(__lsx_vadd_h, in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0,
            vec1, vec6, in2);
  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  DUP2_ARG2(__lsx_vadd_h, in0, in1, in2, in3, vec0, vec7);
  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);
  __lsx_vst(temp0, out, 128);
  __lsx_vst(temp1, out, 240);

  DUP2_ARG2(__lsx_vsub_h, in0, in1, in2, in3, in0, in2);
  DOTP_CONST_PAIR(in2, in0, cospi_14_64, cospi_18_64, temp1, temp0);
  __lsx_vst(temp0, out, 144);
  __lsx_vst(temp1, out, 224);

  DUP2_ARG2(__lsx_vsub_h, in9, vec2, in14, vec5, vec2, vec5);
  temp0 = __lsx_vneg_h(vec2);
  DOTP_CONST_PAIR(temp0, vec5, cospi_24_64, cospi_8_64, in2, in1);
  DUP4_ARG2(__lsx_vsub_h, in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0,
            vec2, vec5);
  DOTP_CONST_PAIR(vec5, vec2, cospi_22_64, cospi_10_64, temp1, temp0);
  __lsx_vst(temp0, out, 160);
  __lsx_vst(temp1, out, 208);

  DUP2_ARG2(__lsx_vadd_h, in3, in2, in0, in1, vec3, vec4);
  DOTP_CONST_PAIR(vec4, vec3, cospi_6_64, cospi_26_64, temp0, temp1);
  __lsx_vst(temp0, out, 192);
  __lsx_vst(temp1, out, 176);
}

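/* "rd" variant of the odd row half: as above, the inputs are rounded right
 * after the initial rotations and the per-output post-processing is
 * skipped. */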
static void fdct8x32_1d_row_odd_rd(int16_t *temp, int16_t *interm_ptr,
                                   int16_t *out) {
  __m128i in16, in17, in18, in19, in20, in21, in22, in23;
  __m128i in24, in25, in26, in27, in28, in29, in30, in31;
  __m128i vec4, vec5, tmp0, tmp1;

  in20 = __lsx_vld(temp, 64);
  in21 = __lsx_vld(temp, 80);
  in26 = __lsx_vld(temp, 160);
  in27 = __lsx_vld(temp, 176);

  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);
  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);

  FDCT_POSTPROC_2V_NEG_H(in20, in21);
  FDCT_POSTPROC_2V_NEG_H(in26, in27);

  in18 = __lsx_vld(temp, 32);
  in19 = __lsx_vld(temp, 48);
  in28 = __lsx_vld(temp, 192);
  in29 = __lsx_vld(temp, 208);

  FDCT_POSTPROC_2V_NEG_H(in18, in19);
  FDCT_POSTPROC_2V_NEG_H(in28, in29);

  vec4 = __lsx_vsub_h(in19, in20);
  __lsx_vst(vec4, interm_ptr, 64);
  vec4 = __lsx_vsub_h(in18, in21);
  __lsx_vst(vec4, interm_ptr, 176);
  vec4 = __lsx_vsub_h(in29, in26);
  __lsx_vst(vec4, interm_ptr, 128);
  vec4 = __lsx_vsub_h(in28, in27);
  __lsx_vst(vec4, interm_ptr, 112);

  DUP4_ARG2(__lsx_vadd_h, in18, in21, in19, in20, in28, in27, in29, in26, in21,
            in20, in27, in26);

  in22 = __lsx_vld(temp, 96);
  in23 = __lsx_vld(temp, 112);
  in24 = __lsx_vld(temp, 128);
  in25 = __lsx_vld(temp, 144);

  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);
  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);
  FDCT_POSTPROC_2V_NEG_H(in22, in23);
  FDCT_POSTPROC_2V_NEG_H(in24, in25);

  in16 = __lsx_vld(temp, 0);
  in17 = __lsx_vld(temp, 16);
  in30 = __lsx_vld(temp, 224);
  in31 = __lsx_vld(temp, 240);

  FDCT_POSTPROC_2V_NEG_H(in16, in17);
  FDCT_POSTPROC_2V_NEG_H(in30, in31);

  vec4 = __lsx_vsub_h(in17, in22);
  __lsx_vst(vec4, interm_ptr, 80);
  vec4 = __lsx_vsub_h(in30, in25);
  __lsx_vst(vec4, interm_ptr, 96);
  vec4 = __lsx_vsub_h(in31, in24);
  __lsx_vst(vec4, interm_ptr, 144);
  vec4 = __lsx_vsub_h(in16, in23);
  __lsx_vst(vec4, interm_ptr, 160);

  DUP4_ARG2(__lsx_vadd_h, in16, in23, in17, in22, in30, in25, in31, in24, in16,
            in17, in30, in31);
  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
  DUP4_ARG2(__lsx_vadd_h, in16, in19, in17, in18, in30, in29, in31, in28, in27,
            in22, in21, in25);
  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
  DUP2_ARG2(__lsx_vadd_h, in27, in26, in25, in24, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_31_64, cospi_1_64, vec4, vec5);
  __lsx_vst(vec5, out, 0);
  __lsx_vst(vec4, out, 240);

  DUP2_ARG2(__lsx_vsub_h, in27, in26, in25, in24, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_15_64, cospi_17_64, vec5, vec4);
  __lsx_vst(vec5, out, 224);
  __lsx_vst(vec4, out, 16);

  DUP4_ARG2(__lsx_vsub_h, in17, in18, in16, in19, in31, in28, in30, in29, in23,
            in26, in24, in20);
  tmp0 = __lsx_vneg_h(in23);
  DOTP_CONST_PAIR(tmp0, in20, cospi_28_64, cospi_4_64, in27, in25);
  DUP2_ARG2(__lsx_vsub_h, in26, in27, in24, in25, in23, in20);
  DOTP_CONST_PAIR(in20, in23, cospi_23_64, cospi_9_64, vec4, vec5);
  __lsx_vst(vec4, out, 32);
  __lsx_vst(vec5, out, 208);

  DUP2_ARG2(__lsx_vadd_h, in26, in27, in24, in25, in22, in21);
  DOTP_CONST_PAIR(in21, in22, cospi_7_64, cospi_25_64, vec4, vec5);
  __lsx_vst(vec4, out, 48);
  __lsx_vst(vec5, out, 192);

  in20 = __lsx_vld(interm_ptr, 64);
  in21 = __lsx_vld(interm_ptr, 176);
  in27 = __lsx_vld(interm_ptr, 112);
  in26 = __lsx_vld(interm_ptr, 128);

  in16 = in20;
  in17 = in21;
  DUP2_ARG1(__lsx_vneg_h, in16, in17, tmp0, tmp1);
  DOTP_CONST_PAIR(tmp0, in27, cospi_24_64, cospi_8_64, in20, in27);
  DOTP_CONST_PAIR(tmp1, in26, cospi_24_64, cospi_8_64, in21, in26);

  in22 = __lsx_vld(interm_ptr, 80);
  in25 = __lsx_vld(interm_ptr, 96);
  in24 = __lsx_vld(interm_ptr, 144);
  in23 = __lsx_vld(interm_ptr, 160);

  DUP4_ARG2(__lsx_vsub_h, in23, in20, in22, in21, in25, in26, in24, in27, in28,
            in17, in18, in31);
  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
  in16 = __lsx_vadd_h(in28, in29);
  in19 = __lsx_vadd_h(in31, in30);
  DOTP_CONST_PAIR(in19, in16, cospi_27_64, cospi_5_64, vec5, vec4);
  __lsx_vst(vec5, out, 64);
  __lsx_vst(vec4, out, 176);

  DUP2_ARG2(__lsx_vsub_h, in28, in29, in31, in30, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_11_64, cospi_21_64, vec5, vec4);
  __lsx_vst(vec5, out, 80);
  __lsx_vst(vec4, out, 160);

  DUP4_ARG2(__lsx_vadd_h, in22, in21, in23, in20, in24, in27, in25, in26, in16,
            in29, in30, in19);
  tmp0 = __lsx_vneg_h(in16);
  DOTP_CONST_PAIR(tmp0, in19, cospi_12_64, cospi_20_64, in28, in31);
  DUP2_ARG2(__lsx_vsub_h, in29, in28, in30, in31, in16, in19);
  DOTP_CONST_PAIR(in19, in16, cospi_19_64, cospi_13_64, vec5, vec4);
  __lsx_vst(vec5, out, 144);
  __lsx_vst(vec4, out, 96);

  DUP2_ARG2(__lsx_vadd_h, in29, in28, in30, in31, in17, in18);
  DOTP_CONST_PAIR(in18, in17, cospi_3_64, cospi_29_64, vec5, vec4);
  __lsx_vst(vec4, out, 112);
  __lsx_vst(vec5, out, 128);
}

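/* Row pass for one group of 8 rows in the rd transform. */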
static void fdct32x8_1d_row_rd(int16_t *tmp_buf_big, int16_t *tmp_buf,
                               int16_t *output) {
  fdct8x32_1d_row_load_butterfly(tmp_buf_big, tmp_buf);
  fdct8x32_1d_row_even_rd(tmp_buf, tmp_buf);
  fdct8x32_1d_row_odd_rd((tmp_buf + 128), tmp_buf_big, (tmp_buf + 128));
  fdct8x32_1d_row_transpose_store(tmp_buf, output);
}

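/* Reduced-precision "_rd" flavour of the 32x32 forward DCT: same column
 * pass as vpx_fdct32x32_lsx, but every row group goes through the rd row
 * path. */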
void vpx_fdct32x32_rd_lsx(const int16_t *input, int16_t *out,
                          int32_t src_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]);
  DECLARE_ALIGNED(32, int16_t, tmp_buf[256]);

  /* column transform */
  for (i = 0; i < 4; ++i) {
    fdct8x32_1d_column(input + (8 * i), src_stride, &tmp_buf[0],
                       &tmp_buf_big[0] + (8 * i));
  }
  /* row transform */
  for (i = 0; i < 4; ++i) {
    fdct32x8_1d_row_rd(&tmp_buf_big[0] + (8 * i * 32), &tmp_buf[0],
                       out + (8 * i * 32));
  }
}