/*
 *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_util/loongson_intrinsics.h"

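/* VP8 IDCT constants in Q16 fixed point:
 * cospi8sqrt2minus1 = (sqrt(2) * cos(pi / 8) - 1) * 65536 ~= 20091
 * sinpi8sqrt2       = sqrt(2) * sin(pi / 8) * 65536       ~= 35468 */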
static const int32_t cospi8sqrt2minus1 = 20091;
static const int32_t sinpi8sqrt2 = 35468;

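/* Transpose an 8x4 block of halfwords (four vectors of eight lanes): output
 * vector n receives input column 2n in its low doubleword and column 2n+1 in
 * its high doubleword. */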
#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3)    \
  do {                                                                    \
    __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m;                               \
                                                                          \
    DUP2_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, tmp0_m, tmp1_m);         \
    DUP2_ARG2(__lsx_vilvh_h, in1, in0, in3, in2, tmp2_m, tmp3_m);         \
    DUP2_ARG2(__lsx_vilvl_w, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \
    DUP2_ARG2(__lsx_vilvh_w, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \
  } while (0)

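/* Transpose two 4x4 halfword blocks stored side by side: row n of the
 * transposed left block lands in the low doubleword of outn, row n of the
 * transposed right block in its high doubleword. */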
#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
  do {                                                                  \
    __m128i s4_m, s5_m, s6_m, s7_m;                                     \
                                                                        \
    TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m);     \
    DUP2_ARG2(__lsx_vilvl_d, s6_m, s4_m, s7_m, s5_m, out0, out2);       \
    out1 = __lsx_vilvh_d(s6_m, s4_m);                                   \
    out3 = __lsx_vilvh_d(s7_m, s5_m);                                   \
  } while (0)

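/* Sign-extend the eight halfwords of in0 to words (interleave into the high
 * 16 bits of each word, then arithmetic-shift right by 16), multiply by
 * sinpi8sqrt2 in Q16, and pack the >> 16 results back to halfwords in in1. */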
#define EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in0, in1)   \
  do {                                                        \
    __m128i zero_m = __lsx_vldi(0);                           \
    __m128i tmp1_m, tmp2_m;                                   \
    __m128i sinpi8_sqrt2_m = __lsx_vreplgr2vr_w(sinpi8sqrt2); \
                                                              \
    tmp1_m = __lsx_vilvl_h(in0, zero_m);                      \
    tmp2_m = __lsx_vilvh_h(in0, zero_m);                      \
    tmp1_m = __lsx_vsrai_w(tmp1_m, 16);                       \
    tmp2_m = __lsx_vsrai_w(tmp2_m, 16);                       \
    tmp1_m = __lsx_vmul_w(tmp1_m, sinpi8_sqrt2_m);            \
    tmp1_m = __lsx_vsrai_w(tmp1_m, 16);                       \
    tmp2_m = __lsx_vmul_w(tmp2_m, sinpi8_sqrt2_m);            \
    tmp2_m = __lsx_vsrai_w(tmp2_m, 16);                       \
    in1 = __lsx_vpickev_h(tmp2_m, tmp1_m);                    \
  } while (0)

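/* One pass of the 4x4 VP8 IDCT at halfword precision:
 *   a = in0 + in2, b = in0 - in2
 *   c = (in1 * sinpi8sqrt2 >> 16) - (in3 + (in3 * cospi8sqrt2minus1 >> 16))
 *   d = (in1 + (in1 * cospi8sqrt2minus1 >> 16)) + (in3 * sinpi8sqrt2 >> 16)
 * and the final butterfly gives {a + d, b + c, b - c, a - d}. */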
#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3)      \
  do {                                                                 \
    __m128i a1_m, b1_m, c1_m, d1_m;                                    \
    __m128i c_tmp1_m, c_tmp2_m;                                        \
    __m128i d_tmp1_m, d_tmp2_m;                                        \
    __m128i const_cospi8sqrt2minus1_m;                                 \
                                                                       \
    const_cospi8sqrt2minus1_m = __lsx_vreplgr2vr_h(cospi8sqrt2minus1); \
    a1_m = __lsx_vadd_h(in0, in2);                                     \
    b1_m = __lsx_vsub_h(in0, in2);                                     \
    EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in1, c_tmp1_m);          \
                                                                       \
    c_tmp2_m = __lsx_vmuh_h(in3, const_cospi8sqrt2minus1_m);           \
    c_tmp2_m = __lsx_vslli_h(c_tmp2_m, 1);                             \
    c_tmp2_m = __lsx_vsrai_h(c_tmp2_m, 1);                             \
    c_tmp2_m = __lsx_vadd_h(in3, c_tmp2_m);                            \
    c1_m = __lsx_vsub_h(c_tmp1_m, c_tmp2_m);                           \
                                                                       \
    d_tmp1_m = __lsx_vmuh_h(in1, const_cospi8sqrt2minus1_m);           \
    d_tmp1_m = __lsx_vslli_h(d_tmp1_m, 1);                             \
    d_tmp1_m = __lsx_vsrai_h(d_tmp1_m, 1);                             \
    d_tmp1_m = __lsx_vadd_h(in1, d_tmp1_m);                            \
    EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3, d_tmp2_m);          \
    d1_m = __lsx_vadd_h(d_tmp1_m, d_tmp2_m);                           \
    LSX_BUTTERFLY_4_H(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
  } while (0)

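/* The same 1-D IDCT as VP8_IDCT_1D_H, operating on word (32-bit) lanes so
 * the vertical pass keeps full intermediate precision. */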
#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3)      \
  do {                                                                 \
    __m128i a1_m, b1_m, c1_m, d1_m;                                    \
    __m128i c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m;                    \
    __m128i const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m;                 \
                                                                       \
    const_cospi8sqrt2minus1_m = __lsx_vreplgr2vr_w(cospi8sqrt2minus1); \
    sinpi8_sqrt2_m = __lsx_vreplgr2vr_w(sinpi8sqrt2);                  \
    a1_m = __lsx_vadd_w(in0, in2);                                     \
    b1_m = __lsx_vsub_w(in0, in2);                                     \
    c_tmp1_m = __lsx_vmul_w(in1, sinpi8_sqrt2_m);                      \
    c_tmp1_m = __lsx_vsrai_w(c_tmp1_m, 16);                            \
    c_tmp2_m = __lsx_vmul_w(in3, const_cospi8sqrt2minus1_m);           \
    c_tmp2_m = __lsx_vsrai_w(c_tmp2_m, 16);                            \
    c_tmp2_m = __lsx_vadd_w(in3, c_tmp2_m);                            \
    c1_m = __lsx_vsub_w(c_tmp1_m, c_tmp2_m);                           \
    d_tmp1_m = __lsx_vmul_w(in1, const_cospi8sqrt2minus1_m);           \
    d_tmp1_m = __lsx_vsrai_w(d_tmp1_m, 16);                            \
    d_tmp1_m = __lsx_vadd_w(in1, d_tmp1_m);                            \
    d_tmp2_m = __lsx_vmul_w(in3, sinpi8_sqrt2_m);                      \
    d_tmp2_m = __lsx_vsrai_w(d_tmp2_m, 16);                            \
    d1_m = __lsx_vadd_w(d_tmp1_m, d_tmp2_m);                           \
    LSX_BUTTERFLY_4_W(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
  } while (0)

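/* Sign-extend the eight halfwords of in to words: out0 takes the low four
 * lanes, out1 the high four. */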
#define UNPCK_SH_SW(in, out0, out1)  \
  do {                               \
    out0 = __lsx_vsllwil_w_h(in, 0); \
    out1 = __lsx_vexth_w_h(in);      \
  } while (0)

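/* DC-only IDCT: broadcast the rounded DC term ((in_dc + 4) >> 3), add it to
 * a 4x4 block of pred, and store the result to dest clipped to [0, 255]. */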
static void idct4x4_addconst_lsx(int16_t in_dc, uint8_t *pred,
                                 int32_t pred_stride, uint8_t *dest,
                                 int32_t dest_stride) {
  __m128i vec, res0, res1, res2, res3, dst0, dst1;
  __m128i pred0, pred1, pred2, pred3;
  __m128i zero = __lsx_vldi(0);

  int32_t pred_stride2 = pred_stride << 1;
  int32_t pred_stride3 = pred_stride2 + pred_stride;

  vec = __lsx_vreplgr2vr_h(in_dc);
  vec = __lsx_vsrari_h(vec, 3);
  pred0 = __lsx_vld(pred, 0);
  DUP2_ARG2(__lsx_vldx, pred, pred_stride, pred, pred_stride2, pred1, pred2);
  pred3 = __lsx_vldx(pred, pred_stride3);
  DUP4_ARG2(__lsx_vilvl_b, zero, pred0, zero, pred1, zero, pred2, zero, pred3,
            res0, res1, res2, res3);
  DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec, res0,
            res1, res2, res3);
  res0 = __lsx_vclip255_h(res0);
  res1 = __lsx_vclip255_h(res1);
  res2 = __lsx_vclip255_h(res2);
  res3 = __lsx_vclip255_h(res3);

  DUP2_ARG2(__lsx_vpickev_b, res1, res0, res3, res2, dst0, dst1);
  dst0 = __lsx_vpickev_w(dst1, dst0);
  __lsx_vstelm_w(dst0, dest, 0, 0);
  dest += dest_stride;
  __lsx_vstelm_w(dst0, dest, 0, 1);
  dest += dest_stride;
  __lsx_vstelm_w(dst0, dest, 0, 2);
  dest += dest_stride;
  __lsx_vstelm_w(dst0, dest, 0, 3);
}

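/* RTCD entry point: add the inverse-transformed DC term of a single 4x4
 * block to the predictor and write the reconstruction. */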
void vp8_dc_only_idct_add_lsx(int16_t input_dc, uint8_t *pred_ptr,
                              int32_t pred_stride, uint8_t *dst_ptr,
                              int32_t dst_stride) {
  idct4x4_addconst_lsx(input_dc, pred_ptr, pred_stride, dst_ptr, dst_stride);
}

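/* Dequantize and inverse-transform two horizontally adjacent 4x4 blocks
 * (32 coefficients at input), add the result to dest, and zero the
 * coefficient buffer. The horizontal pass runs at halfword precision; the
 * vertical pass widens to words and rounds with (x + 4) >> 3. */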
static void dequant_idct4x4_addblk_2x_lsx(int16_t *input,
                                          int16_t *dequant_input, uint8_t *dest,
                                          int32_t dest_stride) {
  __m128i dest0, dest1, dest2, dest3;
  __m128i in0, in1, in2, in3, mul0, mul1, mul2, mul3, dequant_in0, dequant_in1;
  __m128i hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3, res0, res1, res2, res3;
  __m128i hz0l, hz1l, hz2l, hz3l, hz0r, hz1r, hz2r, hz3r;
  __m128i vt0l, vt1l, vt2l, vt3l, vt0r, vt1r, vt2r, vt3r;
  __m128i zero = __lsx_vldi(0);

  int32_t dest_stride2 = dest_stride << 1;
  int32_t dest_stride3 = dest_stride2 + dest_stride;

  DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 32, input, 48, in0, in1, in2,
            in3);
  DUP2_ARG2(__lsx_vld, dequant_input, 0, dequant_input, 16, dequant_in0,
            dequant_in1);

  DUP4_ARG2(__lsx_vmul_h, in0, dequant_in0, in1, dequant_in1, in2, dequant_in0,
            in3, dequant_in1, mul0, mul1, mul2, mul3);
  DUP2_ARG2(__lsx_vpickev_d, mul2, mul0, mul3, mul1, in0, in2);
  DUP2_ARG2(__lsx_vpickod_d, mul2, mul0, mul3, mul1, in1, in3);

  VP8_IDCT_1D_H(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
  TRANSPOSE_TWO_4x4_H(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
  UNPCK_SH_SW(hz0, hz0r, hz0l);
  UNPCK_SH_SW(hz1, hz1r, hz1l);
  UNPCK_SH_SW(hz2, hz2r, hz2l);
  UNPCK_SH_SW(hz3, hz3r, hz3l);
  VP8_IDCT_1D_W(hz0l, hz1l, hz2l, hz3l, vt0l, vt1l, vt2l, vt3l);
  DUP4_ARG2(__lsx_vsrari_w, vt0l, 3, vt1l, 3, vt2l, 3, vt3l, 3, vt0l, vt1l,
            vt2l, vt3l);
  VP8_IDCT_1D_W(hz0r, hz1r, hz2r, hz3r, vt0r, vt1r, vt2r, vt3r);
  DUP4_ARG2(__lsx_vsrari_w, vt0r, 3, vt1r, 3, vt2r, 3, vt3r, 3, vt0r, vt1r,
            vt2r, vt3r);
  DUP4_ARG2(__lsx_vpickev_h, vt0l, vt0r, vt1l, vt1r, vt2l, vt2r, vt3l, vt3r,
            vt0, vt1, vt2, vt3);
  TRANSPOSE_TWO_4x4_H(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
  dest0 = __lsx_vld(dest, 0);
  DUP2_ARG2(__lsx_vldx, dest, dest_stride, dest, dest_stride2, dest1, dest2);
  dest3 = __lsx_vldx(dest, dest_stride3);
  DUP4_ARG2(__lsx_vilvl_b, zero, dest0, zero, dest1, zero, dest2, zero, dest3,
            res0, res1, res2, res3);
  DUP4_ARG2(__lsx_vadd_h, res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0,
            res1, res2, res3);

  res0 = __lsx_vclip255_h(res0);
  res1 = __lsx_vclip255_h(res1);
  res2 = __lsx_vclip255_h(res2);
  res3 = __lsx_vclip255_h(res3);
  DUP2_ARG2(__lsx_vpickev_b, res1, res0, res3, res2, vt0l, vt1l);

  __lsx_vstelm_d(vt0l, dest, 0, 0);
  __lsx_vstelm_d(vt0l, dest + dest_stride, 0, 1);
  __lsx_vstelm_d(vt1l, dest + dest_stride2, 0, 0);
  __lsx_vstelm_d(vt1l, dest + dest_stride3, 0, 1);

  __lsx_vst(zero, input, 0);
  __lsx_vst(zero, input, 16);
  __lsx_vst(zero, input, 32);
  __lsx_vst(zero, input, 48);
}

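/* DC-only path for two adjacent 4x4 blocks: dequantize the two DC
 * coefficients, round with (x + 4) >> 3, broadcast one constant over each
 * block, add to dest with clipping, and clear the DCs. */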
static void dequant_idct_addconst_2x_lsx(int16_t *input, int16_t *dequant_input,
                                         uint8_t *dest, int32_t dest_stride) {
  __m128i input_dc0, input_dc1, vec, res0, res1, res2, res3;
  __m128i dest0, dest1, dest2, dest3;
  __m128i zero = __lsx_vldi(0);
  int32_t dest_stride2 = dest_stride << 1;
  int32_t dest_stride3 = dest_stride2 + dest_stride;

  input_dc0 = __lsx_vreplgr2vr_h(input[0] * dequant_input[0]);
  input_dc1 = __lsx_vreplgr2vr_h(input[16] * dequant_input[0]);
  DUP2_ARG2(__lsx_vsrari_h, input_dc0, 3, input_dc1, 3, input_dc0, input_dc1);
  vec = __lsx_vpickev_d(input_dc1, input_dc0);
  input[0] = 0;
  input[16] = 0;
  dest0 = __lsx_vld(dest, 0);
  DUP2_ARG2(__lsx_vldx, dest, dest_stride, dest, dest_stride2, dest1, dest2);
  dest3 = __lsx_vldx(dest, dest_stride3);
  DUP4_ARG2(__lsx_vilvl_b, zero, dest0, zero, dest1, zero, dest2, zero, dest3,
            res0, res1, res2, res3);
  DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec, res0,
            res1, res2, res3);
  res0 = __lsx_vclip255_h(res0);
  res1 = __lsx_vclip255_h(res1);
  res2 = __lsx_vclip255_h(res2);
  res3 = __lsx_vclip255_h(res3);

  DUP2_ARG2(__lsx_vpickev_b, res1, res0, res3, res2, res0, res1);
  __lsx_vstelm_d(res0, dest, 0, 0);
  __lsx_vstelm_d(res0, dest + dest_stride, 0, 1);
  __lsx_vstelm_d(res1, dest + dest_stride2, 0, 0);
  __lsx_vstelm_d(res1, dest + dest_stride3, 0, 1);
}

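/* eobs holds one end-of-block byte per 4x4 block; reading it through an
 * int16_t pointer tests a horizontal pair at once. If either block of the
 * pair has eob > 1 (mask 0xfefe) it needs the full IDCT; otherwise only DC
 * coefficients are present and the constant-add path suffices. */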
void vp8_dequant_idct_add_y_block_lsx(int16_t *q, int16_t *dq, uint8_t *dst,
                                      int32_t stride, char *eobs) {
  int16_t *eobs_h = (int16_t *)eobs;
  uint8_t i;

  for (i = 4; i--;) {
    if (eobs_h[0]) {
      if (eobs_h[0] & 0xfefe) {
        dequant_idct4x4_addblk_2x_lsx(q, dq, dst, stride);
      } else {
        dequant_idct_addconst_2x_lsx(q, dq, dst, stride);
      }
    }

    q += 32;

    if (eobs_h[1]) {
      if (eobs_h[1] & 0xfefe) {
        dequant_idct4x4_addblk_2x_lsx(q, dq, dst + 8, stride);
      } else {
        dequant_idct_addconst_2x_lsx(q, dq, dst + 8, stride);
      }
    }

    q += 32;
    dst += (4 * stride);
    eobs_h += 2;
  }
}

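/* Same pair-wise dispatch for chroma: two block pairs for the U plane,
 * then two for the V plane. */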
void vp8_dequant_idct_add_uv_block_lsx(int16_t *q, int16_t *dq, uint8_t *dst_u,
                                       uint8_t *dst_v, int32_t stride,
                                       char *eobs) {
  int16_t *eobs_h = (int16_t *)eobs;
  if (eobs_h[0]) {
    if (eobs_h[0] & 0xfefe) {
      dequant_idct4x4_addblk_2x_lsx(q, dq, dst_u, stride);
    } else {
      dequant_idct_addconst_2x_lsx(q, dq, dst_u, stride);
    }
  }

  q += 32;
  dst_u += (stride * 4);

  if (eobs_h[1]) {
    if (eobs_h[1] & 0xfefe) {
      dequant_idct4x4_addblk_2x_lsx(q, dq, dst_u, stride);
    } else {
      dequant_idct_addconst_2x_lsx(q, dq, dst_u, stride);
    }
  }

  q += 32;

  if (eobs_h[2]) {
    if (eobs_h[2] & 0xfefe) {
      dequant_idct4x4_addblk_2x_lsx(q, dq, dst_v, stride);
    } else {
      dequant_idct_addconst_2x_lsx(q, dq, dst_v, stride);
    }
  }
  q += 32;
  dst_v += (stride * 4);

  if (eobs_h[3]) {
    if (eobs_h[3] & 0xfefe) {
      dequant_idct4x4_addblk_2x_lsx(q, dq, dst_v, stride);
    } else {
      dequant_idct_addconst_2x_lsx(q, dq, dst_v, stride);
    }
  }
}