/*
 *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/loongarch/bitdepth_conversion_lsx.h"

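/* 8x8 Hadamard transform of a 16-bit source block using LSX vector
 * intrinsics. The 64 output coefficients are written contiguously to dst. */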
void vpx_hadamard_8x8_lsx(const int16_t *src, ptrdiff_t src_stride,
                          tran_low_t *dst) {
  __m128i src0, src1, src2, src3, src4, src5, src6, src7;
  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  ptrdiff_t src_stride2 = src_stride << 1;
  ptrdiff_t src_stride3 = src_stride2 + src_stride;
  ptrdiff_t src_stride4 = src_stride2 << 1;
  ptrdiff_t src_stride6 = src_stride3 << 1;

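  /* __lsx_vldx takes a byte offset; since src_stride is in int16_t elements,
   * src_stride2/4/6 address the rows one, two and three lines below the
   * current base pointer. */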
  int16_t *src_tmp = (int16_t *)src;
  src0 = __lsx_vld(src_tmp, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp, src_stride2, src_tmp, src_stride4, src1, src2);
  src3 = __lsx_vldx(src_tmp, src_stride6);
  src_tmp += src_stride4;
  src4 = __lsx_vld(src_tmp, 0);
  DUP2_ARG2(__lsx_vldx, src_tmp, src_stride2, src_tmp, src_stride4, src5, src6);
  src7 = __lsx_vldx(src_tmp, src_stride6);

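  /* First pass: each vector holds one source row, so three butterfly stages
   * across the vectors apply the 8-point Hadamard transform down the
   * columns. */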
  LSX_BUTTERFLY_8_H(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2,
                    tmp4, tmp6, tmp7, tmp5, tmp3, tmp1);
  LSX_BUTTERFLY_8_H(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1,
                    src4, src5, src7, src6, src3, src2);
  LSX_BUTTERFLY_8_H(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7,
                    tmp3, tmp4, tmp5, tmp1, tmp6, tmp2);
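  /* Transpose the 8x8 block so the second pass can reuse the same vertical
   * butterfly pattern for the other dimension. */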
  LSX_TRANSPOSE8x8_H(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
                     src2, src3, src4, src5, src6, src7);
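  /* Second pass: the same three butterfly stages complete the 2-D transform. */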
  LSX_BUTTERFLY_8_H(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2,
                    tmp4, tmp6, tmp7, tmp5, tmp3, tmp1);
  LSX_BUTTERFLY_8_H(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1,
                    src4, src5, src7, src6, src3, src2);
  LSX_BUTTERFLY_8_H(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7,
                    tmp3, tmp4, tmp5, tmp1, tmp6, tmp2);
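  /* Each vector holds eight coefficients; store the 64 results contiguously. */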
  store_tran_low(tmp0, dst, 0);
  store_tran_low(tmp1, dst, 8);
  store_tran_low(tmp2, dst, 16);
  store_tran_low(tmp3, dst, 24);
  store_tran_low(tmp4, dst, 32);
  store_tran_low(tmp5, dst, 40);
  store_tran_low(tmp6, dst, 48);
  store_tran_low(tmp7, dst, 56);
}

void vpx_hadamard_16x16_lsx(const int16_t *src, ptrdiff_t src_stride,
                            tran_low_t *dst) {
  int i;
  __m128i a0, a1, a2, a3, b0, b1, b2, b3;

  /* Rearrange 16x16 to 8x32 and remove stride.
   * Top left first. */
  vpx_hadamard_8x8_lsx(src + 0 + 0 * src_stride, src_stride, dst + 0);
  /* Top right. */
  vpx_hadamard_8x8_lsx(src + 8 + 0 * src_stride, src_stride, dst + 64);
  /* Bottom left. */
  vpx_hadamard_8x8_lsx(src + 0 + 8 * src_stride, src_stride, dst + 128);
  /* Bottom right. */
  vpx_hadamard_8x8_lsx(src + 8 + 8 * src_stride, src_stride, dst + 192);

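  /* Combine the four 8x8 transforms eight coefficients at a time: a
   * butterfly, an arithmetic shift right by 1 to keep intermediates within
   * 16-bit range, then a second butterfly. */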
  for (i = 0; i < 64; i += 8) {
    a0 = load_tran_low(dst);
    a1 = load_tran_low(dst + 64);
    a2 = load_tran_low(dst + 128);
    a3 = load_tran_low(dst + 192);

    LSX_BUTTERFLY_4_H(a0, a2, a3, a1, b0, b2, b3, b1);
    DUP4_ARG2(__lsx_vsrai_h, b0, 1, b1, 1, b2, 1, b3, 1, b0, b1, b2, b3);
    LSX_BUTTERFLY_4_H(b0, b1, b3, b2, a0, a1, a3, a2);

    store_tran_low(a0, dst, 0);
    store_tran_low(a1, dst, 64);
    store_tran_low(a2, dst, 128);
    store_tran_low(a3, dst, 192);

    dst += 8;
  }
}