xref: /aosp_15_r20/external/libvpx/vpx_dsp/loongarch/avg_pred_lsx.c (revision fb1b10ab9aebc7c7068eedab379b749d7e3900be)
/*
 *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10 
11 #include <assert.h>
12 #include "./vpx_dsp_rtcd.h"
13 #include "vpx/vpx_integer.h"
14 #include "vpx_util/loongson_intrinsics.h"
15 
// Compound prediction: comp_pred[i] = ROUND_POWER_OF_TWO(pred[i] + ref[i], 1)
// for a width x height block, using 128-bit LSX vectors.
//
// comp_pred and pred are stored contiguously (stride == width); ref uses
// ref_stride. Each __lsx_vavgr_bu computes the rounded unsigned byte average
// of 16 lanes, so the three paths below differ only in how 16 ref bytes are
// gathered per store:
//   width > 8 : 16 bytes per row segment, one row at a time
//   width == 8: two rows of 8 bytes packed into one vector
//   width == 4: four rows of 4 bytes packed into one vector
// NOTE(review): the packed paths assume height keeps width * height a
// multiple of 16, which holds for all block sizes this is called with.
void vpx_comp_avg_pred_lsx(uint8_t *comp_pred, const uint8_t *pred, int width,
                           int height, const uint8_t *ref, int ref_stride) {
  if (width > 8) {
    // Wide blocks: width is a multiple of 16, so each row splits into
    // whole-vector segments.
    int row, col;
    for (row = 0; row < height; ++row) {
      for (col = 0; col < width; col += 16) {
        __m128i vpred = __lsx_vld(pred + col, 0);
        __m128i vref = __lsx_vld(ref + col, 0);
        __m128i vavg = __lsx_vavgr_bu(vpred, vref);
        __lsx_vst(vavg, comp_pred + col, 0);
      }
      comp_pred += width;
      pred += width;
      ref += ref_stride;
    }
  } else if (width == 8) {
    // Two 8-byte ref rows are interleaved (low doublewords) into one vector
    // per iteration; pred/comp_pred advance 16 bytes = 2 rows.
    int remaining = height * width;
    do {
      __m128i vpred = __lsx_vld(pred, 0);
      __m128i row0 = __lsx_vld(ref, 0);
      __m128i row1 = __lsx_vld(ref + ref_stride, 0);
      __m128i vref = __lsx_vilvl_d(row1, row0);
      __m128i vavg = __lsx_vavgr_bu(vpred, vref);

      __lsx_vst(vavg, comp_pred, 0);

      ref += 2 * ref_stride;
      pred += 16;
      comp_pred += 16;
      remaining -= 16;
    } while (remaining != 0);
  } else {  // width == 4
    int remaining = height * width;
    assert(width == 4);
    do {
      __m128i vpred = __lsx_vld(pred, 0);
      __m128i vref;

      if (ref_stride == width) {
        // ref rows are contiguous: 4 rows of 4 bytes form one straight load.
        vref = __lsx_vld(ref, 0);
        ref += 16;
      } else {
        // Gather four 4-byte rows, interleave words pairwise, then merge the
        // two doubleword halves into row order r0|r1|r2|r3.
        __m128i row0 = __lsx_vld(ref, 0);
        __m128i row1 = __lsx_vld(ref + ref_stride, 0);
        __m128i row2 = __lsx_vld(ref + 2 * ref_stride, 0);
        __m128i row3 = __lsx_vld(ref + 3 * ref_stride, 0);
        DUP2_ARG2(__lsx_vilvl_w, row1, row0, row3, row2, row0, row2);
        vref = __lsx_vilvl_d(row2, row0);
        ref += 4 * ref_stride;
      }
      vref = __lsx_vavgr_bu(vpred, vref);

      __lsx_vst(vref, comp_pred, 0);
      comp_pred += 16;
      pred += 16;
      remaining -= 16;
    } while (remaining != 0);
  }
}
84