/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <immintrin.h>

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"

#include "aom_ports/mem.h"
#include "aom/aom_integer.h"

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/aom_filter.h"
#include "aom_dsp/x86/obmc_intrinsic_sse4.h"

////////////////////////////////////////////////////////////////////////////////
// 8 bit
////////////////////////////////////////////////////////////////////////////////

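// Accumulates the OBMC sum and sum of squares for blocks whose width is a
// multiple of 8. For each pixel the residual is
//   ROUND_POWER_OF_TWO_SIGNED(wsrc[n] - mask[n] * pre[n], 12),
// i.e. the weighted difference is rounded back to the pixel domain before
// being accumulated into *sum and *sse.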
static inline void obmc_variance_w8n(const uint8_t *pre, const int pre_stride,
                                     const int32_t *wsrc, const int32_t *mask,
                                     unsigned int *const sse, int *const sum,
                                     const int w, const int h) {
  int n = 0, width, height = h;
  __m128i v_sum_d = _mm_setzero_si128();
  __m128i v_sse_d = _mm_setzero_si128();
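  // Rounding bias of 2048 for the signed round-to-nearest by 12 bits below.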
  const __m256i v_bias_d = _mm256_set1_epi32((1 << 12) >> 1);
  __m128i v_d;
  const uint8_t *pre_temp;
  assert(w >= 8);
  assert(IS_POWER_OF_TWO(w));
  assert(IS_POWER_OF_TWO(h));
  do {
    width = w;
    pre_temp = pre;
    do {
      const __m128i v_p_b = _mm_loadl_epi64((const __m128i *)pre_temp);
      const __m256i v_m_d = _mm256_loadu_si256((__m256i const *)(mask + n));
      const __m256i v_w_d = _mm256_loadu_si256((__m256i const *)(wsrc + n));
      const __m256i v_p0_d = _mm256_cvtepu8_epi32(v_p_b);

      // Values in both pre and mask fit in 15 bits, and are packed at 32 bit
      // boundaries. We use pmaddwd, as it has lower latency on Haswell
      // than pmulld but produces the same result with these inputs.
      const __m256i v_pm_d = _mm256_madd_epi16(v_p0_d, v_m_d);
      const __m256i v_diff0_d = _mm256_sub_epi32(v_w_d, v_pm_d);

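      // Round the difference to the nearest multiple of 1 << 12, rounding
      // halves away from zero: adding the sign (-1 for negative lanes) before
      // the biased arithmetic shift makes the rounding symmetric, matching
      // ROUND_POWER_OF_TWO_SIGNED(diff, 12).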
      const __m256i v_sign_d = _mm256_srai_epi32(v_diff0_d, 31);
      const __m256i v_tmp_d =
          _mm256_add_epi32(_mm256_add_epi32(v_diff0_d, v_bias_d), v_sign_d);
      const __m256i v_rdiff0_d = _mm256_srai_epi32(v_tmp_d, 12);
      const __m128i v_rdiff_d = _mm256_castsi256_si128(v_rdiff0_d);
      const __m128i v_rdiff1_d = _mm256_extracti128_si256(v_rdiff0_d, 1);

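      // The rounded differences fit comfortably in 16 bits, so the saturating
      // pack is lossless and pmaddwd yields the sums of adjacent squares.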
      const __m128i v_rdiff01_w = _mm_packs_epi32(v_rdiff_d, v_rdiff1_d);
      const __m128i v_sqrdiff_d = _mm_madd_epi16(v_rdiff01_w, v_rdiff01_w);

      v_sum_d = _mm_add_epi32(v_sum_d, v_rdiff_d);
      v_sum_d = _mm_add_epi32(v_sum_d, v_rdiff1_d);
      v_sse_d = _mm_add_epi32(v_sse_d, v_sqrdiff_d);

      pre_temp += 8;
      n += 8;
      width -= 8;
    } while (width > 0);
    pre += pre_stride;
    height -= 1;
  } while (height > 0);
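  // Horizontally reduce the four sum lanes and four sse lanes; after the
  // second hadd, lane 0 holds the final sum and lane 1 the final sse.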
  v_d = _mm_hadd_epi32(v_sum_d, v_sse_d);
  v_d = _mm_hadd_epi32(v_d, v_d);
  *sum = _mm_cvtsi128_si32(v_d);
  *sse = (unsigned int)_mm_cvtsi128_si32(_mm_srli_si128(v_d, 4));
}

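// Same computation as obmc_variance_w8n, but for widths that are a multiple
// of 16: each iteration loads 16 pre pixels and widens them into two 8-lane
// vectors so the whole pipeline stays in 256-bit registers.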
static inline void obmc_variance_w16n(const uint8_t *pre, const int pre_stride,
                                      const int32_t *wsrc, const int32_t *mask,
                                      unsigned int *const sse, int *const sum,
                                      const int w, const int h) {
  int n = 0, width, height = h;
  __m256i v_d;
  __m128i res0;
  const uint8_t *pre_temp;
  const __m256i v_bias_d = _mm256_set1_epi32((1 << 12) >> 1);
  __m256i v_sum_d = _mm256_setzero_si256();
  __m256i v_sse_d = _mm256_setzero_si256();

  assert(w >= 16);
  assert(IS_POWER_OF_TWO(w));
  assert(IS_POWER_OF_TWO(h));
  do {
    width = w;
    pre_temp = pre;
    do {
      const __m128i v_p_b = _mm_loadu_si128((__m128i *)pre_temp);
      const __m256i v_m0_d = _mm256_loadu_si256((__m256i const *)(mask + n));
      const __m256i v_w0_d = _mm256_loadu_si256((__m256i const *)(wsrc + n));
      const __m256i v_m1_d =
          _mm256_loadu_si256((__m256i const *)(mask + n + 8));
      const __m256i v_w1_d =
          _mm256_loadu_si256((__m256i const *)(wsrc + n + 8));

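      // Widen the 16 pre bytes into two vectors of eight 32-bit lanes so they
      // line up with the 32-bit wsrc and mask values.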
      const __m256i v_p0_d = _mm256_cvtepu8_epi32(v_p_b);
      const __m256i v_p1_d = _mm256_cvtepu8_epi32(_mm_srli_si128(v_p_b, 8));

      const __m256i v_pm0_d = _mm256_madd_epi16(v_p0_d, v_m0_d);
      const __m256i v_pm1_d = _mm256_madd_epi16(v_p1_d, v_m1_d);

      const __m256i v_diff0_d = _mm256_sub_epi32(v_w0_d, v_pm0_d);
      const __m256i v_diff1_d = _mm256_sub_epi32(v_w1_d, v_pm1_d);

      const __m256i v_sign0_d = _mm256_srai_epi32(v_diff0_d, 31);
      const __m256i v_sign1_d = _mm256_srai_epi32(v_diff1_d, 31);

      const __m256i v_tmp0_d =
          _mm256_add_epi32(_mm256_add_epi32(v_diff0_d, v_bias_d), v_sign0_d);
      const __m256i v_tmp1_d =
          _mm256_add_epi32(_mm256_add_epi32(v_diff1_d, v_bias_d), v_sign1_d);

      const __m256i v_rdiff0_d = _mm256_srai_epi32(v_tmp0_d, 12);
      const __m256i v_rdiff2_d = _mm256_srai_epi32(v_tmp1_d, 12);

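      // v_rdiff1_d feeds the sum accumulator (the two halves can be added
      // before accumulating), while the packed 16-bit copy feeds pmaddwd to
      // square and pair-sum the differences for the sse accumulator.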
      const __m256i v_rdiff1_d = _mm256_add_epi32(v_rdiff0_d, v_rdiff2_d);
      const __m256i v_rdiff01_w = _mm256_packs_epi32(v_rdiff0_d, v_rdiff2_d);
      const __m256i v_sqrdiff_d = _mm256_madd_epi16(v_rdiff01_w, v_rdiff01_w);

      v_sum_d = _mm256_add_epi32(v_sum_d, v_rdiff1_d);
      v_sse_d = _mm256_add_epi32(v_sse_d, v_sqrdiff_d);

      pre_temp += 16;
      n += 16;
      width -= 16;
    } while (width > 0);
    pre += pre_stride;
    height -= 1;
  } while (height > 0);

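  // _mm256_hadd_epi32 operates within each 128-bit lane, so after the two
  // horizontal adds the low and high halves must still be added together;
  // lane 0 of res0 then holds the sum and lane 1 the sse.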
  v_d = _mm256_hadd_epi32(v_sum_d, v_sse_d);
  v_d = _mm256_hadd_epi32(v_d, v_d);
  res0 = _mm256_castsi256_si128(v_d);
  res0 = _mm_add_epi32(res0, _mm256_extractf128_si256(v_d, 1));
  *sum = _mm_cvtsi128_si32(res0);
  *sse = (unsigned int)_mm_cvtsi128_si32(_mm_srli_si128(res0, 4));
}

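// Instantiates the per-size entry point aom_obmc_variance<W>x<H>_avx2(). All
// sizes share the same epilogue: *sse minus the squared sum divided by the
// pixel count, with the product widened to 64 bits so sum * sum cannot
// overflow for the largest block sizes.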
#define OBMCVARWXH(W, H)                                                \
  unsigned int aom_obmc_variance##W##x##H##_avx2(                       \
      const uint8_t *pre, int pre_stride, const int32_t *wsrc,          \
      const int32_t *mask, unsigned int *sse) {                         \
    int sum;                                                            \
    if (W == 4) {                                                       \
      obmc_variance_w4(pre, pre_stride, wsrc, mask, sse, &sum, H);      \
    } else if (W == 8) {                                                \
      obmc_variance_w8n(pre, pre_stride, wsrc, mask, sse, &sum, W, H);  \
    } else {                                                            \
      obmc_variance_w16n(pre, pre_stride, wsrc, mask, sse, &sum, W, H); \
    }                                                                   \
                                                                        \
    return *sse - (unsigned int)(((int64_t)sum * sum) / (W * H));       \
  }

OBMCVARWXH(128, 128)
OBMCVARWXH(128, 64)
OBMCVARWXH(64, 128)
OBMCVARWXH(64, 64)
OBMCVARWXH(64, 32)
OBMCVARWXH(32, 64)
OBMCVARWXH(32, 32)
OBMCVARWXH(32, 16)
OBMCVARWXH(16, 32)
OBMCVARWXH(16, 16)
OBMCVARWXH(16, 8)
OBMCVARWXH(8, 16)
OBMCVARWXH(8, 8)
OBMCVARWXH(8, 4)
OBMCVARWXH(4, 8)
OBMCVARWXH(4, 4)
OBMCVARWXH(4, 16)
OBMCVARWXH(16, 4)
OBMCVARWXH(8, 32)
OBMCVARWXH(32, 8)
OBMCVARWXH(16, 64)
OBMCVARWXH(64, 16)