1 /*
2 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #include <assert.h>
13 #include <smmintrin.h>
14 #include "aom_dsp/x86/mem_sse2.h"
15 #include "aom_dsp/x86/synonyms.h"
16
17 #include "config/av1_rtcd.h"
18 #include "av1/common/restoration.h"
19 #include "av1/encoder/pickrst.h"
20
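// Accumulates 8 consecutive entries of one H_int row. The shuffle (from
// g_shuffle_stats_data) arranges the loaded dgd bytes into pairs of
// horizontally adjacent samples, which are widened to 16 bits and multiplied
// against the broadcast (D1, D2) tap pair in `kl` with _mm_madd_epi16, folding
// the contributions of the two pixels handled per iteration into a single
// 32-bit sum per entry before adding into `dst`.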
21 static inline void acc_stat_sse41(int32_t *dst, const uint8_t *src,
22 const __m128i *shuffle, const __m128i *kl) {
23 const __m128i s = _mm_shuffle_epi8(xx_loadu_128(src), *shuffle);
24 const __m128i d0 = _mm_madd_epi16(*kl, _mm_cvtepu8_epi16(s));
25 const __m128i d1 =
26 _mm_madd_epi16(*kl, _mm_cvtepu8_epi16(_mm_srli_si128(s, 8)));
27 const __m128i dst0 = xx_loadu_128(dst);
28 const __m128i dst1 = xx_loadu_128(dst + 4);
29 const __m128i r0 = _mm_add_epi32(dst0, d0);
30 const __m128i r1 = _mm_add_epi32(dst1, d1);
31 xx_storeu_128(dst, r0);
32 xx_storeu_128(dst + 4, r1);
33 }
34
35 static inline void acc_stat_win7_one_line_sse4_1(
36 const uint8_t *dgd, const uint8_t *src, int h_start, int h_end,
37 int dgd_stride, const __m128i *shuffle, int32_t *sumX,
38 int32_t sumY[WIENER_WIN][WIENER_WIN], int32_t M_int[WIENER_WIN][WIENER_WIN],
39 int32_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
40 const int wiener_win = 7;
41 int j, k, l;
42 // Main loop handles two pixels at a time
43 // We can assume that h_start is even, since it will always be aligned to
44 // a tile edge + some number of restoration units, and both of those will
45 // be 64-pixel aligned.
46 // However, at the edge of the image, h_end may be odd, so we need to handle
47 // that case correctly.
48 assert(h_start % 2 == 0);
49 const int h_end_even = h_end & ~1;
50 const int has_odd_pixel = h_end & 1;
51 for (j = h_start; j < h_end_even; j += 2) {
52 const uint8_t *dgd_ij = dgd + j;
53 const uint8_t X1 = src[j];
54 const uint8_t X2 = src[j + 1];
55 *sumX += X1 + X2;
56 for (k = 0; k < wiener_win; k++) {
57 const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
58 for (l = 0; l < wiener_win; l++) {
59 int32_t *H_ = &H_int[(l * wiener_win + k)][0];
60 const uint8_t D1 = dgd_ijk[l];
61 const uint8_t D2 = dgd_ijk[l + 1];
62 sumY[k][l] += D1 + D2;
63 M_int[k][l] += D1 * X1 + D2 * X2;
64
65 const __m128i kl =
66 _mm_cvtepu8_epi16(_mm_set1_epi16(loadu_int16(dgd_ijk + l)));
67 acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
68 acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
69 acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
70 acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
71 acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
72 acc_stat_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle, &kl);
73 acc_stat_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle, &kl);
74 }
75 }
76 }
77 // If the width is odd, add in the final pixel
78 if (has_odd_pixel) {
79 const uint8_t *dgd_ij = dgd + j;
80 const uint8_t X1 = src[j];
81 *sumX += X1;
82 for (k = 0; k < wiener_win; k++) {
83 const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
84 for (l = 0; l < wiener_win; l++) {
85 int32_t *H_ = &H_int[(l * wiener_win + k)][0];
86 const uint8_t D1 = dgd_ijk[l];
87 sumY[k][l] += D1;
88 M_int[k][l] += D1 * X1;
89
90 // The `acc_stat_sse41` function wants its input to have interleaved
91 // copies of two pixels, but we only have one. However, the pixels
92 // are (effectively) used as inputs to a multiply-accumulate.
93 // So if we set the extra pixel slot to 0, then it is effectively
94 // ignored.
95 const __m128i kl = _mm_cvtepu8_epi16(_mm_set1_epi16((int16_t)D1));
96 acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
97 acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
98 acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
99 acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
100 acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
101 acc_stat_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle, &kl);
102 acc_stat_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle, &kl);
103 }
104 }
105 }
106 }
107
108 static inline void compute_stats_win7_opt_sse4_1(
109 const uint8_t *dgd, const uint8_t *src, int h_start, int h_end, int v_start,
110 int v_end, int dgd_stride, int src_stride, int64_t *M, int64_t *H,
111 int use_downsampled_wiener_stats) {
112 int i, j, k, l, m, n;
113 const int wiener_win = WIENER_WIN;
114 const int pixel_count = (h_end - h_start) * (v_end - v_start);
115 const int wiener_win2 = wiener_win * wiener_win;
116 const int wiener_halfwin = (wiener_win >> 1);
117 const uint8_t avg =
118 find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
119
120 int32_t M_int32[WIENER_WIN][WIENER_WIN] = { { 0 } };
121 int32_t M_int32_row[WIENER_WIN][WIENER_WIN] = { { 0 } };
122 int64_t M_int64[WIENER_WIN][WIENER_WIN] = { { 0 } };
123 int32_t H_int32[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
124 int32_t H_int32_row[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
125 int64_t H_int64[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
126 int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
127 int32_t sumX = 0;
128 const uint8_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
129 int downsample_factor =
130 use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
131 int32_t sumX_row = 0;
132 int32_t sumY_row[WIENER_WIN][WIENER_WIN] = { { 0 } };
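  // Each visited row is accumulated into the *_row scratch buffers above and
  // then folded into the running sums weighted by downsample_factor, so when
  // downsampling is enabled the skipped rows are approximated by their
  // sampled neighbour.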
133
134 const __m128i shuffle = xx_loadu_128(g_shuffle_stats_data);
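  // Rows are processed in vertical strips of at most 64; after each strip the
  // 32-bit accumulators are flushed into the 64-bit totals below, presumably
  // to keep the intermediate sums from overflowing int32.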
135 for (j = v_start; j < v_end; j += 64) {
136 const int vert_end = AOMMIN(64, v_end - j) + j;
137 for (i = j; i < vert_end; i = i + downsample_factor) {
138 if (use_downsampled_wiener_stats &&
139 (vert_end - i < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
140 downsample_factor = vert_end - i;
141 }
142 sumX_row = 0;
143 memset(sumY_row, 0, sizeof(int32_t) * WIENER_WIN * WIENER_WIN);
144 memset(M_int32_row, 0, sizeof(int32_t) * WIENER_WIN * WIENER_WIN);
145 memset(H_int32_row, 0, sizeof(int32_t) * WIENER_WIN2 * (WIENER_WIN * 8));
146 acc_stat_win7_one_line_sse4_1(
147 dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
148 dgd_stride, &shuffle, &sumX_row, sumY_row, M_int32_row, H_int32_row);
149 sumX += sumX_row * downsample_factor;
150 // Accumulate sumY and the M matrix, scaled by the downsampling factor
151 for (k = 0; k < wiener_win; ++k) {
152 for (l = 0; l < wiener_win; ++l) {
153 sumY[k][l] += (sumY_row[k][l] * downsample_factor);
154 M_int32[k][l] += (M_int32_row[k][l] * downsample_factor);
155 }
156 }
157 // Accumulate the H matrix, scaled by the downsampling factor
158 for (k = 0; k < WIENER_WIN2; ++k) {
159 for (l = 0; l < WIENER_WIN * 8; ++l) {
160 H_int32[k][l] += (H_int32_row[k][l] * downsample_factor);
161 }
162 }
163 }
164 for (k = 0; k < wiener_win; ++k) {
165 for (l = 0; l < wiener_win; ++l) {
166 M_int64[k][l] += M_int32[k][l];
167 M_int32[k][l] = 0;
168 }
169 }
170 for (k = 0; k < WIENER_WIN2; ++k) {
171 for (l = 0; l < WIENER_WIN * 8; ++l) {
172 H_int64[k][l] += H_int32[k][l];
173 H_int32[k][l] = 0;
174 }
175 }
176 }
177
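  // Assemble the final (mean-removed) statistics. With N = pixel_count and
  // avg the mean of dgd over the region, sum((D - avg) * (X - avg)) expands to
  // sum(D * X) - avg * (sum(X) + sum(D)) + avg^2 * N, which is exactly
  // M_int64[k][l] - avg * (sumX + sumY[k][l]) + avg_square_sum below; the H
  // entries are formed the same way from pairs of dgd taps.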
178 const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
179 for (k = 0; k < wiener_win; k++) {
180 for (l = 0; l < wiener_win; l++) {
181 const int32_t idx0 = l * wiener_win + k;
182 M[idx0] =
183 M_int64[k][l] + (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]));
184 int64_t *H_ = H + idx0 * wiener_win2;
185 int64_t *H_int_ = &H_int64[idx0][0];
186 for (m = 0; m < wiener_win; m++) {
187 for (n = 0; n < wiener_win; n++) {
188 H_[m * wiener_win + n] = H_int_[n * 8 + m] + avg_square_sum -
189 (int64_t)avg * (sumY[k][l] + sumY[n][m]);
190 }
191 }
192 }
193 }
194 }
195
196 #if CONFIG_AV1_HIGHBITDEPTH
197 static inline void acc_stat_highbd_sse41(int64_t *dst, const uint16_t *dgd,
198 const __m128i *shuffle,
199 const __m128i *dgd_ijkl) {
200 // Load two overlapping 128-bit chunks from dgd
201 const __m128i s0l = xx_loadu_128(dgd);
202 const __m128i s0h = xx_loadu_128(dgd + 4);
203 // s0l = [7 6 5 4 3 2 1 0] as u16 values (dgd indices)
204 // s0h = [11 10 9 8 7 6 5 4] as u16 values (dgd indices)
205 // (Slightly strange order so we can apply the same shuffle to both halves)
206
207 // Shuffle the u16 values in each half (using an 8-bit shuffle mask)
208 const __m128i s1l = _mm_shuffle_epi8(s0l, *shuffle);
209 const __m128i s1h = _mm_shuffle_epi8(s0h, *shuffle);
210 // s1l = [4 3 3 2 2 1 1 0] as u16 values (dgd indices)
211 // s1h = [8 7 7 6 6 5 5 4] as u16 values (dgd indices)
212
213 // Multiply s1 by dgd_ijkl resulting in 8x u32 values
214 // Horizontally add pairs of u32 resulting in 4x u32
215 const __m128i dl = _mm_madd_epi16(*dgd_ijkl, s1l);
216 const __m128i dh = _mm_madd_epi16(*dgd_ijkl, s1h);
217 // dl = [d c b a] as u32 values
218 // dh = [h g f e] as u32 values
219
220 // Add these 8x u32 results on to dst in four parts
221 const __m128i dll = _mm_cvtepu32_epi64(dl);
222 const __m128i dlh = _mm_cvtepu32_epi64(_mm_srli_si128(dl, 8));
223 const __m128i dhl = _mm_cvtepu32_epi64(dh);
224 const __m128i dhh = _mm_cvtepu32_epi64(_mm_srli_si128(dh, 8));
225 // dll = [b a] as u64 values, etc.
226
227 const __m128i rll = _mm_add_epi64(xx_loadu_128(dst), dll);
228 xx_storeu_128(dst, rll);
229 const __m128i rlh = _mm_add_epi64(xx_loadu_128(dst + 2), dlh);
230 xx_storeu_128(dst + 2, rlh);
231 const __m128i rhl = _mm_add_epi64(xx_loadu_128(dst + 4), dhl);
232 xx_storeu_128(dst + 4, rhl);
233 const __m128i rhh = _mm_add_epi64(xx_loadu_128(dst + 6), dhh);
234 xx_storeu_128(dst + 6, rhh);
235 }
236
237 static inline void acc_stat_highbd_win7_one_line_sse4_1(
238 const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
239 int dgd_stride, const __m128i *shuffle, int32_t *sumX,
240 int32_t sumY[WIENER_WIN][WIENER_WIN], int64_t M_int[WIENER_WIN][WIENER_WIN],
241 int64_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
242 int j, k, l;
243 const int wiener_win = WIENER_WIN;
244 // Main loop handles two pixels at a time
245 // We can assume that h_start is even, since it will always be aligned to
246 // a tile edge + some number of restoration units, and both of those will
247 // be 64-pixel aligned.
248 // However, at the edge of the image, h_end may be odd, so we need to handle
249 // that case correctly.
250 assert(h_start % 2 == 0);
251 const int h_end_even = h_end & ~1;
252 const int has_odd_pixel = h_end & 1;
253 for (j = h_start; j < h_end_even; j += 2) {
254 const uint16_t X1 = src[j];
255 const uint16_t X2 = src[j + 1];
256 *sumX += X1 + X2;
257 const uint16_t *dgd_ij = dgd + j;
258 for (k = 0; k < wiener_win; k++) {
259 const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
260 for (l = 0; l < wiener_win; l++) {
261 int64_t *H_ = &H_int[(l * wiener_win + k)][0];
262 const uint16_t D1 = dgd_ijk[l];
263 const uint16_t D2 = dgd_ijk[l + 1];
264 sumY[k][l] += D1 + D2;
265 M_int[k][l] += D1 * X1 + D2 * X2;
266
267 // Load two u16 values from dgd as a single u32
268 // Then broadcast it to the 4x u32 slots of a 128-bit register
269 const __m128i dgd_ijkl = _mm_set1_epi32(loadu_int32(dgd_ijk + l));
270 // dgd_ijkl = [y x y x y x y x] as u16
271
272 acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
273 &dgd_ijkl);
274 acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
275 &dgd_ijkl);
276 acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
277 &dgd_ijkl);
278 acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
279 &dgd_ijkl);
280 acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
281 &dgd_ijkl);
282 acc_stat_highbd_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
283 &dgd_ijkl);
284 acc_stat_highbd_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
285 &dgd_ijkl);
286 }
287 }
288 }
289 // If the width is odd, add in the final pixel
290 if (has_odd_pixel) {
291 const uint16_t X1 = src[j];
292 *sumX += X1;
293 const uint16_t *dgd_ij = dgd + j;
294 for (k = 0; k < wiener_win; k++) {
295 const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
296 for (l = 0; l < wiener_win; l++) {
297 int64_t *H_ = &H_int[(l * wiener_win + k)][0];
298 const uint16_t D1 = dgd_ijk[l];
299 sumY[k][l] += D1;
300 M_int[k][l] += D1 * X1;
301
302 // The `acc_stat_highbd_sse41` function wants its input to have
303 // interleaved copies of two pixels, but we only have one. However, the
304 // pixels are (effectively) used as inputs to a multiply-accumulate. So
305 // if we set the extra pixel slot to 0, then it is effectively ignored.
306 const __m128i dgd_ijkl = _mm_set1_epi32((int)D1);
307
308 acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
309 &dgd_ijkl);
310 acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
311 &dgd_ijkl);
312 acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
313 &dgd_ijkl);
314 acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
315 &dgd_ijkl);
316 acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
317 &dgd_ijkl);
318 acc_stat_highbd_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
319 &dgd_ijkl);
320 acc_stat_highbd_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
321 &dgd_ijkl);
322 }
323 }
324 }
325 }
326
327 static inline void compute_stats_highbd_win7_opt_sse4_1(
328 const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
329 int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
330 int64_t *H, aom_bit_depth_t bit_depth) {
331 int i, j, k, l, m, n;
332 const int wiener_win = WIENER_WIN;
333 const int pixel_count = (h_end - h_start) * (v_end - v_start);
334 const int wiener_win2 = wiener_win * wiener_win;
335 const int wiener_halfwin = (wiener_win >> 1);
336 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
337 const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
338 const uint16_t avg =
339 find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
340
341 int64_t M_int[WIENER_WIN][WIENER_WIN] = { { 0 } };
342 int64_t H_int[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
343 int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
344 int32_t sumX = 0;
345 const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
346
347 // Load just half of the 256-bit shuffle control used for the AVX2 version
348 const __m128i shuffle = xx_loadu_128(g_shuffle_stats_highbd_data);
349 for (j = v_start; j < v_end; j += 64) {
350 const int vert_end = AOMMIN(64, v_end - j) + j;
351 for (i = j; i < vert_end; i++) {
352 acc_stat_highbd_win7_one_line_sse4_1(
353 dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
354 dgd_stride, &shuffle, &sumX, sumY, M_int, H_int);
355 }
356 }
357
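  // For 10- and 12-bit input the accumulated statistics are scaled down by 4
  // and 16 respectively before being written out, matching the C reference
  // (av1_compute_stats_highbd_c).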
358 uint8_t bit_depth_divider = 1;
359 if (bit_depth == AOM_BITS_12)
360 bit_depth_divider = 16;
361 else if (bit_depth == AOM_BITS_10)
362 bit_depth_divider = 4;
363
364 const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
365 for (k = 0; k < wiener_win; k++) {
366 for (l = 0; l < wiener_win; l++) {
367 const int32_t idx0 = l * wiener_win + k;
368 M[idx0] = (M_int[k][l] +
369 (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
370 bit_depth_divider;
371 int64_t *H_ = H + idx0 * wiener_win2;
372 int64_t *H_int_ = &H_int[idx0][0];
373 for (m = 0; m < wiener_win; m++) {
374 for (n = 0; n < wiener_win; n++) {
375 H_[m * wiener_win + n] =
376 (H_int_[n * 8 + m] +
377 (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
378 bit_depth_divider;
379 }
380 }
381 }
382 }
383 }
384
385 static inline void acc_stat_highbd_win5_one_line_sse4_1(
386 const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
387 int dgd_stride, const __m128i *shuffle, int32_t *sumX,
388 int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
389 int64_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
390 int64_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
391 int j, k, l;
392 const int wiener_win = WIENER_WIN_CHROMA;
393 // Main loop handles two pixels at a time
394 // We can assume that h_start is even, since it will always be aligned to
395 // a tile edge + some number of restoration units, and both of those will
396 // be 64-pixel aligned.
397 // However, at the edge of the image, h_end may be odd, so we need to handle
398 // that case correctly.
399 assert(h_start % 2 == 0);
400 const int h_end_even = h_end & ~1;
401 const int has_odd_pixel = h_end & 1;
402 for (j = h_start; j < h_end_even; j += 2) {
403 const uint16_t X1 = src[j];
404 const uint16_t X2 = src[j + 1];
405 *sumX += X1 + X2;
406 const uint16_t *dgd_ij = dgd + j;
407 for (k = 0; k < wiener_win; k++) {
408 const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
409 for (l = 0; l < wiener_win; l++) {
410 int64_t *H_ = &H_int[(l * wiener_win + k)][0];
411 const uint16_t D1 = dgd_ijk[l];
412 const uint16_t D2 = dgd_ijk[l + 1];
413 sumY[k][l] += D1 + D2;
414 M_int[k][l] += D1 * X1 + D2 * X2;
415
416 // Load two u16 values from dgd as a single u32
417 // then broadcast it to the 4x u32 slots of a 128-bit register
418 const __m128i dgd_ijkl = _mm_set1_epi32(loadu_int32(dgd_ijk + l));
419 // dgd_ijkl = [y x y x y x y x] as u16
420
421 acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
422 &dgd_ijkl);
423 acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
424 &dgd_ijkl);
425 acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
426 &dgd_ijkl);
427 acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
428 &dgd_ijkl);
429 acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
430 &dgd_ijkl);
431 }
432 }
433 }
434 // If the width is odd, add in the final pixel
435 if (has_odd_pixel) {
436 const uint16_t X1 = src[j];
437 *sumX += X1;
438 const uint16_t *dgd_ij = dgd + j;
439 for (k = 0; k < wiener_win; k++) {
440 const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
441 for (l = 0; l < wiener_win; l++) {
442 int64_t *H_ = &H_int[(l * wiener_win + k)][0];
443 const uint16_t D1 = dgd_ijk[l];
444 sumY[k][l] += D1;
445 M_int[k][l] += D1 * X1;
446
447 // The `acc_stat_highbd_sse41` function wants its input to have
448 // interleaved copies of two pixels, but we only have one. However, the
449 // pixels are (effectively) used as inputs to a multiply-accumulate. So
450 // if we set the extra pixel slot to 0, then it is effectively ignored.
451 const __m128i dgd_ijkl = _mm_set1_epi32((int)D1);
452
453 acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
454 &dgd_ijkl);
455 acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
456 &dgd_ijkl);
457 acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
458 &dgd_ijkl);
459 acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
460 &dgd_ijkl);
461 acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
462 &dgd_ijkl);
463 }
464 }
465 }
466 }
467
468 static inline void compute_stats_highbd_win5_opt_sse4_1(
469 const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
470 int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
471 int64_t *H, aom_bit_depth_t bit_depth) {
472 int i, j, k, l, m, n;
473 const int wiener_win = WIENER_WIN_CHROMA;
474 const int pixel_count = (h_end - h_start) * (v_end - v_start);
475 const int wiener_win2 = wiener_win * wiener_win;
476 const int wiener_halfwin = (wiener_win >> 1);
477 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
478 const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
479 const uint16_t avg =
480 find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
481
482 int64_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
483 int64_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
484 int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
485 int32_t sumX = 0;
486 const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
487
488 // Load just half of the 256-bit shuffle control used for the AVX2 version
489 const __m128i shuffle = xx_loadu_128(g_shuffle_stats_highbd_data);
490 for (j = v_start; j < v_end; j += 64) {
491 const int vert_end = AOMMIN(64, v_end - j) + j;
492 for (i = j; i < vert_end; i++) {
493 acc_stat_highbd_win5_one_line_sse4_1(
494 dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
495 dgd_stride, &shuffle, &sumX, sumY, M_int, H_int);
496 }
497 }
498
499 uint8_t bit_depth_divider = 1;
500 if (bit_depth == AOM_BITS_12)
501 bit_depth_divider = 16;
502 else if (bit_depth == AOM_BITS_10)
503 bit_depth_divider = 4;
504
505 const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
506 for (k = 0; k < wiener_win; k++) {
507 for (l = 0; l < wiener_win; l++) {
508 const int32_t idx0 = l * wiener_win + k;
509 M[idx0] = (M_int[k][l] +
510 (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
511 bit_depth_divider;
512 int64_t *H_ = H + idx0 * wiener_win2;
513 int64_t *H_int_ = &H_int[idx0][0];
514 for (m = 0; m < wiener_win; m++) {
515 for (n = 0; n < wiener_win; n++) {
516 H_[m * wiener_win + n] =
517 (H_int_[n * 8 + m] +
518 (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
519 bit_depth_divider;
520 }
521 }
522 }
523 }
524 }
525
526 void av1_compute_stats_highbd_sse4_1(int wiener_win, const uint8_t *dgd8,
527 const uint8_t *src8, int16_t *dgd_avg,
528 int16_t *src_avg, int h_start, int h_end,
529 int v_start, int v_end, int dgd_stride,
530 int src_stride, int64_t *M, int64_t *H,
531 aom_bit_depth_t bit_depth) {
532 if (wiener_win == WIENER_WIN) {
533 (void)dgd_avg;
534 (void)src_avg;
535 compute_stats_highbd_win7_opt_sse4_1(dgd8, src8, h_start, h_end, v_start,
536 v_end, dgd_stride, src_stride, M, H,
537 bit_depth);
538 } else if (wiener_win == WIENER_WIN_CHROMA) {
539 (void)dgd_avg;
540 (void)src_avg;
541 compute_stats_highbd_win5_opt_sse4_1(dgd8, src8, h_start, h_end, v_start,
542 v_end, dgd_stride, src_stride, M, H,
543 bit_depth);
544 } else {
545 av1_compute_stats_highbd_c(wiener_win, dgd8, src8, dgd_avg, src_avg,
546 h_start, h_end, v_start, v_end, dgd_stride,
547 src_stride, M, H, bit_depth);
548 }
549 }
550 #endif // CONFIG_AV1_HIGHBITDEPTH
551
552 static inline void acc_stat_win5_one_line_sse4_1(
553 const uint8_t *dgd, const uint8_t *src, int h_start, int h_end,
554 int dgd_stride, const __m128i *shuffle, int32_t *sumX,
555 int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
556 int32_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
557 int32_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
558 const int wiener_win = WIENER_WIN_CHROMA;
559 int j, k, l;
560 // Main loop handles two pixels at a time
561 // We can assume that h_start is even, since it will always be aligned to
562 // a tile edge + some number of restoration units, and both of those will
563 // be 64-pixel aligned.
564 // However, at the edge of the image, h_end may be odd, so we need to handle
565 // that case correctly.
566 assert(h_start % 2 == 0);
567 const int h_end_even = h_end & ~1;
568 const int has_odd_pixel = h_end & 1;
569 for (j = h_start; j < h_end_even; j += 2) {
570 const uint8_t *dgd_ij = dgd + j;
571 const uint8_t X1 = src[j];
572 const uint8_t X2 = src[j + 1];
573 *sumX += X1 + X2;
574 for (k = 0; k < wiener_win; k++) {
575 const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
576 for (l = 0; l < wiener_win; l++) {
577 int32_t *H_ = &H_int[(l * wiener_win + k)][0];
578 const uint8_t D1 = dgd_ijk[l];
579 const uint8_t D2 = dgd_ijk[l + 1];
580 sumY[k][l] += D1 + D2;
581 M_int[k][l] += D1 * X1 + D2 * X2;
582
583 const __m128i kl =
584 _mm_cvtepu8_epi16(_mm_set1_epi16(loadu_int16(dgd_ijk + l)));
585 acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
586 acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
587 acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
588 acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
589 acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
590 }
591 }
592 }
593 // If the width is odd, add in the final pixel
594 if (has_odd_pixel) {
595 const uint8_t *dgd_ij = dgd + j;
596 const uint8_t X1 = src[j];
597 *sumX += X1;
598 for (k = 0; k < wiener_win; k++) {
599 const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
600 for (l = 0; l < wiener_win; l++) {
601 int32_t *H_ = &H_int[(l * wiener_win + k)][0];
602 const uint8_t D1 = dgd_ijk[l];
603 sumY[k][l] += D1;
604 M_int[k][l] += D1 * X1;
605
606 // The `acc_stat_sse41` function wants its input to have interleaved
607 // copies of two pixels, but we only have one. However, the pixels
608 // are (effectively) used as inputs to a multiply-accumulate.
609 // So if we set the extra pixel slot to 0, then it is effectively
610 // ignored.
611 const __m128i kl = _mm_cvtepu8_epi16(_mm_set1_epi16((int16_t)D1));
612 acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
613 acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
614 acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
615 acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
616 acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
617 }
618 }
619 }
620 }
621
622 static inline void compute_stats_win5_opt_sse4_1(
623 const uint8_t *dgd, const uint8_t *src, int h_start, int h_end, int v_start,
624 int v_end, int dgd_stride, int src_stride, int64_t *M, int64_t *H,
625 int use_downsampled_wiener_stats) {
626 int i, j, k, l, m, n;
627 const int wiener_win = WIENER_WIN_CHROMA;
628 const int pixel_count = (h_end - h_start) * (v_end - v_start);
629 const int wiener_win2 = wiener_win * wiener_win;
630 const int wiener_halfwin = (wiener_win >> 1);
631 const uint8_t avg =
632 find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
633
634 int32_t M_int32[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
635 int32_t M_int32_row[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
636 int64_t M_int64[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
637 int32_t H_int32[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
638 int32_t H_int32_row[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
639 int64_t H_int64[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
640 int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
641 int32_t sumX = 0;
642 const uint8_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
643 int downsample_factor =
644 use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
645 int32_t sumX_row = 0;
646 int32_t sumY_row[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
647
648 const __m128i shuffle = xx_loadu_128(g_shuffle_stats_data);
649 for (j = v_start; j < v_end; j += 64) {
650 const int vert_end = AOMMIN(64, v_end - j) + j;
651 for (i = j; i < vert_end; i = i + downsample_factor) {
652 if (use_downsampled_wiener_stats &&
653 (vert_end - i < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
654 downsample_factor = vert_end - i;
655 }
656 sumX_row = 0;
657 memset(sumY_row, 0,
658 sizeof(int32_t) * WIENER_WIN_CHROMA * WIENER_WIN_CHROMA);
659 memset(M_int32_row, 0,
660 sizeof(int32_t) * WIENER_WIN_CHROMA * WIENER_WIN_CHROMA);
661 memset(H_int32_row, 0,
662 sizeof(int32_t) * WIENER_WIN2_CHROMA * (WIENER_WIN_CHROMA * 8));
663 acc_stat_win5_one_line_sse4_1(
664 dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
665 dgd_stride, &shuffle, &sumX_row, sumY_row, M_int32_row, H_int32_row);
666 sumX += sumX_row * downsample_factor;
667 // Accumulate sumY and the M matrix, scaled by the downsampling factor
668 for (k = 0; k < wiener_win; ++k) {
669 for (l = 0; l < wiener_win; ++l) {
670 sumY[k][l] += (sumY_row[k][l] * downsample_factor);
671 M_int32[k][l] += (M_int32_row[k][l] * downsample_factor);
672 }
673 }
674 // Accumulate the H matrix, scaled by the downsampling factor
675 for (k = 0; k < WIENER_WIN_CHROMA * WIENER_WIN_CHROMA; ++k) {
676 for (l = 0; l < WIENER_WIN_CHROMA * 8; ++l) {
677 H_int32[k][l] += (H_int32_row[k][l] * downsample_factor);
678 }
679 }
680 }
681 for (k = 0; k < wiener_win; ++k) {
682 for (l = 0; l < wiener_win; ++l) {
683 M_int64[k][l] += M_int32[k][l];
684 M_int32[k][l] = 0;
685 }
686 }
687 for (k = 0; k < WIENER_WIN_CHROMA * WIENER_WIN_CHROMA; ++k) {
688 for (l = 0; l < WIENER_WIN_CHROMA * 8; ++l) {
689 H_int64[k][l] += H_int32[k][l];
690 H_int32[k][l] = 0;
691 }
692 }
693 }
694
695 const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
696 for (k = 0; k < wiener_win; k++) {
697 for (l = 0; l < wiener_win; l++) {
698 const int32_t idx0 = l * wiener_win + k;
699 M[idx0] =
700 M_int64[k][l] + (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]));
701 int64_t *H_ = H + idx0 * wiener_win2;
702 int64_t *H_int_ = &H_int64[idx0][0];
703 for (m = 0; m < wiener_win; m++) {
704 for (n = 0; n < wiener_win; n++) {
705 H_[m * wiener_win + n] = H_int_[n * 8 + m] + avg_square_sum -
706 (int64_t)avg * (sumY[k][l] + sumY[n][m]);
707 }
708 }
709 }
710 }
711 }
712 void av1_compute_stats_sse4_1(int wiener_win, const uint8_t *dgd,
713 const uint8_t *src, int16_t *dgd_avg,
714 int16_t *src_avg, int h_start, int h_end,
715 int v_start, int v_end, int dgd_stride,
716 int src_stride, int64_t *M, int64_t *H,
717 int use_downsampled_wiener_stats) {
718 if (wiener_win == WIENER_WIN) {
719 compute_stats_win7_opt_sse4_1(dgd, src, h_start, h_end, v_start, v_end,
720 dgd_stride, src_stride, M, H,
721 use_downsampled_wiener_stats);
722 } else if (wiener_win == WIENER_WIN_CHROMA) {
723 compute_stats_win5_opt_sse4_1(dgd, src, h_start, h_end, v_start, v_end,
724 dgd_stride, src_stride, M, H,
725 use_downsampled_wiener_stats);
726 } else {
727 av1_compute_stats_c(wiener_win, dgd, src, dgd_avg, src_avg, h_start, h_end,
728 v_start, v_end, dgd_stride, src_stride, M, H,
729 use_downsampled_wiener_stats);
730 }
731 }
732
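// Packs the 16-bit values (a, b) into every 32-bit lane, so that
// _mm_madd_epi16 against interleaved (x, y) data computes a * x + b * y in
// each lane.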
733 static inline __m128i pair_set_epi16(int a, int b) {
734 return _mm_set1_epi32(
735 (int32_t)(((uint16_t)(a)) | (((uint32_t)(uint16_t)(b)) << 16)));
736 }
737
738 int64_t av1_lowbd_pixel_proj_error_sse4_1(
739 const uint8_t *src8, int width, int height, int src_stride,
740 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
741 int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
742 int i, j, k;
743 const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
744 const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
745 __m128i sum64 = _mm_setzero_si128();
746 const uint8_t *src = src8;
747 const uint8_t *dat = dat8;
748 int64_t err = 0;
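  // Three cases below: both SGR filter outputs are used, only one is used, or
  // neither is used (plain squared error between dat and src).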
749 if (params->r[0] > 0 && params->r[1] > 0) {
750 __m128i xq_coeff = pair_set_epi16(xq[0], xq[1]);
751 for (i = 0; i < height; ++i) {
752 __m128i sum32 = _mm_setzero_si128();
753 for (j = 0; j <= width - 8; j += 8) {
754 const __m128i d0 = _mm_cvtepu8_epi16(xx_loadl_64(dat + j));
755 const __m128i s0 = _mm_cvtepu8_epi16(xx_loadl_64(src + j));
756 const __m128i flt0_16b =
757 _mm_packs_epi32(xx_loadu_128(flt0 + j), xx_loadu_128(flt0 + j + 4));
758 const __m128i flt1_16b =
759 _mm_packs_epi32(xx_loadu_128(flt1 + j), xx_loadu_128(flt1 + j + 4));
760 const __m128i u0 = _mm_slli_epi16(d0, SGRPROJ_RST_BITS);
761 const __m128i flt0_0_sub_u = _mm_sub_epi16(flt0_16b, u0);
762 const __m128i flt1_0_sub_u = _mm_sub_epi16(flt1_16b, u0);
763 const __m128i v0 = _mm_madd_epi16(
764 xq_coeff, _mm_unpacklo_epi16(flt0_0_sub_u, flt1_0_sub_u));
765 const __m128i v1 = _mm_madd_epi16(
766 xq_coeff, _mm_unpackhi_epi16(flt0_0_sub_u, flt1_0_sub_u));
767 const __m128i vr0 = _mm_srai_epi32(_mm_add_epi32(v0, rounding), shift);
768 const __m128i vr1 = _mm_srai_epi32(_mm_add_epi32(v1, rounding), shift);
769 const __m128i e0 =
770 _mm_sub_epi16(_mm_add_epi16(_mm_packs_epi32(vr0, vr1), d0), s0);
771 const __m128i err0 = _mm_madd_epi16(e0, e0);
772 sum32 = _mm_add_epi32(sum32, err0);
773 }
774 for (k = j; k < width; ++k) {
775 const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
776 int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
777 const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
778 err += ((int64_t)e * e);
779 }
780 dat += dat_stride;
781 src += src_stride;
782 flt0 += flt0_stride;
783 flt1 += flt1_stride;
784 const __m128i sum64_0 = _mm_cvtepi32_epi64(sum32);
785 const __m128i sum64_1 = _mm_cvtepi32_epi64(_mm_srli_si128(sum32, 8));
786 sum64 = _mm_add_epi64(sum64, sum64_0);
787 sum64 = _mm_add_epi64(sum64, sum64_1);
788 }
789 } else if (params->r[0] > 0 || params->r[1] > 0) {
790 const int xq_active = (params->r[0] > 0) ? xq[0] : xq[1];
791 const __m128i xq_coeff =
792 pair_set_epi16(xq_active, -xq_active * (1 << SGRPROJ_RST_BITS));
793 const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
794 const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
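    // With xq_coeff = (xq_active, -xq_active << SGRPROJ_RST_BITS), the madd of
    // interleaved (flt, dat) pairs below evaluates xq_active * (flt - u) in
    // one step, where u = dat << SGRPROJ_RST_BITS.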
795 for (i = 0; i < height; ++i) {
796 __m128i sum32 = _mm_setzero_si128();
797 for (j = 0; j <= width - 8; j += 8) {
798 const __m128i d0 = _mm_cvtepu8_epi16(xx_loadl_64(dat + j));
799 const __m128i s0 = _mm_cvtepu8_epi16(xx_loadl_64(src + j));
800 const __m128i flt_16b =
801 _mm_packs_epi32(xx_loadu_128(flt + j), xx_loadu_128(flt + j + 4));
802 const __m128i v0 =
803 _mm_madd_epi16(xq_coeff, _mm_unpacklo_epi16(flt_16b, d0));
804 const __m128i v1 =
805 _mm_madd_epi16(xq_coeff, _mm_unpackhi_epi16(flt_16b, d0));
806 const __m128i vr0 = _mm_srai_epi32(_mm_add_epi32(v0, rounding), shift);
807 const __m128i vr1 = _mm_srai_epi32(_mm_add_epi32(v1, rounding), shift);
808 const __m128i e0 =
809 _mm_sub_epi16(_mm_add_epi16(_mm_packs_epi32(vr0, vr1), d0), s0);
810 const __m128i err0 = _mm_madd_epi16(e0, e0);
811 sum32 = _mm_add_epi32(sum32, err0);
812 }
813 for (k = j; k < width; ++k) {
814 const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
815 int32_t v = xq_active * (flt[k] - u);
816 const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
817 err += ((int64_t)e * e);
818 }
819 dat += dat_stride;
820 src += src_stride;
821 flt += flt_stride;
822 const __m128i sum64_0 = _mm_cvtepi32_epi64(sum32);
823 const __m128i sum64_1 = _mm_cvtepi32_epi64(_mm_srli_si128(sum32, 8));
824 sum64 = _mm_add_epi64(sum64, sum64_0);
825 sum64 = _mm_add_epi64(sum64, sum64_1);
826 }
827 } else {
828 __m128i sum32 = _mm_setzero_si128();
829 for (i = 0; i < height; ++i) {
830 for (j = 0; j <= width - 16; j += 16) {
831 const __m128i d = xx_loadu_128(dat + j);
832 const __m128i s = xx_loadu_128(src + j);
833 const __m128i d0 = _mm_cvtepu8_epi16(d);
834 const __m128i d1 = _mm_cvtepu8_epi16(_mm_srli_si128(d, 8));
835 const __m128i s0 = _mm_cvtepu8_epi16(s);
836 const __m128i s1 = _mm_cvtepu8_epi16(_mm_srli_si128(s, 8));
837 const __m128i diff0 = _mm_sub_epi16(d0, s0);
838 const __m128i diff1 = _mm_sub_epi16(d1, s1);
839 const __m128i err0 = _mm_madd_epi16(diff0, diff0);
840 const __m128i err1 = _mm_madd_epi16(diff1, diff1);
841 sum32 = _mm_add_epi32(sum32, err0);
842 sum32 = _mm_add_epi32(sum32, err1);
843 }
844 for (k = j; k < width; ++k) {
845 const int32_t e = (int32_t)(dat[k]) - src[k];
846 err += ((int64_t)e * e);
847 }
848 dat += dat_stride;
849 src += src_stride;
850 }
851 const __m128i sum64_0 = _mm_cvtepi32_epi64(sum32);
852 const __m128i sum64_1 = _mm_cvtepi32_epi64(_mm_srli_si128(sum32, 8));
853 sum64 = _mm_add_epi64(sum64_0, sum64_1);
854 }
855 int64_t sum[2];
856 xx_storeu_128(sum, sum64);
857 err += sum[0] + sum[1];
858 return err;
859 }
860
861 // When params->r[0] > 0 and params->r[1] > 0. In this case all elements of
862 // C and H need to be computed.
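// Writing u = dat << SGRPROJ_RST_BITS, f0 = flt0 - u, f1 = flt1 - u and
// s = (src << SGRPROJ_RST_BITS) - u, this computes the normal-equation terms
// H[i][j] = sum(fi * fj) / size and C[i] = sum(fi * s) / size.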
863 static inline void calc_proj_params_r0_r1_sse4_1(
864 const uint8_t *src8, int width, int height, int src_stride,
865 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
866 int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
867 const int size = width * height;
868 const uint8_t *src = src8;
869 const uint8_t *dat = dat8;
870 __m128i h00, h01, h11, c0, c1;
871 const __m128i zero = _mm_setzero_si128();
872 h01 = h11 = c0 = c1 = h00 = zero;
873
874 for (int i = 0; i < height; ++i) {
875 for (int j = 0; j < width; j += 4) {
876 const __m128i u_load = _mm_cvtepu8_epi32(
877 _mm_cvtsi32_si128(*((int *)(dat + i * dat_stride + j))));
878 const __m128i s_load = _mm_cvtepu8_epi32(
879 _mm_cvtsi32_si128(*((int *)(src + i * src_stride + j))));
880 __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
881 __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
882 __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
883 __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
884 s = _mm_sub_epi32(s, d);
885 f1 = _mm_sub_epi32(f1, d);
886 f2 = _mm_sub_epi32(f2, d);
887
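      // _mm_mul_epi32 only multiplies the even (0 and 2) 32-bit lanes,
      // producing two 64-bit products; shifting the inputs right by 32 bits
      // moves the odd lanes into the even positions so all four products are
      // accumulated.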
888 const __m128i h00_even = _mm_mul_epi32(f1, f1);
889 const __m128i h00_odd =
890 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
891 h00 = _mm_add_epi64(h00, h00_even);
892 h00 = _mm_add_epi64(h00, h00_odd);
893
894 const __m128i h01_even = _mm_mul_epi32(f1, f2);
895 const __m128i h01_odd =
896 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f2, 32));
897 h01 = _mm_add_epi64(h01, h01_even);
898 h01 = _mm_add_epi64(h01, h01_odd);
899
900 const __m128i h11_even = _mm_mul_epi32(f2, f2);
901 const __m128i h11_odd =
902 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
903 h11 = _mm_add_epi64(h11, h11_even);
904 h11 = _mm_add_epi64(h11, h11_odd);
905
906 const __m128i c0_even = _mm_mul_epi32(f1, s);
907 const __m128i c0_odd =
908 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
909 c0 = _mm_add_epi64(c0, c0_even);
910 c0 = _mm_add_epi64(c0, c0_odd);
911
912 const __m128i c1_even = _mm_mul_epi32(f2, s);
913 const __m128i c1_odd =
914 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
915 c1 = _mm_add_epi64(c1, c1_even);
916 c1 = _mm_add_epi64(c1, c1_odd);
917 }
918 }
919
920 __m128i c_low = _mm_unpacklo_epi64(c0, c1);
921 const __m128i c_high = _mm_unpackhi_epi64(c0, c1);
922 c_low = _mm_add_epi64(c_low, c_high);
923
924 __m128i h0x_low = _mm_unpacklo_epi64(h00, h01);
925 const __m128i h0x_high = _mm_unpackhi_epi64(h00, h01);
926 h0x_low = _mm_add_epi64(h0x_low, h0x_high);
927
928 // Using the symmetric properties of H, calculations of H[1][0] are not
929 // needed.
930 __m128i h1x_low = _mm_unpacklo_epi64(zero, h11);
931 const __m128i h1x_high = _mm_unpackhi_epi64(zero, h11);
932 h1x_low = _mm_add_epi64(h1x_low, h1x_high);
933
934 xx_storeu_128(C, c_low);
935 xx_storeu_128(H[0], h0x_low);
936 xx_storeu_128(H[1], h1x_low);
937
938 H[0][0] /= size;
939 H[0][1] /= size;
940 H[1][1] /= size;
941
942 // Since H is a symmetric matrix
943 H[1][0] = H[0][1];
944 C[0] /= size;
945 C[1] /= size;
946 }
947
948 // When only params->r[0] > 0. In this case only H[0][0] and C[0] are
949 // non-zero and need to be computed.
950 static inline void calc_proj_params_r0_sse4_1(const uint8_t *src8, int width,
951 int height, int src_stride,
952 const uint8_t *dat8,
953 int dat_stride, int32_t *flt0,
954 int flt0_stride, int64_t H[2][2],
955 int64_t C[2]) {
956 const int size = width * height;
957 const uint8_t *src = src8;
958 const uint8_t *dat = dat8;
959 __m128i h00, c0;
960 const __m128i zero = _mm_setzero_si128();
961 c0 = h00 = zero;
962
963 for (int i = 0; i < height; ++i) {
964 for (int j = 0; j < width; j += 4) {
965 const __m128i u_load = _mm_cvtepu8_epi32(
966 _mm_cvtsi32_si128(*((int *)(dat + i * dat_stride + j))));
967 const __m128i s_load = _mm_cvtepu8_epi32(
968 _mm_cvtsi32_si128(*((int *)(src + i * src_stride + j))));
969 __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
970 __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
971 __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
972 s = _mm_sub_epi32(s, d);
973 f1 = _mm_sub_epi32(f1, d);
974
975 const __m128i h00_even = _mm_mul_epi32(f1, f1);
976 const __m128i h00_odd =
977 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
978 h00 = _mm_add_epi64(h00, h00_even);
979 h00 = _mm_add_epi64(h00, h00_odd);
980
981 const __m128i c0_even = _mm_mul_epi32(f1, s);
982 const __m128i c0_odd =
983 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
984 c0 = _mm_add_epi64(c0, c0_even);
985 c0 = _mm_add_epi64(c0, c0_odd);
986 }
987 }
988 const __m128i h00_val = _mm_add_epi64(h00, _mm_srli_si128(h00, 8));
989
990 const __m128i c0_val = _mm_add_epi64(c0, _mm_srli_si128(c0, 8));
991
992 const __m128i c = _mm_unpacklo_epi64(c0_val, zero);
993 const __m128i h0x = _mm_unpacklo_epi64(h00_val, zero);
994
995 xx_storeu_128(C, c);
996 xx_storeu_128(H[0], h0x);
997
998 H[0][0] /= size;
999 C[0] /= size;
1000 }
1001
1002 // When only params->r[1] > 0. In this case only H[1][1] and C[1] are
1003 // non-zero and need to be computed.
1004 static inline void calc_proj_params_r1_sse4_1(const uint8_t *src8, int width,
1005 int height, int src_stride,
1006 const uint8_t *dat8,
1007 int dat_stride, int32_t *flt1,
1008 int flt1_stride, int64_t H[2][2],
1009 int64_t C[2]) {
1010 const int size = width * height;
1011 const uint8_t *src = src8;
1012 const uint8_t *dat = dat8;
1013 __m128i h11, c1;
1014 const __m128i zero = _mm_setzero_si128();
1015 c1 = h11 = zero;
1016
1017 for (int i = 0; i < height; ++i) {
1018 for (int j = 0; j < width; j += 4) {
1019 const __m128i u_load = _mm_cvtepu8_epi32(
1020 _mm_cvtsi32_si128(*((int *)(dat + i * dat_stride + j))));
1021 const __m128i s_load = _mm_cvtepu8_epi32(
1022 _mm_cvtsi32_si128(*((int *)(src + i * src_stride + j))));
1023 __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
1024 __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
1025 __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
1026 s = _mm_sub_epi32(s, d);
1027 f2 = _mm_sub_epi32(f2, d);
1028
1029 const __m128i h11_even = _mm_mul_epi32(f2, f2);
1030 const __m128i h11_odd =
1031 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
1032 h11 = _mm_add_epi64(h11, h11_even);
1033 h11 = _mm_add_epi64(h11, h11_odd);
1034
1035 const __m128i c1_even = _mm_mul_epi32(f2, s);
1036 const __m128i c1_odd =
1037 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
1038 c1 = _mm_add_epi64(c1, c1_even);
1039 c1 = _mm_add_epi64(c1, c1_odd);
1040 }
1041 }
1042
1043 const __m128i h11_val = _mm_add_epi64(h11, _mm_srli_si128(h11, 8));
1044
1045 const __m128i c1_val = _mm_add_epi64(c1, _mm_srli_si128(c1, 8));
1046
1047 const __m128i c = _mm_unpacklo_epi64(zero, c1_val);
1048 const __m128i h1x = _mm_unpacklo_epi64(zero, h11_val);
1049
1050 xx_storeu_128(C, c);
1051 xx_storeu_128(H[1], h1x);
1052
1053 H[1][1] /= size;
1054 C[1] /= size;
1055 }
1056
1057 // SSE4.1 variant of av1_calc_proj_params_c.
1058 void av1_calc_proj_params_sse4_1(const uint8_t *src8, int width, int height,
1059 int src_stride, const uint8_t *dat8,
1060 int dat_stride, int32_t *flt0, int flt0_stride,
1061 int32_t *flt1, int flt1_stride,
1062 int64_t H[2][2], int64_t C[2],
1063 const sgr_params_type *params) {
1064 if ((params->r[0] > 0) && (params->r[1] > 0)) {
1065 calc_proj_params_r0_r1_sse4_1(src8, width, height, src_stride, dat8,
1066 dat_stride, flt0, flt0_stride, flt1,
1067 flt1_stride, H, C);
1068 } else if (params->r[0] > 0) {
1069 calc_proj_params_r0_sse4_1(src8, width, height, src_stride, dat8,
1070 dat_stride, flt0, flt0_stride, H, C);
1071 } else if (params->r[1] > 0) {
1072 calc_proj_params_r1_sse4_1(src8, width, height, src_stride, dat8,
1073 dat_stride, flt1, flt1_stride, H, C);
1074 }
1075 }
1076
1077 #if CONFIG_AV1_HIGHBITDEPTH
1078 static inline void calc_proj_params_r0_r1_high_bd_sse4_1(
1079 const uint8_t *src8, int width, int height, int src_stride,
1080 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
1081 int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
1082 const int size = width * height;
1083 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
1084 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
1085 __m128i h00, h01, h11, c0, c1;
1086 const __m128i zero = _mm_setzero_si128();
1087 h01 = h11 = c0 = c1 = h00 = zero;
1088
1089 for (int i = 0; i < height; ++i) {
1090 for (int j = 0; j < width; j += 4) {
1091 const __m128i u_load = _mm_cvtepu16_epi32(
1092 _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
1093 const __m128i s_load = _mm_cvtepu16_epi32(
1094 _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
1095 __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
1096 __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
1097 __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
1098 __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
1099 s = _mm_sub_epi32(s, d);
1100 f1 = _mm_sub_epi32(f1, d);
1101 f2 = _mm_sub_epi32(f2, d);
1102
1103 const __m128i h00_even = _mm_mul_epi32(f1, f1);
1104 const __m128i h00_odd =
1105 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
1106 h00 = _mm_add_epi64(h00, h00_even);
1107 h00 = _mm_add_epi64(h00, h00_odd);
1108
1109 const __m128i h01_even = _mm_mul_epi32(f1, f2);
1110 const __m128i h01_odd =
1111 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f2, 32));
1112 h01 = _mm_add_epi64(h01, h01_even);
1113 h01 = _mm_add_epi64(h01, h01_odd);
1114
1115 const __m128i h11_even = _mm_mul_epi32(f2, f2);
1116 const __m128i h11_odd =
1117 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
1118 h11 = _mm_add_epi64(h11, h11_even);
1119 h11 = _mm_add_epi64(h11, h11_odd);
1120
1121 const __m128i c0_even = _mm_mul_epi32(f1, s);
1122 const __m128i c0_odd =
1123 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
1124 c0 = _mm_add_epi64(c0, c0_even);
1125 c0 = _mm_add_epi64(c0, c0_odd);
1126
1127 const __m128i c1_even = _mm_mul_epi32(f2, s);
1128 const __m128i c1_odd =
1129 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
1130 c1 = _mm_add_epi64(c1, c1_even);
1131 c1 = _mm_add_epi64(c1, c1_odd);
1132 }
1133 }
1134
1135 __m128i c_low = _mm_unpacklo_epi64(c0, c1);
1136 const __m128i c_high = _mm_unpackhi_epi64(c0, c1);
1137 c_low = _mm_add_epi64(c_low, c_high);
1138
1139 __m128i h0x_low = _mm_unpacklo_epi64(h00, h01);
1140 const __m128i h0x_high = _mm_unpackhi_epi64(h00, h01);
1141 h0x_low = _mm_add_epi64(h0x_low, h0x_high);
1142
1143 // Using the symmetric properties of H, calculations of H[1][0] are not
1144 // needed.
1145 __m128i h1x_low = _mm_unpacklo_epi64(zero, h11);
1146 const __m128i h1x_high = _mm_unpackhi_epi64(zero, h11);
1147 h1x_low = _mm_add_epi64(h1x_low, h1x_high);
1148
1149 xx_storeu_128(C, c_low);
1150 xx_storeu_128(H[0], h0x_low);
1151 xx_storeu_128(H[1], h1x_low);
1152
1153 H[0][0] /= size;
1154 H[0][1] /= size;
1155 H[1][1] /= size;
1156
1157 // Since H is a symmetric matrix
1158 H[1][0] = H[0][1];
1159 C[0] /= size;
1160 C[1] /= size;
1161 }
1162
1163 // When only params->r[0] > 0. In this case only H[0][0] and C[0] are
1164 // non-zero and need to be computed.
1165 static inline void calc_proj_params_r0_high_bd_sse4_1(
1166 const uint8_t *src8, int width, int height, int src_stride,
1167 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
1168 int64_t H[2][2], int64_t C[2]) {
1169 const int size = width * height;
1170 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
1171 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
1172 __m128i h00, c0;
1173 const __m128i zero = _mm_setzero_si128();
1174 c0 = h00 = zero;
1175
1176 for (int i = 0; i < height; ++i) {
1177 for (int j = 0; j < width; j += 4) {
1178 const __m128i u_load = _mm_cvtepu16_epi32(
1179 _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
1180 const __m128i s_load = _mm_cvtepu16_epi32(
1181 _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
1182 __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
1183 __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
1184 __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
1185 s = _mm_sub_epi32(s, d);
1186 f1 = _mm_sub_epi32(f1, d);
1187
1188 const __m128i h00_even = _mm_mul_epi32(f1, f1);
1189 const __m128i h00_odd =
1190 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
1191 h00 = _mm_add_epi64(h00, h00_even);
1192 h00 = _mm_add_epi64(h00, h00_odd);
1193
1194 const __m128i c0_even = _mm_mul_epi32(f1, s);
1195 const __m128i c0_odd =
1196 _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
1197 c0 = _mm_add_epi64(c0, c0_even);
1198 c0 = _mm_add_epi64(c0, c0_odd);
1199 }
1200 }
1201 const __m128i h00_val = _mm_add_epi64(h00, _mm_srli_si128(h00, 8));
1202
1203 const __m128i c0_val = _mm_add_epi64(c0, _mm_srli_si128(c0, 8));
1204
1205 const __m128i c = _mm_unpacklo_epi64(c0_val, zero);
1206 const __m128i h0x = _mm_unpacklo_epi64(h00_val, zero);
1207
1208 xx_storeu_128(C, c);
1209 xx_storeu_128(H[0], h0x);
1210
1211 H[0][0] /= size;
1212 C[0] /= size;
1213 }
1214
1215 // When only params->r[1] > 0. In this case only H[1][1] and C[1] are
1216 // non-zero and need to be computed.
1217 static inline void calc_proj_params_r1_high_bd_sse4_1(
1218 const uint8_t *src8, int width, int height, int src_stride,
1219 const uint8_t *dat8, int dat_stride, int32_t *flt1, int flt1_stride,
1220 int64_t H[2][2], int64_t C[2]) {
1221 const int size = width * height;
1222 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
1223 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
1224 __m128i h11, c1;
1225 const __m128i zero = _mm_setzero_si128();
1226 c1 = h11 = zero;
1227
1228 for (int i = 0; i < height; ++i) {
1229 for (int j = 0; j < width; j += 4) {
1230 const __m128i u_load = _mm_cvtepu16_epi32(
1231 _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
1232 const __m128i s_load = _mm_cvtepu16_epi32(
1233 _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
1234 __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
1235 __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
1236 __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
1237 s = _mm_sub_epi32(s, d);
1238 f2 = _mm_sub_epi32(f2, d);
1239
1240 const __m128i h11_even = _mm_mul_epi32(f2, f2);
1241 const __m128i h11_odd =
1242 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
1243 h11 = _mm_add_epi64(h11, h11_even);
1244 h11 = _mm_add_epi64(h11, h11_odd);
1245
1246 const __m128i c1_even = _mm_mul_epi32(f2, s);
1247 const __m128i c1_odd =
1248 _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
1249 c1 = _mm_add_epi64(c1, c1_even);
1250 c1 = _mm_add_epi64(c1, c1_odd);
1251 }
1252 }
1253
1254 const __m128i h11_val = _mm_add_epi64(h11, _mm_srli_si128(h11, 8));
1255
1256 const __m128i c1_val = _mm_add_epi64(c1, _mm_srli_si128(c1, 8));
1257
1258 const __m128i c = _mm_unpacklo_epi64(zero, c1_val);
1259 const __m128i h1x = _mm_unpacklo_epi64(zero, h11_val);
1260
1261 xx_storeu_128(C, c);
1262 xx_storeu_128(H[1], h1x);
1263
1264 H[1][1] /= size;
1265 C[1] /= size;
1266 }
1267
1268 // SSE4.1 variant of av1_calc_proj_params_high_bd_c.
1269 void av1_calc_proj_params_high_bd_sse4_1(const uint8_t *src8, int width,
1270 int height, int src_stride,
1271 const uint8_t *dat8, int dat_stride,
1272 int32_t *flt0, int flt0_stride,
1273 int32_t *flt1, int flt1_stride,
1274 int64_t H[2][2], int64_t C[2],
1275 const sgr_params_type *params) {
1276 if ((params->r[0] > 0) && (params->r[1] > 0)) {
1277 calc_proj_params_r0_r1_high_bd_sse4_1(src8, width, height, src_stride, dat8,
1278 dat_stride, flt0, flt0_stride, flt1,
1279 flt1_stride, H, C);
1280 } else if (params->r[0] > 0) {
1281 calc_proj_params_r0_high_bd_sse4_1(src8, width, height, src_stride, dat8,
1282 dat_stride, flt0, flt0_stride, H, C);
1283 } else if (params->r[1] > 0) {
1284 calc_proj_params_r1_high_bd_sse4_1(src8, width, height, src_stride, dat8,
1285 dat_stride, flt1, flt1_stride, H, C);
1286 }
1287 }
1288
1289 int64_t av1_highbd_pixel_proj_error_sse4_1(
1290 const uint8_t *src8, int width, int height, int src_stride,
1291 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
1292 int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
1293 int i, j, k;
1294 const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
1295 const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
1296 __m128i sum64 = _mm_setzero_si128();
1297 const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
1298 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
1299 int64_t err = 0;
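  // The projected correction below carries SGRPROJ_RST_BITS of pixel
  // upscaling plus SGRPROJ_PRJ_BITS of coefficient precision, hence the
  // rounding right-shift by 'shift'. sum64 keeps the running squared-error
  // total in two 64-bit lanes and is folded into 'err' at the end.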
  if (params->r[0] > 0 && params->r[1] > 0) {  // Both filters are enabled
    const __m128i xq0 = _mm_set1_epi32(xq[0]);
    const __m128i xq1 = _mm_set1_epi32(xq[1]);

    for (i = 0; i < height; ++i) {
      __m128i sum32 = _mm_setzero_si128();
      for (j = 0; j <= width - 8; j += 8) {
        // Load 8x pixels from source image
        const __m128i s0 = xx_loadu_128(src + j);
        // s0 = [7 6 5 4 3 2 1 0] as i16 (indices of src[])

        // Load 8x pixels from corrupted image
        const __m128i d0 = xx_loadu_128(dat + j);
        // d0 = [7 6 5 4 3 2 1 0] as i16 (indices of dat[])

        // Shift each pixel value up by SGRPROJ_RST_BITS
        const __m128i u0 = _mm_slli_epi16(d0, SGRPROJ_RST_BITS);

        // Split u0 into two halves and pad each from u16 to i32
        const __m128i u0l = _mm_cvtepu16_epi32(u0);
        const __m128i u0h = _mm_cvtepu16_epi32(_mm_srli_si128(u0, 8));
        // u0h = [7 6 5 4] as i32, u0l = [3 2 1 0] as i32, all dat[] indices

        // Load 8 pixels from first and second filtered images
        const __m128i flt0l = xx_loadu_128(flt0 + j);
        const __m128i flt0h = xx_loadu_128(flt0 + j + 4);
        const __m128i flt1l = xx_loadu_128(flt1 + j);
        const __m128i flt1h = xx_loadu_128(flt1 + j + 4);
        // flt0 = [7 6 5 4] [3 2 1 0] as i32 (indices of flt0+j)
        // flt1 = [7 6 5 4] [3 2 1 0] as i32 (indices of flt1+j)

        // Subtract shifted corrupt image from each filtered image
        // This gives our two basis vectors for the projection
        const __m128i flt0l_subu = _mm_sub_epi32(flt0l, u0l);
        const __m128i flt0h_subu = _mm_sub_epi32(flt0h, u0h);
        const __m128i flt1l_subu = _mm_sub_epi32(flt1l, u0l);
        const __m128i flt1h_subu = _mm_sub_epi32(flt1h, u0h);
        // flt?h_subu = [ f[7]-u[7] f[6]-u[6] f[5]-u[5] f[4]-u[4] ] as i32
        // flt?l_subu = [ f[3]-u[3] f[2]-u[2] f[1]-u[1] f[0]-u[0] ] as i32

        // Multiply each basis vector by the corresponding coefficient
        const __m128i v0l = _mm_mullo_epi32(flt0l_subu, xq0);
        const __m128i v0h = _mm_mullo_epi32(flt0h_subu, xq0);
        const __m128i v1l = _mm_mullo_epi32(flt1l_subu, xq1);
        const __m128i v1h = _mm_mullo_epi32(flt1h_subu, xq1);

        // Add together the contribution from each scaled basis vector
        const __m128i vl = _mm_add_epi32(v0l, v1l);
        const __m128i vh = _mm_add_epi32(v0h, v1h);

        // Right-shift v with appropriate rounding
        const __m128i vrl = _mm_srai_epi32(_mm_add_epi32(vl, rounding), shift);
        const __m128i vrh = _mm_srai_epi32(_mm_add_epi32(vh, rounding), shift);

        // Saturate each i32 value to i16 and combine lower and upper halves
        const __m128i vr = _mm_packs_epi32(vrl, vrh);

        // Add twin-subspace-sgr-filter to corrupt image then subtract source
        const __m128i e0 = _mm_sub_epi16(_mm_add_epi16(vr, d0), s0);

        // Calculate squared error and add adjacent values
        const __m128i err0 = _mm_madd_epi16(e0, e0);

        sum32 = _mm_add_epi32(sum32, err0);
      }

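      // Widen the 32-bit partial sums for this row to 64 bits and fold them
      // into the running total, so the 32-bit accumulator never has to span
      // more than one row.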
      const __m128i sum32l = _mm_cvtepu32_epi64(sum32);
      sum64 = _mm_add_epi64(sum64, sum32l);
      const __m128i sum32h = _mm_cvtepu32_epi64(_mm_srli_si128(sum32, 8));
      sum64 = _mm_add_epi64(sum64, sum32h);

      // Process remaining pixels in this row (modulo 8)
      for (k = j; k < width; ++k) {
        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
        int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
        err += ((int64_t)e * e);
      }
      dat += dat_stride;
      src += src_stride;
      flt0 += flt0_stride;
      flt1 += flt1_stride;
    }
  } else if (params->r[0] > 0 || params->r[1] > 0) {  // Only one filter enabled
    const int32_t xq_on = (params->r[0] > 0) ? xq[0] : xq[1];
    const __m128i xq_active = _mm_set1_epi32(xq_on);
    const __m128i xq_inactive =
        _mm_set1_epi32(-xq_on * (1 << SGRPROJ_RST_BITS));
    const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
    const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
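    // xq_on * (flt[k] - (dat[k] << SGRPROJ_RST_BITS)) is evaluated as
    // flt[k] * xq_active + dat[k] * xq_inactive, which folds the
    // SGRPROJ_RST_BITS upscaling of the corrupted pixels into the multiply.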
    for (i = 0; i < height; ++i) {
      __m128i sum32 = _mm_setzero_si128();
      for (j = 0; j <= width - 8; j += 8) {
        // Load 8x pixels from source image
        const __m128i s0 = xx_loadu_128(src + j);
        // s0 = [7 6 5 4 3 2 1 0] as u16 (indices of src[])

        // Load 8x pixels from corrupted image and pad each u16 to i32
        const __m128i d0 = xx_loadu_128(dat + j);
        const __m128i d0h = _mm_cvtepu16_epi32(_mm_srli_si128(d0, 8));
        const __m128i d0l = _mm_cvtepu16_epi32(d0);
        // d0h, d0l = [7 6 5 4], [3 2 1 0] as u32 (indices of dat[])

        // Load 8 pixels from the filtered image
        const __m128i flth = xx_loadu_128(flt + j + 4);
        const __m128i fltl = xx_loadu_128(flt + j);
        // flth, fltl = [7 6 5 4], [3 2 1 0] as i32 (indices of flt+j)

        const __m128i flth_xq = _mm_mullo_epi32(flth, xq_active);
        const __m128i fltl_xq = _mm_mullo_epi32(fltl, xq_active);
        const __m128i d0h_xq = _mm_mullo_epi32(d0h, xq_inactive);
        const __m128i d0l_xq = _mm_mullo_epi32(d0l, xq_inactive);

        const __m128i vh = _mm_add_epi32(flth_xq, d0h_xq);
        const __m128i vl = _mm_add_epi32(fltl_xq, d0l_xq);
        // vh = [ xq(f[7]-u[7]) xq(f[6]-u[6]) xq(f[5]-u[5]) xq(f[4]-u[4]) ]
        // vl = [ xq(f[3]-u[3]) xq(f[2]-u[2]) xq(f[1]-u[1]) xq(f[0]-u[0]) ]
        // where u[k] = d[k] << SGRPROJ_RST_BITS and xq = xq_on

        // Shift this down with appropriate rounding
        const __m128i vrh = _mm_srai_epi32(_mm_add_epi32(vh, rounding), shift);
        const __m128i vrl = _mm_srai_epi32(_mm_add_epi32(vl, rounding), shift);

        // Saturate vrl and vrh from i32 to i16 then pack together
        const __m128i vr = _mm_packs_epi32(vrl, vrh);

        // Add the sgr-filter correction to the corrupt image then subtract
        // the source to get the error
        const __m128i e0 = _mm_sub_epi16(_mm_add_epi16(vr, d0), s0);

        // Calculate squared error and add adjacent values
        const __m128i err0 = _mm_madd_epi16(e0, e0);

        sum32 = _mm_add_epi32(sum32, err0);
      }

      const __m128i sum32l = _mm_cvtepu32_epi64(sum32);
      sum64 = _mm_add_epi64(sum64, sum32l);
      const __m128i sum32h = _mm_cvtepu32_epi64(_mm_srli_si128(sum32, 8));
      sum64 = _mm_add_epi64(sum64, sum32h);

      // Process remaining pixels in this row (modulo 8)
      for (k = j; k < width; ++k) {
        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
        int32_t v = xq_on * (flt[k] - u);
        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
        err += ((int64_t)e * e);
      }
      dat += dat_stride;
      src += src_stride;
      flt += flt_stride;
    }
  } else {  // Neither filter is enabled
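    // With no filter output to project, the error is simply dat[k] - src[k],
    // so 16 pixels can be processed per iteration entirely at 16-bit
    // precision.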
    for (i = 0; i < height; ++i) {
      __m128i sum32 = _mm_setzero_si128();
      for (j = 0; j <= width - 16; j += 16) {
        // Load 2x8 u16 from source image
        const __m128i s0 = xx_loadu_128(src + j);
        const __m128i s1 = xx_loadu_128(src + j + 8);
        // Load 2x8 u16 from corrupted image
        const __m128i d0 = xx_loadu_128(dat + j);
        const __m128i d1 = xx_loadu_128(dat + j + 8);

        // Subtract corrupted image from source image
        const __m128i diff0 = _mm_sub_epi16(d0, s0);
        const __m128i diff1 = _mm_sub_epi16(d1, s1);

        // Square error and add adjacent values
        const __m128i err0 = _mm_madd_epi16(diff0, diff0);
        const __m128i err1 = _mm_madd_epi16(diff1, diff1);

        sum32 = _mm_add_epi32(sum32, err0);
        sum32 = _mm_add_epi32(sum32, err1);
      }

      const __m128i sum32l = _mm_cvtepu32_epi64(sum32);
      sum64 = _mm_add_epi64(sum64, sum32l);
      const __m128i sum32h = _mm_cvtepu32_epi64(_mm_srli_si128(sum32, 8));
      sum64 = _mm_add_epi64(sum64, sum32h);

      // Process remaining pixels in this row (modulo 16)
      for (k = j; k < width; ++k) {
        const int32_t e = (int32_t)(dat[k]) - src[k];
        err += ((int64_t)e * e);
      }
      dat += dat_stride;
      src += src_stride;
    }
  }

  // Fold the two 64-bit lanes of sum64 into err
  int64_t sum[2];
  xx_storeu_128(sum, sum64);
  err += sum[0] + sum[1];
  return err;
}
#endif  // CONFIG_AV1_HIGHBITDEPTH