/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <smmintrin.h>

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/restoration.h"
#include "aom_dsp/x86/synonyms.h"

// Load 4 bytes from the possibly-misaligned pointer p, extend each byte to
// 32-bit precision and return them in an SSE register.
static __m128i xx_load_extend_8_32(const void *p) {
  return _mm_cvtepu8_epi32(xx_loadl_32(p));
}

// Load 4 halfwords from the possibly-misaligned pointer p, extend each
// halfword to 32-bit precision and return them in an SSE register.
static __m128i xx_load_extend_16_32(const void *p) {
  return _mm_cvtepu16_epi32(xx_loadl_64(p));
}

// Compute the scan of an SSE register holding 4 32-bit integers. If the
// register holds x0..x3 then the scan will hold x0, x0+x1, x0+x1+x2,
// x0+x1+x2+x3
static __m128i scan_32(__m128i x) {
  const __m128i x01 = _mm_add_epi32(x, _mm_slli_si128(x, 4));
  return _mm_add_epi32(x01, _mm_slli_si128(x01, 8));
}
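
// For example, if x holds {1, 2, 3, 4} (lane 0 first), the first
// shift-and-add gives x01 = {1, 3, 5, 7} and the second gives the prefix
// sums {1, 3, 6, 10}.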

// Compute two integral images from src. B sums elements; A sums their
// squares. The images are offset by one pixel, so they have width + 1
// columns and height + 1 rows, and their first row and column are zero.
//
// A+1 and B+1 should be aligned to 16 bytes. buf_stride should be a multiple
// of 4.
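//
// Each output sample obeys the standard integral-image recurrence
//   B[i+1][j+1] = src[i][j] + B[i][j+1] + B[i+1][j] - B[i][j].
// The loop below computes four outputs at a time: scan_32 accumulates the
// running sum of the four new samples, `above` adds the row above, and
// ldiff adds the scalar B[i+1][c] - B[i][c] for the column c just left of
// the current group of four.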
static void integral_images(const uint8_t *src, int src_stride, int width,
                            int height, int32_t *A, int32_t *B,
                            int buf_stride) {
  // Write out the zero top row
  memset(A, 0, sizeof(*A) * (width + 1));
  memset(B, 0, sizeof(*B) * (width + 1));

  const __m128i zero = _mm_setzero_si128();
  for (int i = 0; i < height; ++i) {
    // Zero the left column.
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;

    // ldiff is the difference H - D where H is the output sample immediately
    // to the left and D is the output sample above it. These are scalars,
    // replicated across the four lanes.
    __m128i ldiff1 = zero, ldiff2 = zero;
    for (int j = 0; j < width; j += 4) {
      const int ABj = 1 + j;

      const __m128i above1 = xx_load_128(B + ABj + i * buf_stride);
      const __m128i above2 = xx_load_128(A + ABj + i * buf_stride);

      const __m128i x1 = xx_load_extend_8_32(src + j + i * src_stride);
      const __m128i x2 = _mm_madd_epi16(x1, x1);

      const __m128i sc1 = scan_32(x1);
      const __m128i sc2 = scan_32(x2);

      const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1);
      const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2);

      xx_store_128(B + ABj + (i + 1) * buf_stride, row1);
      xx_store_128(A + ABj + (i + 1) * buf_stride, row2);

      // Calculate the new H - D.
      ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff);
      ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff);
    }
  }
}

// Compute two integral images from src. B sums elements; A sums their
// squares. The images are laid out exactly as in integral_images above.
//
// A+1 and B+1 should be aligned to 16 bytes. buf_stride should be a multiple
// of 4.
static void integral_images_highbd(const uint16_t *src, int src_stride,
                                   int width, int height, int32_t *A,
                                   int32_t *B, int buf_stride) {
  // Write out the zero top row
  memset(A, 0, sizeof(*A) * (width + 1));
  memset(B, 0, sizeof(*B) * (width + 1));

  const __m128i zero = _mm_setzero_si128();
  for (int i = 0; i < height; ++i) {
    // Zero the left column.
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;

    // ldiff is the difference H - D where H is the output sample immediately
    // to the left and D is the output sample above it. These are scalars,
    // replicated across the four lanes.
    __m128i ldiff1 = zero, ldiff2 = zero;
    for (int j = 0; j < width; j += 4) {
      const int ABj = 1 + j;

      const __m128i above1 = xx_load_128(B + ABj + i * buf_stride);
      const __m128i above2 = xx_load_128(A + ABj + i * buf_stride);

      const __m128i x1 = xx_load_extend_16_32(src + j + i * src_stride);
      const __m128i x2 = _mm_madd_epi16(x1, x1);

      const __m128i sc1 = scan_32(x1);
      const __m128i sc2 = scan_32(x2);

      const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1);
      const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2);

      xx_store_128(B + ABj + (i + 1) * buf_stride, row1);
      xx_store_128(A + ABj + (i + 1) * buf_stride, row2);

      // Calculate the new H - D.
      ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff);
      ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff);
    }
  }
}

// Compute 4 values of boxsum from the given integral image. ii should point
// at the middle of the box (for the first value). r is the box radius.
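//
// For an integral image ii with a zero top row and left column, the sum of
// the (2r+1)x(2r+1) box centred on a pixel is
//   ii[br] - ii[bl] - ii[tr] + ii[tl],
// where tl/tr/bl/br are the four corners loaded below; the code evaluates
// this as (br - bl) - (tr - tl).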
static inline __m128i boxsum_from_ii(const int32_t *ii, int stride, int r) {
  const __m128i tl = xx_loadu_128(ii - (r + 1) - (r + 1) * stride);
  const __m128i tr = xx_loadu_128(ii + (r + 0) - (r + 1) * stride);
  const __m128i bl = xx_loadu_128(ii - (r + 1) + r * stride);
  const __m128i br = xx_loadu_128(ii + (r + 0) + r * stride);
  const __m128i u = _mm_sub_epi32(tr, tl);
  const __m128i v = _mm_sub_epi32(br, bl);
  return _mm_sub_epi32(v, u);
}

static __m128i round_for_shift(unsigned shift) {
  return _mm_set1_epi32((1 << shift) >> 1);
}

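// Compute p = n * sum(x^2) - (sum(x))^2 over the n-pixel box, i.e. n^2 times
// the box variance. For bit depths above 8 the sums are first shifted down
// (with rounding) so that the squared term stays within 32 bits and can be
// formed with a 16-bit madd.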
static __m128i compute_p(__m128i sum1, __m128i sum2, int bit_depth, int n) {
  __m128i an, bb;
  if (bit_depth > 8) {
    const __m128i rounding_a = round_for_shift(2 * (bit_depth - 8));
    const __m128i rounding_b = round_for_shift(bit_depth - 8);
    const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8));
    const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8);
    const __m128i a = _mm_srl_epi32(_mm_add_epi32(sum2, rounding_a), shift_a);
    const __m128i b = _mm_srl_epi32(_mm_add_epi32(sum1, rounding_b), shift_b);
    // b < 2^14, so we can use a 16-bit madd rather than a 32-bit
    // mullo to square it
    bb = _mm_madd_epi16(b, b);
    an = _mm_max_epi32(_mm_mullo_epi32(a, _mm_set1_epi32(n)), bb);
  } else {
    bb = _mm_madd_epi16(sum1, sum1);
    an = _mm_mullo_epi32(sum2, _mm_set1_epi32(n));
  }
  return _mm_sub_epi32(an, bb);
}

// Assumes that C, D are integral images for the original buffer which has been
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
// on the sides. A, B, C, D point at logical position (0, 0).
static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D,
                    int width, int height, int buf_stride, int bit_depth,
                    int sgr_params_idx, int radius_idx) {
  const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int n = (2 * r + 1) * (2 * r + 1);
  const __m128i s = _mm_set1_epi32(params->s[radius_idx]);
  // av1_one_by_x[n - 1] is 2^12/n, so it easily fits in an int16
  const __m128i one_over_n = _mm_set1_epi32(av1_one_by_x[n - 1]);

  const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
  const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);

  // Set up masks
  const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0);
  __m128i mask[4];
  for (int idx = 0; idx < 4; idx++) {
    const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx));
    mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
  }

  for (int i = -1; i < height + 1; ++i) {
    for (int j = -1; j < width + 1; j += 4) {
      const int32_t *Cij = C + i * buf_stride + j;
      const int32_t *Dij = D + i * buf_stride + j;

      __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r);
      __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r);

      // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain
      // some uninitialised data in their upper words. We use a mask to
      // ensure that these bits are set to 0.
      int idx = AOMMIN(4, width + 1 - j);
      assert(idx >= 1);

      if (idx < 4) {
        sum1 = _mm_and_si128(mask[idx], sum1);
        sum2 = _mm_and_si128(mask[idx], sum2);
      }

      const __m128i p = compute_p(sum1, sum2, bit_depth, n);

      const __m128i z = _mm_min_epi32(
          _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z),
                         SGRPROJ_MTABLE_BITS),
          _mm_set1_epi32(255));

      // 'Gather' type instructions are not available pre-AVX2, so synthesize a
      // gather using scalar loads.
      const __m128i a_res =
          _mm_set_epi32(av1_x_by_xplus1[_mm_extract_epi32(z, 3)],
                        av1_x_by_xplus1[_mm_extract_epi32(z, 2)],
                        av1_x_by_xplus1[_mm_extract_epi32(z, 1)],
                        av1_x_by_xplus1[_mm_extract_epi32(z, 0)]);

      xx_storeu_128(A + i * buf_stride + j, a_res);

      const __m128i a_complement =
          _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);

      // sum1 might have lanes greater than 2^15, so we can't use madd to do
      // multiplication involving sum1. However, a_complement and one_over_n
      // are both less than 256, so we can multiply them first.
      const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n);
      const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1);
      const __m128i b_res =
          _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS);

      xx_storeu_128(B + i * buf_stride + j, b_res);
    }
  }
}

// Calculate 4 values of the "cross sum" starting at buf. This is a 3x3 filter
// where the outer four corners have weight 3 and all other pixels have weight
// 4.
//
// Pixels are indexed like this:
// xtl  xt   xtr
// xl    x   xr
// xbl  xb   xbr
//
// buf points to x
//
// fours = xl + xt + xr + xb + x
// threes = xtl + xtr + xbr + xbl
// cross_sum = 4 * fours + 3 * threes
//           = 4 * (fours + threes) - threes
//           = (fours + threes) << 2 - threes
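//
// The weights sum to 5 * 4 + 4 * 3 = 32 = 2^5, which is why final_filter
// below normalises with nb = 5.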
static inline __m128i cross_sum(const int32_t *buf, int stride) {
  const __m128i xtl = xx_loadu_128(buf - 1 - stride);
  const __m128i xt = xx_loadu_128(buf - stride);
  const __m128i xtr = xx_loadu_128(buf + 1 - stride);
  const __m128i xl = xx_loadu_128(buf - 1);
  const __m128i x = xx_loadu_128(buf);
  const __m128i xr = xx_loadu_128(buf + 1);
  const __m128i xbl = xx_loadu_128(buf - 1 + stride);
  const __m128i xb = xx_loadu_128(buf + stride);
  const __m128i xbr = xx_loadu_128(buf + 1 + stride);

  const __m128i fours = _mm_add_epi32(
      xl, _mm_add_epi32(xt, _mm_add_epi32(xr, _mm_add_epi32(xb, x))));
  const __m128i threes =
      _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl)));

  return _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(fours, threes), 2), threes);
}

// The final filter for self-guided restoration. Computes a weighted average
// across A, B with "cross sums" (see cross_sum implementation above).
static void final_filter(int32_t *dst, int dst_stride, const int32_t *A,
                         const int32_t *B, int buf_stride, const void *dgd8,
                         int dgd_stride, int width, int height, int highbd) {
  const int nb = 5;
  const __m128i rounding =
      round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);
  const uint8_t *dgd_real =
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;

  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; j += 4) {
      const __m128i a = cross_sum(A + i * buf_stride + j, buf_stride);
      const __m128i b = cross_sum(B + i * buf_stride + j, buf_stride);
      const __m128i raw =
          xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
      const __m128i src =
          highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);

      __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
      __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding),
                                 SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS);

      xx_storeu_128(dst + i * dst_stride + j, w);
    }
  }
}

// Assumes that C, D are integral images for the original buffer which has been
// extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels
// on the sides. A, B, C, D point at logical position (0, 0).
static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C,
                         const int32_t *D, int width, int height,
                         int buf_stride, int bit_depth, int sgr_params_idx,
                         int radius_idx) {
  const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx];
  const int r = params->r[radius_idx];
  const int n = (2 * r + 1) * (2 * r + 1);
  const __m128i s = _mm_set1_epi32(params->s[radius_idx]);
  // av1_one_by_x[n - 1] is 2^12/n, so it easily fits in an int16
  const __m128i one_over_n = _mm_set1_epi32(av1_one_by_x[n - 1]);

  const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS);
  const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS);

  // Set up masks
  const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0);
  __m128i mask[4];
  for (int idx = 0; idx < 4; idx++) {
    const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx));
    mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift));
  }

  for (int i = -1; i < height + 1; i += 2) {
    for (int j = -1; j < width + 1; j += 4) {
      const int32_t *Cij = C + i * buf_stride + j;
      const int32_t *Dij = D + i * buf_stride + j;

      __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r);
      __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r);

      // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain
      // some uninitialised data in their upper words. We use a mask to
      // ensure that these bits are set to 0.
      int idx = AOMMIN(4, width + 1 - j);
      assert(idx >= 1);

      if (idx < 4) {
        sum1 = _mm_and_si128(mask[idx], sum1);
        sum2 = _mm_and_si128(mask[idx], sum2);
      }

      const __m128i p = compute_p(sum1, sum2, bit_depth, n);

      const __m128i z = _mm_min_epi32(
          _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z),
                         SGRPROJ_MTABLE_BITS),
          _mm_set1_epi32(255));

      // 'Gather' type instructions are not available pre-AVX2, so synthesize a
      // gather using scalar loads.
      const __m128i a_res =
          _mm_set_epi32(av1_x_by_xplus1[_mm_extract_epi32(z, 3)],
                        av1_x_by_xplus1[_mm_extract_epi32(z, 2)],
                        av1_x_by_xplus1[_mm_extract_epi32(z, 1)],
                        av1_x_by_xplus1[_mm_extract_epi32(z, 0)]);

      xx_storeu_128(A + i * buf_stride + j, a_res);

      const __m128i a_complement =
          _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res);

      // sum1 might have lanes greater than 2^15, so we can't use madd to do
      // multiplication involving sum1. However, a_complement and one_over_n
      // are both less than 256, so we can multiply them first.
      const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n);
      const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1);
      const __m128i b_res =
          _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS);

      xx_storeu_128(B + i * buf_stride + j, b_res);
    }
  }
}

// Calculate 4 values of the "cross sum" starting at buf.
//
// Pixels are indexed like this:
// xtl  xt   xtr
//  -   buf   -
// xbl  xb   xbr
//
// Pixels are weighted like this:
//  5    6    5
//  0    0    0
//  5    6    5
//
// fives = xtl + xtr + xbl + xbr
// sixes = xt + xb
// cross_sum = 6 * sixes + 5 * fives
//           = 5 * (fives + sixes) + sixes
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
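//
// The weights sum to 4 * 5 + 2 * 6 = 32 = 2^5, which is why the even rows in
// final_filter_fast below are normalised with nb0 = 5.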
static inline __m128i cross_sum_fast_even_row(const int32_t *buf, int stride) {
  const __m128i xtl = xx_loadu_128(buf - 1 - stride);
  const __m128i xt = xx_loadu_128(buf - stride);
  const __m128i xtr = xx_loadu_128(buf + 1 - stride);
  const __m128i xbl = xx_loadu_128(buf - 1 + stride);
  const __m128i xb = xx_loadu_128(buf + stride);
  const __m128i xbr = xx_loadu_128(buf + 1 + stride);

  const __m128i fives =
      _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl)));
  const __m128i sixes = _mm_add_epi32(xt, xb);
  const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes);

  return _mm_add_epi32(
      _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),
      sixes);
}

// Calculate 4 values of the "cross sum" starting at buf.
//
// Pixels are indexed like this:
// xl    x   xr
//
// Pixels are weighted like this:
//  5    6    5
//
// buf points to x
//
// fives = xl + xr
// sixes = x
// cross_sum = 5 * fives + 6 * sixes
//           = 4 * (fives + sixes) + (fives + sixes) + sixes
//           = (fives + sixes) << 2 + (fives + sixes) + sixes
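//
// The weights sum to 2 * 5 + 6 = 16 = 2^4, which is why the odd rows in
// final_filter_fast below are normalised with nb1 = 4.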
static inline __m128i cross_sum_fast_odd_row(const int32_t *buf) {
  const __m128i xl = xx_loadu_128(buf - 1);
  const __m128i x = xx_loadu_128(buf);
  const __m128i xr = xx_loadu_128(buf + 1);

  const __m128i fives = _mm_add_epi32(xl, xr);
  const __m128i sixes = x;

  const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes);

  return _mm_add_epi32(
      _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes),
      sixes);
}

// The final filter for the self-guided restoration. Computes a
// weighted average across A, B with "cross sums" (see cross_sum_...
// implementations above).
static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A,
                              const int32_t *B, int buf_stride,
                              const void *dgd8, int dgd_stride, int width,
                              int height, int highbd) {
  const int nb0 = 5;
  const int nb1 = 4;

  const __m128i rounding0 =
      round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);
  const __m128i rounding1 =
      round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);

  const uint8_t *dgd_real =
      highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8;

  for (int i = 0; i < height; ++i) {
    if (!(i & 1)) {  // even row
      for (int j = 0; j < width; j += 4) {
        const __m128i a =
            cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride);
        const __m128i b =
            cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride);
        const __m128i raw =
            xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
        const __m128i src =
            highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);

        __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
        __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding0),
                                   SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS);

        xx_storeu_128(dst + i * dst_stride + j, w);
      }
    } else {  // odd row
      for (int j = 0; j < width; j += 4) {
        const __m128i a = cross_sum_fast_odd_row(A + i * buf_stride + j);
        const __m128i b = cross_sum_fast_odd_row(B + i * buf_stride + j);
        const __m128i raw =
            xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd));
        const __m128i src =
            highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw);

        __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b);
        __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding1),
                                   SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS);

        xx_storeu_128(dst + i * dst_stride + j, w);
      }
    }
  }
}

int av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width,
                                      int height, int dgd_stride, int32_t *flt0,
                                      int32_t *flt1, int flt_stride,
                                      int sgr_params_idx, int bit_depth,
                                      int highbd) {
  int32_t *buf = (int32_t *)aom_memalign(
      16, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS);
  if (!buf) return -1;
  memset(buf, 0, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS);

  const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ;
  const int height_ext = height + 2 * SGRPROJ_BORDER_VERT;

  // Adjusting the stride of A and B here appears to avoid bad cache effects,
  // leading to a significant speed improvement.
  // We also align the stride to a multiple of 16 bytes for efficiency.
  int buf_stride = ((width_ext + 3) & ~3) + 16;
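  // ((width_ext + 3) & ~3) rounds width_ext up to a multiple of 4 int32
  // lanes (16 bytes); the extra 16 lanes offset consecutive rows from one
  // another, which is presumably the cache effect mentioned above.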

  // The "tl" pointers point at the top-left of the initialised data for the
  // array. Adding 3 here ensures that column 1 is 16-byte aligned.
  int32_t *Atl = buf + 0 * RESTORATION_PROC_UNIT_PELS + 3;
  int32_t *Btl = buf + 1 * RESTORATION_PROC_UNIT_PELS + 3;
  int32_t *Ctl = buf + 2 * RESTORATION_PROC_UNIT_PELS + 3;
  int32_t *Dtl = buf + 3 * RESTORATION_PROC_UNIT_PELS + 3;

  // The "0" pointers are (-SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note
  // there's a zero row and column in C, D (the integral images), so we move
  // down and right one for them; A, B get the same offset so that all four
  // arrays share coordinates.
  const int buf_diag_border =
      SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT;

  int32_t *A0 = Atl + 1 + buf_stride;
  int32_t *B0 = Btl + 1 + buf_stride;
  int32_t *C0 = Ctl + 1 + buf_stride;
  int32_t *D0 = Dtl + 1 + buf_stride;

  // Finally, A, B, C, D point at position (0, 0).
  int32_t *A = A0 + buf_diag_border;
  int32_t *B = B0 + buf_diag_border;
  int32_t *C = C0 + buf_diag_border;
  int32_t *D = D0 + buf_diag_border;

  const int dgd_diag_border =
      SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT;
  const uint8_t *dgd0 = dgd8 - dgd_diag_border;

  // Generate integral images from the input. C will contain sums of squares; D
  // will contain just sums
  if (highbd)
    integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext,
                           height_ext, Ctl, Dtl, buf_stride);
  else
    integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl,
                    buf_stride);

  const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx];
  // Write to flt0 and flt1
  // If params->r == 0 we skip the corresponding filter. We only allow one of
  // the radii to be 0, as having both equal to 0 would be equivalent to
  // skipping SGR entirely.
  assert(!(params->r[0] == 0 && params->r[1] == 0));
  assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));
  assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ));

  if (params->r[0] > 0) {
    calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth,
                 sgr_params_idx, 0);
    final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride,
                      width, height, highbd);
  }

  if (params->r[1] > 0) {
    calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx,
            1);
    final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width,
                 height, highbd);
  }
  aom_free(buf);
  return 0;
}

int av1_apply_selfguided_restoration_sse4_1(const uint8_t *dat8, int width,
                                            int height, int stride, int eps,
                                            const int *xqd, uint8_t *dst8,
                                            int dst_stride, int32_t *tmpbuf,
                                            int bit_depth, int highbd) {
  int32_t *flt0 = tmpbuf;
  int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX;
  assert(width * height <= RESTORATION_UNITPELS_MAX);
  const int ret = av1_selfguided_restoration_sse4_1(
      dat8, width, height, stride, flt0, flt1, width, eps, bit_depth, highbd);
  if (ret != 0) return ret;
  const sgr_params_type *const params = &av1_sgr_params[eps];
  int xq[2];
  av1_decode_xq(xqd, xq, params);

  __m128i xq0 = _mm_set1_epi32(xq[0]);
  __m128i xq1 = _mm_set1_epi32(xq[1]);

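  // Each output sample is computed as
  //   v = (u << SGRPROJ_PRJ_BITS) + xq[0] * (flt0 - u) + xq[1] * (flt1 - u),
  // where u is the source sample shifted up by SGRPROJ_RST_BITS and each xq
  // term is only applied when the corresponding radius is nonzero; v is then
  // rounded back down by (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS) bits.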
  for (int i = 0; i < height; ++i) {
    // Calculate output in batches of 8 pixels
    for (int j = 0; j < width; j += 8) {
      const int k = i * width + j;
      const int m = i * dst_stride + j;

      const uint8_t *dat8ij = dat8 + i * stride + j;
      __m128i src;
      if (highbd) {
        src = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij));
      } else {
        src = _mm_cvtepu8_epi16(xx_loadl_64(dat8ij));
      }

      const __m128i u = _mm_slli_epi16(src, SGRPROJ_RST_BITS);
      const __m128i u_0 = _mm_cvtepu16_epi32(u);
      const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(u, 8));

      __m128i v_0 = _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS);
      __m128i v_1 = _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS);

      if (params->r[0] > 0) {
        const __m128i f1_0 = _mm_sub_epi32(xx_loadu_128(&flt0[k]), u_0);
        v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq0, f1_0));

        const __m128i f1_1 = _mm_sub_epi32(xx_loadu_128(&flt0[k + 4]), u_1);
        v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq0, f1_1));
      }

      if (params->r[1] > 0) {
        const __m128i f2_0 = _mm_sub_epi32(xx_loadu_128(&flt1[k]), u_0);
        v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq1, f2_0));

        const __m128i f2_1 = _mm_sub_epi32(xx_loadu_128(&flt1[k + 4]), u_1);
        v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq1, f2_1));
      }

      const __m128i rounding =
          round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
      const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding),
                                         SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);
      const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding),
                                         SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS);

      if (highbd) {
        // Pack into 16 bits and clamp to [0, 2^bit_depth)
        const __m128i tmp = _mm_packus_epi32(w_0, w_1);
        const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1);
        const __m128i res = _mm_min_epi16(tmp, max);
        xx_storeu_128(CONVERT_TO_SHORTPTR(dst8 + m), res);
      } else {
        // Pack into 8 bits and clamp to [0, 256)
        const __m128i tmp = _mm_packs_epi32(w_0, w_1);
        const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */);
        xx_storel_64(dst8 + m, res);
      }
    }
  }
  return 0;
}