/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <tmmintrin.h>

#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"

#include "av1/common/x86/cfl_simd.h"

// Load 32-bit integer from memory into the first element of dst.
static inline __m128i _mm_loadh_epi32(__m128i const *mem_addr) {
  return _mm_cvtsi32_si128(*((int *)mem_addr));
}

// Store 32-bit integer from the first element of a into memory.
static inline void _mm_storeh_epi32(__m128i const *mem_addr, __m128i a) {
  *((int *)mem_addr) = _mm_cvtsi128_si32(a);
}

/**
 * Adds 4 pixels (in a 2x2 grid) and multiplies them by 2, resulting in a more
 * precise version of 4:2:0 box-filter pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static inline void cfl_luma_subsampling_420_lbd_ssse3(const uint8_t *input,
                                                      int input_stride,
                                                      uint16_t *pred_buf_q3,
                                                      int width, int height) {
  const __m128i twos = _mm_set1_epi8(2);
  __m128i *pred_buf_m128i = (__m128i *)pred_buf_q3;
  const __m128i *end = pred_buf_m128i + (height >> 1) * CFL_BUF_LINE_I128;
  const int luma_stride = input_stride << 1;
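  // _mm_maddubs_epi16(x, twos) multiplies each unsigned byte by 2 and sums
  // adjacent pairs, so adding the top and bottom rows gives
  // 2 * (sum of the 2x2 block) = (2x2 average) << 3, the Q3 value.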
  do {
    if (width == 4) {
      __m128i top = _mm_loadh_epi32((__m128i *)input);
      top = _mm_maddubs_epi16(top, twos);
      __m128i bot = _mm_loadh_epi32((__m128i *)(input + input_stride));
      bot = _mm_maddubs_epi16(bot, twos);
      const __m128i sum = _mm_add_epi16(top, bot);
      _mm_storeh_epi32(pred_buf_m128i, sum);
    } else if (width == 8) {
      __m128i top = _mm_loadl_epi64((__m128i *)input);
      top = _mm_maddubs_epi16(top, twos);
      __m128i bot = _mm_loadl_epi64((__m128i *)(input + input_stride));
      bot = _mm_maddubs_epi16(bot, twos);
      const __m128i sum = _mm_add_epi16(top, bot);
      _mm_storel_epi64(pred_buf_m128i, sum);
    } else {
      __m128i top = _mm_loadu_si128((__m128i *)input);
      top = _mm_maddubs_epi16(top, twos);
      __m128i bot = _mm_loadu_si128((__m128i *)(input + input_stride));
      bot = _mm_maddubs_epi16(bot, twos);
      const __m128i sum = _mm_add_epi16(top, bot);
      _mm_storeu_si128(pred_buf_m128i, sum);
      if (width == 32) {
        __m128i top_1 = _mm_loadu_si128(((__m128i *)input) + 1);
        __m128i bot_1 =
            _mm_loadu_si128(((__m128i *)(input + input_stride)) + 1);
        top_1 = _mm_maddubs_epi16(top_1, twos);
        bot_1 = _mm_maddubs_epi16(bot_1, twos);
        __m128i sum_1 = _mm_add_epi16(top_1, bot_1);
        _mm_storeu_si128(pred_buf_m128i + 1, sum_1);
      }
    }
    input += luma_stride;
    pred_buf_m128i += CFL_BUF_LINE_I128;
  } while (pred_buf_m128i < end);
}

/**
 * Adds 2 pixels (in a 2x1 grid) and multiplies them by 4, resulting in a more
 * precise version of 4:2:2 box-filter pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static inline void cfl_luma_subsampling_422_lbd_ssse3(const uint8_t *input,
                                                      int input_stride,
                                                      uint16_t *pred_buf_q3,
                                                      int width, int height) {
  const __m128i fours = _mm_set1_epi8(4);
  __m128i *pred_buf_m128i = (__m128i *)pred_buf_q3;
  const __m128i *end = pred_buf_m128i + height * CFL_BUF_LINE_I128;
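  // _mm_maddubs_epi16(x, fours) multiplies each unsigned byte by 4 and sums
  // adjacent pairs, producing (2x1 average) << 3, i.e. the Q3 value.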
  do {
    if (width == 4) {
      __m128i top = _mm_loadh_epi32((__m128i *)input);
      top = _mm_maddubs_epi16(top, fours);
      _mm_storeh_epi32(pred_buf_m128i, top);
    } else if (width == 8) {
      __m128i top = _mm_loadl_epi64((__m128i *)input);
      top = _mm_maddubs_epi16(top, fours);
      _mm_storel_epi64(pred_buf_m128i, top);
    } else {
      __m128i top = _mm_loadu_si128((__m128i *)input);
      top = _mm_maddubs_epi16(top, fours);
      _mm_storeu_si128(pred_buf_m128i, top);
      if (width == 32) {
        __m128i top_1 = _mm_loadu_si128(((__m128i *)input) + 1);
        top_1 = _mm_maddubs_epi16(top_1, fours);
        _mm_storeu_si128(pred_buf_m128i + 1, top_1);
      }
    }
    input += input_stride;
    pred_buf_m128i += CFL_BUF_LINE_I128;
  } while (pred_buf_m128i < end);
}

/**
 * Multiplies the pixels by 8 (scaling in Q3).
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static inline void cfl_luma_subsampling_444_lbd_ssse3(const uint8_t *input,
                                                      int input_stride,
                                                      uint16_t *pred_buf_q3,
                                                      int width, int height) {
  const __m128i zeros = _mm_setzero_si128();
  const int luma_stride = input_stride;
  __m128i *pred_buf_m128i = (__m128i *)pred_buf_q3;
  const __m128i *end = pred_buf_m128i + height * CFL_BUF_LINE_I128;
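  // Each byte is zero-extended to 16 bits and shifted left by 3 (multiplied
  // by 8) to produce the Q3 value.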
  do {
    if (width == 4) {
      __m128i row = _mm_loadh_epi32((__m128i *)input);
      row = _mm_unpacklo_epi8(row, zeros);
      _mm_storel_epi64(pred_buf_m128i, _mm_slli_epi16(row, 3));
    } else if (width == 8) {
      __m128i row = _mm_loadl_epi64((__m128i *)input);
      row = _mm_unpacklo_epi8(row, zeros);
      _mm_storeu_si128(pred_buf_m128i, _mm_slli_epi16(row, 3));
    } else {
      __m128i row = _mm_loadu_si128((__m128i *)input);
      const __m128i row_lo = _mm_unpacklo_epi8(row, zeros);
      const __m128i row_hi = _mm_unpackhi_epi8(row, zeros);
      _mm_storeu_si128(pred_buf_m128i, _mm_slli_epi16(row_lo, 3));
      _mm_storeu_si128(pred_buf_m128i + 1, _mm_slli_epi16(row_hi, 3));
      if (width == 32) {
        __m128i row_1 = _mm_loadu_si128(((__m128i *)input) + 1);
        const __m128i row_1_lo = _mm_unpacklo_epi8(row_1, zeros);
        const __m128i row_1_hi = _mm_unpackhi_epi8(row_1, zeros);
        _mm_storeu_si128(pred_buf_m128i + 2, _mm_slli_epi16(row_1_lo, 3));
        _mm_storeu_si128(pred_buf_m128i + 3, _mm_slli_epi16(row_1_hi, 3));
      }
    }
    input += luma_stride;
    pred_buf_m128i += CFL_BUF_LINE_I128;
  } while (pred_buf_m128i < end);
}

#if CONFIG_AV1_HIGHBITDEPTH
/**
 * Adds 4 pixels (in a 2x2 grid) and multiplies them by 2, resulting in a more
 * precise version of 4:2:0 box-filter pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static inline void cfl_luma_subsampling_420_hbd_ssse3(const uint16_t *input,
                                                      int input_stride,
                                                      uint16_t *pred_buf_q3,
                                                      int width, int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
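  // _mm_hadd_epi16 sums adjacent horizontal pairs of the (top + bottom) row
  // sums, and doubling the result gives 2 * (sum of the 2x2 block), i.e. the
  // 2x2 average in Q3.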
  do {
    if (width == 4) {
      const __m128i top = _mm_loadl_epi64((__m128i *)input);
      const __m128i bot = _mm_loadl_epi64((__m128i *)(input + input_stride));
      __m128i sum = _mm_add_epi16(top, bot);
      sum = _mm_hadd_epi16(sum, sum);
      *((int *)pred_buf_q3) = _mm_cvtsi128_si32(_mm_add_epi16(sum, sum));
    } else {
      const __m128i top = _mm_loadu_si128((__m128i *)input);
      const __m128i bot = _mm_loadu_si128((__m128i *)(input + input_stride));
      __m128i sum = _mm_add_epi16(top, bot);
      if (width == 8) {
        sum = _mm_hadd_epi16(sum, sum);
        _mm_storel_epi64((__m128i *)pred_buf_q3, _mm_add_epi16(sum, sum));
      } else {
        const __m128i top_1 = _mm_loadu_si128(((__m128i *)input) + 1);
        const __m128i bot_1 =
            _mm_loadu_si128(((__m128i *)(input + input_stride)) + 1);
        sum = _mm_hadd_epi16(sum, _mm_add_epi16(top_1, bot_1));
        _mm_storeu_si128((__m128i *)pred_buf_q3, _mm_add_epi16(sum, sum));
        if (width == 32) {
          const __m128i top_2 = _mm_loadu_si128(((__m128i *)input) + 2);
          const __m128i bot_2 =
              _mm_loadu_si128(((__m128i *)(input + input_stride)) + 2);
          const __m128i top_3 = _mm_loadu_si128(((__m128i *)input) + 3);
          const __m128i bot_3 =
              _mm_loadu_si128(((__m128i *)(input + input_stride)) + 3);
          const __m128i sum_2 = _mm_add_epi16(top_2, bot_2);
          const __m128i sum_3 = _mm_add_epi16(top_3, bot_3);
          __m128i next_sum = _mm_hadd_epi16(sum_2, sum_3);
          _mm_storeu_si128(((__m128i *)pred_buf_q3) + 1,
                           _mm_add_epi16(next_sum, next_sum));
        }
      }
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

/**
 * Adds 2 pixels (in a 2x1 grid) and multiplies them by 4, resulting in a more
 * precise version of 4:2:2 box-filter pixel subsampling in Q3.
 *
 * The CfL prediction buffer is always of size CFL_BUF_SQUARE. However, the
 * active area is specified using width and height.
 *
 * Note: We don't need to worry about going over the active area, as long as we
 * stay inside the CfL prediction buffer.
 */
static inline void cfl_luma_subsampling_422_hbd_ssse3(const uint16_t *input,
                                                      int input_stride,
                                                      uint16_t *pred_buf_q3,
                                                      int width, int height) {
  __m128i *pred_buf_m128i = (__m128i *)pred_buf_q3;
  const __m128i *end = pred_buf_m128i + height * CFL_BUF_LINE_I128;
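  // _mm_hadd_epi16 sums adjacent horizontal pairs and the shift left by 2
  // multiplies by 4, producing (2x1 average) << 3, i.e. the Q3 value.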
  do {
    if (width == 4) {
      const __m128i top = _mm_loadl_epi64((__m128i *)input);
      const __m128i sum = _mm_slli_epi16(_mm_hadd_epi16(top, top), 2);
      _mm_storeh_epi32(pred_buf_m128i, sum);
    } else {
      const __m128i top = _mm_loadu_si128((__m128i *)input);
      if (width == 8) {
        const __m128i sum = _mm_slli_epi16(_mm_hadd_epi16(top, top), 2);
        _mm_storel_epi64(pred_buf_m128i, sum);
      } else {
        const __m128i top_1 = _mm_loadu_si128(((__m128i *)input) + 1);
        const __m128i sum = _mm_slli_epi16(_mm_hadd_epi16(top, top_1), 2);
        _mm_storeu_si128(pred_buf_m128i, sum);
        if (width == 32) {
          const __m128i top_2 = _mm_loadu_si128(((__m128i *)input) + 2);
          const __m128i top_3 = _mm_loadu_si128(((__m128i *)input) + 3);
          const __m128i sum_1 = _mm_slli_epi16(_mm_hadd_epi16(top_2, top_3), 2);
          _mm_storeu_si128(pred_buf_m128i + 1, sum_1);
        }
      }
    }
    pred_buf_m128i += CFL_BUF_LINE_I128;
    input += input_stride;
  } while (pred_buf_m128i < end);
}

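/**
 * Multiplies the pixels by 8 (scaling in Q3). High-bit-depth counterpart of
 * the 4:4:4 subsampler above; the input is already 16-bit, so no unpacking is
 * needed.
 */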
static inline void cfl_luma_subsampling_444_hbd_ssse3(const uint16_t *input,
                                                      int input_stride,
                                                      uint16_t *pred_buf_q3,
                                                      int width, int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const __m128i row = _mm_slli_epi16(_mm_loadl_epi64((__m128i *)input), 3);
      _mm_storel_epi64((__m128i *)pred_buf_q3, row);
    } else {
      const __m128i row = _mm_slli_epi16(_mm_loadu_si128((__m128i *)input), 3);
      _mm_storeu_si128((__m128i *)pred_buf_q3, row);
      if (width >= 16) {
        __m128i row_1 = _mm_loadu_si128(((__m128i *)input) + 1);
        row_1 = _mm_slli_epi16(row_1, 3);
        _mm_storeu_si128(((__m128i *)pred_buf_q3) + 1, row_1);
        if (width == 32) {
          __m128i row_2 = _mm_loadu_si128(((__m128i *)input) + 2);
          row_2 = _mm_slli_epi16(row_2, 3);
          _mm_storeu_si128(((__m128i *)pred_buf_q3) + 2, row_2);
          __m128i row_3 = _mm_loadu_si128(((__m128i *)input) + 3);
          row_3 = _mm_slli_epi16(row_3, 3);
          _mm_storeu_si128(((__m128i *)pred_buf_q3) + 3, row_3);
        }
      }
    }
    input += input_stride;
    pred_buf_q3 += CFL_BUF_LINE;
  } while (pred_buf_q3 < end);
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

CFL_GET_SUBSAMPLE_FUNCTION(ssse3)

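// Computes alpha_q3 * ac_q3 + dc_q0 without clipping. alpha_q12 holds
// |alpha_q3| << 9, so _mm_mulhrs_epi16 produces
// round(|ac_q3| * |alpha_q3| / 64), scaling the Q3 * Q3 product back to Q0;
// the sign of the product is restored with _mm_sign_epi16 before dc_q0 is
// added.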
static inline __m128i predict_unclipped(const __m128i *input, __m128i alpha_q12,
                                        __m128i alpha_sign, __m128i dc_q0) {
  __m128i ac_q3 = _mm_loadu_si128(input);
  __m128i ac_sign = _mm_sign_epi16(alpha_sign, ac_q3);
  __m128i scaled_luma_q0 = _mm_mulhrs_epi16(_mm_abs_epi16(ac_q3), alpha_q12);
  scaled_luma_q0 = _mm_sign_epi16(scaled_luma_q0, ac_sign);
  return _mm_add_epi16(scaled_luma_q0, dc_q0);
}

static inline void cfl_predict_lbd_ssse3(const int16_t *pred_buf_q3,
                                         uint8_t *dst, int dst_stride,
                                         int alpha_q3, int width, int height) {
  const __m128i alpha_sign = _mm_set1_epi16(alpha_q3);
  const __m128i alpha_q12 = _mm_slli_epi16(_mm_abs_epi16(alpha_sign), 9);
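  // dst is assumed to already hold the DC prediction, so broadcasting its
  // first pixel provides the dc_q0 term added in predict_unclipped().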
  const __m128i dc_q0 = _mm_set1_epi16(*dst);
  __m128i *row = (__m128i *)pred_buf_q3;
  const __m128i *row_end = row + height * CFL_BUF_LINE_I128;
  do {
    __m128i res = predict_unclipped(row, alpha_q12, alpha_sign, dc_q0);
    if (width < 16) {
      res = _mm_packus_epi16(res, res);
      if (width == 4)
        _mm_storeh_epi32((__m128i *)dst, res);
      else
        _mm_storel_epi64((__m128i *)dst, res);
    } else {
      __m128i next = predict_unclipped(row + 1, alpha_q12, alpha_sign, dc_q0);
      res = _mm_packus_epi16(res, next);
      _mm_storeu_si128((__m128i *)dst, res);
      if (width == 32) {
        res = predict_unclipped(row + 2, alpha_q12, alpha_sign, dc_q0);
        next = predict_unclipped(row + 3, alpha_q12, alpha_sign, dc_q0);
        res = _mm_packus_epi16(res, next);
        _mm_storeu_si128((__m128i *)(dst + 16), res);
      }
    }
    dst += dst_stride;
  } while ((row += CFL_BUF_LINE_I128) < row_end);
}

CFL_PREDICT_FN(ssse3, lbd)

#if CONFIG_AV1_HIGHBITDEPTH
static inline __m128i highbd_max_epi16(int bd) {
  const __m128i neg_one = _mm_set1_epi16(-1);
  // (1 << bd) - 1 => -(-1 << bd) - 1 => -1 - (-1 << bd) => -1 ^ (-1 << bd)
  return _mm_xor_si128(_mm_slli_epi16(neg_one, bd), neg_one);
}

static inline __m128i highbd_clamp_epi16(__m128i u, __m128i zero, __m128i max) {
  return _mm_max_epi16(_mm_min_epi16(u, max), zero);
}

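// As in the lbd version, dst is assumed to already hold the DC prediction;
// results are clamped to [0, (1 << bd) - 1] before being stored.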
static inline void cfl_predict_hbd_ssse3(const int16_t *pred_buf_q3,
                                         uint16_t *dst, int dst_stride,
                                         int alpha_q3, int bd, int width,
                                         int height) {
  const __m128i alpha_sign = _mm_set1_epi16(alpha_q3);
  const __m128i alpha_q12 = _mm_slli_epi16(_mm_abs_epi16(alpha_sign), 9);
  const __m128i dc_q0 = _mm_set1_epi16(*dst);
  const __m128i max = highbd_max_epi16(bd);
  const __m128i zeros = _mm_setzero_si128();
  __m128i *row = (__m128i *)pred_buf_q3;
  const __m128i *row_end = row + height * CFL_BUF_LINE_I128;
  do {
    __m128i res = predict_unclipped(row, alpha_q12, alpha_sign, dc_q0);
    res = highbd_clamp_epi16(res, zeros, max);
    if (width == 4) {
      _mm_storel_epi64((__m128i *)dst, res);
    } else {
      _mm_storeu_si128((__m128i *)dst, res);
    }
    if (width >= 16) {
      const __m128i res_1 =
          predict_unclipped(row + 1, alpha_q12, alpha_sign, dc_q0);
      _mm_storeu_si128(((__m128i *)dst) + 1,
                       highbd_clamp_epi16(res_1, zeros, max));
    }
    if (width == 32) {
      const __m128i res_2 =
          predict_unclipped(row + 2, alpha_q12, alpha_sign, dc_q0);
      _mm_storeu_si128((__m128i *)(dst + 16),
                       highbd_clamp_epi16(res_2, zeros, max));
      const __m128i res_3 =
          predict_unclipped(row + 3, alpha_q12, alpha_sign, dc_q0);
      _mm_storeu_si128((__m128i *)(dst + 24),
                       highbd_clamp_epi16(res_3, zeros, max));
    }
    dst += dst_stride;
  } while ((row += CFL_BUF_LINE_I128) < row_end);
}

CFL_PREDICT_FN(ssse3, hbd)
#endif  // CONFIG_AV1_HIGHBITDEPTH