/*
 *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <immintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"

subtract32_avx2(int16_t * diff_ptr,const uint8_t * src_ptr,const uint8_t * pred_ptr)17 static VPX_FORCE_INLINE void subtract32_avx2(int16_t *diff_ptr,
18                                              const uint8_t *src_ptr,
19                                              const uint8_t *pred_ptr) {
20   const __m256i s = _mm256_lddqu_si256((const __m256i *)src_ptr);
21   const __m256i p = _mm256_lddqu_si256((const __m256i *)pred_ptr);
22   const __m256i s_0 = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(s));
23   const __m256i s_1 = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(s, 1));
24   const __m256i p_0 = _mm256_cvtepu8_epi16(_mm256_castsi256_si128(p));
25   const __m256i p_1 = _mm256_cvtepu8_epi16(_mm256_extracti128_si256(p, 1));
26   const __m256i d_0 = _mm256_sub_epi16(s_0, p_0);
27   const __m256i d_1 = _mm256_sub_epi16(s_1, p_1);
28   _mm256_storeu_si256((__m256i *)diff_ptr, d_0);
29   _mm256_storeu_si256((__m256i *)(diff_ptr + 16), d_1);
30 }
subtract_block_16xn_avx2(int rows,int16_t * diff_ptr,ptrdiff_t diff_stride,const uint8_t * src_ptr,ptrdiff_t src_stride,const uint8_t * pred_ptr,ptrdiff_t pred_stride)32 static VPX_FORCE_INLINE void subtract_block_16xn_avx2(
33     int rows, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr,
34     ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
35   int j;
36   for (j = 0; j < rows; ++j) {
37     const __m128i s = _mm_lddqu_si128((const __m128i *)src_ptr);
38     const __m128i p = _mm_lddqu_si128((const __m128i *)pred_ptr);
39     const __m256i s_0 = _mm256_cvtepu8_epi16(s);
40     const __m256i p_0 = _mm256_cvtepu8_epi16(p);
41     const __m256i d_0 = _mm256_sub_epi16(s_0, p_0);
42     _mm256_storeu_si256((__m256i *)diff_ptr, d_0);
43     src_ptr += src_stride;
44     pred_ptr += pred_stride;
45     diff_ptr += diff_stride;
46   }
47 }
subtract_block_32xn_avx2(int rows,int16_t * diff_ptr,ptrdiff_t diff_stride,const uint8_t * src_ptr,ptrdiff_t src_stride,const uint8_t * pred_ptr,ptrdiff_t pred_stride)49 static VPX_FORCE_INLINE void subtract_block_32xn_avx2(
50     int rows, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr,
51     ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
52   int j;
53   for (j = 0; j < rows; ++j) {
54     subtract32_avx2(diff_ptr, src_ptr, pred_ptr);
55     src_ptr += src_stride;
56     pred_ptr += pred_stride;
57     diff_ptr += diff_stride;
58   }
59 }
subtract_block_64xn_avx2(int rows,int16_t * diff_ptr,ptrdiff_t diff_stride,const uint8_t * src_ptr,ptrdiff_t src_stride,const uint8_t * pred_ptr,ptrdiff_t pred_stride)61 static VPX_FORCE_INLINE void subtract_block_64xn_avx2(
62     int rows, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr,
63     ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride) {
64   int j;
65   for (j = 0; j < rows; ++j) {
66     subtract32_avx2(diff_ptr, src_ptr, pred_ptr);
67     subtract32_avx2(diff_ptr + 32, src_ptr + 32, pred_ptr + 32);
68     src_ptr += src_stride;
69     pred_ptr += pred_stride;
70     diff_ptr += diff_stride;
71   }
72 }
// Public entry point: diff = src - pred for a rows x cols block.  Widths that
// map cleanly onto 256-bit registers (16/32/64) take the AVX2 row loops;
// every other width falls back to the SSE2 implementation.
void vpx_subtract_block_avx2(int rows, int cols, int16_t *diff_ptr,
                             ptrdiff_t diff_stride, const uint8_t *src_ptr,
                             ptrdiff_t src_stride, const uint8_t *pred_ptr,
                             ptrdiff_t pred_stride) {
  if (cols == 16) {
    subtract_block_16xn_avx2(rows, diff_ptr, diff_stride, src_ptr, src_stride,
                             pred_ptr, pred_stride);
  } else if (cols == 32) {
    subtract_block_32xn_avx2(rows, diff_ptr, diff_stride, src_ptr, src_stride,
                             pred_ptr, pred_stride);
  } else if (cols == 64) {
    subtract_block_64xn_avx2(rows, diff_ptr, diff_stride, src_ptr, src_stride,
                             pred_ptr, pred_stride);
  } else {
    vpx_subtract_block_sse2(rows, cols, diff_ptr, diff_stride, src_ptr,
                            src_stride, pred_ptr, pred_stride);
  }
}

#if CONFIG_VP9_HIGHBITDEPTH
vpx_highbd_subtract_block_avx2(int rows,int cols,int16_t * diff_ptr,ptrdiff_t diff_stride,const uint8_t * src8_ptr,ptrdiff_t src_stride,const uint8_t * pred8_ptr,ptrdiff_t pred_stride,int bd)99 void vpx_highbd_subtract_block_avx2(int rows, int cols, int16_t *diff_ptr,
100                                     ptrdiff_t diff_stride,
101                                     const uint8_t *src8_ptr,
102                                     ptrdiff_t src_stride,
103                                     const uint8_t *pred8_ptr,
104                                     ptrdiff_t pred_stride, int bd) {
105   uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src8_ptr);
106   uint16_t *pred_ptr = CONVERT_TO_SHORTPTR(pred8_ptr);
107   (void)bd;
108   if (cols == 64) {
109     int j = rows;
110     do {
111       const __m256i s0 = _mm256_lddqu_si256((const __m256i *)src_ptr);
112       const __m256i s1 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 16));
113       const __m256i s2 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 32));
114       const __m256i s3 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 48));
115       const __m256i p0 = _mm256_lddqu_si256((const __m256i *)pred_ptr);
116       const __m256i p1 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 16));
117       const __m256i p2 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 32));
118       const __m256i p3 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 48));
119       const __m256i d0 = _mm256_sub_epi16(s0, p0);
120       const __m256i d1 = _mm256_sub_epi16(s1, p1);
121       const __m256i d2 = _mm256_sub_epi16(s2, p2);
122       const __m256i d3 = _mm256_sub_epi16(s3, p3);
123       _mm256_storeu_si256((__m256i *)diff_ptr, d0);
124       _mm256_storeu_si256((__m256i *)(diff_ptr + 16), d1);
125       _mm256_storeu_si256((__m256i *)(diff_ptr + 32), d2);
126       _mm256_storeu_si256((__m256i *)(diff_ptr + 48), d3);
127       src_ptr += src_stride;
128       pred_ptr += pred_stride;
129       diff_ptr += diff_stride;
130     } while (--j != 0);
131   } else if (cols == 32) {
132     int j = rows;
133     do {
134       const __m256i s0 = _mm256_lddqu_si256((const __m256i *)src_ptr);
135       const __m256i s1 = _mm256_lddqu_si256((const __m256i *)(src_ptr + 16));
136       const __m256i p0 = _mm256_lddqu_si256((const __m256i *)pred_ptr);
137       const __m256i p1 = _mm256_lddqu_si256((const __m256i *)(pred_ptr + 16));
138       const __m256i d0 = _mm256_sub_epi16(s0, p0);
139       const __m256i d1 = _mm256_sub_epi16(s1, p1);
140       _mm256_storeu_si256((__m256i *)diff_ptr, d0);
141       _mm256_storeu_si256((__m256i *)(diff_ptr + 16), d1);
142       src_ptr += src_stride;
143       pred_ptr += pred_stride;
144       diff_ptr += diff_stride;
145     } while (--j != 0);
146   } else if (cols == 16) {
147     int j = rows;
148     do {
149       const __m256i s0 = _mm256_lddqu_si256((const __m256i *)src_ptr);
150       const __m256i s1 =
151           _mm256_lddqu_si256((const __m256i *)(src_ptr + src_stride));
152       const __m256i p0 = _mm256_lddqu_si256((const __m256i *)pred_ptr);
153       const __m256i p1 =
154           _mm256_lddqu_si256((const __m256i *)(pred_ptr + pred_stride));
155       const __m256i d0 = _mm256_sub_epi16(s0, p0);
156       const __m256i d1 = _mm256_sub_epi16(s1, p1);
157       _mm256_storeu_si256((__m256i *)diff_ptr, d0);
158       _mm256_storeu_si256((__m256i *)(diff_ptr + diff_stride), d1);
159       src_ptr += src_stride << 1;
160       pred_ptr += pred_stride << 1;
161       diff_ptr += diff_stride << 1;
162       j -= 2;
163     } while (j != 0);
164   } else if (cols == 8) {
165     int j = rows;
166     do {
167       const __m128i s0 = _mm_lddqu_si128((const __m128i *)src_ptr);
168       const __m128i s1 =
169           _mm_lddqu_si128((const __m128i *)(src_ptr + src_stride));
170       const __m128i p0 = _mm_lddqu_si128((const __m128i *)pred_ptr);
171       const __m128i p1 =
172           _mm_lddqu_si128((const __m128i *)(pred_ptr + pred_stride));
173       const __m128i d0 = _mm_sub_epi16(s0, p0);
174       const __m128i d1 = _mm_sub_epi16(s1, p1);
175       _mm_storeu_si128((__m128i *)diff_ptr, d0);
176       _mm_storeu_si128((__m128i *)(diff_ptr + diff_stride), d1);
177       src_ptr += src_stride << 1;
178       pred_ptr += pred_stride << 1;
179       diff_ptr += diff_stride << 1;
180       j -= 2;
181     } while (j != 0);
182   } else {
183     int j = rows;
184     assert(cols == 4);
185     do {
186       const __m128i s0 = _mm_loadl_epi64((const __m128i *)src_ptr);
187       const __m128i s1 =
188           _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride));
189       const __m128i p0 = _mm_loadl_epi64((const __m128i *)pred_ptr);
190       const __m128i p1 =
191           _mm_loadl_epi64((const __m128i *)(pred_ptr + pred_stride));
192       const __m128i d0 = _mm_sub_epi16(s0, p0);
193       const __m128i d1 = _mm_sub_epi16(s1, p1);
194       _mm_storel_epi64((__m128i *)diff_ptr, d0);
195       _mm_storel_epi64((__m128i *)(diff_ptr + diff_stride), d1);
196       src_ptr += src_stride << 1;
197       pred_ptr += pred_stride << 1;
198       diff_ptr += diff_stride << 1;
199       j -= 2;
200     } while (j != 0);
201   }
202 }
#endif  // CONFIG_VP9_HIGHBITDEPTH