/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
#include "vpx_ports/mem.h"

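// Sign-extends the eight 16-bit lanes of |in| into two vectors of four
// 32-bit lanes: |out_lo| receives lanes 0-3 and |out_hi| lanes 4-7. |zero|
// must be an all-zero register.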
static INLINE void sign_extend_16bit_to_32bit_sse2(__m128i in, __m128i zero,
                                                   __m128i *out_lo,
                                                   __m128i *out_hi) {
  const __m128i sign_bits = _mm_cmplt_epi16(in, zero);
  *out_lo = _mm_unpacklo_epi16(in, sign_bits);
  *out_hi = _mm_unpackhi_epi16(in, sign_bits);
}

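// Computes the per-pixel absolute difference between the 8x8 source block
// |s| (stride |p|) and the 8x8 prediction block |d| (stride |dp|), and
// returns the smallest and largest values through |min| and |max|.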
void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
                         int *min, int *max) {
  __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff;
  u0 = _mm_setzero_si128();
  // Row 0
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff0 = _mm_max_epi16(diff, negdiff);
  // Row 1
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(absdiff0, absdiff);
  minabsdiff = _mm_min_epi16(absdiff0, absdiff);
  // Row 2
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 2 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 3
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 3 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 4
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 4 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 5
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 5 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 6
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 6 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 7
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 7 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);

  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_si128(maxabsdiff, 8));
  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_epi64(maxabsdiff, 32));
  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_epi64(maxabsdiff, 16));
  *max = _mm_extract_epi16(maxabsdiff, 0);

  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_si128(minabsdiff, 8));
  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_epi64(minabsdiff, 32));
  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_epi64(minabsdiff, 16));
  *min = _mm_extract_epi16(minabsdiff, 0);
}

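// Returns the rounded average of the 64 pixels in the 8x8 block at |s|
// (stride |p|): (sum + 32) >> 6.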
unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) {
  __m128i s0, s1, u0;
  unsigned int avg = 0;
  u0 = _mm_setzero_si128();
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);

  s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 8));
  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 32));
  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
  avg = _mm_extract_epi16(s0, 0);
  return (avg + 32) >> 6;
}

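// Returns the rounded average of the 16 pixels in the 4x4 block at |s|
// (stride |p|): (sum + 8) >> 4.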
unsigned int vpx_avg_4x4_sse2(const uint8_t *s, int p) {
  __m128i s0, s1, u0;
  unsigned int avg = 0;
  u0 = _mm_setzero_si128();
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
  s0 = _mm_adds_epu16(s0, s1);

  s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 4));
  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
  avg = _mm_extract_epi16(s0, 0);
  return (avg + 8) >> 4;
}

#if CONFIG_VP9_HIGHBITDEPTH
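// High bit-depth counterpart of vpx_avg_8x8_sse2(): |s8| is converted with
// CONVERT_TO_SHORTPTR and the 8x8 block of 16-bit pixels (stride |p|) is
// averaged with rounding.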
unsigned int vpx_highbd_avg_8x8_sse2(const uint8_t *s8, int p) {
  __m128i s0, s1;
  unsigned int avg;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  const __m128i zero = _mm_setzero_si128();
  s0 = _mm_loadu_si128((const __m128i *)(s));
  s1 = _mm_loadu_si128((const __m128i *)(s + p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 2 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 3 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 4 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 5 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 6 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadu_si128((const __m128i *)(s + 7 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_unpackhi_epi16(s0, zero);
  s0 = _mm_unpacklo_epi16(s0, zero);
  s0 = _mm_add_epi32(s0, s1);
  s0 = _mm_add_epi32(s0, _mm_srli_si128(s0, 8));
  s0 = _mm_add_epi32(s0, _mm_srli_si128(s0, 4));
  avg = (unsigned int)_mm_cvtsi128_si32(s0);

  return (avg + 32) >> 6;
}

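// High bit-depth counterpart of vpx_avg_4x4_sse2() for a 4x4 block of
// 16-bit pixels.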
unsigned int vpx_highbd_avg_4x4_sse2(const uint8_t *s8, int p) {
  __m128i s0, s1;
  unsigned int avg;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  s0 = _mm_loadl_epi64((const __m128i *)(s));
  s1 = _mm_loadl_epi64((const __m128i *)(s + p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadl_epi64((const __m128i *)(s + 2 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s1 = _mm_loadl_epi64((const __m128i *)(s + 3 * p));
  s0 = _mm_adds_epu16(s0, s1);
  s0 = _mm_add_epi16(s0, _mm_srli_si128(s0, 4));
  s0 = _mm_add_epi16(s0, _mm_srli_si128(s0, 2));
  avg = _mm_extract_epi16(s0, 0);

  return (avg + 8) >> 4;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

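// One stage of an 8-point Hadamard butterfly applied to the eight vectors
// in |in|, writing the result back to |in|. On the first pass (iter == 0)
// the 8x8 result is also transposed so that the second pass (iter == 1)
// processes the other dimension.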
static void hadamard_col8_sse2(__m128i *in, int iter) {
  __m128i a0 = in[0];
  __m128i a1 = in[1];
  __m128i a2 = in[2];
  __m128i a3 = in[3];
  __m128i a4 = in[4];
  __m128i a5 = in[5];
  __m128i a6 = in[6];
  __m128i a7 = in[7];

  __m128i b0 = _mm_add_epi16(a0, a1);
  __m128i b1 = _mm_sub_epi16(a0, a1);
  __m128i b2 = _mm_add_epi16(a2, a3);
  __m128i b3 = _mm_sub_epi16(a2, a3);
  __m128i b4 = _mm_add_epi16(a4, a5);
  __m128i b5 = _mm_sub_epi16(a4, a5);
  __m128i b6 = _mm_add_epi16(a6, a7);
  __m128i b7 = _mm_sub_epi16(a6, a7);

  a0 = _mm_add_epi16(b0, b2);
  a1 = _mm_add_epi16(b1, b3);
  a2 = _mm_sub_epi16(b0, b2);
  a3 = _mm_sub_epi16(b1, b3);
  a4 = _mm_add_epi16(b4, b6);
  a5 = _mm_add_epi16(b5, b7);
  a6 = _mm_sub_epi16(b4, b6);
  a7 = _mm_sub_epi16(b5, b7);

  if (iter == 0) {
    b0 = _mm_add_epi16(a0, a4);
    b7 = _mm_add_epi16(a1, a5);
    b3 = _mm_add_epi16(a2, a6);
    b4 = _mm_add_epi16(a3, a7);
    b2 = _mm_sub_epi16(a0, a4);
    b6 = _mm_sub_epi16(a1, a5);
    b1 = _mm_sub_epi16(a2, a6);
    b5 = _mm_sub_epi16(a3, a7);

    a0 = _mm_unpacklo_epi16(b0, b1);
    a1 = _mm_unpacklo_epi16(b2, b3);
    a2 = _mm_unpackhi_epi16(b0, b1);
    a3 = _mm_unpackhi_epi16(b2, b3);
    a4 = _mm_unpacklo_epi16(b4, b5);
    a5 = _mm_unpacklo_epi16(b6, b7);
    a6 = _mm_unpackhi_epi16(b4, b5);
    a7 = _mm_unpackhi_epi16(b6, b7);

    b0 = _mm_unpacklo_epi32(a0, a1);
    b1 = _mm_unpacklo_epi32(a4, a5);
    b2 = _mm_unpackhi_epi32(a0, a1);
    b3 = _mm_unpackhi_epi32(a4, a5);
    b4 = _mm_unpacklo_epi32(a2, a3);
    b5 = _mm_unpacklo_epi32(a6, a7);
    b6 = _mm_unpackhi_epi32(a2, a3);
    b7 = _mm_unpackhi_epi32(a6, a7);

    in[0] = _mm_unpacklo_epi64(b0, b1);
    in[1] = _mm_unpackhi_epi64(b0, b1);
    in[2] = _mm_unpacklo_epi64(b2, b3);
    in[3] = _mm_unpackhi_epi64(b2, b3);
    in[4] = _mm_unpacklo_epi64(b4, b5);
    in[5] = _mm_unpackhi_epi64(b4, b5);
    in[6] = _mm_unpacklo_epi64(b6, b7);
    in[7] = _mm_unpackhi_epi64(b6, b7);
  } else {
    in[0] = _mm_add_epi16(a0, a4);
    in[7] = _mm_add_epi16(a1, a5);
    in[3] = _mm_add_epi16(a2, a6);
    in[4] = _mm_add_epi16(a3, a7);
    in[2] = _mm_sub_epi16(a0, a4);
    in[6] = _mm_sub_epi16(a1, a5);
    in[1] = _mm_sub_epi16(a2, a6);
    in[5] = _mm_sub_epi16(a3, a7);
  }
}

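// 8x8 Hadamard transform of the residual block |src_diff| (stride
// |src_stride|). When |is_final| is nonzero the result is written to
// |coeff| as tran_low_t via store_tran_low(); otherwise it is stored as
// raw int16_t for further processing by the larger transforms below.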
static INLINE void hadamard_8x8_sse2(const int16_t *src_diff,
                                     ptrdiff_t src_stride, tran_low_t *coeff,
                                     int is_final) {
  __m128i src[8];
  src[0] = _mm_load_si128((const __m128i *)src_diff);
  src[1] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[2] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[3] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[4] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[5] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[6] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[7] = _mm_load_si128((const __m128i *)(src_diff + src_stride));

  hadamard_col8_sse2(src, 0);
  hadamard_col8_sse2(src, 1);

  if (is_final) {
    store_tran_low(src[0], coeff);
    coeff += 8;
    store_tran_low(src[1], coeff);
    coeff += 8;
    store_tran_low(src[2], coeff);
    coeff += 8;
    store_tran_low(src[3], coeff);
    coeff += 8;
    store_tran_low(src[4], coeff);
    coeff += 8;
    store_tran_low(src[5], coeff);
    coeff += 8;
    store_tran_low(src[6], coeff);
    coeff += 8;
    store_tran_low(src[7], coeff);
  } else {
    int16_t *coeff16 = (int16_t *)coeff;
    _mm_store_si128((__m128i *)coeff16, src[0]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[1]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[2]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[3]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[4]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[5]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[6]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[7]);
  }
}

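// Public 8x8 Hadamard entry point: always emits tran_low_t output.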
void vpx_hadamard_8x8_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                           tran_low_t *coeff) {
  hadamard_8x8_sse2(src_diff, src_stride, coeff, 1);
}

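// 16x16 Hadamard transform assembled from four 8x8 transforms whose
// outputs are combined with one more butterfly stage and normalized with a
// >> 1 shift. |is_final| selects tran_low_t or int16_t output, as in
// hadamard_8x8_sse2().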
static INLINE void hadamard_16x16_sse2(const int16_t *src_diff,
                                       ptrdiff_t src_stride, tran_low_t *coeff,
                                       int is_final) {
#if CONFIG_VP9_HIGHBITDEPTH
  // For high bitdepths, it is unnecessary to store_tran_low
  // (mult/unpack/store), then load_tran_low (load/pack) the same memory in the
  // next stage.  Output to an intermediate buffer first, then store_tran_low()
  // in the final stage.
  DECLARE_ALIGNED(32, int16_t, temp_coeff[16 * 16]);
  int16_t *t_coeff = temp_coeff;
#else
  int16_t *t_coeff = coeff;
#endif
  int16_t *coeff16 = (int16_t *)coeff;
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    hadamard_8x8_sse2(src_ptr, src_stride, (tran_low_t *)(t_coeff + idx * 64),
                      0);
  }

  for (idx = 0; idx < 64; idx += 8) {
    __m128i coeff0 = _mm_load_si128((const __m128i *)t_coeff);
    __m128i coeff1 = _mm_load_si128((const __m128i *)(t_coeff + 64));
    __m128i coeff2 = _mm_load_si128((const __m128i *)(t_coeff + 128));
    __m128i coeff3 = _mm_load_si128((const __m128i *)(t_coeff + 192));

    __m128i b0 = _mm_add_epi16(coeff0, coeff1);
    __m128i b1 = _mm_sub_epi16(coeff0, coeff1);
    __m128i b2 = _mm_add_epi16(coeff2, coeff3);
    __m128i b3 = _mm_sub_epi16(coeff2, coeff3);

    b0 = _mm_srai_epi16(b0, 1);
    b1 = _mm_srai_epi16(b1, 1);
    b2 = _mm_srai_epi16(b2, 1);
    b3 = _mm_srai_epi16(b3, 1);

    coeff0 = _mm_add_epi16(b0, b2);
    coeff1 = _mm_add_epi16(b1, b3);
    coeff2 = _mm_sub_epi16(b0, b2);
    coeff3 = _mm_sub_epi16(b1, b3);

    if (is_final) {
      store_tran_low(coeff0, coeff);
      store_tran_low(coeff1, coeff + 64);
      store_tran_low(coeff2, coeff + 128);
      store_tran_low(coeff3, coeff + 192);
      coeff += 8;
    } else {
      _mm_store_si128((__m128i *)coeff16, coeff0);
      _mm_store_si128((__m128i *)(coeff16 + 64), coeff1);
      _mm_store_si128((__m128i *)(coeff16 + 128), coeff2);
      _mm_store_si128((__m128i *)(coeff16 + 192), coeff3);
      coeff16 += 8;
    }

    t_coeff += 8;
  }
}

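// Public 16x16 Hadamard entry point: always emits tran_low_t output.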
void vpx_hadamard_16x16_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  hadamard_16x16_sse2(src_diff, src_stride, coeff, 1);
}

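// 32x32 Hadamard transform assembled from four 16x16 transforms. The final
// combine is done on 32-bit lanes (sign-extend, add/sub, >> 2, pack with
// saturation) so the intermediate sums are not truncated to 16 bits before
// the shift.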
void vpx_hadamard_32x32_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
#if CONFIG_VP9_HIGHBITDEPTH
  // For high bitdepths, it is unnecessary to store_tran_low
  // (mult/unpack/store), then load_tran_low (load/pack) the same memory in the
  // next stage.  Output to an intermediate buffer first, then store_tran_low()
  // in the final stage.
  DECLARE_ALIGNED(32, int16_t, temp_coeff[32 * 32]);
  int16_t *t_coeff = temp_coeff;
#else
  int16_t *t_coeff = coeff;
#endif
  int idx;
  __m128i coeff0_lo, coeff1_lo, coeff2_lo, coeff3_lo, b0_lo, b1_lo, b2_lo,
      b3_lo;
  __m128i coeff0_hi, coeff1_hi, coeff2_hi, coeff3_hi, b0_hi, b1_hi, b2_hi,
      b3_hi;
  __m128i b0, b1, b2, b3;
  const __m128i zero = _mm_setzero_si128();
  for (idx = 0; idx < 4; ++idx) {
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 16 * src_stride + (idx & 0x01) * 16;
    hadamard_16x16_sse2(src_ptr, src_stride,
                        (tran_low_t *)(t_coeff + idx * 256), 0);
  }

  for (idx = 0; idx < 256; idx += 8) {
    __m128i coeff0 = _mm_load_si128((const __m128i *)t_coeff);
    __m128i coeff1 = _mm_load_si128((const __m128i *)(t_coeff + 256));
    __m128i coeff2 = _mm_load_si128((const __m128i *)(t_coeff + 512));
    __m128i coeff3 = _mm_load_si128((const __m128i *)(t_coeff + 768));

    // Sign extend 16 bit to 32 bit.
    sign_extend_16bit_to_32bit_sse2(coeff0, zero, &coeff0_lo, &coeff0_hi);
    sign_extend_16bit_to_32bit_sse2(coeff1, zero, &coeff1_lo, &coeff1_hi);
    sign_extend_16bit_to_32bit_sse2(coeff2, zero, &coeff2_lo, &coeff2_hi);
    sign_extend_16bit_to_32bit_sse2(coeff3, zero, &coeff3_lo, &coeff3_hi);

    b0_lo = _mm_add_epi32(coeff0_lo, coeff1_lo);
    b0_hi = _mm_add_epi32(coeff0_hi, coeff1_hi);

    b1_lo = _mm_sub_epi32(coeff0_lo, coeff1_lo);
    b1_hi = _mm_sub_epi32(coeff0_hi, coeff1_hi);

    b2_lo = _mm_add_epi32(coeff2_lo, coeff3_lo);
    b2_hi = _mm_add_epi32(coeff2_hi, coeff3_hi);

    b3_lo = _mm_sub_epi32(coeff2_lo, coeff3_lo);
    b3_hi = _mm_sub_epi32(coeff2_hi, coeff3_hi);

    b0_lo = _mm_srai_epi32(b0_lo, 2);
    b1_lo = _mm_srai_epi32(b1_lo, 2);
    b2_lo = _mm_srai_epi32(b2_lo, 2);
    b3_lo = _mm_srai_epi32(b3_lo, 2);

    b0_hi = _mm_srai_epi32(b0_hi, 2);
    b1_hi = _mm_srai_epi32(b1_hi, 2);
    b2_hi = _mm_srai_epi32(b2_hi, 2);
    b3_hi = _mm_srai_epi32(b3_hi, 2);

    b0 = _mm_packs_epi32(b0_lo, b0_hi);
    b1 = _mm_packs_epi32(b1_lo, b1_hi);
    b2 = _mm_packs_epi32(b2_lo, b2_hi);
    b3 = _mm_packs_epi32(b3_lo, b3_hi);

    coeff0 = _mm_add_epi16(b0, b2);
    coeff1 = _mm_add_epi16(b1, b3);
    store_tran_low(coeff0, coeff);
    store_tran_low(coeff1, coeff + 256);

    coeff2 = _mm_sub_epi16(b0, b2);
    coeff3 = _mm_sub_epi16(b1, b3);
    store_tran_low(coeff2, coeff + 512);
    store_tran_low(coeff3, coeff + 768);

    coeff += 8;
    t_coeff += 8;
  }
}

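// Returns the sum of absolute values of the |length| coefficients in
// |coeff|. The coefficients are processed eight at a time, so |length| is
// expected to be a multiple of 8.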
int vpx_satd_sse2(const tran_low_t *coeff, int length) {
  int i;
  const __m128i zero = _mm_setzero_si128();
  __m128i accum = zero;

  for (i = 0; i < length; i += 8) {
    const __m128i src_line = load_tran_low(coeff);
    const __m128i inv = _mm_sub_epi16(zero, src_line);
    const __m128i abs = _mm_max_epi16(src_line, inv);  // abs(src_line)
    const __m128i abs_lo = _mm_unpacklo_epi16(abs, zero);
    const __m128i abs_hi = _mm_unpackhi_epi16(abs, zero);
    const __m128i sum = _mm_add_epi32(abs_lo, abs_hi);
    accum = _mm_add_epi32(accum, sum);
    coeff += 8;
  }

  {  // cascading summation of accum
    __m128i hi = _mm_srli_si128(accum, 8);
    accum = _mm_add_epi32(accum, hi);
    hi = _mm_srli_epi64(accum, 32);
    accum = _mm_add_epi32(accum, hi);
  }

  return _mm_cvtsi128_si32(accum);
}

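// Sums each of the 16 columns of the |height| x 16 block at |ref| (stride
// |ref_stride|) and stores the normalized column sums in |hbuf|. The
// normalizing shift depends on |height|: >> 5 for 64, >> 4 for 32 and >> 3
// otherwise.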
void vpx_int_pro_row_sse2(int16_t hbuf[16], const uint8_t *ref,
                          const int ref_stride, const int height) {
  int idx;
  __m128i zero = _mm_setzero_si128();
  __m128i src_line = _mm_loadu_si128((const __m128i *)ref);
  __m128i s0 = _mm_unpacklo_epi8(src_line, zero);
  __m128i s1 = _mm_unpackhi_epi8(src_line, zero);
  __m128i t0, t1;
  int height_1 = height - 1;
  ref += ref_stride;

  for (idx = 1; idx < height_1; idx += 2) {
    src_line = _mm_loadu_si128((const __m128i *)ref);
    t0 = _mm_unpacklo_epi8(src_line, zero);
    t1 = _mm_unpackhi_epi8(src_line, zero);
    s0 = _mm_adds_epu16(s0, t0);
    s1 = _mm_adds_epu16(s1, t1);
    ref += ref_stride;

    src_line = _mm_loadu_si128((const __m128i *)ref);
    t0 = _mm_unpacklo_epi8(src_line, zero);
    t1 = _mm_unpackhi_epi8(src_line, zero);
    s0 = _mm_adds_epu16(s0, t0);
    s1 = _mm_adds_epu16(s1, t1);
    ref += ref_stride;
  }

  src_line = _mm_loadu_si128((const __m128i *)ref);
  t0 = _mm_unpacklo_epi8(src_line, zero);
  t1 = _mm_unpackhi_epi8(src_line, zero);
  s0 = _mm_adds_epu16(s0, t0);
  s1 = _mm_adds_epu16(s1, t1);

  if (height == 64) {
    s0 = _mm_srai_epi16(s0, 5);
    s1 = _mm_srai_epi16(s1, 5);
  } else if (height == 32) {
    s0 = _mm_srai_epi16(s0, 4);
    s1 = _mm_srai_epi16(s1, 4);
  } else {
    s0 = _mm_srai_epi16(s0, 3);
    s1 = _mm_srai_epi16(s1, 3);
  }

  _mm_storeu_si128((__m128i *)hbuf, s0);
  hbuf += 8;
  _mm_storeu_si128((__m128i *)hbuf, s1);
}

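// Returns the sum of the |width| pixels in a single row at |ref|, computed
// 16 at a time with _mm_sad_epu8() against zero.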
int16_t vpx_int_pro_col_sse2(const uint8_t *ref, const int width) {
  __m128i zero = _mm_setzero_si128();
  __m128i src_line = _mm_loadu_si128((const __m128i *)ref);
  __m128i s0 = _mm_sad_epu8(src_line, zero);
  __m128i s1;
  int i;

  for (i = 16; i < width; i += 16) {
    ref += 16;
    src_line = _mm_loadu_si128((const __m128i *)ref);
    s1 = _mm_sad_epu8(src_line, zero);
    s0 = _mm_adds_epu16(s0, s1);
  }

  s1 = _mm_srli_si128(s0, 8);
  s0 = _mm_adds_epu16(s0, s1);

  return _mm_extract_epi16(s0, 0);
}

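// For vectors of length width = 4 << bwl, returns
// sum((ref - src)^2) - (sum(ref - src)^2 >> (bwl + 2)), i.e. the
// unnormalized variance of the difference between |ref| and |src|.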
int vpx_vector_var_sse2(const int16_t *ref, const int16_t *src, const int bwl) {
  int idx;
  int width = 4 << bwl;
  int16_t mean;
  __m128i v0 = _mm_loadu_si128((const __m128i *)ref);
  __m128i v1 = _mm_load_si128((const __m128i *)src);
  __m128i diff = _mm_subs_epi16(v0, v1);
  __m128i sum = diff;
  __m128i sse = _mm_madd_epi16(diff, diff);

  ref += 8;
  src += 8;

  for (idx = 8; idx < width; idx += 8) {
    v0 = _mm_loadu_si128((const __m128i *)ref);
    v1 = _mm_load_si128((const __m128i *)src);
    diff = _mm_subs_epi16(v0, v1);

    sum = _mm_add_epi16(sum, diff);
    v0 = _mm_madd_epi16(diff, diff);
    sse = _mm_add_epi32(sse, v0);

    ref += 8;
    src += 8;
  }

  v0 = _mm_srli_si128(sum, 8);
  sum = _mm_add_epi16(sum, v0);
  v0 = _mm_srli_epi64(sum, 32);
  sum = _mm_add_epi16(sum, v0);
  v0 = _mm_srli_epi32(sum, 16);
  sum = _mm_add_epi16(sum, v0);

  v1 = _mm_srli_si128(sse, 8);
  sse = _mm_add_epi32(sse, v1);
  v1 = _mm_srli_epi64(sse, 32);
  sse = _mm_add_epi32(sse, v1);

  mean = (int16_t)_mm_extract_epi16(sum, 0);

  return _mm_cvtsi128_si32(sse) - ((mean * mean) >> (bwl + 2));
}