/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <assert.h>
#include <immintrin.h>  // AVX2

#include "config/av1_rtcd.h"

#include "aom/aom_integer.h"

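// Loads 16 transform coefficients starting at coeff + offset into *c as
// packed 16-bit values.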
static inline void read_coeff(const tran_low_t *coeff, intptr_t offset,
                              __m256i *c) {
  const tran_low_t *addr = coeff + offset;

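  // When tran_low_t is 32 bits wide, load 16 coefficients from two 256-bit
  // vectors and pack them down to 16 bits with saturation.
  // _mm256_packs_epi32 packs within each 128-bit lane, so the 64-bit permute
  // with control 0xD8 restores the original coefficient order.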
  if (sizeof(tran_low_t) == 4) {
    const __m256i x0 = _mm256_loadu_si256((const __m256i *)addr);
    const __m256i x1 = _mm256_loadu_si256((const __m256i *)addr + 1);
    const __m256i y = _mm256_packs_epi32(x0, x1);
    *c = _mm256_permute4x64_epi64(y, 0xD8);
  } else {
    *c = _mm256_loadu_si256((const __m256i *)addr);
  }
}

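// Computes the squared error of a single 16-element block and stores four
// 64-bit partial sums in *sse_256.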
static inline void av1_block_error_block_size16_avx2(const int16_t *coeff,
                                                     const int16_t *dqcoeff,
                                                     __m256i *sse_256) {
  const __m256i _coeff = _mm256_loadu_si256((const __m256i *)coeff);
  const __m256i _dqcoeff = _mm256_loadu_si256((const __m256i *)dqcoeff);
  // d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
  const __m256i diff = _mm256_sub_epi16(_dqcoeff, _coeff);
  // r0 r1 r2 r3 r4 r5 r6 r7
  const __m256i error = _mm256_madd_epi16(diff, diff);
  // r0+r1 r2+r3 | r0+r1 r2+r3 | r4+r5 r6+r7 | r4+r5 r6+r7
  const __m256i error_hi = _mm256_hadd_epi32(error, error);
  // r0+r1 | r2+r3 | r4+r5 | r6+r7
  *sse_256 = _mm256_unpacklo_epi32(error_hi, _mm256_setzero_si256());
}

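// Computes the squared error of a single 32-element block and accumulates it
// into *sse_256 as 64-bit partial sums.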
static inline void av1_block_error_block_size32_avx2(const int16_t *coeff,
                                                     const int16_t *dqcoeff,
                                                     __m256i *sse_256) {
  const __m256i zero = _mm256_setzero_si256();
  const __m256i _coeff_0 = _mm256_loadu_si256((const __m256i *)coeff);
  const __m256i _dqcoeff_0 = _mm256_loadu_si256((const __m256i *)dqcoeff);
  const __m256i _coeff_1 = _mm256_loadu_si256((const __m256i *)(coeff + 16));
  const __m256i _dqcoeff_1 =
      _mm256_loadu_si256((const __m256i *)(dqcoeff + 16));

  // d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
  const __m256i diff_0 = _mm256_sub_epi16(_dqcoeff_0, _coeff_0);
  const __m256i diff_1 = _mm256_sub_epi16(_dqcoeff_1, _coeff_1);

  // r0 r1 r2 r3 r4 r5 r6 r7
  const __m256i error_0 = _mm256_madd_epi16(diff_0, diff_0);
  const __m256i error_1 = _mm256_madd_epi16(diff_1, diff_1);
  const __m256i err_final_0 = _mm256_add_epi32(error_0, error_1);

  // For extreme input values, the accumulation needs to happen in 64-bit
  // precision to avoid any overflow.
  const __m256i exp0_error_lo = _mm256_unpacklo_epi32(err_final_0, zero);
  const __m256i exp0_error_hi = _mm256_unpackhi_epi32(err_final_0, zero);
  const __m256i sum_temp_0 = _mm256_add_epi64(exp0_error_hi, exp0_error_lo);
  *sse_256 = _mm256_add_epi64(*sse_256, sum_temp_0);
}

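// Computes the squared error of block_size elements (a multiple of 64) and
// accumulates it into *sse_256 as 64-bit partial sums.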
static inline void av1_block_error_block_size64_avx2(const int16_t *coeff,
                                                     const int16_t *dqcoeff,
                                                     __m256i *sse_256,
                                                     intptr_t block_size) {
  const __m256i zero = _mm256_setzero_si256();
  for (int i = 0; i < block_size; i += 64) {
    // Load 64 elements for coeff and dqcoeff.
    const __m256i _coeff_0 = _mm256_loadu_si256((const __m256i *)coeff);
    const __m256i _dqcoeff_0 = _mm256_loadu_si256((const __m256i *)dqcoeff);
    const __m256i _coeff_1 = _mm256_loadu_si256((const __m256i *)(coeff + 16));
    const __m256i _dqcoeff_1 =
        _mm256_loadu_si256((const __m256i *)(dqcoeff + 16));
    const __m256i _coeff_2 = _mm256_loadu_si256((const __m256i *)(coeff + 32));
    const __m256i _dqcoeff_2 =
        _mm256_loadu_si256((const __m256i *)(dqcoeff + 32));
    const __m256i _coeff_3 = _mm256_loadu_si256((const __m256i *)(coeff + 48));
    const __m256i _dqcoeff_3 =
        _mm256_loadu_si256((const __m256i *)(dqcoeff + 48));

    // d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
    const __m256i diff_0 = _mm256_sub_epi16(_dqcoeff_0, _coeff_0);
    const __m256i diff_1 = _mm256_sub_epi16(_dqcoeff_1, _coeff_1);
    const __m256i diff_2 = _mm256_sub_epi16(_dqcoeff_2, _coeff_2);
    const __m256i diff_3 = _mm256_sub_epi16(_dqcoeff_3, _coeff_3);

    // r0 r1 r2 r3 r4 r5 r6 r7
    const __m256i error_0 = _mm256_madd_epi16(diff_0, diff_0);
    const __m256i error_1 = _mm256_madd_epi16(diff_1, diff_1);
    const __m256i error_2 = _mm256_madd_epi16(diff_2, diff_2);
    const __m256i error_3 = _mm256_madd_epi16(diff_3, diff_3);
    // r00 r01 r02 r03 r04 r05 r06 r07
    const __m256i err_final_0 = _mm256_add_epi32(error_0, error_1);
    // r10 r11 r12 r13 r14 r15 r16 r17
    const __m256i err_final_1 = _mm256_add_epi32(error_2, error_3);

    // For extreme input values, the accumulation needs to happen in 64-bit
    // precision to avoid any overflow.
    // r00 r01 r04 r05
    const __m256i exp0_error_lo = _mm256_unpacklo_epi32(err_final_0, zero);
    // r02 r03 r06 r07
    const __m256i exp0_error_hi = _mm256_unpackhi_epi32(err_final_0, zero);
    // r10 r11 r14 r15
    const __m256i exp1_error_lo = _mm256_unpacklo_epi32(err_final_1, zero);
    // r12 r13 r16 r17
    const __m256i exp1_error_hi = _mm256_unpackhi_epi32(err_final_1, zero);

    const __m256i sum_temp_0 = _mm256_add_epi64(exp0_error_hi, exp0_error_lo);
    const __m256i sum_temp_1 = _mm256_add_epi64(exp1_error_hi, exp1_error_lo);
    const __m256i sse_256_temp = _mm256_add_epi64(sum_temp_1, sum_temp_0);
    *sse_256 = _mm256_add_epi64(*sse_256, sse_256_temp);
    coeff += 64;
    dqcoeff += 64;
  }
}

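// Returns the sum of squared differences between the dequantized coefficients
// (dqcoeff) and the original coefficients (coeff) for 16-bit inputs.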
int64_t av1_block_error_lp_avx2(const int16_t *coeff, const int16_t *dqcoeff,
                                intptr_t block_size) {
  assert(block_size % 16 == 0);
  __m256i sse_256 = _mm256_setzero_si256();
  int64_t sse;

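  // Dispatch on block size: dedicated kernels handle 16- and 32-element
  // blocks; larger blocks are processed 64 elements per loop iteration.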
  if (block_size == 16)
    av1_block_error_block_size16_avx2(coeff, dqcoeff, &sse_256);
  else if (block_size == 32)
    av1_block_error_block_size32_avx2(coeff, dqcoeff, &sse_256);
  else
    av1_block_error_block_size64_avx2(coeff, dqcoeff, &sse_256, block_size);

  // Save the higher 64 bits of each 128-bit lane.
  const __m256i sse_hi = _mm256_srli_si256(sse_256, 8);
  // Add the higher 64 bits to the lower 64 bits.
  sse_256 = _mm256_add_epi64(sse_256, sse_hi);
  // Add the two 128-bit lanes of sse_256 to get the final sse.
  const __m128i sse_128 = _mm_add_epi64(_mm256_castsi256_si128(sse_256),
                                        _mm256_extractf128_si256(sse_256, 1));

  // Store the result.
  _mm_storel_epi64((__m128i *)&sse, sse_128);
  return sse;
}

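// Returns the sum of squared differences between dqcoeff and coeff and also
// writes the sum of squared coeff values to *ssz.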
int64_t av1_block_error_avx2(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                             intptr_t block_size, int64_t *ssz) {
  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
  __m256i sse_reg_64hi, ssz_reg_64hi;
  __m128i sse_reg128, ssz_reg128;
  int64_t sse;
  int i;
  const __m256i zero_reg = _mm256_setzero_si256();

  // Initialize the sse and ssz registers to zero.
  sse_reg = _mm256_setzero_si256();
  ssz_reg = _mm256_setzero_si256();

  for (i = 0; i < block_size; i += 16) {
    // Load 32 bytes from coeff and dqcoeff.
    read_coeff(coeff, i, &coeff_reg);
    read_coeff(dqcoeff, i, &dqcoeff_reg);
    // dqcoeff - coeff
    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
    // madd (dqcoeff - coeff)
    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
    // madd coeff
    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
    // Expand each double word of madd (dqcoeff - coeff) to a quad word.
    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
    // Expand each double word of madd (coeff) to a quad word.
    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
    // Accumulate each quad word of madd (dqcoeff - coeff) and madd (coeff).
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
  }
  // Save the higher 64 bits of each 128-bit lane.
  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // Add the higher 64 bits to the lower 64 bits.
  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);

  // Add the 64-bit sums from the two 128-bit lanes of each 256-bit register.
  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
                             _mm256_extractf128_si256(sse_reg, 1));

  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
                             _mm256_extractf128_si256(ssz_reg, 1));

  // Store the results.
  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);

  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
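  // Clear the upper halves of the YMM registers to avoid AVX-SSE transition
  // penalties in subsequent SSE code.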
  _mm256_zeroupper();
  return sse;
}