/*
 * Copyright (c) 2024, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <math.h>
#include <immintrin.h>

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/flow_estimation/disflow.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/synonyms_avx2.h"

#include "config/aom_dsp_rtcd.h"

#if DISFLOW_PATCH_SIZE != 8
#error "Need to change disflow_avx2.c if DISFLOW_PATCH_SIZE != 8"
#endif

// Compute horizontal and vertical kernels and return them packed into a
// register. The coefficient ordering is:
// h0, h1, v0, v1, h2, h3, v2, v3
// This is chosen because it takes less work than fully separating the kernels,
// but it is separated enough that we can pick out each coefficient pair in the
// main compute_flow_at_point function.
static inline __m128i compute_cubic_kernels(double u, double v) {
  const __m128d x = _mm_set_pd(v, u);

  const __m128d x2 = _mm_mul_pd(x, x);
  const __m128d x3 = _mm_mul_pd(x2, x);

  // Macro to multiply a value v by a constant coefficient c
#define MULC(c, v) _mm_mul_pd(_mm_set1_pd(c), v)

  // Compute floating-point kernel
  // Note: To ensure results are bit-identical to the C code, we need to
  // perform exactly the same sequence of operations here as in the C code.
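  //
  // As a scalar sketch (for reference only; the vector code below follows the
  // C implementation's exact operation order), the cubic kernel weights built
  // here are:
  //   k0(x) = -0.5*x +     x^2 - 0.5*x^3
  //   k1(x) =  1.0 - 2.5*x^2 + 1.5*x^3
  //   k2(x) =  0.5*x + 2.0*x^2 - 1.5*x^3
  //   k3(x) =        - 0.5*x^2 + 0.5*x^3
  // evaluated at x = u (horizontal kernel) and x = v (vertical kernel).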
  __m128d k0 = _mm_sub_pd(_mm_add_pd(MULC(-0.5, x), x2), MULC(0.5, x3));
  __m128d k1 =
      _mm_add_pd(_mm_sub_pd(_mm_set1_pd(1.0), MULC(2.5, x2)), MULC(1.5, x3));
  __m128d k2 =
      _mm_sub_pd(_mm_add_pd(MULC(0.5, x), MULC(2.0, x2)), MULC(1.5, x3));
  __m128d k3 = _mm_add_pd(MULC(-0.5, x2), MULC(0.5, x3));
#undef MULC

  // Integerize
  __m128d prec = _mm_set1_pd((double)(1 << DISFLOW_INTERP_BITS));

  k0 = _mm_round_pd(_mm_mul_pd(k0, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  k1 = _mm_round_pd(_mm_mul_pd(k1, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  k2 = _mm_round_pd(_mm_mul_pd(k2, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  k3 = _mm_round_pd(_mm_mul_pd(k3, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

  const __m128i c0 = _mm_cvtpd_epi32(k0);
  const __m128i c1 = _mm_cvtpd_epi32(k1);
  const __m128i c2 = _mm_cvtpd_epi32(k2);
  const __m128i c3 = _mm_cvtpd_epi32(k3);

  // Rearrange results and convert down to 16 bits, giving the target output
  // ordering
  const __m128i c01 = _mm_unpacklo_epi32(c0, c1);
  const __m128i c23 = _mm_unpacklo_epi32(c2, c3);
  return _mm_packs_epi32(c01, c23);
}

// Compare two DISFLOW_PATCH_SIZE x DISFLOW_PATCH_SIZE patches, one rooted at
// position (x, y) in src and the other at (x + u, y + v) in ref, where
// width and height are the frame dimensions (used to clamp the fetch region).
// This function computes the right-hand-side vector b of the flow equations
// for this patch: b[0] = sum(dx * dt) and b[1] = sum(dy * dt), where dt is
// the per-pixel difference between the interpolated ref patch and the src
// patch.
//
// TODO(rachelbarker): Test speed/quality impact of using bilinear
// interpolation instead of bicubic interpolation
static inline void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
                                       int width, int height, int stride,
                                       int x, int y, double u, double v,
                                       const int16_t *dx, const int16_t *dy,
                                       int *b) {
  const __m256i zero = _mm256_setzero_si256();

  // Accumulate 8 32-bit partial sums for each element of b
  // These will be flattened at the end.
  __m256i b0_acc = _mm256_setzero_si256();
  __m256i b1_acc = _mm256_setzero_si256();

  // Split offset into integer and fractional parts, and compute cubic
  // interpolation kernels
  const int u_int = (int)floor(u);
  const int v_int = (int)floor(v);
  const double u_frac = u - floor(u);
  const double v_frac = v - floor(v);

  const __m128i kernels = compute_cubic_kernels(u_frac, v_frac);

  // Storage for intermediate values between the two convolution directions
  // In the AVX2 implementation, this needs a dummy row at the end, because
  // we generate 2 rows at a time but the total number of rows is odd.
  // So we generate one more row than we actually need.
  DECLARE_ALIGNED(32, int16_t,
                  tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 4)]);
  int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;  // Offset by one row

  // Clamp coordinates so that all pixels we fetch will remain within the
  // allocated border region, but allow them to go far enough out that
  // the border pixels' values do not change.
  // Since we are calculating an 8x8 block, the bottom-right pixel
  // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic
  // interpolation has 4 taps, meaning that the output of pixel
  // (x_w, y_w) depends on the pixels in the range
  // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]).
  //
  // Thus the most extreme coordinates which will be fetched are
  // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9).
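  //
  // Clamping x0 to [-9, width] and y0 to [-9, height] therefore keeps every
  // fetched coordinate within [-10, width + 9] x [-10, height + 9], which is
  // assumed to lie inside the frame's allocated border region.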
  const int x0 = clamp(x + u_int, -9, width);
  const int y0 = clamp(y + v_int, -9, height);

  // Horizontal convolution

  // Prepare the kernel vectors
  // We split the kernel into two vectors with kernel indices:
  // 0, 1, 0, 1, 0, 1, 0, 1, and
  // 2, 3, 2, 3, 2, 3, 2, 3
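  // With the packed ordering (h0, h1, v0, v1, h2, h3, v2, v3), the (h0, h1)
  // pair occupies the low 32 bits of `kernels` and the (h2, h3) pair starts
  // at byte offset 8, so each vector can be built with a single broadcast.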
  __m256i h_kernel_01 = _mm256_broadcastd_epi32(kernels);
  __m256i h_kernel_23 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 8));

  __m256i round_const_h = _mm256_set1_epi32(1 << (DISFLOW_INTERP_BITS - 6 - 1));

  for (int i = -1; i < DISFLOW_PATCH_SIZE + 2; i += 2) {
    const int y_w = y0 + i;
    const uint8_t *ref_row = &ref[y_w * stride + (x0 - 1)];
    int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];

    // Load this row of pixels.
    // For an 8x8 patch, we need to load the 8 image pixels + 3 extras,
    // for a total of 11 pixels. Here we load 16 pixels, but only use
    // the first 11.
    __m256i row =
        yy_loadu2_128((__m128i *)(ref_row + stride), (__m128i *)ref_row);

    // Expand pixels to int16s
    // We must use unpacks here, as we have one row in each 128-bit lane
    // and want to handle each of those independently.
    // This is in contrast to _mm256_cvtepu8_epi16(), which takes a single
    // 128-bit input and widens it to 256 bits.
    __m256i px_0to7_i16 = _mm256_unpacklo_epi8(row, zero);
    __m256i px_4to10_i16 =
        _mm256_unpacklo_epi8(_mm256_srli_si256(row, 4), zero);

    // Compute first four outputs
    // input pixels 0, 1, 1, 2, 2, 3, 3, 4
    // * kernel     0, 1, 0, 1, 0, 1, 0, 1
    __m256i px0 =
        _mm256_unpacklo_epi16(px_0to7_i16, _mm256_srli_si256(px_0to7_i16, 2));
    // input pixels 2, 3, 3, 4, 4, 5, 5, 6
    // * kernel     2, 3, 2, 3, 2, 3, 2, 3
    __m256i px1 = _mm256_unpacklo_epi16(_mm256_srli_si256(px_0to7_i16, 4),
                                        _mm256_srli_si256(px_0to7_i16, 6));
    // Convolve with kernel and sum 2x2 boxes to form first 4 outputs
    __m256i sum0 = _mm256_add_epi32(_mm256_madd_epi16(px0, h_kernel_01),
                                    _mm256_madd_epi16(px1, h_kernel_23));

    __m256i out0 = _mm256_srai_epi32(_mm256_add_epi32(sum0, round_const_h),
                                     DISFLOW_INTERP_BITS - 6);

    // Compute second four outputs
    __m256i px2 =
        _mm256_unpacklo_epi16(px_4to10_i16, _mm256_srli_si256(px_4to10_i16, 2));
    __m256i px3 = _mm256_unpacklo_epi16(_mm256_srli_si256(px_4to10_i16, 4),
                                        _mm256_srli_si256(px_4to10_i16, 6));
    __m256i sum1 = _mm256_add_epi32(_mm256_madd_epi16(px2, h_kernel_01),
                                    _mm256_madd_epi16(px3, h_kernel_23));
    // Round by just enough bits that the result is guaranteed to fit into an
    // i16. Then the next stage can use 16 x 16 -> 32 bit multiplies, which
    // should be a fair bit faster than the 32 x 32 -> 32 bit multiplies it
    // would otherwise need.
    // This means shifting down so we have 6 extra bits, for a maximum value
    // of +18360, which can occur if u_frac == 0.5 and the input pixels are
    // {0, 255, 255, 0}.
    __m256i out1 = _mm256_srai_epi32(_mm256_add_epi32(sum1, round_const_h),
                                     DISFLOW_INTERP_BITS - 6);

    _mm256_storeu_si256((__m256i *)tmp_row, _mm256_packs_epi32(out0, out1));
  }

  // Vertical convolution
  const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
  __m256i round_const_v = _mm256_set1_epi32(1 << (round_bits - 1));

  __m256i v_kernel_01 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 4));
  __m256i v_kernel_23 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 12));

  for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
    int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];

    // Load 5 rows of 8 x 16-bit values, and pack into 4 registers
    // holding rows {0, 1}, {1, 2}, {2, 3}, {3, 4}
    __m128i row0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
    __m128i row1 = _mm_loadu_si128((__m128i *)tmp_row);
    __m128i row2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
    __m128i row3 =
        _mm_loadu_si128((__m128i *)(tmp_row + 2 * DISFLOW_PATCH_SIZE));
    __m128i row4 =
        _mm_loadu_si128((__m128i *)(tmp_row + 3 * DISFLOW_PATCH_SIZE));

    __m256i px0 = _mm256_set_m128i(row1, row0);
    __m256i px1 = _mm256_set_m128i(row2, row1);
    __m256i px2 = _mm256_set_m128i(row3, row2);
    __m256i px3 = _mm256_set_m128i(row4, row3);

    // We want to calculate px0 * v_kernel[0] + px1 * v_kernel[1] + ... ,
    // but each multiply expands its output to 32 bits. So we need to be
    // a little clever about how we do this
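    // Specifically, interleaving px0 with px1 (and px2 with px3) pairs each
    // 16-bit element with the element one row below it, so a single
    // _mm256_madd_epi16 against (v0, v1) computes v0 * row[j] + v1 * row[j+1]
    // for each column, already widened to 32 bits.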
    __m256i sum0 = _mm256_add_epi32(
        _mm256_madd_epi16(_mm256_unpacklo_epi16(px0, px1), v_kernel_01),
        _mm256_madd_epi16(_mm256_unpacklo_epi16(px2, px3), v_kernel_23));
    __m256i sum1 = _mm256_add_epi32(
        _mm256_madd_epi16(_mm256_unpackhi_epi16(px0, px1), v_kernel_01),
        _mm256_madd_epi16(_mm256_unpackhi_epi16(px2, px3), v_kernel_23));

    __m256i sum0_rounded =
        _mm256_srai_epi32(_mm256_add_epi32(sum0, round_const_v), round_bits);
    __m256i sum1_rounded =
        _mm256_srai_epi32(_mm256_add_epi32(sum1, round_const_v), round_bits);

    __m256i warped = _mm256_packs_epi32(sum0_rounded, sum1_rounded);
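    // Load the two corresponding rows of the source patch and shift them up
    // by 3 bits so that they sit on the same fixed-point scale as `warped`;
    // the vertical rounding above leaves DISFLOW_DERIV_SCALE_LOG2 fractional
    // bits, which this shift is assumed to match.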
    __m128i src_pixels_u8 = xx_loadu_2x64(&src[(y + i + 1) * stride + x],
                                          &src[(y + i) * stride + x]);
    __m256i src_pixels =
        _mm256_slli_epi16(_mm256_cvtepu8_epi16(src_pixels_u8), 3);

    // Calculate delta from the target patch
    __m256i dt = _mm256_sub_epi16(warped, src_pixels);

    // Load 2x8 elements each of dx and dy, to pair with the 2x8 elements of dt
    // that we have just computed. Then compute 2x8 partial sums of dx * dt
    // and dy * dt, implicitly sum to give 2x4 partial sums of each, and
    // accumulate.
    __m256i dx_row = _mm256_loadu_si256((__m256i *)&dx[i * DISFLOW_PATCH_SIZE]);
    __m256i dy_row = _mm256_loadu_si256((__m256i *)&dy[i * DISFLOW_PATCH_SIZE]);
    b0_acc = _mm256_add_epi32(b0_acc, _mm256_madd_epi16(dx_row, dt));
    b1_acc = _mm256_add_epi32(b1_acc, _mm256_madd_epi16(dy_row, dt));
  }

  // Flatten the two sets of partial sums to find the final value of b
  // We need to set b[0] = sum(b0_acc), b[1] = sum(b1_acc).
  // We need to do 14 additions in total; a `hadd` instruction can take care
  // of eight of them, then a vertical sum can do four more, leaving two
  // scalar additions.
  __m256i partial_sum_256 = _mm256_hadd_epi32(b0_acc, b1_acc);
  __m128i partial_sum =
      _mm_add_epi32(_mm256_extracti128_si256(partial_sum_256, 0),
                    _mm256_extracti128_si256(partial_sum_256, 1));
  b[0] = _mm_extract_epi32(partial_sum, 0) + _mm_extract_epi32(partial_sum, 1);
  b[1] = _mm_extract_epi32(partial_sum, 2) + _mm_extract_epi32(partial_sum, 3);
}

// Compute the x and y gradients of the source patch in a single pass,
// and store into dx and dy respectively.
static inline void sobel_filter(const uint8_t *src, int src_stride,
                                int16_t *dx, int16_t *dy) {
  const __m256i zero = _mm256_setzero_si256();

  // Loop setup: Load the first two rows (of 10 input rows) and apply
  // the horizontal parts of the two filters
  __m256i row_m1_0 =
      yy_loadu2_128((__m128i *)(src - 1), (__m128i *)(src - src_stride - 1));
  __m256i row_m1_0_a = _mm256_unpacklo_epi8(row_m1_0, zero);
  __m256i row_m1_0_b =
      _mm256_unpacklo_epi8(_mm256_srli_si256(row_m1_0, 1), zero);
  __m256i row_m1_0_c =
      _mm256_unpacklo_epi8(_mm256_srli_si256(row_m1_0, 2), zero);

  __m256i row_m1_0_hsmooth =
      _mm256_add_epi16(_mm256_add_epi16(row_m1_0_a, row_m1_0_c),
                       _mm256_slli_epi16(row_m1_0_b, 1));
  __m256i row_m1_0_hdiff = _mm256_sub_epi16(row_m1_0_a, row_m1_0_c);

  // Main loop: For each pair of output rows (i, i+1):
  // * Load rows (i+1, i+2) and apply both horizontal filters
  // * Apply vertical filters and store results
  // * Shift rows for next iteration
  for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
    // Load rows (i+1, i+2) and apply both horizontal filters
    const __m256i row_p1_p2 =
        yy_loadu2_128((__m128i *)(src + (i + 2) * src_stride - 1),
                      (__m128i *)(src + (i + 1) * src_stride - 1));
    const __m256i row_p1_p2_a = _mm256_unpacklo_epi8(row_p1_p2, zero);
    const __m256i row_p1_p2_b =
        _mm256_unpacklo_epi8(_mm256_srli_si256(row_p1_p2, 1), zero);
    const __m256i row_p1_p2_c =
        _mm256_unpacklo_epi8(_mm256_srli_si256(row_p1_p2, 2), zero);

    const __m256i row_p1_p2_hsmooth =
        _mm256_add_epi16(_mm256_add_epi16(row_p1_p2_a, row_p1_p2_c),
                         _mm256_slli_epi16(row_p1_p2_b, 1));
    const __m256i row_p1_p2_hdiff = _mm256_sub_epi16(row_p1_p2_a, row_p1_p2_c);

    // Apply vertical filters and store results
    // dx = vertical smooth(horizontal diff(input))
    // dy = vertical diff(horizontal smooth(input))
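    //
    // As a reference sketch, the combined 3x3 kernels applied here (with the
    // sign convention of the diffs above, i.e. left minus right and top minus
    // bottom) are:
    //        [ 1  0 -1 ]          [  1  2  1 ]
    //   dx = [ 2  0 -2 ],    dy = [  0  0  0 ]
    //        [ 1  0 -1 ]          [ -1 -2 -1 ]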
    const __m256i row_0_p1_hdiff =
        _mm256_permute2x128_si256(row_m1_0_hdiff, row_p1_p2_hdiff, 0x21);
    const __m256i dx_row =
        _mm256_add_epi16(_mm256_add_epi16(row_m1_0_hdiff, row_p1_p2_hdiff),
                         _mm256_slli_epi16(row_0_p1_hdiff, 1));
    const __m256i dy_row =
        _mm256_sub_epi16(row_m1_0_hsmooth, row_p1_p2_hsmooth);

    _mm256_storeu_si256((__m256i *)(dx + i * DISFLOW_PATCH_SIZE), dx_row);
    _mm256_storeu_si256((__m256i *)(dy + i * DISFLOW_PATCH_SIZE), dy_row);

    // Shift rows for next iteration
    // This allows a lot of work to be reused, reducing the number of
    // horizontal filtering operations from 2*3*8 = 48 to 2*10 = 20
    row_m1_0_hsmooth = row_p1_p2_hsmooth;
    row_m1_0_hdiff = row_p1_p2_hdiff;
  }
}

static inline void compute_flow_matrix(const int16_t *dx, int dx_stride,
                                       const int16_t *dy, int dy_stride,
                                       double *M) {
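  // Accumulate the entries of the 2x2 least-squares matrix over the patch:
  //   M = [ sum(dx * dx)  sum(dx * dy) ]
  //       [ sum(dx * dy)  sum(dy * dy) ]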
  __m256i acc[4] = { 0 };

  for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
    __m256i dx_row = _mm256_loadu_si256((__m256i *)&dx[i * dx_stride]);
    __m256i dy_row = _mm256_loadu_si256((__m256i *)&dy[i * dy_stride]);

    acc[0] = _mm256_add_epi32(acc[0], _mm256_madd_epi16(dx_row, dx_row));
    acc[1] = _mm256_add_epi32(acc[1], _mm256_madd_epi16(dx_row, dy_row));
    // Don't compute acc[2], as it should be equal to acc[1]
    acc[3] = _mm256_add_epi32(acc[3], _mm256_madd_epi16(dy_row, dy_row));
  }

  // Condense sums
  __m256i partial_sum_0 = _mm256_hadd_epi32(acc[0], acc[1]);
  __m256i partial_sum_1 = _mm256_hadd_epi32(acc[1], acc[3]);
  __m256i result_256 = _mm256_hadd_epi32(partial_sum_0, partial_sum_1);
  __m128i result = _mm_add_epi32(_mm256_extracti128_si256(result_256, 0),
                                 _mm256_extracti128_si256(result_256, 1));

  // Apply regularization
  // We follow the standard regularization method of adding `k * I` before
  // inverting. This ensures that the matrix will be invertible.
  //
  // Setting the regularization strength k to 1 seems to work well here, as
  // typical values coming from the other equations are very large (1e5 to
  // 1e6, with an upper limit of around 6e7, at the time of writing).
  // It also preserves the property that all matrix values are whole numbers,
  // which is convenient for an integerized SIMD implementation.
  result = _mm_add_epi32(result, _mm_set_epi32(1, 0, 0, 1));

  // Convert results to doubles and store
  _mm256_storeu_pd(M, _mm256_cvtepi32_pd(result));
}

// Try to invert the matrix M
// Note: Due to the nature of how a least-squares matrix is constructed, all of
// the eigenvalues will be >= 0, and therefore det M >= 0 as well.
// The regularization term `+ k * I` further ensures that det M >= k^2.
// As mentioned in compute_flow_matrix(), here we use k = 1, so det M >= 1.
// So we don't have to worry about non-invertible matrices here.
static inline void invert_2x2(const double *M, double *M_inv) {
  double det = (M[0] * M[3]) - (M[1] * M[2]);
  assert(det >= 1);
  const double det_inv = 1 / det;

  M_inv[0] = M[3] * det_inv;
  M_inv[1] = -M[1] * det_inv;
  M_inv[2] = -M[2] * det_inv;
  M_inv[3] = M[0] * det_inv;
}

void aom_compute_flow_at_point_avx2(const uint8_t *src, const uint8_t *ref,
                                    int x, int y, int width, int height,
                                    int stride, double *u, double *v) {
  DECLARE_ALIGNED(32, double, M[4]);
  DECLARE_ALIGNED(32, double, M_inv[4]);
  DECLARE_ALIGNED(32, int16_t, dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
  DECLARE_ALIGNED(32, int16_t, dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
  int b[2];

  // Compute gradients within this patch
  const uint8_t *src_patch = &src[y * stride + x];
  sobel_filter(src_patch, stride, dx, dy);

  compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
  invert_2x2(M, M_inv);

  for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
    compute_flow_vector(src, ref, width, height, stride, x, y, *u, *v, dx, dy,
                        b);

    // Solve flow equations to find a better estimate for the flow vector
    // at this point
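    // i.e. solve M * [step_u, step_v]^T = b using the precomputed inverse,
    // giving the incremental update to the flow vector.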
    const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1];
    const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1];
    *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2);
    *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2);

    if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) {
      // Stop iteration when we're close to convergence
      break;
    }
  }
}