/*
 * Copyright (c) 2020, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>
#include <string.h>  // memcpy

#include "aom/aom_integer.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_ports/mem.h"
#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/reconinter.h"
#include "av1/encoder/context_tree.h"
#include "av1/encoder/av1_temporal_denoiser.h"

// Compute the sum of all pixel differences of this MB.
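// On AArch64 this reduces to a single vaddlvq_s8; the AArch32 fallback uses
// widening pairwise adds (s8 -> s16 -> s32 -> s64), a saturating 64-bit add
// of the two halves, and finally returns the low 32 bits of the result.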
static inline int horizontal_add_s8x16(const int8x16_t v_sum_diff_total) {
#if AOM_ARCH_AARCH64
  return vaddlvq_s8(v_sum_diff_total);
#else
  const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff_total);
  const int32x4_t fedc_ba98_7654_3210 = vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
  const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
  const int64x1_t x = vqadd_s64(vget_high_s64(fedcba98_76543210),
                                vget_low_s64(fedcba98_76543210));
  const int sum_diff = vget_lane_s32(vreinterpret_s32_s64(x), 0);
  return sum_diff;
#endif
}

// Denoise a 16x1 vector.
static inline int8x16_t denoiser_16x1_neon(
    const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
    const uint8x16_t v_level1_threshold, const uint8x16_t v_level2_threshold,
    const uint8x16_t v_level3_threshold, const uint8x16_t v_level1_adjustment,
    const uint8x16_t v_delta_level_1_and_2,
    const uint8x16_t v_delta_level_2_and_3, int8x16_t v_sum_diff_total) {
  const uint8x16_t v_sig = vld1q_u8(sig);
  const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

  /* Calculate absolute difference and sign masks. */
  const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
  const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
  const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

  /* Figure out which level we are in. */
  const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
  const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
  const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);

  /* Calculate absolute adjustments for level 1, 2 and 3. */
  const uint8x16_t v_level2_adjustment =
      vandq_u8(v_level2_mask, v_delta_level_1_and_2);
  const uint8x16_t v_level3_adjustment =
      vandq_u8(v_level3_mask, v_delta_level_2_and_3);
  const uint8x16_t v_level1and2_adjustment =
      vaddq_u8(v_level1_adjustment, v_level2_adjustment);
  const uint8x16_t v_level1and2and3_adjustment =
      vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);

  /* Select the absolute adjustment: the absolute difference itself when in
   * level 0, or the combined value for levels 1, 2 and 3.
   */
  const uint8x16_t v_abs_adjustment =
      vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
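  /* Illustrative example with the values the callers below use for small
   * motion and shift_inc == 0 (thresholds 4/8/16, per-level adjustments
   * 4/1/2): an absolute difference of 10 passes the level 1 and level 2
   * thresholds but not level 3, so the adjustment is 4 + 1 = 5, while an
   * absolute difference of 3 stays in level 0 and is used unchanged.
   */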

  /* Calculate positive and negative adjustments. Apply them to the signal
   * and accumulate them. Adjustments are less than eight and their maximum
   * sum (7 * 16) fits in a signed char.
   */
  const uint8x16_t v_pos_adjustment =
      vandq_u8(v_diff_pos_mask, v_abs_adjustment);
  const uint8x16_t v_neg_adjustment =
      vandq_u8(v_diff_neg_mask, v_abs_adjustment);

  uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
  v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);

  /* Store results. */
  vst1q_u8(running_avg_y, v_running_avg_y);

  /* Accumulate the signed adjustments to form the sum of all pixel
   * differences for this macroblock.
   */
  {
    const int8x16_t v_sum_diff =
        vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
                  vreinterpretq_s8_u8(v_neg_adjustment));
    v_sum_diff_total = vaddq_s8(v_sum_diff_total, v_sum_diff);
  }
  return v_sum_diff_total;
}

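// Second-pass adjustment of a 16x1 vector: pull running_avg_y back toward
// sig by at most k_delta per pixel (the opposite direction to the adjustment
// applied in denoiser_16x1_neon), and update the running sum of pixel
// differences accordingly.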
static inline int8x16_t denoiser_adjust_16x1_neon(
    const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
    const uint8x16_t k_delta, int8x16_t v_sum_diff_total) {
  uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
  const uint8x16_t v_sig = vld1q_u8(sig);
  const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

  /* Calculate absolute difference and sign masks. */
  const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
  const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
  const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
  // Clamp absolute difference to delta to get the adjustment.
  const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, k_delta);

  const uint8x16_t v_pos_adjustment =
      vandq_u8(v_diff_pos_mask, v_abs_adjustment);
  const uint8x16_t v_neg_adjustment =
      vandq_u8(v_diff_neg_mask, v_abs_adjustment);

  v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
  v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);

  /* Store results. */
  vst1q_u8(running_avg_y, v_running_avg_y);

  {
    const int8x16_t v_sum_diff =
        vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
                  vreinterpretq_s8_u8(v_pos_adjustment));
    v_sum_diff_total = vaddq_s8(v_sum_diff_total, v_sum_diff);
  }
  return v_sum_diff_total;
}

// Denoise 8x8 and 8x16 blocks.
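// Each loop iteration packs two 8-pixel rows of sig, mc_running_avg_y and
// running_avg_y into 16-byte scratch buffers so the 16x1 kernels above can
// be reused; the denoised result is written back as two 8-byte stores. The
// buffers are kept so that the second, delta-limited pass can run without
// re-reading the frame.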
static int av1_denoiser_8xN_neon(const uint8_t *sig, int sig_stride,
                                 const uint8_t *mc_running_avg_y,
                                 int mc_avg_y_stride, uint8_t *running_avg_y,
                                 int avg_y_stride, int increase_denoising,
                                 BLOCK_SIZE bs, int motion_magnitude,
                                 int width) {
  int sum_diff_thresh, r, sum_diff = 0;
  const int shift_inc =
      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
          ? 1
          : 0;
  uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16];

  const uint8x16_t v_level1_adjustment = vmovq_n_u8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
  const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  const uint8x16_t v_level1_threshold = vdupq_n_u8(4 + shift_inc);
  const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  const uint8x16_t v_level3_threshold = vdupq_n_u8(16);

  const int b_height = block_size_high[bs] >> 1;

  int8x16_t v_sum_diff_total = vdupq_n_s8(0);

  for (r = 0; r < b_height; ++r) {
    memcpy(sig_buffer[r], sig, width);
    memcpy(sig_buffer[r] + width, sig + sig_stride, width);
    memcpy(mc_running_buffer[r], mc_running_avg_y, width);
    memcpy(mc_running_buffer[r] + width, mc_running_avg_y + mc_avg_y_stride,
           width);
    memcpy(running_buffer[r], running_avg_y, width);
    memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
    v_sum_diff_total = denoiser_16x1_neon(
        sig_buffer[r], mc_running_buffer[r], running_buffer[r],
        v_level1_threshold, v_level2_threshold, v_level3_threshold,
        v_level1_adjustment, v_delta_level_1_and_2, v_delta_level_2_and_3,
        v_sum_diff_total);
    {
      const uint8x16_t v_running_buffer = vld1q_u8(running_buffer[r]);
      const uint8x8_t v_running_buffer_high = vget_high_u8(v_running_buffer);
      const uint8x8_t v_running_buffer_low = vget_low_u8(v_running_buffer);
      vst1_u8(running_avg_y, v_running_buffer_low);
      vst1_u8(running_avg_y + avg_y_stride, v_running_buffer_high);
    }
    // Update pointers for next iteration.
    sig += (sig_stride << 1);
    mc_running_avg_y += (mc_avg_y_stride << 1);
    running_avg_y += (avg_y_stride << 1);
  }

  {
    sum_diff = horizontal_add_s8x16(v_sum_diff_total);
    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
    if (abs(sum_diff) > sum_diff_thresh) {
      // Before returning to copy the block (i.e., apply no denoising),
      // check if we can still apply some (weaker) temporal filtering to
      // this block, which would otherwise not be denoised at all. The
      // simplest option is to apply an additional adjustment to
      // running_avg_y to bring it closer to sig. The adjustment is capped
      // by a maximum delta, and chosen such that in most cases the
      // resulting sum_diff will be within the acceptable range given by
      // sum_diff_thresh.

      // The delta is set by the excess of absolute pixel diff over the
      // threshold.
      const int delta =
          ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
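      // Purely illustrative example: for an 8x16 block (128 pixels, so
      // num_pels_log2_lookup[bs] == 7), an excess of 300 over the threshold
      // gives delta = (300 >> 7) + 1 = 3.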
      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const uint8x16_t k_delta = vmovq_n_u8(delta);
        running_avg_y -= avg_y_stride * (b_height << 1);
        for (r = 0; r < b_height; ++r) {
          v_sum_diff_total = denoiser_adjust_16x1_neon(
              sig_buffer[r], mc_running_buffer[r], running_buffer[r], k_delta,
              v_sum_diff_total);
          {
            const uint8x16_t v_running_buffer = vld1q_u8(running_buffer[r]);
            const uint8x8_t v_running_buffer_high =
                vget_high_u8(v_running_buffer);
            const uint8x8_t v_running_buffer_low =
                vget_low_u8(v_running_buffer);
            vst1_u8(running_avg_y, v_running_buffer_low);
            vst1_u8(running_avg_y + avg_y_stride, v_running_buffer_high);
          }
          // Update pointers for next iteration.
          running_avg_y += (avg_y_stride << 1);
        }
        sum_diff = horizontal_add_s8x16(v_sum_diff_total);
        if (abs(sum_diff) > sum_diff_thresh) {
          return COPY_BLOCK;
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }

  return FILTER_BLOCK;
}

// Denoise 16x16 to 128x128 blocks.
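// The int8 per-lane accumulators in v_sum_diff_total[c][r >> 4] each cover
// one 16-pixel-wide column over a band of up to 16 rows; they are flushed
// into sum_diff every 16 rows (or after 8 rows for BLOCK_16X8) before they
// can overflow the int8 range.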
static int av1_denoiser_NxM_neon(const uint8_t *sig, int sig_stride,
                                 const uint8_t *mc_running_avg_y,
                                 int mc_avg_y_stride, uint8_t *running_avg_y,
                                 int avg_y_stride, int increase_denoising,
                                 BLOCK_SIZE bs, int motion_magnitude) {
  const int shift_inc =
      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
          ? 1
          : 0;
  const uint8x16_t v_level1_adjustment = vmovq_n_u8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
  const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
  const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  const uint8x16_t v_level3_threshold = vdupq_n_u8(16);

  const int b_width = block_size_wide[bs];
  const int b_height = block_size_high[bs];
  const int b_width_shift4 = b_width >> 4;

  int8x16_t v_sum_diff_total[8][8];
  int r, c, sum_diff = 0;

  for (r = 0; r < 8; ++r) {
    for (c = 0; c < b_width_shift4; ++c) {
      v_sum_diff_total[c][r] = vdupq_n_s8(0);
    }
  }

  for (r = 0; r < b_height; ++r) {
    for (c = 0; c < b_width_shift4; ++c) {
      v_sum_diff_total[c][r >> 4] = denoiser_16x1_neon(
          sig, mc_running_avg_y, running_avg_y, v_level1_threshold,
          v_level2_threshold, v_level3_threshold, v_level1_adjustment,
          v_delta_level_1_and_2, v_delta_level_2_and_3,
          v_sum_diff_total[c][r >> 4]);

      // Update pointers for next iteration.
      sig += 16;
      mc_running_avg_y += 16;
      running_avg_y += 16;
    }

    if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
      for (c = 0; c < b_width_shift4; ++c) {
        sum_diff += horizontal_add_s8x16(v_sum_diff_total[c][r >> 4]);
      }
    }

    // Update pointers for next iteration.
    sig = sig - b_width + sig_stride;
    mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
    running_avg_y = running_avg_y - b_width + avg_y_stride;
  }

  {
    const int sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
    if (abs(sum_diff) > sum_diff_thresh) {
      const int delta =
          ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const uint8x16_t k_delta = vdupq_n_u8(delta);
        sig -= sig_stride * b_height;
        mc_running_avg_y -= mc_avg_y_stride * b_height;
        running_avg_y -= avg_y_stride * b_height;
        sum_diff = 0;

        for (r = 0; r < b_height; ++r) {
          for (c = 0; c < b_width_shift4; ++c) {
            v_sum_diff_total[c][r >> 4] =
                denoiser_adjust_16x1_neon(sig, mc_running_avg_y, running_avg_y,
                                          k_delta, v_sum_diff_total[c][r >> 4]);

            // Update pointers for next iteration.
            sig += 16;
            mc_running_avg_y += 16;
            running_avg_y += 16;
          }
          if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
            for (c = 0; c < b_width_shift4; ++c) {
              sum_diff += horizontal_add_s8x16(v_sum_diff_total[c][r >> 4]);
            }
          }

          sig = sig - b_width + sig_stride;
          mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
          running_avg_y = running_avg_y - b_width + avg_y_stride;
        }

        if (abs(sum_diff) > sum_diff_thresh) {
          return COPY_BLOCK;
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }
  return FILTER_BLOCK;
}

int av1_denoiser_filter_neon(const uint8_t *sig, int sig_stride,
                             const uint8_t *mc_avg, int mc_avg_stride,
                             uint8_t *avg, int avg_stride,
                             int increase_denoising, BLOCK_SIZE bs,
                             int motion_magnitude) {
  // Block sizes are ordered by frequency to allow early termination of the
  // checks below.
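  // Block sizes narrower or shorter than 8 pixels (and other shapes not
  // listed below) are not denoised; the caller copies the block instead.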
  if (bs == BLOCK_16X16 || bs == BLOCK_32X32 || bs == BLOCK_64X64 ||
      bs == BLOCK_128X128 || bs == BLOCK_128X64 || bs == BLOCK_64X128 ||
      bs == BLOCK_16X32 || bs == BLOCK_16X8 || bs == BLOCK_32X16 ||
      bs == BLOCK_32X64 || bs == BLOCK_64X32) {
    return av1_denoiser_NxM_neon(sig, sig_stride, mc_avg, mc_avg_stride, avg,
                                 avg_stride, increase_denoising, bs,
                                 motion_magnitude);
  } else if (bs == BLOCK_8X8 || bs == BLOCK_8X16) {
    return av1_denoiser_8xN_neon(sig, sig_stride, mc_avg, mc_avg_stride, avg,
                                 avg_stride, increase_denoising, bs,
                                 motion_magnitude, 8);
  }
  return COPY_BLOCK;
}