1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "vpx_config.h"
12 #include "./vpx_scale_rtcd.h"
13 #include "./vpx_dsp_rtcd.h"
14 #include "./vp8_rtcd.h"
15 #include "bitstream.h"
16 #include "vp8/common/onyxc_int.h"
17 #include "vp8/common/blockd.h"
18 #include "onyx_int.h"
19 #include "vp8/common/systemdependent.h"
20 #include "vp8/common/vp8_skin_detection.h"
21 #include "vp8/encoder/quantize.h"
22 #include "vp8/common/alloccommon.h"
23 #include "mcomp.h"
24 #include "firstpass.h"
25 #include "vpx_dsp/psnr.h"
26 #include "vpx_scale/vpx_scale.h"
27 #include "vp8/common/extend.h"
28 #include "ratectrl.h"
29 #include "vp8/common/quant_common.h"
30 #include "segmentation.h"
31 #if CONFIG_POSTPROC
32 #include "vp8/common/postproc.h"
33 #endif
34 #include "vpx_mem/vpx_mem.h"
35 #include "vp8/common/reconintra.h"
36 #include "vp8/common/swapyv12buffer.h"
37 #include "vp8/common/threading.h"
38 #include "vpx_ports/system_state.h"
39 #include "vpx_ports/vpx_once.h"
40 #include "vpx_ports/vpx_timer.h"
41 #include "vpx_util/vpx_write_yuv_frame.h"
42 #if VPX_ARCH_ARM
43 #include "vpx_ports/arm.h"
44 #endif
45 #if CONFIG_MULTI_RES_ENCODING
46 #include "mr_dissim.h"
47 #endif
48 #include "encodeframe.h"
49 #if CONFIG_MULTITHREAD
50 #include "ethreading.h"
51 #endif
52 #include "picklpf.h"
53 #if !CONFIG_REALTIME_ONLY
54 #include "temporal_filter.h"
55 #endif
56
57 #include <assert.h>
58 #include <math.h>
59 #include <stdio.h>
60 #include <limits.h>
61
62 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
63 extern int vp8_update_coef_context(VP8_COMP *cpi);
64 #endif
65
66 extern unsigned int vp8_get_processor_freq(void);
67
68 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
69
70 static void set_default_lf_deltas(VP8_COMP *cpi);
71
72 extern const int vp8_gf_interval_table[101];
73
74 #if CONFIG_INTERNAL_STATS
75 #include "math.h"
76 #include "vpx_dsp/ssim.h"
77 #endif
78
79 #ifdef OUTPUT_YUV_SRC
80 FILE *yuv_file;
81 #endif
82 #ifdef OUTPUT_YUV_DENOISED
83 FILE *yuv_denoised_file;
84 #endif
85 #ifdef OUTPUT_YUV_SKINMAP
86 static FILE *yuv_skinmap_file = NULL;
87 #endif
88
89 #if 0
90 FILE *framepsnr;
91 FILE *kf_list;
92 FILE *keyfile;
93 #endif
94
95 #if 0
96 extern int skip_true_count;
97 extern int skip_false_count;
98 #endif
99
100 #ifdef SPEEDSTATS
101 unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0 };
103 unsigned int tot_pm = 0;
104 unsigned int cnt_pm = 0;
105 unsigned int tot_ef = 0;
106 unsigned int cnt_ef = 0;
107 #endif
108
109 #ifdef MODE_STATS
110 extern unsigned __int64 Sectionbits[50];
111 extern int y_modes[5];
112 extern int uv_modes[4];
113 extern int b_modes[10];
114
115 extern int inter_y_modes[10];
116 extern int inter_uv_modes[4];
117 extern unsigned int inter_b_modes[15];
118 #endif
119
120 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
121
122 extern const int qrounding_factors[129];
123 extern const int qzbin_factors[129];
124 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
125 extern const int vp8cx_base_skip_false_prob[128];
126
127 /* Tables relating active max Q to active min Q */
128 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = {
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
132 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
133 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11,
134 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
135 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23
136 };
137 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = {
138 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
139 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
140 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5,
141 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
142 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16,
143 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
144 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30
145 };
146 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = {
147 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3,
148 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8,
149 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
150 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
151 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34,
152 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44,
153 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58
154 };
155 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = {
156 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5,
157 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11,
158 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18,
159 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
160 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37,
161 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50,
162 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
163 };
164 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = {
165 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5,
166 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11,
167 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21,
168 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30,
169 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40,
170 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
171 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80
172 };
173 static const unsigned char inter_minq[QINDEX_RANGE] = {
174 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11,
175 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24,
176 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38,
177 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53,
178 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69,
179 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86,
180 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
181 };
182
183 #ifdef PACKET_TESTING
184 extern FILE *vpxlogc;
185 #endif
186
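/* vp8_save_layer_context() and vp8_restore_layer_context() below snapshot and
 * restore the per-layer rate-control state (buffer levels, quality limits,
 * rate correction factors, etc.) so that, with temporal scalability, each
 * layer keeps its own rate-control history between encoded frames.
 */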
187 void vp8_save_layer_context(VP8_COMP *cpi) {
188 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
189
190 /* Save layer dependent coding state */
191 lc->target_bandwidth = cpi->target_bandwidth;
192 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
193 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
194 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
195 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
196 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
197 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
198 lc->buffer_level = cpi->buffer_level;
199 lc->bits_off_target = cpi->bits_off_target;
200 lc->total_actual_bits = cpi->total_actual_bits;
201 lc->worst_quality = cpi->worst_quality;
202 lc->active_worst_quality = cpi->active_worst_quality;
203 lc->best_quality = cpi->best_quality;
204 lc->active_best_quality = cpi->active_best_quality;
205 lc->ni_av_qi = cpi->ni_av_qi;
206 lc->ni_tot_qi = cpi->ni_tot_qi;
207 lc->ni_frames = cpi->ni_frames;
208 lc->avg_frame_qindex = cpi->avg_frame_qindex;
209 lc->rate_correction_factor = cpi->rate_correction_factor;
210 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
211 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
212 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
213 lc->inter_frame_target = cpi->inter_frame_target;
214 lc->total_byte_count = cpi->total_byte_count;
215 lc->filter_level = cpi->common.filter_level;
216 lc->frames_since_last_drop_overshoot = cpi->frames_since_last_drop_overshoot;
217 lc->force_maxqp = cpi->force_maxqp;
218 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
219 lc->last_q[0] = cpi->last_q[0];
220 lc->last_q[1] = cpi->last_q[1];
221
222 memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage,
223 sizeof(cpi->mb.count_mb_ref_frame_usage));
224 }
225
226 void vp8_restore_layer_context(VP8_COMP *cpi, const int layer) {
227 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
228
229 /* Restore layer dependent coding state */
230 cpi->current_layer = layer;
231 cpi->target_bandwidth = lc->target_bandwidth;
232 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
233 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
234 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
235 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
236 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
237 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
238 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
239 cpi->buffer_level = lc->buffer_level;
240 cpi->bits_off_target = lc->bits_off_target;
241 cpi->total_actual_bits = lc->total_actual_bits;
242 cpi->active_worst_quality = lc->active_worst_quality;
243 cpi->active_best_quality = lc->active_best_quality;
244 cpi->ni_av_qi = lc->ni_av_qi;
245 cpi->ni_tot_qi = lc->ni_tot_qi;
246 cpi->ni_frames = lc->ni_frames;
247 cpi->avg_frame_qindex = lc->avg_frame_qindex;
248 cpi->rate_correction_factor = lc->rate_correction_factor;
249 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
250 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
251 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
252 cpi->inter_frame_target = lc->inter_frame_target;
253 cpi->total_byte_count = lc->total_byte_count;
254 cpi->common.filter_level = lc->filter_level;
255 cpi->frames_since_last_drop_overshoot = lc->frames_since_last_drop_overshoot;
256 cpi->force_maxqp = lc->force_maxqp;
257 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
258 cpi->last_q[0] = lc->last_q[0];
259 cpi->last_q[1] = lc->last_q[1];
260
261 memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage,
262 sizeof(cpi->mb.count_mb_ref_frame_usage));
263 }
264
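/* Scales val by num/denom using 64-bit intermediates and saturates the result
 * to INT_MAX. It is used below to convert buffer-level settings expressed in
 * milliseconds into bits, e.g. rescale(500, 800000, 1000) == 400000 bits at
 * 800 kbit/s.
 */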
265 static int rescale(int val, int num, int denom) {
266 int64_t llnum = num;
267 int64_t llden = denom;
268 int64_t llval = val;
269
270 int64_t result = (llval * llnum / llden);
271 if (result <= INT_MAX)
272 return (int)result;
273 else
274 return INT_MAX;
275 }
276
277 void vp8_init_temporal_layer_context(VP8_COMP *cpi, const VP8_CONFIG *oxcf,
278 const int layer,
279 double prev_layer_framerate) {
280 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
281
282 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
283 if (cpi->oxcf.target_bitrate[layer] > INT_MAX / 1000)
284 lc->target_bandwidth = INT_MAX;
285 else
286 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
287
288 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
289 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
290 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
291
292 lc->starting_buffer_level =
293 rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000);
294
295 if (oxcf->optimal_buffer_level == 0) {
296 lc->optimal_buffer_level = lc->target_bandwidth / 8;
297 } else {
298 lc->optimal_buffer_level =
299 rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000);
300 }
301
302 if (oxcf->maximum_buffer_size == 0) {
303 lc->maximum_buffer_size = lc->target_bandwidth / 8;
304 } else {
305 lc->maximum_buffer_size =
306 rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000);
307 }
308
309 /* Work out the average size of a frame within this layer */
310 if (layer > 0) {
311 lc->avg_frame_size_for_layer =
312 (int)round((cpi->oxcf.target_bitrate[layer] -
313 cpi->oxcf.target_bitrate[layer - 1]) *
314 1000 / (lc->framerate - prev_layer_framerate));
315 }
316
317 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
318 lc->active_best_quality = cpi->oxcf.best_allowed_q;
319 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
320
321 lc->buffer_level = lc->starting_buffer_level;
322 lc->bits_off_target = lc->starting_buffer_level;
323
324 lc->total_actual_bits = 0;
325 lc->ni_av_qi = 0;
326 lc->ni_tot_qi = 0;
327 lc->ni_frames = 0;
328 lc->rate_correction_factor = 1.0;
329 lc->key_frame_rate_correction_factor = 1.0;
330 lc->gf_rate_correction_factor = 1.0;
331 lc->inter_frame_target = 0;
332 }
333
334 // Upon a run-time change in temporal layers, reset the layer context parameters
335 // for any "new" layers. For "existing" layers, let them inherit the parameters
336 // from the previous layer state (at the same layer #). In the future we may want
337 // to better map the previous layer state(s) to the "new" ones.
338 void vp8_reset_temporal_layer_change(VP8_COMP *cpi, const VP8_CONFIG *oxcf,
339 const int prev_num_layers) {
340 int i;
341 double prev_layer_framerate = 0;
342 const int curr_num_layers = cpi->oxcf.number_of_layers;
343 // If the previous state was 1 layer, get current layer context from cpi.
344 // We need this to set the layer context for the new layers below.
345 if (prev_num_layers == 1) {
346 cpi->current_layer = 0;
347 vp8_save_layer_context(cpi);
348 }
349 for (i = 0; i < curr_num_layers; ++i) {
350 LAYER_CONTEXT *lc = &cpi->layer_context[i];
351 if (i >= prev_num_layers) {
352 vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
353 }
354 // The initial buffer levels are set based on their starting levels.
355 // We could set the buffer levels based on the previous state (normalized
356 // properly by the layer bandwidths) but we would need to keep track of
357 // the previous set of layer bandwidths (i.e., target_bitrate[i])
358 // before the layer change. For now, reset to the starting levels.
359 lc->buffer_level =
360 cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i];
361 lc->bits_off_target = lc->buffer_level;
362 // TODO(marpan): Should we set the rate_correction_factor and
363 // active_worst/best_quality to values derived from the previous layer
364 // state (to smooth-out quality dips/rate fluctuation at transition)?
365
366 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
367 // is not set for 1 layer, and the vp8_restore_layer_context/save_context()
368 // are not called in the encoding loop, so we need to call it here to
369 // pass the layer context state to |cpi|.
370 if (curr_num_layers == 1) {
371 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
372 lc->buffer_level =
373 cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000;
374 lc->bits_off_target = lc->buffer_level;
375 vp8_restore_layer_context(cpi, 0);
376 }
377 prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i];
378 }
379 }
380
381 static void setup_features(VP8_COMP *cpi) {
382 // If segmentation enabled set the update flags
383 if (cpi->mb.e_mbd.segmentation_enabled) {
384 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
385 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
386 } else {
387 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
388 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
389 }
390
391 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
392 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
393 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
394 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
395 memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0,
396 sizeof(cpi->mb.e_mbd.ref_lf_deltas));
397 memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0,
398 sizeof(cpi->mb.e_mbd.mode_lf_deltas));
399
400 set_default_lf_deltas(cpi);
401 }
402
403 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
404
405 static void initialize_enc(void) {
406 vpx_dsp_rtcd();
407 vp8_init_intra_predictors();
408 }
409
410 void vp8_initialize_enc(void) { once(initialize_enc); }
411
412 static void dealloc_compressor_data(VP8_COMP *cpi) {
413 vpx_free(cpi->tplist);
414 cpi->tplist = NULL;
415
416 /* Delete last frame MV storage buffers */
417 vpx_free(cpi->lfmv);
418 cpi->lfmv = 0;
419
420 vpx_free(cpi->lf_ref_frame_sign_bias);
421 cpi->lf_ref_frame_sign_bias = 0;
422
423 vpx_free(cpi->lf_ref_frame);
424 cpi->lf_ref_frame = 0;
425
426 /* Delete segmentation map */
427 vpx_free(cpi->segmentation_map);
428 cpi->segmentation_map = 0;
429
430 vpx_free(cpi->active_map);
431 cpi->active_map = 0;
432
433 vp8_de_alloc_frame_buffers(&cpi->common);
434
435 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
436 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
437 dealloc_raw_frame_buffers(cpi);
438
439 vpx_free(cpi->tok);
440 cpi->tok = 0;
441
442 /* Structure used to monitor GF usage */
443 vpx_free(cpi->gf_active_flags);
444 cpi->gf_active_flags = 0;
445
446 /* Activity mask based per mb zbin adjustments */
447 vpx_free(cpi->mb_activity_map);
448 cpi->mb_activity_map = 0;
449
450 vpx_free(cpi->mb.pip);
451 cpi->mb.pip = 0;
452 }
453
454 static void enable_segmentation(VP8_COMP *cpi) {
455 /* Set the appropriate feature bit */
456 cpi->mb.e_mbd.segmentation_enabled = 1;
457 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
458 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
459 }
460 static void disable_segmentation(VP8_COMP *cpi) {
461 /* Clear the appropriate feature bit */
462 cpi->mb.e_mbd.segmentation_enabled = 0;
463 }
464
465 /* Valid values for a segment are 0 to 3
466 * Segmentation map is arranged as [Rows][Columns]
467 */
468 static void set_segmentation_map(VP8_COMP *cpi,
469 unsigned char *segmentation_map) {
470 /* Copy in the new segmentation map */
471 memcpy(cpi->segmentation_map, segmentation_map,
472 (cpi->common.mb_rows * cpi->common.mb_cols));
473
474 /* Signal that the map should be updated. */
475 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
476 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
477 }
478
479 /* The values given for each segment can be either deltas (from the default
480 * value chosen for the frame) or absolute values.
481 *
482 * Valid range for abs values is:
483 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
484 * Valid range for delta values is:
485 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
486 *
487 * abs_delta = SEGMENT_DELTADATA (deltas)
488 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
489 *
490 */
491 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data,
492 unsigned char abs_delta) {
493 cpi->mb.e_mbd.mb_segment_abs_delta = abs_delta;
494 memcpy(cpi->segment_feature_data, feature_data,
495 sizeof(cpi->segment_feature_data));
496 }
497
498 /* A simple function to cyclically refresh the background at a lower Q */
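/* Each call marks up to cyclic_refresh_mode_max_mbs_perframe eligible
 * macroblocks as segment 1, resuming the scan at cyclic_refresh_mode_index so
 * that the refreshed region rotates across frames. Segment 1 then receives a
 * lower Q (cyclic_refresh_q) and a loop-filter delta via the segment data set
 * up at the end of the function.
 */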
499 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) {
500 unsigned char *seg_map = cpi->segmentation_map;
501 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
502 int i;
503 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
504 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
505
506 cpi->cyclic_refresh_q = Q / 2;
507
508 if (cpi->oxcf.screen_content_mode) {
509 // Modify quality ramp-up based on Q. Above some Q level, increase the
510 // number of blocks to be refreshed, and reduce it below the threshold.
511 // Turn it off under certain conditions (i.e., away from a key frame, and
512 // if we are at good quality (low Q) and most of the blocks were
513 // skip-encoded in the previous
514 // frame).
515 int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100;
516 if (Q >= qp_thresh) {
517 cpi->cyclic_refresh_mode_max_mbs_perframe =
518 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
519 } else if (cpi->frames_since_key > 250 && Q < 20 &&
520 cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) {
521 cpi->cyclic_refresh_mode_max_mbs_perframe = 0;
522 } else {
523 cpi->cyclic_refresh_mode_max_mbs_perframe =
524 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
525 }
526 block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
527 }
528
529 // Set every macroblock to be eligible for update.
530 // For key frame this will reset seg map to 0.
531 memset(cpi->segmentation_map, 0, mbs_in_frame);
532
533 if (cpi->common.frame_type != KEY_FRAME && block_count > 0) {
534 /* Cycle through the macro_block rows */
535 /* MB loop to set local segmentation map */
536 i = cpi->cyclic_refresh_mode_index;
537 assert(i < mbs_in_frame);
538 do {
539 /* If the MB is a candidate for clean up then mark it for
540 * possible boost/refresh (segment 1). The segment id may get
541 * reset to 0 later if the MB gets coded as anything other than
542 * last frame 0,0, as only (last frame 0,0) MBs are eligible for
543 * refresh: that is to say, MBs likely to be background blocks.
544 */
545 if (cpi->cyclic_refresh_map[i] == 0) {
546 seg_map[i] = 1;
547 block_count--;
548 } else if (cpi->cyclic_refresh_map[i] < 0) {
549 cpi->cyclic_refresh_map[i]++;
550 }
551
552 i++;
553 if (i == mbs_in_frame) i = 0;
554
555 } while (block_count && i != cpi->cyclic_refresh_mode_index);
556
557 cpi->cyclic_refresh_mode_index = i;
558
559 #if CONFIG_TEMPORAL_DENOISING
560 if (cpi->oxcf.noise_sensitivity > 0) {
561 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
562 Q < (int)cpi->denoiser.denoise_pars.qp_thresh &&
563 (cpi->frames_since_key >
564 2 * cpi->denoiser.denoise_pars.consec_zerolast)) {
565 // Under aggressive denoising, use segmentation to turn off loop
566 // filter below some qp thresh. The filter is reduced for all
567 // blocks that have been encoded as ZEROMV LAST x frames in a row,
568 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
569 // This is to avoid "dot" artifacts that can occur from repeated
570 // loop filtering on noisy input source.
571 cpi->cyclic_refresh_q = Q;
572 // lf_adjustment = -MAX_LOOP_FILTER;
573 lf_adjustment = -40;
574 for (i = 0; i < mbs_in_frame; ++i) {
575 seg_map[i] = (cpi->consec_zero_last[i] >
576 cpi->denoiser.denoise_pars.consec_zerolast)
577 ? 1
578 : 0;
579 }
580 }
581 }
582 #endif
583 }
584
585 /* Activate segmentation. */
586 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
587 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
588 enable_segmentation(cpi);
589
590 /* Set up the quant segment data */
591 feature_data[MB_LVL_ALT_Q][0] = 0;
592 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
593 feature_data[MB_LVL_ALT_Q][2] = 0;
594 feature_data[MB_LVL_ALT_Q][3] = 0;
595
596 /* Set up the loop segment data */
597 feature_data[MB_LVL_ALT_LF][0] = 0;
598 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
599 feature_data[MB_LVL_ALT_LF][2] = 0;
600 feature_data[MB_LVL_ALT_LF][3] = 0;
601
602 /* Initialise the feature data structure */
603 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
604 }
605
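/* Builds a per-macroblock skin map from the source Y/U/V planes (8x8 blocks
 * for frames up to CIF size, 16x16 above that), then applies a 3x3
 * neighborhood cleanup that drops isolated skin blocks and fills isolated
 * non-skin blocks. The map itself is consumed elsewhere in the encoder.
 */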
606 static void compute_skin_map(VP8_COMP *cpi) {
607 int mb_row, mb_col, num_bl;
608 VP8_COMMON *cm = &cpi->common;
609 const uint8_t *src_y = cpi->Source->y_buffer;
610 const uint8_t *src_u = cpi->Source->u_buffer;
611 const uint8_t *src_v = cpi->Source->v_buffer;
612 const int src_ystride = cpi->Source->y_stride;
613 const int src_uvstride = cpi->Source->uv_stride;
614
615 const SKIN_DETECTION_BLOCK_SIZE bsize =
616 (cm->Width * cm->Height <= 352 * 288) ? SKIN_8X8 : SKIN_16X16;
617
618 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
619 num_bl = 0;
620 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
621 const int bl_index = mb_row * cm->mb_cols + mb_col;
622 cpi->skin_map[bl_index] =
623 vp8_compute_skin_block(src_y, src_u, src_v, src_ystride, src_uvstride,
624 bsize, cpi->consec_zero_last[bl_index], 0);
625 num_bl++;
626 src_y += 16;
627 src_u += 8;
628 src_v += 8;
629 }
630 src_y += (src_ystride << 4) - (num_bl << 4);
631 src_u += (src_uvstride << 3) - (num_bl << 3);
632 src_v += (src_uvstride << 3) - (num_bl << 3);
633 }
634
635 // Remove isolated skin blocks (none of their neighbors are skin) and isolated
636 // non-skin blocks (all of their neighbors are skin). Skip the frame boundary.
637 for (mb_row = 1; mb_row < cm->mb_rows - 1; mb_row++) {
638 for (mb_col = 1; mb_col < cm->mb_cols - 1; mb_col++) {
639 const int bl_index = mb_row * cm->mb_cols + mb_col;
640 int num_neighbor = 0;
641 int mi, mj;
642 int non_skin_threshold = 8;
643
644 for (mi = -1; mi <= 1; mi += 1) {
645 for (mj = -1; mj <= 1; mj += 1) {
646 int bl_neighbor_index = (mb_row + mi) * cm->mb_cols + mb_col + mj;
647 if (cpi->skin_map[bl_neighbor_index]) num_neighbor++;
648 }
649 }
650
651 if (cpi->skin_map[bl_index] && num_neighbor < 2)
652 cpi->skin_map[bl_index] = 0;
653 if (!cpi->skin_map[bl_index] && num_neighbor == non_skin_threshold)
654 cpi->skin_map[bl_index] = 1;
655 }
656 }
657 }
658
659 static void set_default_lf_deltas(VP8_COMP *cpi) {
660 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
661 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
662
663 memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
664 memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
665
666 /* Test of ref frame deltas */
667 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
668 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
669 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
670 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
671
672 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
673
674 if (cpi->oxcf.Mode == MODE_REALTIME) {
675 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
676 } else {
677 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
678 }
679
680 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
681 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
682 }
683
684 /* Convenience macros for mapping speed and mode into a continuous
685 * range
686 */
687 #define GOOD(x) ((x) + 1)
688 #define RT(x) ((x) + 7)
689
690 static int speed_map(int speed, const int *map) {
691 int res;
692
693 do {
694 res = *map++;
695 } while (speed >= *map++);
696 return res;
697 }
698
699 static const int thresh_mult_map_znn[] = {
700 /* map common to zero, nearest, and near */
701 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
702 };
703
704 static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3),
705 2000, RT(0), 1000, RT(1),
706 2000, RT(7), INT_MAX, INT_MAX };
707
708 static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2),
709 5000, GOOD(3), 7500, RT(0),
710 2500, RT(1), 5000, RT(6),
711 INT_MAX, INT_MAX };
712
713 static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3),
714 2000, RT(0), 0, RT(1),
715 1000, RT(2), 2000, RT(7),
716 INT_MAX, INT_MAX };
717
718 static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000,
719 RT(0), 2000, INT_MAX };
720
721 static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3),
722 2500, GOOD(5), 4000, RT(0),
723 2000, RT(2), 2500, RT(5),
724 4000, INT_MAX };
725
726 static const int thresh_mult_map_split1[] = {
727 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
728 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
729 };
730
731 static const int thresh_mult_map_split2[] = {
732 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
733 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
734 };
735
736 static const int mode_check_freq_map_zn2[] = {
737 /* {zero,nearest}{2,3} */
738 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
739 };
740
741 static const int mode_check_freq_map_vhbpred[] = { 0, GOOD(5), 2, RT(0),
742 0, RT(3), 2, RT(5),
743 4, INT_MAX };
744
745 static const int mode_check_freq_map_near2[] = {
746 0, GOOD(5), 2, RT(0), 0, RT(3), 2,
747 RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX
748 };
749
750 static const int mode_check_freq_map_new1[] = {
751 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX
752 };
753
754 static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0),
755 0, RT(3), 4, RT(10),
756 1 << 3, RT(11), 1 << 4, RT(12),
757 1 << 5, INT_MAX };
758
759 static const int mode_check_freq_map_split1[] = { 0, GOOD(2), 2, GOOD(3),
760 7, RT(1), 2, RT(2),
761 7, INT_MAX };
762
763 static const int mode_check_freq_map_split2[] = { 0, GOOD(1), 2, GOOD(2),
764 4, GOOD(3), 15, RT(1),
765 4, RT(2), 15, INT_MAX };
766
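/* Derives the active SPEED_FEATURES, per-mode rd thresholds and mode check
 * frequencies from the compressor mode (best/good/realtime) and cpi->Speed,
 * using the continuous GOOD()/RT() range and the speed_map tables above.
 */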
767 void vp8_set_speed_features(VP8_COMP *cpi) {
768 SPEED_FEATURES *sf = &cpi->sf;
769 int Mode = cpi->compressor_speed;
770 int Speed = cpi->Speed;
771 int Speed2;
772 int i;
773 VP8_COMMON *cm = &cpi->common;
774 int last_improved_quant = sf->improved_quant;
775 int ref_frames;
776
777 /* Initialise default mode frequency sampling variables */
778 for (i = 0; i < MAX_MODES; ++i) {
779 cpi->mode_check_freq[i] = 0;
780 }
781
782 cpi->mb.mbs_tested_so_far = 0;
783 cpi->mb.mbs_zero_last_dot_suppress = 0;
784
785 /* best quality defaults */
786 sf->RD = 1;
787 sf->search_method = NSTEP;
788 sf->improved_quant = 1;
789 sf->improved_dct = 1;
790 sf->auto_filter = 1;
791 sf->recode_loop = 1;
792 sf->quarter_pixel_search = 1;
793 sf->half_pixel_search = 1;
794 sf->iterative_sub_pixel = 1;
795 sf->optimize_coefficients = 1;
796 sf->use_fastquant_for_pick = 0;
797 sf->no_skip_block4x4_search = 1;
798
799 sf->first_step = 0;
800 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
801 sf->improved_mv_pred = 1;
802
803 /* default thresholds to 0 */
804 for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
805
806 /* Count enabled references */
807 ref_frames = 1;
808 if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++;
809 if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++;
810 if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++;
811
812 /* Convert speed to continuous range, with clamping */
813 if (Mode == 0) {
814 Speed = 0;
815 } else if (Mode == 2) {
816 Speed = RT(Speed);
817 } else {
818 if (Speed > 5) Speed = 5;
819 Speed = GOOD(Speed);
820 }
821
822 sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] =
823 sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */
824
825 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] =
826 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] =
827 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] =
828 speed_map(Speed, thresh_mult_map_znn);
829
830 sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] =
831 speed_map(Speed, thresh_mult_map_vhpred);
832 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
833 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
834 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
835 sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] =
836 speed_map(Speed, thresh_mult_map_new2);
837 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
838 sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] =
839 speed_map(Speed, thresh_mult_map_split2);
840
841 // Special case for temporal layers.
842 // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is
843 // used as second reference. We don't modify thresholds for ALTREF case
844 // since ALTREF is usually used as long-term reference in temporal layers.
845 if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) &&
846 (cpi->ref_frame_flags & VP8_LAST_FRAME) &&
847 (cpi->ref_frame_flags & VP8_GOLD_FRAME)) {
848 if (cpi->closest_reference_frame == GOLDEN_FRAME) {
849 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3;
850 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3;
851 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3;
852 } else {
853 sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1;
854 sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1;
855 sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1;
856 }
857 }
858
859 cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] =
860 cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] =
861 cpi->mode_check_freq[THR_DC] = 0; /* always */
862
863 cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] =
864 cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] =
865 speed_map(Speed, mode_check_freq_map_zn2);
866
867 cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] =
868 speed_map(Speed, mode_check_freq_map_near2);
869
870 cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] =
871 cpi->mode_check_freq[THR_B_PRED] =
872 speed_map(Speed, mode_check_freq_map_vhbpred);
873
874 // For real-time mode at speed 10 keep the mode_check_freq threshold
875 // for NEW1 similar to that of speed 9.
876 Speed2 = Speed;
877 if (cpi->Speed == 10 && Mode == 2) Speed2 = RT(9);
878 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed2, mode_check_freq_map_new1);
879
880 cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] =
881 speed_map(Speed, mode_check_freq_map_new2);
882
883 cpi->mode_check_freq[THR_SPLIT1] =
884 speed_map(Speed, mode_check_freq_map_split1);
885 cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] =
886 speed_map(Speed, mode_check_freq_map_split2);
887 Speed = cpi->Speed;
888 switch (Mode) {
889 #if !CONFIG_REALTIME_ONLY
890 case 0: /* best quality mode */
891 sf->first_step = 0;
892 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
893 break;
894 case 1:
895 case 3:
896 if (Speed > 0) {
897 /* Disable coefficient optimization above speed 0 */
898 sf->optimize_coefficients = 0;
899 sf->use_fastquant_for_pick = 1;
900 sf->no_skip_block4x4_search = 0;
901
902 sf->first_step = 1;
903 }
904
905 if (Speed > 2) {
906 sf->improved_quant = 0;
907 sf->improved_dct = 0;
908
909 /* Only do recode loop on key frames, golden frames and
910 * alt ref frames
911 */
912 sf->recode_loop = 2;
913 }
914
915 if (Speed > 3) {
916 sf->auto_filter = 1;
917 sf->recode_loop = 0; /* recode loop off */
918 sf->RD = 0; /* Turn rd off */
919 }
920
921 if (Speed > 4) {
922 sf->auto_filter = 0; /* Faster selection of loop filter */
923 }
924
925 break;
926 #endif
927 case 2:
928 sf->optimize_coefficients = 0;
929 sf->recode_loop = 0;
930 sf->auto_filter = 1;
931 sf->iterative_sub_pixel = 1;
932 sf->search_method = NSTEP;
933
934 if (Speed > 0) {
935 sf->improved_quant = 0;
936 sf->improved_dct = 0;
937
938 sf->use_fastquant_for_pick = 1;
939 sf->no_skip_block4x4_search = 0;
940 sf->first_step = 1;
941 }
942
943 if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */
944
945 if (Speed > 3) {
946 sf->RD = 0;
947 sf->auto_filter = 1;
948 }
949
950 if (Speed > 4) {
951 sf->auto_filter = 0; /* Faster selection of loop filter */
952 sf->search_method = HEX;
953 sf->iterative_sub_pixel = 0;
954 }
955
956 if (Speed > 6) {
957 unsigned int sum = 0;
958 unsigned int total_mbs = cm->MBs;
959 int thresh;
960 unsigned int total_skip;
961
962 int min = 2000;
963
964 if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout;
965
966 min >>= 7;
967
968 for (i = 0; i < min; ++i) {
969 sum += cpi->mb.error_bins[i];
970 }
971
972 total_skip = sum;
973 sum = 0;
974
975 /* i starts from 2 to make sure thresh started from 2048 */
976 for (; i < 1024; ++i) {
977 sum += cpi->mb.error_bins[i];
978
979 if (10 * sum >=
980 (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) {
981 break;
982 }
983 }
984
985 i--;
986 thresh = (i << 7);
987
988 if (thresh < 2000) thresh = 2000;
989
990 if (ref_frames > 1) {
991 sf->thresh_mult[THR_NEW1] = thresh;
992 sf->thresh_mult[THR_NEAREST1] = thresh >> 1;
993 sf->thresh_mult[THR_NEAR1] = thresh >> 1;
994 }
995
996 if (ref_frames > 2) {
997 sf->thresh_mult[THR_NEW2] = thresh << 1;
998 sf->thresh_mult[THR_NEAREST2] = thresh;
999 sf->thresh_mult[THR_NEAR2] = thresh;
1000 }
1001
1002 if (ref_frames > 3) {
1003 sf->thresh_mult[THR_NEW3] = thresh << 1;
1004 sf->thresh_mult[THR_NEAREST3] = thresh;
1005 sf->thresh_mult[THR_NEAR3] = thresh;
1006 }
1007
1008 sf->improved_mv_pred = 0;
1009 }
1010
1011 if (Speed > 8) sf->quarter_pixel_search = 0;
1012
1013 if (cm->version == 0) {
1014 cm->filter_type = NORMAL_LOOPFILTER;
1015
1016 if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER;
1017 } else {
1018 cm->filter_type = SIMPLE_LOOPFILTER;
1019 }
1020
1021 /* This has a big hit on quality. Last resort */
1022 if (Speed >= 15) sf->half_pixel_search = 0;
1023
1024 memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1025
1026 } /* switch */
1027
1028 /* Slow quant, dct and trellis are not worthwhile for the first pass
1029 * so make sure they are always turned off.
1030 */
1031 if (cpi->pass == 1) {
1032 sf->improved_quant = 0;
1033 sf->optimize_coefficients = 0;
1034 sf->improved_dct = 0;
1035 }
1036
1037 if (cpi->sf.search_method == NSTEP) {
1038 vp8_init3smotion_compensation(&cpi->mb,
1039 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1040 } else if (cpi->sf.search_method == DIAMOND) {
1041 vp8_init_dsmotion_compensation(&cpi->mb,
1042 cm->yv12_fb[cm->lst_fb_idx].y_stride);
1043 }
1044
1045 if (cpi->sf.improved_dct) {
1046 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1047 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1048 } else {
1049 /* No fast FDCT defined for any platform at this time. */
1050 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1051 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1052 }
1053
1054 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1055
1056 if (cpi->sf.improved_quant) {
1057 cpi->mb.quantize_b = vp8_regular_quantize_b;
1058 } else {
1059 cpi->mb.quantize_b = vp8_fast_quantize_b;
1060 }
1061 if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi);
1062
1063 if (cpi->sf.iterative_sub_pixel == 1) {
1064 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1065 } else if (cpi->sf.quarter_pixel_search) {
1066 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1067 } else if (cpi->sf.half_pixel_search) {
1068 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1069 } else {
1070 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1071 }
1072
1073 if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) {
1074 cpi->mb.optimize = 1;
1075 } else {
1076 cpi->mb.optimize = 0;
1077 }
1078
1079 if (cpi->common.full_pixel) {
1080 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1081 }
1082
1083 #ifdef SPEEDSTATS
1084 frames_at_speed[cpi->Speed]++;
1085 #endif
1086 }
1087 #undef GOOD
1088 #undef RT
1089
1090 static void alloc_raw_frame_buffers(VP8_COMP *cpi) {
1091 #if VP8_TEMPORAL_ALT_REF
1092 int width = (cpi->oxcf.Width + 15) & ~15;
1093 int height = (cpi->oxcf.Height + 15) & ~15;
1094 #endif
1095
1096 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1097 cpi->oxcf.lag_in_frames);
1098 if (!cpi->lookahead) {
1099 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1100 "Failed to allocate lag buffers");
1101 }
1102
1103 #if VP8_TEMPORAL_ALT_REF
1104
1105 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height,
1106 VP8BORDERINPIXELS)) {
1107 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1108 "Failed to allocate altref buffer");
1109 }
1110
1111 #endif
1112 }
1113
1114 static void dealloc_raw_frame_buffers(VP8_COMP *cpi) {
1115 #if VP8_TEMPORAL_ALT_REF
1116 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1117 #endif
1118 vp8_lookahead_destroy(cpi->lookahead);
1119 }
1120
1121 static int vp8_alloc_partition_data(VP8_COMP *cpi) {
1122 vpx_free(cpi->mb.pip);
1123
1124 cpi->mb.pip =
1125 vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1),
1126 sizeof(PARTITION_INFO));
1127 if (!cpi->mb.pip) return 1;
1128
1129 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1130
1131 return 0;
1132 }
1133
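/* Allocates (or reallocates) the per-frame encoder buffers: frame and
 * partition data, the token buffer, GF-usage and activity maps, last-frame MV
 * arrays, segmentation/active maps and, when enabled, the denoiser. Widths
 * and heights are padded up to a multiple of 16 before YV12 allocation.
 */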
1134 void vp8_alloc_compressor_data(VP8_COMP *cpi) {
1135 VP8_COMMON *cm = &cpi->common;
1136
1137 int width = cm->Width;
1138 int height = cm->Height;
1139
1140 if (vp8_alloc_frame_buffers(cm, width, height)) {
1141 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1142 "Failed to allocate frame buffers");
1143 }
1144
1145 if (vp8_alloc_partition_data(cpi)) {
1146 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1147 "Failed to allocate partition data");
1148 }
1149
1150 if ((width & 0xf) != 0) width += 16 - (width & 0xf);
1151
1152 if ((height & 0xf) != 0) height += 16 - (height & 0xf);
1153
1154 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height,
1155 VP8BORDERINPIXELS)) {
1156 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1157 "Failed to allocate last frame buffer");
1158 }
1159
1160 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height,
1161 VP8BORDERINPIXELS)) {
1162 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1163 "Failed to allocate scaled source buffer");
1164 }
1165
1166 vpx_free(cpi->tok);
1167
1168 {
1169 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1170 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1171 #else
1172 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1173 #endif
1174 CHECK_MEM_ERROR(&cpi->common.error, cpi->tok,
1175 vpx_calloc(tokens, sizeof(*cpi->tok)));
1176 }
1177
1178 /* Data used for real time vc mode to see if gf needs refreshing */
1179 cpi->zeromv_count = 0;
1180
1181 /* Structures used to monitor GF usage */
1182 vpx_free(cpi->gf_active_flags);
1183 CHECK_MEM_ERROR(
1184 &cpi->common.error, cpi->gf_active_flags,
1185 vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols));
1186 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1187
1188 vpx_free(cpi->mb_activity_map);
1189 CHECK_MEM_ERROR(
1190 &cpi->common.error, cpi->mb_activity_map,
1191 vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols));
1192
1193 /* allocate memory for storing last frame's MVs for MV prediction. */
1194 vpx_free(cpi->lfmv);
1195 CHECK_MEM_ERROR(
1196 &cpi->common.error, cpi->lfmv,
1197 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2), sizeof(*cpi->lfmv)));
1198 vpx_free(cpi->lf_ref_frame_sign_bias);
1199 CHECK_MEM_ERROR(&cpi->common.error, cpi->lf_ref_frame_sign_bias,
1200 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1201 sizeof(*cpi->lf_ref_frame_sign_bias)));
1202 vpx_free(cpi->lf_ref_frame);
1203 CHECK_MEM_ERROR(&cpi->common.error, cpi->lf_ref_frame,
1204 vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2),
1205 sizeof(*cpi->lf_ref_frame)));
1206
1207 /* Create the encoder segmentation map and set all entries to 0 */
1208 vpx_free(cpi->segmentation_map);
1209 CHECK_MEM_ERROR(
1210 &cpi->common.error, cpi->segmentation_map,
1211 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map)));
1212 cpi->cyclic_refresh_mode_index = 0;
1213 vpx_free(cpi->active_map);
1214 CHECK_MEM_ERROR(
1215 &cpi->common.error, cpi->active_map,
1216 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->active_map)));
1217 memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols));
1218
1219 #if CONFIG_MULTITHREAD
1220 if (width < 640) {
1221 cpi->mt_sync_range = 1;
1222 } else if (width <= 1280) {
1223 cpi->mt_sync_range = 4;
1224 } else if (width <= 2560) {
1225 cpi->mt_sync_range = 8;
1226 } else {
1227 cpi->mt_sync_range = 16;
1228 }
1229 #endif
1230
1231 vpx_free(cpi->tplist);
1232 CHECK_MEM_ERROR(&cpi->common.error, cpi->tplist,
1233 vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1234
1235 #if CONFIG_TEMPORAL_DENOISING
1236 if (cpi->oxcf.noise_sensitivity > 0) {
1237 vp8_denoiser_free(&cpi->denoiser);
1238 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1239 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1240 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1241 "Failed to allocate denoiser");
1242 }
1243 }
1244 #endif
1245 }
1246
1247 /* Quant MOD */
1248 static const int q_trans[] = {
1249 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19,
1250 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41,
1251 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79,
1252 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
1253 };
1254
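/* vp8_reverse_trans() inverts q_trans[]: it returns the smallest external
 * quantizer value (0-63) whose internal qindex is >= x, e.g.
 * vp8_reverse_trans(10) == 9 and vp8_reverse_trans(128) == 63.
 */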
1255 int vp8_reverse_trans(int x) {
1256 int i;
1257
1258 for (i = 0; i < 64; ++i) {
1259 if (q_trans[i] >= x) return i;
1260 }
1261
1262 return 63;
1263 }
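/* Called whenever the (estimated) source frame rate changes: recomputes the
 * average and minimum per-frame bandwidth from the target bitrate and updates
 * the maximum golden/alt-ref interval limits that depend on the frame rate.
 */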
1264 void vp8_new_framerate(VP8_COMP *cpi, double framerate) {
1265 if (framerate < .1) framerate = 30;
1266
1267 cpi->framerate = framerate;
1268 cpi->output_framerate = framerate;
1269 const double per_frame_bandwidth =
1270 round(cpi->oxcf.target_bandwidth / cpi->output_framerate);
1271 cpi->per_frame_bandwidth = (int)VPXMIN(per_frame_bandwidth, INT_MAX);
1272 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1273 const int64_t vbr_min_bits = (int64_t)cpi->av_per_frame_bandwidth *
1274 cpi->oxcf.two_pass_vbrmin_section / 100;
1275 cpi->min_frame_bandwidth = (int)VPXMIN(vbr_min_bits, INT_MAX);
1276
1277 /* Set Maximum gf/arf interval */
1278 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1279
1280 if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12;
1281
1282 /* Extended interval for genuinely static scenes */
1283 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1284
1285 /* Special conditions when the alt ref frame is enabled in lagged compress mode */
1286 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) {
1287 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) {
1288 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1289 }
1290
1291 if (cpi->twopass.static_scene_max_gf_interval >
1292 cpi->oxcf.lag_in_frames - 1) {
1293 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1294 }
1295 }
1296
1297 if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) {
1298 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1299 }
1300 }
1301
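/* One-time setup from the initial VP8_CONFIG: guesses a frame rate from the
 * timebase (falling back to 30 when it looks implausible), sets the default
 * reference flags and starting buffer levels, initializes any temporal layer
 * contexts, and delegates the remaining shared work to vp8_change_config().
 */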
1302 static void init_config(VP8_COMP *cpi, const VP8_CONFIG *oxcf) {
1303 VP8_COMMON *cm = &cpi->common;
1304
1305 cpi->oxcf = *oxcf;
1306
1307 cpi->auto_gold = 1;
1308 cpi->auto_adjust_gold_quantizer = 1;
1309
1310 cm->version = oxcf->Version;
1311 vp8_setup_version(cm);
1312
1313 /* Frame rate is not available on the first frame, as it's derived from
1314 * the observed timestamps. The actual value used here doesn't matter
1315 * too much, as it will adapt quickly.
1316 */
1317 if (oxcf->timebase.num > 0) {
1318 cpi->framerate =
1319 (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num);
1320 } else {
1321 cpi->framerate = 30;
1322 }
1323
1324 /* If the reciprocal of the timebase seems like a reasonable framerate,
1325 * then use that as a guess, otherwise use 30.
1326 */
1327 if (cpi->framerate > 180) cpi->framerate = 30;
1328
1329 cpi->ref_framerate = cpi->framerate;
1330
1331 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1332
1333 cm->refresh_golden_frame = 0;
1334 cm->refresh_last_frame = 1;
1335 cm->refresh_entropy_probs = 1;
1336
1337 /* change includes all joint functionality */
1338 vp8_change_config(cpi, oxcf);
1339
1340 /* Initialize active best and worst q and average q values. */
1341 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1342 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1343 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1344
1345 /* Initialise the starting buffer levels */
1346 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1347 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1348
1349 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1350 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1351 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1352 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1353
1354 cpi->total_actual_bits = 0;
1355 cpi->total_target_vs_actual = 0;
1356
1357 /* Temporal scalability */
1358 if (cpi->oxcf.number_of_layers > 1) {
1359 unsigned int i;
1360 double prev_layer_framerate = 0;
1361
1362 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
1363 vp8_init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1364 prev_layer_framerate =
1365 cpi->output_framerate / cpi->oxcf.rate_decimator[i];
1366 }
1367 }
1368
1369 #if VP8_TEMPORAL_ALT_REF
1370 {
1371 int i;
1372
1373 cpi->fixed_divide[0] = 0;
1374
1375 for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
1376 }
1377 #endif
1378 }
1379
1380 void vp8_update_layer_contexts(VP8_COMP *cpi) {
1381 VP8_CONFIG *oxcf = &cpi->oxcf;
1382
1383 /* Update snapshots of the layer contexts to reflect new parameters */
1384 if (oxcf->number_of_layers > 1) {
1385 unsigned int i;
1386 double prev_layer_framerate = 0;
1387
1388 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1389 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) {
1390 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1391
1392 lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i];
1393 if (oxcf->target_bitrate[i] > INT_MAX / 1000)
1394 lc->target_bandwidth = INT_MAX;
1395 else
1396 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1397
1398 lc->starting_buffer_level = rescale(
1399 (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000);
1400
1401 if (oxcf->optimal_buffer_level == 0) {
1402 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1403 } else {
1404 lc->optimal_buffer_level = rescale(
1405 (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000);
1406 }
1407
1408 if (oxcf->maximum_buffer_size == 0) {
1409 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1410 } else {
1411 lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms,
1412 lc->target_bandwidth, 1000);
1413 }
1414
1415 /* Work out the average size of a frame within this layer */
1416 if (i > 0) {
1417 lc->avg_frame_size_for_layer =
1418 (int)round((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) *
1419 1000 / (lc->framerate - prev_layer_framerate));
1420 }
1421
1422 prev_layer_framerate = lc->framerate;
1423 }
1424 }
1425 }
1426
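/* Applies a (possibly updated) configuration at runtime: re-derives the pass
 * and compressor speed from oxcf->Mode, remaps the quantizer limits through
 * q_trans[], rescales the buffer levels from milliseconds to bits, and
 * reallocates the frame buffers if the coded frame size has changed.
 */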
1427 void vp8_change_config(VP8_COMP *cpi, const VP8_CONFIG *oxcf) {
1428 VP8_COMMON *cm = &cpi->common;
1429 int last_w, last_h;
1430 unsigned int prev_number_of_layers;
1431 double raw_target_rate;
1432
1433 if (!cpi) return;
1434
1435 if (!oxcf) return;
1436
1437 if (cm->version != oxcf->Version) {
1438 cm->version = oxcf->Version;
1439 vp8_setup_version(cm);
1440 }
1441
1442 last_w = cpi->oxcf.Width;
1443 last_h = cpi->oxcf.Height;
1444 prev_number_of_layers = cpi->oxcf.number_of_layers;
1445
1446 cpi->oxcf = *oxcf;
1447
1448 switch (cpi->oxcf.Mode) {
1449 case MODE_REALTIME:
1450 cpi->pass = 0;
1451 cpi->compressor_speed = 2;
1452
1453 if (cpi->oxcf.cpu_used < -16) {
1454 cpi->oxcf.cpu_used = -16;
1455 }
1456
1457 if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16;
1458
1459 break;
1460
1461 case MODE_GOODQUALITY:
1462 cpi->pass = 0;
1463 cpi->compressor_speed = 1;
1464
1465 if (cpi->oxcf.cpu_used < -5) {
1466 cpi->oxcf.cpu_used = -5;
1467 }
1468
1469 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1470
1471 break;
1472
1473 case MODE_BESTQUALITY:
1474 cpi->pass = 0;
1475 cpi->compressor_speed = 0;
1476 break;
1477
1478 case MODE_FIRSTPASS:
1479 cpi->pass = 1;
1480 cpi->compressor_speed = 1;
1481 break;
1482 case MODE_SECONDPASS:
1483 cpi->pass = 2;
1484 cpi->compressor_speed = 1;
1485
1486 if (cpi->oxcf.cpu_used < -5) {
1487 cpi->oxcf.cpu_used = -5;
1488 }
1489
1490 if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5;
1491
1492 break;
1493 case MODE_SECONDPASS_BEST:
1494 cpi->pass = 2;
1495 cpi->compressor_speed = 0;
1496 break;
1497 }
1498
1499 if (cpi->pass == 0) cpi->auto_worst_q = 1;
1500
1501 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1502 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1503 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1504
1505 if (oxcf->fixed_q >= 0) {
1506 if (oxcf->worst_allowed_q < 0) {
1507 cpi->oxcf.fixed_q = q_trans[0];
1508 } else {
1509 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1510 }
1511
1512 if (oxcf->alt_q < 0) {
1513 cpi->oxcf.alt_q = q_trans[0];
1514 } else {
1515 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1516 }
1517
1518 if (oxcf->key_q < 0) {
1519 cpi->oxcf.key_q = q_trans[0];
1520 } else {
1521 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1522 }
1523
1524 if (oxcf->gold_q < 0) {
1525 cpi->oxcf.gold_q = q_trans[0];
1526 } else {
1527 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1528 }
1529 }
1530
1531 cpi->ext_refresh_frame_flags_pending = 0;
1532
1533 cpi->baseline_gf_interval =
1534 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1535
1536 // GF behavior for 1 pass CBR, used when error_resilience is off.
1537 if (!cpi->oxcf.error_resilient_mode &&
1538 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1539 cpi->oxcf.Mode == MODE_REALTIME)
1540 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1541
1542 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1543 cpi->oxcf.token_partitions = 3;
1544 #endif
1545
1546 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) {
1547 cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions;
1548 }
1549
1550 setup_features(cpi);
1551
1552 if (!cpi->use_roi_static_threshold) {
1553 int i;
1554 for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
1555 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1556 }
1557 }
1558
1559 /* At the moment the first order values may not be > MAXQ */
1560 if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ;
1561
1562 /* local file playback mode == really big buffer */
1563 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) {
1564 cpi->oxcf.starting_buffer_level = 60000;
1565 cpi->oxcf.optimal_buffer_level = 60000;
1566 cpi->oxcf.maximum_buffer_size = 240000;
1567 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1568 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1569 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1570 }
1571
1572 raw_target_rate = ((int64_t)cpi->oxcf.Width * cpi->oxcf.Height * 8 * 3 *
1573 cpi->framerate / 1000.0);
1574 if (cpi->oxcf.target_bandwidth > raw_target_rate)
1575 cpi->oxcf.target_bandwidth = (unsigned int)raw_target_rate;
1576 /* Convert target bandwidth from Kbit/s to Bit/s */
1577 cpi->oxcf.target_bandwidth *= 1000;
1578
1579 cpi->oxcf.starting_buffer_level = rescale(
1580 (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1581
1582 /* Set or reset optimal and maximum buffer levels. */
1583 if (cpi->oxcf.optimal_buffer_level == 0) {
1584 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1585 } else {
1586 cpi->oxcf.optimal_buffer_level = rescale(
1587 (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000);
1588 }
1589
1590 if (cpi->oxcf.maximum_buffer_size == 0) {
1591 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1592 } else {
1593 cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size,
1594 cpi->oxcf.target_bandwidth, 1000);
1595 }
1596 // Under a configuration change, where maximum_buffer_size may change,
1597 // keep buffer level clipped to the maximum allowed buffer size.
1598 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1599 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1600 cpi->buffer_level = cpi->bits_off_target;
1601 }
1602
1603 /* Set up frame rate and related rate control parameter values. */
1604 vp8_new_framerate(cpi, cpi->framerate);
1605
1606 /* Set absolute upper and lower quality limits */
1607 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1608 cpi->best_quality = cpi->oxcf.best_allowed_q;
1609
1610 /* active values should only be modified if out of new range */
1611 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) {
1612 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1613 }
1614 /* less likely */
1615 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) {
1616 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1617 }
1618 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) {
1619 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1620 }
1621 /* less likely */
1622 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) {
1623 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1624 }
1625
1626 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1627
1628 cpi->cq_target_quality = cpi->oxcf.cq_level;
1629
1630 /* Only allow dropped frames in buffered mode */
1631 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1632
1633 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1634
1635 // Check if the number of temporal layers has changed, and if so reset the
1636 // pattern counter and set/initialize the temporal layer context for the
1637 // new layer configuration.
1638 if (cpi->oxcf.number_of_layers != prev_number_of_layers) {
1639 // If the number of temporal layers are changed we must start at the
1640 // base of the pattern cycle, so set the layer id to 0 and reset
1641 // the temporal pattern counter.
1642 if (cpi->temporal_layer_id > 0) {
1643 cpi->temporal_layer_id = 0;
1644 }
1645 cpi->temporal_pattern_counter = 0;
1646 vp8_reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1647 }
1648
1649 if (!cpi->initial_width) {
1650 cpi->initial_width = cpi->oxcf.Width;
1651 cpi->initial_height = cpi->oxcf.Height;
1652 }
1653
1654 cm->Width = cpi->oxcf.Width;
1655 cm->Height = cpi->oxcf.Height;
1656 assert(cm->Width <= cpi->initial_width);
1657 assert(cm->Height <= cpi->initial_height);
1658
1659 /* TODO(jkoleszar): if an internal spatial resampling is active,
1660 * and we downsize the input image, maybe we should clear the
1661 * internal scale immediately rather than waiting for it to
1662 * correct.
1663 */
1664
1665 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1666 if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7;
1667
1668 cm->sharpness_level = cpi->oxcf.Sharpness;
1669
1670 if (cm->horiz_scale != VP8E_NORMAL || cm->vert_scale != VP8E_NORMAL) {
1671 int hr, hs, vr, vs;
1672
1673 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1674 Scale2Ratio(cm->vert_scale, &vr, &vs);
1675
1676 /* always go to the next whole number */
1677 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1678 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1679 }
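/* The (hs - 1 + W * hr) / hs form above is ceiling division: e.g. with a
 * 3/5 scale (hr = 3, hs = 5) a 353-wide source gives
 * (5 - 1 + 353 * 3) / 5 = 212, i.e. ceil(211.8).
 */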
1680
1681 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) {
1682 cpi->force_next_frame_intra = 1;
1683 }
1684
1685 if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width ||
1686 ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height ||
1687 cm->yv12_fb[cm->lst_fb_idx].y_width == 0) {
1688 dealloc_raw_frame_buffers(cpi);
1689 alloc_raw_frame_buffers(cpi);
1690 vp8_alloc_compressor_data(cpi);
1691 }
1692
1693 if (cpi->oxcf.fixed_q >= 0) {
1694 cpi->last_q[0] = cpi->oxcf.fixed_q;
1695 cpi->last_q[1] = cpi->oxcf.fixed_q;
1696 }
1697
1698 cpi->Speed = cpi->oxcf.cpu_used;
1699
1700 /* Force allow_lag to 0 if lag_in_frames is 0. */
1701 if (cpi->oxcf.lag_in_frames == 0) {
1702 cpi->oxcf.allow_lag = 0;
1703 }
1704 /* Limit on lag buffers as these are not currently dynamically allocated */
1705 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) {
1706 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1707 }
1708
1709 /* YX Temp */
1710 cpi->alt_ref_source = NULL;
1711 cpi->is_src_frame_alt_ref = 0;
1712
1713 #if CONFIG_TEMPORAL_DENOISING
1714 if (cpi->oxcf.noise_sensitivity) {
1715 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) {
1716 int width = (cpi->oxcf.Width + 15) & ~15;
1717 int height = (cpi->oxcf.Height + 15) & ~15;
1718 if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows,
1719 cm->mb_cols, cpi->oxcf.noise_sensitivity)) {
1720 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1721 "Failed to allocate denoiser");
1722 }
1723 }
1724 }
1725 #endif
1726
1727 #if 0
1728 /* Experimental RD Code */
1729 cpi->frame_distortion = 0;
1730 cpi->last_frame_distortion = 0;
1731 #endif
1732 }
1733
1734 #ifndef M_LOG2_E
1735 #define M_LOG2_E 0.693147180559945309417
1736 #endif
1737 #define log2f(x) (log(x) / (float)M_LOG2_E)
1738
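/* Build the motion vector SAD cost tables used by the SAD-based motion
 * search: cost ~ 256 * 2 * (log2(8 * |mv|) + 0.6), mirrored for negative
 * components, with a fixed cost of 300 for the zero component.
 */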
1739 static void cal_mvsadcosts(int *mvsadcost[2]) {
1740 int i = 1;
1741
1742 mvsadcost[0][0] = 300;
1743 mvsadcost[1][0] = 300;
1744
1745 do {
1746 double z = 256 * (2 * (log2f(8 * i) + .6));
1747 mvsadcost[0][i] = (int)z;
1748 mvsadcost[1][i] = (int)z;
1749 mvsadcost[0][-i] = (int)z;
1750 mvsadcost[1][-i] = (int)z;
1751 } while (++i <= mvfp_max);
1752 }
1753
1754 struct VP8_COMP *vp8_create_compressor(const VP8_CONFIG *oxcf) {
1755 int i;
1756
1757 VP8_COMP *cpi;
1758 VP8_COMMON *cm;
1759
1760 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1761 /* Check that the CPI instance was allocated successfully */
1762 if (!cpi) return 0;
1763
1764 cm = &cpi->common;
1765
1766 memset(cpi, 0, sizeof(VP8_COMP));
1767
1768 if (setjmp(cm->error.jmp)) {
1769 cpi->common.error.setjmp = 0;
1770 vp8_remove_compressor(&cpi);
1771 return 0;
1772 }
1773
1774 cpi->common.error.setjmp = 1;
1775
1776 CHECK_MEM_ERROR(
1777 &cpi->common.error, cpi->mb.ss,
1778 vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
1779
1780 vp8_create_common(&cpi->common);
1781
1782 init_config(cpi, oxcf);
1783
1784 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob,
1785 sizeof(vp8cx_base_skip_false_prob));
1786 cpi->common.current_video_frame = 0;
1787 cpi->temporal_pattern_counter = 0;
1788 cpi->temporal_layer_id = -1;
1789 cpi->kf_overspend_bits = 0;
1790 cpi->kf_bitrate_adjustment = 0;
1791 cpi->frames_till_gf_update_due = 0;
1792 cpi->gf_overspend_bits = 0;
1793 cpi->non_gf_bitrate_adjustment = 0;
1794 cpi->prob_last_coded = 128;
1795 cpi->prob_gf_coded = 128;
1796 cpi->prob_intra_coded = 63;
1797
1798 /* Prime the recent reference frame usage counters.
1799 * Hereafter they will be maintained as a sort of moving average
1800 */
1801 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1802 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1803 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1804 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1805
1806 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1807 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1808
1809 cpi->twopass.gf_decay_rate = 0;
1810 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1811
1812 cpi->gold_is_last = 0;
1813 cpi->alt_is_last = 0;
1814 cpi->gold_is_alt = 0;
1815
1816 cpi->active_map_enabled = 0;
1817
1818 cpi->use_roi_static_threshold = 0;
1819
1820 #if 0
1821 /* Experimental code for lagged and one pass */
1822 /* Initialise one_pass GF frames stats */
1823 /* Update stats used for GF selection */
1824 if (cpi->pass == 0)
1825 {
1826 cpi->one_pass_frame_index = 0;
1827
1828 for (i = 0; i < MAX_LAG_BUFFERS; ++i)
1829 {
1830 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1831 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1832 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1833 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1834 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1835 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1836 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1837 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1838 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1839 }
1840 }
1841 #endif
1842
1843 cpi->mse_source_denoised = 0;
1844
1845 /* Should we use the cyclic refresh method?
1846 * Currently there is no external control for this.
1847 * Enable it for error_resilient_mode, or for 1 pass CBR mode.
1848 */
1849 cpi->cyclic_refresh_mode_enabled =
1850 (cpi->oxcf.error_resilient_mode ||
1851 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
1852 cpi->oxcf.Mode <= 2));
1853 cpi->cyclic_refresh_mode_max_mbs_perframe =
1854 (cpi->common.mb_rows * cpi->common.mb_cols) / 7;
1855 if (cpi->oxcf.number_of_layers == 1) {
1856 cpi->cyclic_refresh_mode_max_mbs_perframe =
1857 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1858 } else if (cpi->oxcf.number_of_layers == 2) {
1859 cpi->cyclic_refresh_mode_max_mbs_perframe =
1860 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1861 }
1862 cpi->cyclic_refresh_mode_index = 0;
1863 cpi->cyclic_refresh_q = 32;
1864
1865 // GF behavior for 1 pass CBR, used when error_resilience is off.
1866 cpi->gf_update_onepass_cbr = 0;
1867 cpi->gf_noboost_onepass_cbr = 0;
1868 if (!cpi->oxcf.error_resilient_mode &&
1869 cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && cpi->oxcf.Mode <= 2) {
1870 cpi->gf_update_onepass_cbr = 1;
1871 cpi->gf_noboost_onepass_cbr = 1;
1872 cpi->gf_interval_onepass_cbr =
1873 cpi->cyclic_refresh_mode_max_mbs_perframe > 0
1874 ? (2 * (cpi->common.mb_rows * cpi->common.mb_cols) /
1875 cpi->cyclic_refresh_mode_max_mbs_perframe)
1876 : 10;
1877 cpi->gf_interval_onepass_cbr =
1878 VPXMIN(40, VPXMAX(6, cpi->gf_interval_onepass_cbr));
1879 cpi->baseline_gf_interval = cpi->gf_interval_onepass_cbr;
1880 }
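/* The interval above is roughly twice the number of frames the cyclic
 * refresh needs to cover every macroblock once
 * (2 * total_MBs / max_MBs_refreshed_per_frame), clamped to [6, 40].
 */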
1881
1882 if (cpi->cyclic_refresh_mode_enabled) {
1883 CHECK_MEM_ERROR(&cpi->common.error, cpi->cyclic_refresh_map,
1884 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1885 } else {
1886 cpi->cyclic_refresh_map = (signed char *)NULL;
1887 }
1888
1889 CHECK_MEM_ERROR(
1890 &cpi->common.error, cpi->skin_map,
1891 vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(cpi->skin_map[0])));
1892
1893 CHECK_MEM_ERROR(&cpi->common.error, cpi->consec_zero_last,
1894 vpx_calloc(cm->mb_rows * cm->mb_cols, 1));
1895 CHECK_MEM_ERROR(&cpi->common.error, cpi->consec_zero_last_mvbias,
1896 vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1897
1898 /* Initialize the feed-forward activity masking. */
1899 cpi->activity_avg = 90 << 12;
1900
1901 /* Give a sensible default for the first frame. */
1902 cpi->frames_since_key = 8;
1903 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1904 cpi->this_key_frame_forced = 0;
1905 cpi->next_key_frame_forced = 0;
1906
1907 cpi->source_alt_ref_pending = 0;
1908 cpi->source_alt_ref_active = 0;
1909 cpi->common.refresh_alt_ref_frame = 0;
1910
1911 cpi->force_maxqp = 0;
1912 cpi->frames_since_last_drop_overshoot = 0;
1913 cpi->rt_always_update_correction_factor = 0;
1914 cpi->rt_drop_recode_on_overshoot = 1;
1915
1916 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1917 #if CONFIG_INTERNAL_STATS
1918 cpi->b_calculate_ssimg = 0;
1919
1920 cpi->count = 0;
1921 cpi->bytes = 0;
1922
1923 if (cpi->b_calculate_psnr) {
1924 cpi->total_sq_error = 0.0;
1925 cpi->total_sq_error2 = 0.0;
1926 cpi->total_y = 0.0;
1927 cpi->total_u = 0.0;
1928 cpi->total_v = 0.0;
1929 cpi->total = 0.0;
1930 cpi->totalp_y = 0.0;
1931 cpi->totalp_u = 0.0;
1932 cpi->totalp_v = 0.0;
1933 cpi->totalp = 0.0;
1934 cpi->tot_recode_hits = 0;
1935 cpi->summed_quality = 0;
1936 cpi->summed_weights = 0;
1937 }
1938
1939 #endif
1940
1941 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1942
1943 cpi->frames_till_gf_update_due = 0;
1944 cpi->key_frame_count = 1;
1945
1946 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1947 cpi->ni_tot_qi = 0;
1948 cpi->ni_frames = 0;
1949 cpi->total_byte_count = 0;
1950
1951 cpi->drop_frame = 0;
1952
1953 cpi->rate_correction_factor = 1.0;
1954 cpi->key_frame_rate_correction_factor = 1.0;
1955 cpi->gf_rate_correction_factor = 1.0;
1956 cpi->twopass.est_max_qcorrection_factor = 1.0;
1957
1958 for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
1959 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1960 }
1961
1962 #ifdef OUTPUT_YUV_SRC
1963 yuv_file = fopen("bd.yuv", "ab");
1964 #endif
1965 #ifdef OUTPUT_YUV_DENOISED
1966 yuv_denoised_file = fopen("denoised.yuv", "ab");
1967 #endif
1968 #ifdef OUTPUT_YUV_SKINMAP
1969 yuv_skinmap_file = fopen("skinmap.yuv", "wb");
1970 #endif
1971
1972 #if 0
1973 framepsnr = fopen("framepsnr.stt", "a");
1974 kf_list = fopen("kf_list.stt", "w");
1975 #endif
1976
1977 cpi->output_pkt_list = oxcf->output_pkt_list;
1978
1979 #if !CONFIG_REALTIME_ONLY
1980
1981 if (cpi->pass == 1) {
1982 vp8_init_first_pass(cpi);
1983 } else if (cpi->pass == 2) {
1984 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1985 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1986
1987 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1988 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1989 cpi->twopass.stats_in_end =
1990 (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz);
1991 vp8_init_second_pass(cpi);
1992 }
1993
1994 #endif
1995
1996 if (cpi->compressor_speed == 2) {
1997 cpi->avg_encode_time = 0;
1998 cpi->avg_pick_mode_time = 0;
1999 }
2000
2001 vp8_set_speed_features(cpi);
2002
2003 /* Set starting values of RD threshold multipliers (128 = *1) */
2004 for (i = 0; i < MAX_MODES; ++i) {
2005 cpi->mb.rd_thresh_mult[i] = 128;
2006 }
2007
2008 #if CONFIG_MULTITHREAD
2009 if (vp8cx_create_encoder_threads(cpi)) {
2010 cpi->common.error.setjmp = 0;
2011 vp8_remove_compressor(&cpi);
2012 return 0;
2013 }
2014 #endif
2015
2016 cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16;
2017 cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16;
2018 cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16;
2019 cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d;
2020
2021 cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8;
2022 cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8;
2023 cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8;
2024 cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d;
2025
2026 cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16;
2027 cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16;
2028 cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16;
2029 cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d;
2030
2031 cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8;
2032 cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8;
2033 cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8;
2034 cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d;
2035
2036 cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4;
2037 cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4;
2038 cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4;
2039 cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d;
2040
2041 #if VPX_ARCH_X86 || VPX_ARCH_X86_64
2042 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2043 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2044 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2045 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2046 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2047 #endif
2048
2049 cpi->diamond_search_sad = vp8_diamond_search_sad;
2050 cpi->refining_search_sad = vp8_refining_search_sad;
2051
2052 /* make sure frame 1 is okay */
2053 cpi->mb.error_bins[0] = cpi->common.MBs;
2054
2055 /* vp8cx_init_quantizer() is first called here. Add check in
2056 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2057 * called later when needed. This will avoid unnecessary calls of
2058 * vp8cx_init_quantizer() for every frame.
2059 */
2060 vp8cx_init_quantizer(cpi);
2061
2062 vp8_loop_filter_init(cm);
2063
2064 #if CONFIG_MULTI_RES_ENCODING
2065
2066 /* Calculate # of MBs in a row in lower-resolution level image. */
2067 if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi);
2068
2069 #endif
2070
2071 /* Set up RD cost pointers in the MACROBLOCK struct */
2072
2073 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1];
2074 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1];
2075 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1];
2076 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1];
2077
2078 cal_mvsadcosts(cpi->mb.mvsadcost);
2079
2080 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2081 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2082 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2083 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2084 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2085
2086 /* setup block ptrs & offsets */
2087 vp8_setup_block_ptrs(&cpi->mb);
2088 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2089
2090 cpi->common.error.setjmp = 0;
2091
2092 return cpi;
2093 }
2094
2095 void vp8_remove_compressor(VP8_COMP **comp) {
2096 VP8_COMP *cpi = *comp;
2097
2098 if (!cpi) return;
2099
2100 if (cpi && (cpi->common.current_video_frame > 0)) {
2101 #if !CONFIG_REALTIME_ONLY
2102
2103 if (cpi->pass == 2) {
2104 vp8_end_second_pass(cpi);
2105 }
2106
2107 #endif
2108
2109 #if CONFIG_INTERNAL_STATS
2110
2111 if (cpi->pass != 1) {
2112 FILE *f = fopen("opsnr.stt", "a");
2113 double time_encoded =
2114 (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
2115 10000000.000;
2116
2117 if (cpi->b_calculate_psnr) {
2118 if (cpi->oxcf.number_of_layers > 1) {
2119 int i;
2120
2121 fprintf(f,
2122 "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2123 "GLPsnrP\tVPXSSIM\n");
2124 for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
2125 double dr =
2126 (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
2127 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2128 cpi->common.Width * cpi->common.Height;
2129 double total_psnr =
2130 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]);
2131 double total_psnr2 =
2132 vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]);
2133 double total_ssim =
2134 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0);
2135
2136 fprintf(f,
2137 "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2138 "%7.3f\t%7.3f\n",
2139 i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2140 total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2141 total_psnr2, total_ssim);
2142 }
2143 } else {
2144 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2145 double samples =
2146 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height;
2147 double total_psnr =
2148 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error);
2149 double total_psnr2 =
2150 vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2);
2151 double total_ssim =
2152 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
2153
2154 fprintf(f,
2155 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2156 "GLPsnrP\tVPXSSIM\n");
2157 fprintf(f,
2158 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2159 "%7.3f\n",
2160 dr, cpi->total / cpi->count, total_psnr,
2161 cpi->totalp / cpi->count, total_psnr2, total_ssim);
2162 }
2163 }
2164 fclose(f);
2165 #if 0
2166 f = fopen("qskip.stt", "a");
2167 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2168 fclose(f);
2169 #endif
2170 }
2171
2172 #endif
2173
2174 #ifdef SPEEDSTATS
2175
2176 if (cpi->compressor_speed == 2) {
2177 int i;
2178 FILE *f = fopen("cxspeed.stt", "a");
2179 cnt_pm /= cpi->common.MBs;
2180
2181 for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
2182
2183 fprintf(f, "\n");
2184 fclose(f);
2185 }
2186
2187 #endif
2188
2189 #ifdef MODE_STATS
2190 {
2191 extern int count_mb_seg[4];
2192 FILE *f = fopen("modes.stt", "a");
2193 double dr = cpi->framerate * (double)bytes * (double)8 / (double)count /
2194 (double)1000;
2195 fprintf(f, "intra_mode in Intra Frames:\n");
2196 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1],
2197 y_modes[2], y_modes[3], y_modes[4]);
2198 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1],
2199 uv_modes[2], uv_modes[3]);
2200 fprintf(f, "B: ");
2201 {
2202 int i;
2203
2204 for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
2205
2206 fprintf(f, "\n");
2207 }
2208
2209 fprintf(f, "Modes in Inter Frames:\n");
2210 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2211 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2],
2212 inter_y_modes[3], inter_y_modes[4], inter_y_modes[5],
2213 inter_y_modes[6], inter_y_modes[7], inter_y_modes[8],
2214 inter_y_modes[9]);
2215 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0],
2216 inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2217 fprintf(f, "B: ");
2218 {
2219 int i;
2220
2221 for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
2222
2223 fprintf(f, "\n");
2224 }
2225 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1],
2226 count_mb_seg[2], count_mb_seg[3]);
2227 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4],
2228 inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4],
2229 inter_b_modes[NEW4X4]);
2230
2231 fclose(f);
2232 }
2233 #endif
2234
2235 #if defined(SECTIONBITS_OUTPUT)
2236
2237 if (0) {
2238 int i;
2239 FILE *f = fopen("tokenbits.stt", "a");
2240
2241 for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2242
2243 fprintf(f, "\n");
2244 fclose(f);
2245 }
2246
2247 #endif
2248
2249 #if 0
2250 {
2251 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2252 printf("\n_frames receive_data encod_mb_row compress_frame Total\n");
2253 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2254 }
2255 #endif
2256 }
2257
2258 #if CONFIG_MULTITHREAD
2259 vp8cx_remove_encoder_threads(cpi);
2260 #endif
2261
2262 #if CONFIG_TEMPORAL_DENOISING
2263 vp8_denoiser_free(&cpi->denoiser);
2264 #endif
2265 dealloc_compressor_data(cpi);
2266 vpx_free(cpi->mb.ss);
2267 vpx_free(cpi->tok);
2268 vpx_free(cpi->skin_map);
2269 vpx_free(cpi->cyclic_refresh_map);
2270 vpx_free(cpi->consec_zero_last);
2271 vpx_free(cpi->consec_zero_last_mvbias);
2272
2273 vp8_remove_common(&cpi->common);
2274 vpx_free(cpi);
2275 *comp = 0;
2276
2277 #ifdef OUTPUT_YUV_SRC
2278 fclose(yuv_file);
2279 #endif
2280 #ifdef OUTPUT_YUV_DENOISED
2281 fclose(yuv_denoised_file);
2282 #endif
2283 #ifdef OUTPUT_YUV_SKINMAP
2284 fclose(yuv_skinmap_file);
2285 #endif
2286
2287 #if 0
2288
2289 if (keyfile)
2290 fclose(keyfile);
2291
2292 if (framepsnr)
2293 fclose(framepsnr);
2294
2295 if (kf_list)
2296 fclose(kf_list);
2297
2298 #endif
2299 }
2300
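/* Sum of squared error over a single plane: full 16x16 blocks are measured
 * with vpx_mse16x16(), and any right/bottom remainder (when dimensions are
 * not multiples of 16) is accumulated with plain scalar loops.
 */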
2301 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2302 unsigned char *recon, int recon_stride,
2303 unsigned int cols, unsigned int rows) {
2304 unsigned int row, col;
2305 uint64_t total_sse = 0;
2306 int diff;
2307
2308 for (row = 0; row + 16 <= rows; row += 16) {
2309 for (col = 0; col + 16 <= cols; col += 16) {
2310 unsigned int sse;
2311
2312 vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
2313 total_sse += sse;
2314 }
2315
2316 /* Handle odd-sized width */
2317 if (col < cols) {
2318 unsigned int border_row, border_col;
2319 unsigned char *border_orig = orig;
2320 unsigned char *border_recon = recon;
2321
2322 for (border_row = 0; border_row < 16; ++border_row) {
2323 for (border_col = col; border_col < cols; ++border_col) {
2324 diff = border_orig[border_col] - border_recon[border_col];
2325 total_sse += diff * diff;
2326 }
2327
2328 border_orig += orig_stride;
2329 border_recon += recon_stride;
2330 }
2331 }
2332
2333 orig += orig_stride * 16;
2334 recon += recon_stride * 16;
2335 }
2336
2337 /* Handle odd-sized height */
2338 for (; row < rows; ++row) {
2339 for (col = 0; col < cols; ++col) {
2340 diff = orig[col] - recon[col];
2341 total_sse += diff * diff;
2342 }
2343
2344 orig += orig_stride;
2345 recon += recon_stride;
2346 }
2347
2348 vpx_clear_system_state();
2349 return total_sse;
2350 }
2351
2352 static void generate_psnr_packet(VP8_COMP *cpi) {
2353 YV12_BUFFER_CONFIG *orig = cpi->Source;
2354 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2355 struct vpx_codec_cx_pkt pkt;
2356 uint64_t sse;
2357 int i;
2358 unsigned int width = cpi->common.Width;
2359 unsigned int height = cpi->common.Height;
2360
2361 pkt.kind = VPX_CODEC_PSNR_PKT;
2362 sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
2363 recon->y_stride, width, height);
2364 pkt.data.psnr.sse[0] = sse;
2365 pkt.data.psnr.sse[1] = sse;
2366 pkt.data.psnr.samples[0] = width * height;
2367 pkt.data.psnr.samples[1] = width * height;
2368
2369 width = (width + 1) / 2;
2370 height = (height + 1) / 2;
2371
2372 sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
2373 recon->uv_stride, width, height);
2374 pkt.data.psnr.sse[0] += sse;
2375 pkt.data.psnr.sse[2] = sse;
2376 pkt.data.psnr.samples[0] += width * height;
2377 pkt.data.psnr.samples[2] = width * height;
2378
2379 sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
2380 recon->uv_stride, width, height);
2381 pkt.data.psnr.sse[0] += sse;
2382 pkt.data.psnr.sse[3] = sse;
2383 pkt.data.psnr.samples[0] += width * height;
2384 pkt.data.psnr.samples[3] = width * height;
2385
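/* vpx_sse_to_psnr() implements the usual definition,
 * PSNR = 10 * log10(samples * 255^2 / sse); index 0 covers all planes
 * combined and indices 1..3 cover Y, U and V respectively.
 */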
2386 for (i = 0; i < 4; ++i) {
2387 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2388 (double)(pkt.data.psnr.sse[i]));
2389 }
2390
2391 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2392 }
2393
2394 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) {
2395 if (ref_frame_flags > 7) return -1;
2396
2397 cpi->ref_frame_flags = ref_frame_flags;
2398 return 0;
2399 }
2400 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) {
2401 if (ref_frame_flags > 7) return -1;
2402
2403 cpi->common.refresh_golden_frame = 0;
2404 cpi->common.refresh_alt_ref_frame = 0;
2405 cpi->common.refresh_last_frame = 0;
2406
2407 if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1;
2408
2409 if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1;
2410
2411 if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1;
2412
2413 cpi->ext_refresh_frame_flags_pending = 1;
2414 return 0;
2415 }
2416
2417 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2418 YV12_BUFFER_CONFIG *sd) {
2419 VP8_COMMON *cm = &cpi->common;
2420 int ref_fb_idx;
2421
2422 if (ref_frame_flag == VP8_LAST_FRAME) {
2423 ref_fb_idx = cm->lst_fb_idx;
2424 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2425 ref_fb_idx = cm->gld_fb_idx;
2426 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2427 ref_fb_idx = cm->alt_fb_idx;
2428 } else {
2429 return -1;
2430 }
2431
2432 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2433
2434 return 0;
2435 }
2436 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag,
2437 YV12_BUFFER_CONFIG *sd) {
2438 VP8_COMMON *cm = &cpi->common;
2439
2440 int ref_fb_idx;
2441
2442 if (ref_frame_flag == VP8_LAST_FRAME) {
2443 ref_fb_idx = cm->lst_fb_idx;
2444 } else if (ref_frame_flag == VP8_GOLD_FRAME) {
2445 ref_fb_idx = cm->gld_fb_idx;
2446 } else if (ref_frame_flag == VP8_ALTR_FRAME) {
2447 ref_fb_idx = cm->alt_fb_idx;
2448 } else {
2449 return -1;
2450 }
2451
2452 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2453
2454 return 0;
2455 }
2456 int vp8_update_entropy(VP8_COMP *cpi, int update) {
2457 VP8_COMMON *cm = &cpi->common;
2458 cm->refresh_entropy_probs = update;
2459
2460 return 0;
2461 }
2462
2463 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
2464 VP8_COMMON *cm = &cpi->common;
2465
2466 /* are we resizing the image */
2467 if (cm->horiz_scale != 0 || cm->vert_scale != 0) {
2468 #if CONFIG_SPATIAL_RESAMPLING
2469 int hr, hs, vr, vs;
2470 int tmp_height;
2471
2472 if (cm->vert_scale == 3) {
2473 tmp_height = 9;
2474 } else {
2475 tmp_height = 11;
2476 }
2477
2478 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2479 Scale2Ratio(cm->vert_scale, &vr, &vs);
2480
2481 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2482 tmp_height, hs, hr, vs, vr, 0);
2483
2484 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2485 cpi->Source = &cpi->scaled_source;
2486 #endif
2487 } else {
2488 cpi->Source = sd;
2489 }
2490 }
2491
2492 static int resize_key_frame(VP8_COMP *cpi) {
2493 #if CONFIG_SPATIAL_RESAMPLING
2494 VP8_COMMON *cm = &cpi->common;
2495
2496 /* Do we need to apply resampling for one-pass CBR?
2497 * In one pass this is more limited than in two-pass CBR.
2498 * The test and any change are only made once per key frame sequence.
2499 */
2500 if (cpi->oxcf.allow_spatial_resampling &&
2501 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) {
2502 int hr, hs, vr, vs;
2503 int new_width, new_height;
2504
2505 /* If we are below the resample DOWN watermark then scale down a
2506 * notch.
2507 */
2508 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
2509 cpi->oxcf.optimal_buffer_level / 100)) {
2510 cm->horiz_scale =
2511 (cm->horiz_scale < VP8E_ONETWO) ? cm->horiz_scale + 1 : VP8E_ONETWO;
2512 cm->vert_scale =
2513 (cm->vert_scale < VP8E_ONETWO) ? cm->vert_scale + 1 : VP8E_ONETWO;
2514 }
2515 /* Should we now start scaling back up */
2516 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
2517 cpi->oxcf.optimal_buffer_level / 100)) {
2518 cm->horiz_scale =
2519 (cm->horiz_scale > VP8E_NORMAL) ? cm->horiz_scale - 1 : VP8E_NORMAL;
2520 cm->vert_scale =
2521 (cm->vert_scale > VP8E_NORMAL) ? cm->vert_scale - 1 : VP8E_NORMAL;
2522 }
2523
2524 /* Get the new height and width */
2525 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2526 Scale2Ratio(cm->vert_scale, &vr, &vs);
2527 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2528 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2529
2530 /* If the image size has changed we need to reallocate the buffers
2531 * and resample the source image
2532 */
2533 if ((cm->Width != new_width) || (cm->Height != new_height)) {
2534 cm->Width = new_width;
2535 cm->Height = new_height;
2536 vp8_alloc_compressor_data(cpi);
2537 scale_and_extend_source(cpi->un_scaled_source, cpi);
2538 return 1;
2539 }
2540 }
2541
2542 #endif
2543 return 0;
2544 }
2545
2546 static void update_alt_ref_frame_stats(VP8_COMP *cpi) {
2547 VP8_COMMON *cm = &cpi->common;
2548
2549 /* Select an interval before next GF or altref */
2550 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2551
2552 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) {
2553 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2554
2555 /* Set the bits per frame that we should try and recover in
2556 * subsequent inter frames to account for the extra GF spend...
2557 * note that this does not apply for GF updates that occur
2558 * coincident with a key frame as the extra cost of key frames is
2559 * dealt with elsewhere.
2560 */
2561 cpi->gf_overspend_bits += cpi->projected_frame_size;
2562 cpi->non_gf_bitrate_adjustment =
2563 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2564 }
2565
2566 /* Update data structure that monitors level of reference to last GF */
2567 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2568 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2569
2570 /* This frame refreshes the GF, so following frames don't unless the user requests it */
2571 cpi->frames_since_golden = 0;
2572
2573 /* Clear the alternate reference update pending flag. */
2574 cpi->source_alt_ref_pending = 0;
2575
2576 /* Set the alternate reference frame active flag */
2577 cpi->source_alt_ref_active = 1;
2578 }
2579 static void update_golden_frame_stats(VP8_COMP *cpi) {
2580 VP8_COMMON *cm = &cpi->common;
2581
2582 /* Update the Golden frame usage counts. */
2583 if (cm->refresh_golden_frame) {
2584 /* Select an interval before next GF */
2585 if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2586
2587 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) {
2588 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2589
2590 /* Set the bits per frame that we should try and recover in
2591 * subsequent inter frames to account for the extra GF spend...
2592 * note that this does not apply for GF updates that occur
2593 * coincident with a key frame as the extra cost of key frames
2594 * is dealt with elsewhere.
2595 */
2596 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) {
2597 /* Calculate GF bits to be recovered:
2598 * projected size minus the average frame bits available for
2599 * inter frames over the clip as a whole
2600 */
2601 cpi->gf_overspend_bits +=
2602 (cpi->projected_frame_size - cpi->inter_frame_target);
2603 }
2604
2605 cpi->non_gf_bitrate_adjustment =
2606 cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2607 }
2608
2609 /* Update data structure that monitors level of reference to last GF */
2610 memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2611 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2612
2613 /* This frame refreshes the GF, so following frames don't unless the
2614 * user requests it
2615 */
2616 cm->refresh_golden_frame = 0;
2617 cpi->frames_since_golden = 0;
2618
2619 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2620 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2621 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2622 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2623
2624 /* ******** Fixed Q test code only ************ */
2625 /* If we are going to use the ALT reference for the next group of
2626 * frames set a flag to say so.
2627 */
2628 if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate &&
2629 !cpi->common.refresh_alt_ref_frame) {
2630 cpi->source_alt_ref_pending = 1;
2631 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2632 }
2633
2634 if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0;
2635
2636 /* Decrement count down till next gf */
2637 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2638
2639 } else if (!cpi->common.refresh_alt_ref_frame) {
2640 /* Decrement count down till next gf */
2641 if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--;
2642
2643 if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--;
2644
2645 cpi->frames_since_golden++;
2646
2647 if (cpi->frames_since_golden > 1) {
2648 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2649 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2650 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2651 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2652 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2653 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2654 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2655 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2656 }
2657 }
2658 }
2659
2660 /* This function updates the reference frame probability estimates that
2661 * will be used during mode selection
2662 */
2663 static void update_rd_ref_frame_probs(VP8_COMP *cpi) {
2664 VP8_COMMON *cm = &cpi->common;
2665
2666 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2667 const int rf_intra = rfct[INTRA_FRAME];
2668 const int rf_inter =
2669 rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2670
2671 if (cm->frame_type == KEY_FRAME) {
2672 cpi->prob_intra_coded = 255;
2673 cpi->prob_last_coded = 128;
2674 cpi->prob_gf_coded = 128;
2675 } else if (!(rf_intra + rf_inter)) {
2676 cpi->prob_intra_coded = 63;
2677 cpi->prob_last_coded = 128;
2678 cpi->prob_gf_coded = 128;
2679 }
2680
2681 /* update reference frame costs since we can do better than what we got
2682 * last frame.
2683 */
2684 if (cpi->oxcf.number_of_layers == 1) {
2685 if (cpi->common.refresh_alt_ref_frame) {
2686 cpi->prob_intra_coded += 40;
2687 if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255;
2688 cpi->prob_last_coded = 200;
2689 cpi->prob_gf_coded = 1;
2690 } else if (cpi->frames_since_golden == 0) {
2691 cpi->prob_last_coded = 214;
2692 } else if (cpi->frames_since_golden == 1) {
2693 cpi->prob_last_coded = 192;
2694 cpi->prob_gf_coded = 220;
2695 } else if (cpi->source_alt_ref_active) {
2696 cpi->prob_gf_coded -= 20;
2697
2698 if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10;
2699 }
2700 if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255;
2701 }
2702 }
2703
2704 #if !CONFIG_REALTIME_ONLY
2705 /* 1 = key, 0 = inter */
2706 static int decide_key_frame(VP8_COMP *cpi) {
2707 VP8_COMMON *cm = &cpi->common;
2708
2709 int code_key_frame = 0;
2710
2711 cpi->kf_boost = 0;
2712
2713 if (cpi->Speed > 11) return 0;
2714
2715 /* Clear down mmx registers */
2716 vpx_clear_system_state();
2717
2718 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) {
2719 double change = 1.0 *
2720 abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) /
2721 (1 + cpi->last_intra_error);
2722 double change2 =
2723 1.0 *
2724 abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) /
2725 (1 + cpi->last_prediction_error);
2726 double minerror = cm->MBs * 256;
2727
2728 cpi->last_intra_error = cpi->mb.intra_error;
2729 cpi->last_prediction_error = cpi->mb.prediction_error;
2730
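/* Fast heuristic: flag a key frame when the intra error is within 1.5x of
 * the inter prediction error (10 * intra / inter < 15), the inter error is
 * not trivially small, and either error has changed by more than 25%
 * relative to the previous frame.
 */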
2731 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 &&
2732 cpi->mb.prediction_error > minerror &&
2733 (change > .25 || change2 > .25)) {
2734 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra >
2735 * cpi->last_frame_percent_intra + 3*/
2736 return 1;
2737 }
2738
2739 return 0;
2740 }
2741
2742 /* If the following are true we might as well code a key frame */
2743 if (((cpi->this_frame_percent_intra == 100) &&
2744 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2745 ((cpi->this_frame_percent_intra > 95) &&
2746 (cpi->this_frame_percent_intra >=
2747 (cpi->last_frame_percent_intra + 5)))) {
2748 code_key_frame = 1;
2749 }
2750 /* In addition, if the following are true and this is not a golden frame,
2751 * then code a key frame. Note that on golden frames there often seems
2752 * to be a pop in intra usage anyway, hence this restriction is
2753 * designed to prevent spurious key frames. The intra pop needs to be
2754 * investigated.
2755 */
2756 else if (((cpi->this_frame_percent_intra > 60) &&
2757 (cpi->this_frame_percent_intra >
2758 (cpi->last_frame_percent_intra * 2))) ||
2759 ((cpi->this_frame_percent_intra > 75) &&
2760 (cpi->this_frame_percent_intra >
2761 (cpi->last_frame_percent_intra * 3 / 2))) ||
2762 ((cpi->this_frame_percent_intra > 90) &&
2763 (cpi->this_frame_percent_intra >
2764 (cpi->last_frame_percent_intra + 10)))) {
2765 if (!cm->refresh_golden_frame) code_key_frame = 1;
2766 }
2767
2768 return code_key_frame;
2769 }
2770
2771 static void Pass1Encode(VP8_COMP *cpi) {
2772 vp8_set_quantizer(cpi, 26);
2773 vp8_first_pass(cpi);
2774 }
2775 #endif
2776
2777 #if 0
2778 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
2779 {
2780
2781 /* write the frame */
2782 FILE *yframe;
2783 int i;
2784 char filename[255];
2785
2786 sprintf(filename, "cx\\y%04d.raw", this_frame);
2787 yframe = fopen(filename, "wb");
2788
2789 for (i = 0; i < frame->y_height; ++i)
2790 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
2791
2792 fclose(yframe);
2793 sprintf(filename, "cx\\u%04d.raw", this_frame);
2794 yframe = fopen(filename, "wb");
2795
2796 for (i = 0; i < frame->uv_height; ++i)
2797 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2798
2799 fclose(yframe);
2800 sprintf(filename, "cx\\v%04d.raw", this_frame);
2801 yframe = fopen(filename, "wb");
2802
2803 for (i = 0; i < frame->uv_height; ++i)
2804 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
2805
2806 fclose(yframe);
2807 }
2808 #endif
2809
2810 #if !CONFIG_REALTIME_ONLY
2811 /* Function to test for conditions that indicate we should loop
2812 * back and recode a frame.
2813 */
2814 static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q,
2815 int maxq, int minq) {
2816 int force_recode = 0;
2817 VP8_COMMON *cm = &cpi->common;
2818
2819 /* Is frame recode allowed at all?
2820 * Yes if either recode mode 1 is selected, or mode 2 is selected
2821 * and the frame is a key frame, golden frame or alt_ref_frame
2822 */
2823 if ((cpi->sf.recode_loop == 1) ||
2824 ((cpi->sf.recode_loop == 2) &&
2825 ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
2826 cm->refresh_alt_ref_frame))) {
2827 /* General over and under shoot tests */
2828 if (((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
2829 ((cpi->projected_frame_size < low_limit) && (q > minq))) {
2830 force_recode = 1;
2831 }
2832 /* Special Constrained quality tests */
2833 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
2834 /* Undershoot and below auto cq level */
2835 if ((q > cpi->cq_target_quality) &&
2836 (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) {
2837 force_recode = 1;
2838 }
2839 /* Severe undershoot and between auto and user cq level */
2840 else if ((q > cpi->oxcf.cq_level) &&
2841 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
2842 (cpi->active_best_quality > cpi->oxcf.cq_level)) {
2843 force_recode = 1;
2844 cpi->active_best_quality = cpi->oxcf.cq_level;
2845 }
2846 }
2847 }
2848
2849 return force_recode;
2850 }
2851 #endif // !CONFIG_REALTIME_ONLY
2852
2853 static void update_reference_frames(VP8_COMP *cpi) {
2854 VP8_COMMON *cm = &cpi->common;
2855 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
2856
2857 /* At this point the new frame has been encoded.
2858 * If any buffer copy / swapping is signaled it should be done here.
2859 */
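/* Each yv12_fb[] entry carries flags for the reference roles it currently
 * holds (LAST / GOLD / ALTR). Refreshing a reference re-points that role's
 * index at new_fb_idx and moves the flag; the copy_buffer_to_arf/gf values
 * of 1 and 2 likewise re-point to the LAST or GOLDEN/ALTREF buffer rather
 * than copying pixel data.
 */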
2860
2861 if (cm->frame_type == KEY_FRAME) {
2862 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME;
2863
2864 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2865 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2866
2867 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
2868
2869 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2870 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2871 } else {
2872 if (cm->refresh_alt_ref_frame) {
2873 assert(!cm->copy_buffer_to_arf);
2874
2875 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
2876 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2877 cm->alt_fb_idx = cm->new_fb_idx;
2878
2879 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
2880 } else if (cm->copy_buffer_to_arf) {
2881 assert(!(cm->copy_buffer_to_arf & ~0x3));
2882
2883 if (cm->copy_buffer_to_arf == 1) {
2884 if (cm->alt_fb_idx != cm->lst_fb_idx) {
2885 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
2886 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2887 cm->alt_fb_idx = cm->lst_fb_idx;
2888
2889 cpi->current_ref_frames[ALTREF_FRAME] =
2890 cpi->current_ref_frames[LAST_FRAME];
2891 }
2892 } else {
2893 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2894 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
2895 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
2896 cm->alt_fb_idx = cm->gld_fb_idx;
2897
2898 cpi->current_ref_frames[ALTREF_FRAME] =
2899 cpi->current_ref_frames[GOLDEN_FRAME];
2900 }
2901 }
2902 }
2903
2904 if (cm->refresh_golden_frame) {
2905 assert(!cm->copy_buffer_to_gf);
2906
2907 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
2908 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2909 cm->gld_fb_idx = cm->new_fb_idx;
2910
2911 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
2912 } else if (cm->copy_buffer_to_gf) {
2913 assert(!(cm->copy_buffer_to_arf & ~0x3));
2914
2915 if (cm->copy_buffer_to_gf == 1) {
2916 if (cm->gld_fb_idx != cm->lst_fb_idx) {
2917 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
2918 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2919 cm->gld_fb_idx = cm->lst_fb_idx;
2920
2921 cpi->current_ref_frames[GOLDEN_FRAME] =
2922 cpi->current_ref_frames[LAST_FRAME];
2923 }
2924 } else {
2925 if (cm->alt_fb_idx != cm->gld_fb_idx) {
2926 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
2927 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
2928 cm->gld_fb_idx = cm->alt_fb_idx;
2929
2930 cpi->current_ref_frames[GOLDEN_FRAME] =
2931 cpi->current_ref_frames[ALTREF_FRAME];
2932 }
2933 }
2934 }
2935 }
2936
2937 if (cm->refresh_last_frame) {
2938 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
2939 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
2940 cm->lst_fb_idx = cm->new_fb_idx;
2941
2942 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
2943 }
2944
2945 #if CONFIG_TEMPORAL_DENOISING
2946 if (cpi->oxcf.noise_sensitivity) {
2947 /* we shouldn't have to keep multiple copies as we know in advance which
2948 * buffer we should start from - for now, to get something up and running,
2949 * I've chosen to copy the buffers
2950 */
2951 if (cm->frame_type == KEY_FRAME) {
2952 int i;
2953 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
2954 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]);
2955 } else {
2956 vp8_yv12_extend_frame_borders(
2957 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
2958
2959 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) {
2960 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2961 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
2962 }
2963 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) {
2964 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2965 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
2966 }
2967 if (cm->refresh_last_frame) {
2968 vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
2969 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
2970 }
2971 }
2972 if (cpi->oxcf.noise_sensitivity == 4)
2973 vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source);
2974 }
2975 #endif
2976 }
2977
2978 static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source,
2979 YV12_BUFFER_CONFIG *dest,
2980 VP8_COMP *cpi) {
2981 int i, j;
2982 int Total = 0;
2983 int num_blocks = 0;
2984 int skip = 2;
2985 int min_consec_zero_last = 10;
2986 int tot_num_blocks = (source->y_height * source->y_width) >> 8;
2987 unsigned char *src = source->y_buffer;
2988 unsigned char *dst = dest->y_buffer;
2989
2990 /* Loop through the Y plane, every |skip| blocks along rows and columns,
2991 * summing the square differences, and only for blocks that have been
2992 * coded in zero_last mode for at least |min_consec_zero_last| frames in a row.
2993 */
2994 for (i = 0; i < source->y_height; i += 16 * skip) {
2995 int block_index_row = (i >> 4) * cpi->common.mb_cols;
2996 for (j = 0; j < source->y_width; j += 16 * skip) {
2997 int index = block_index_row + (j >> 4);
2998 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
2999 unsigned int sse;
3000 Total += vpx_mse16x16(src + j, source->y_stride, dst + j,
3001 dest->y_stride, &sse);
3002 num_blocks++;
3003 }
3004 }
3005 src += 16 * skip * source->y_stride;
3006 dst += 16 * skip * dest->y_stride;
3007 }
3008 // Only return non-zero if we have at least ~1/16 samples for estimate.
3009 if (num_blocks > (tot_num_blocks >> 4)) {
3010 assert(num_blocks != 0);
3011 return (Total / num_blocks);
3012 } else {
3013 return 0;
3014 }
3015 }
3016
3017 #if CONFIG_TEMPORAL_DENOISING
3018 static void process_denoiser_mode_change(VP8_COMP *cpi) {
3019 const VP8_COMMON *const cm = &cpi->common;
3020 int i, j;
3021 int total = 0;
3022 int num_blocks = 0;
3023 // Number of blocks skipped along row/column in computing the
3024 // nmse (normalized mean square error) of source.
3025 int skip = 2;
3026 // Only select blocks for computing nmse that have been encoded
3027 // as ZERO LAST min_consec_zero_last frames in a row.
3028 // Scale with number of temporal layers.
3029 int min_consec_zero_last = 12 / cpi->oxcf.number_of_layers;
3030 // Decision is tested for changing the denoising mode every
3031 // num_mode_change times this function is called. Note that this
3032 // function is called every 8 frames, so (8 * num_mode_change) is the number
3033 // of frames where denoising mode change is tested for switch.
3034 int num_mode_change = 20;
3035 // Framerate factor, to compensate for larger mse at lower framerates.
3036 // Use ref_framerate, which is full source framerate for temporal layers.
3037 // TODO(marpan): Adjust this factor.
3038 int fac_framerate = cpi->ref_framerate < 25.0f ? 80 : 100;
3039 int tot_num_blocks = cm->mb_rows * cm->mb_cols;
3040 int ystride = cpi->Source->y_stride;
3041 unsigned char *src = cpi->Source->y_buffer;
3042 unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer;
3043 static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128,
3044 128, 128, 128, 128, 128, 128,
3045 128, 128, 128, 128 };
3046 int bandwidth = (int)(cpi->target_bandwidth);
3047 // For temporal layers, use full bandwidth (top layer).
3048 if (cpi->oxcf.number_of_layers > 1) {
3049 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->oxcf.number_of_layers - 1];
3050 bandwidth = (int)(lc->target_bandwidth);
3051 }
3052 // Loop through the Y plane, every skip blocks along rows and columns,
3053 // summing the normalized mean square error, only for blocks that have
3054 // been encoded as ZEROMV LAST for at least min_consec_zero_last frames in
3055 // a row and have small sum difference between current and previous frame.
3056 // Normalization here is by the contrast of the current frame block.
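// Per-block metric used below: sse(cur, prev) / variance(cur vs flat 128),
// i.e. the temporal mean square difference normalized by the block's own
// spatial contrast, so flat and textured areas are weighted comparably.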
3057 for (i = 0; i < cm->Height; i += 16 * skip) {
3058 int block_index_row = (i >> 4) * cm->mb_cols;
3059 for (j = 0; j < cm->Width; j += 16 * skip) {
3060 int index = block_index_row + (j >> 4);
3061 if (cpi->consec_zero_last[index] >= min_consec_zero_last) {
3062 unsigned int sse;
3063 const unsigned int var =
3064 vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse);
3065 // Only consider this block as valid for noise measurement
3066 // if the sum_diff average of the current and previous frame
3067 // is small (to avoid effects from lighting change).
3068 if ((sse - var) < 128) {
3069 unsigned int sse2;
3070 const unsigned int act =
3071 vpx_variance16x16(src + j, ystride, const_source, 0, &sse2);
3072 if (act > 0) total += sse / act;
3073 num_blocks++;
3074 }
3075 }
3076 }
3077 src += 16 * skip * ystride;
3078 dst += 16 * skip * ystride;
3079 }
3080 total = total * fac_framerate / 100;
3081
3082 // Only consider this frame as valid sample if we have computed nmse over
3083 // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the
3084 // application inputs duplicate frames, or contrast is all zero).
3085 if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) {
3086 // Update the recursive mean square source_diff.
3087 total = (total << 8) / num_blocks;
3088 if (cpi->denoiser.nmse_source_diff_count == 0) {
3089 // First sample in new interval.
3090 cpi->denoiser.nmse_source_diff = total;
3091 cpi->denoiser.qp_avg = cm->base_qindex;
3092 } else {
3093 // For subsequent samples, use average with weight ~1/4 for new sample.
3094 cpi->denoiser.nmse_source_diff =
3095 (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2);
3096 cpi->denoiser.qp_avg =
3097 (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2);
3098 }
3099 cpi->denoiser.nmse_source_diff_count++;
3100 }
3101 // Check for changing the denoiser mode, when we have obtained #samples =
3102 // num_mode_change. Condition the change also on the bitrate and QP.
3103 if (cpi->denoiser.nmse_source_diff_count == num_mode_change) {
3104 // Check for going up: from normal to aggressive mode.
3105 if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) &&
3106 (cpi->denoiser.nmse_source_diff >
3107 cpi->denoiser.threshold_aggressive_mode) &&
3108 (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up &&
3109 bandwidth > cpi->denoiser.bitrate_threshold)) {
3110 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive);
3111 } else {
3112 // Check for going down: from aggressive to normal mode.
3113 if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3114 (cpi->denoiser.nmse_source_diff <
3115 cpi->denoiser.threshold_aggressive_mode)) ||
3116 ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) &&
3117 (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down ||
3118 bandwidth < cpi->denoiser.bitrate_threshold))) {
3119 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3120 }
3121 }
3122 // Reset metric and counter for next interval.
3123 cpi->denoiser.nmse_source_diff = 0;
3124 cpi->denoiser.qp_avg = 0;
3125 cpi->denoiser.nmse_source_diff_count = 0;
3126 }
3127 }
3128 #endif
3129
3130 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) {
3131 const FRAME_TYPE frame_type = cm->frame_type;
3132
3133 int update_any_ref_buffers = 1;
3134 if (cpi->common.refresh_last_frame == 0 &&
3135 cpi->common.refresh_golden_frame == 0 &&
3136 cpi->common.refresh_alt_ref_frame == 0) {
3137 update_any_ref_buffers = 0;
3138 }
3139
3140 if (cm->no_lpf) {
3141 cm->filter_level = 0;
3142 } else {
3143 struct vpx_usec_timer timer;
3144
3145 vpx_clear_system_state();
3146
3147 vpx_usec_timer_start(&timer);
3148 if (cpi->sf.auto_filter == 0) {
3149 #if CONFIG_TEMPORAL_DENOISING
3150 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3151 // Use the denoised buffer for selecting base loop filter level.
3152 // Denoised signal for current frame is stored in INTRA_FRAME.
3153 // No denoising on key frames.
3154 vp8cx_pick_filter_level_fast(
3155 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi);
3156 } else {
3157 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3158 }
3159 #else
3160 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3161 #endif
3162 } else {
3163 #if CONFIG_TEMPORAL_DENOISING
3164 if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) {
3165 // Use the denoised buffer for selecting base loop filter level.
3166 // Denoised signal for current frame is stored in INTRA_FRAME.
3167 // No denoising on key frames.
3168 vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3169 cpi);
3170 } else {
3171 vp8cx_pick_filter_level(cpi->Source, cpi);
3172 }
3173 #else
3174 vp8cx_pick_filter_level(cpi->Source, cpi);
3175 #endif
3176 }
3177
3178 if (cm->filter_level > 0) {
3179 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3180 }
3181
3182 vpx_usec_timer_mark(&timer);
3183 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3184 }
3185
3186 #if CONFIG_MULTITHREAD
3187 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
3188 /* signal that we have set filter_level */
3189 vp8_sem_post(&cpi->h_event_end_lpf);
3190 }
3191 #endif
3192
3193 // No need to apply loop-filter if the encoded frame does not update
3194 // any reference buffers.
3195 if (cm->filter_level > 0 && update_any_ref_buffers) {
3196 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3197 }
3198
3199 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3200 }
3201 // Return 1 if frame is to be dropped. Update frame drop decimation
3202 // counters.
3203 int vp8_check_drop_buffer(VP8_COMP *cpi) {
3204 VP8_COMMON *cm = &cpi->common;
3205 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3206 cpi->oxcf.optimal_buffer_level / 100);
3207 int drop_mark75 = drop_mark * 2 / 3;
3208 int drop_mark50 = drop_mark / 4;
3209 int drop_mark25 = drop_mark / 8;
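// Note: despite the names, drop_mark75/50/25 are 2/3, 1/4 and 1/8 of
// drop_mark (itself drop_frames_water_mark percent of the optimal buffer
// level); they act as progressively more aggressive decimation thresholds.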
3210 if (cpi->drop_frames_allowed) {
3211 /* The reset to decimation 0 is only done here for one pass.
3212 * Once it is set two pass leaves decimation on till the next kf.
3213 */
3214 if (cpi->buffer_level > drop_mark && cpi->decimation_factor > 0) {
3215 cpi->decimation_factor--;
3216 }
3217
3218 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) {
3219 cpi->decimation_factor = 1;
3220
3221 } else if (cpi->buffer_level < drop_mark25 &&
3222 (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) {
3223 cpi->decimation_factor = 3;
3224 } else if (cpi->buffer_level < drop_mark50 &&
3225 (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) {
3226 cpi->decimation_factor = 2;
3227 } else if (cpi->buffer_level < drop_mark75 &&
3228 (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) {
3229 cpi->decimation_factor = 1;
3230 }
3231 }
3232
  /* The following decimates the frame rate according to a regular
   * pattern (i.e. to 1/2 or 2/3 frame rate). This can be used to help
   * prevent buffer under-run in CBR mode. Alternatively it might be
   * desirable in some situations to drop frame rate but throw more bits
   * at each frame.
   *
   * Note that dropping a key frame can be problematic if spatial
   * resampling is also active.
   */
3242 if (cpi->decimation_factor > 0 && cpi->drop_frames_allowed) {
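    /* Give the frames that are kept a larger per-frame bit budget:
     * 3/2 for decimation factor 1, 5/4 for factors 2 and 3.
     */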
3243 switch (cpi->decimation_factor) {
3244 case 1:
3245 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3246 break;
3247 case 2:
3248 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3249 break;
3250 case 3:
3251 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3252 break;
3253 }
3254
3255 /* Note that we should not throw out a key frame (especially when
3256 * spatial resampling is enabled).
3257 */
3258 if (cm->frame_type == KEY_FRAME) {
3259 cpi->decimation_count = cpi->decimation_factor;
3260 } else if (cpi->decimation_count > 0) {
3261 cpi->decimation_count--;
3262
3263 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3264 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
3265 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3266 }
3267
3268 #if CONFIG_MULTI_RES_ENCODING
3269 vp8_store_drop_frame_info(cpi);
3270 #endif
3271
3272 cm->current_video_frame++;
3273 cpi->frames_since_key++;
3274 cpi->ext_refresh_frame_flags_pending = 0;
3275 // We advance the temporal pattern for dropped frames.
3276 cpi->temporal_pattern_counter++;
3277
3278 #if CONFIG_INTERNAL_STATS
3279 cpi->count++;
3280 #endif
3281
3282 cpi->buffer_level = cpi->bits_off_target;
3283
3284 if (cpi->oxcf.number_of_layers > 1) {
3285 unsigned int i;
3286
3287 /* Propagate bits saved by dropping the frame to higher
3288 * layers
3289 */
3290 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
3291 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3292 lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
3293 if (lc->bits_off_target > lc->maximum_buffer_size) {
3294 lc->bits_off_target = lc->maximum_buffer_size;
3295 }
3296 lc->buffer_level = lc->bits_off_target;
3297 }
3298 }
3299 return 1;
3300 } else {
3301 cpi->decimation_count = cpi->decimation_factor;
3302 }
3303 } else {
3304 cpi->decimation_count = 0;
3305 }
3306 return 0;
3307 }
3308
static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size,
                                      unsigned char *dest,
                                      unsigned char *dest_end,
                                      unsigned int *frame_flags) {
3313 int Q;
3314 int frame_over_shoot_limit;
3315 int frame_under_shoot_limit;
3316
3317 int Loop = 0;
3318
3319 VP8_COMMON *cm = &cpi->common;
3320 int active_worst_qchanged = 0;
3321
3322 #if !CONFIG_REALTIME_ONLY
3323 int q_low;
3324 int q_high;
3325 int zbin_oq_high;
3326 int zbin_oq_low = 0;
3327 int top_index;
3328 int bottom_index;
3329 int overshoot_seen = 0;
3330 int undershoot_seen = 0;
3331 #endif
3332
3333 /* Clear down mmx registers to allow floating point in what follows */
3334 vpx_clear_system_state();
3335
3336 if (cpi->force_next_frame_intra) {
3337 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3338 cpi->force_next_frame_intra = 0;
3339 }
3340
3341 /* For an alt ref frame in 2 pass we skip the call to the second pass
3342 * function that sets the target bandwidth
3343 */
3344 switch (cpi->pass) {
3345 #if !CONFIG_REALTIME_ONLY
3346 case 2:
3347 if (cpi->common.refresh_alt_ref_frame) {
3348 /* Per frame bit target for the alt ref frame */
3349 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3350 /* per second target bitrate */
3351 cpi->target_bandwidth =
3352 (int)(cpi->twopass.gf_bits * cpi->output_framerate);
3353 }
3354 break;
3355 #endif // !CONFIG_REALTIME_ONLY
3356 default: {
3357 const double per_frame_bandwidth =
3358 round(cpi->target_bandwidth / cpi->output_framerate);
3359 cpi->per_frame_bandwidth = (int)VPXMIN(per_frame_bandwidth, INT_MAX);
3360 break;
3361 }
3362 }
3363
3364 /* Default turn off buffer to buffer copying */
3365 cm->copy_buffer_to_gf = 0;
3366 cm->copy_buffer_to_arf = 0;
3367
3368 /* Clear zbin over-quant value and mode boost values. */
3369 cpi->mb.zbin_over_quant = 0;
3370 cpi->mb.zbin_mode_boost = 0;
3371
3372 /* Enable or disable mode based tweaking of the zbin
3373 * For 2 Pass Only used where GF/ARF prediction quality
3374 * is above a threshold
3375 */
3376 cpi->mb.zbin_mode_boost_enabled = 1;
3377 if (cpi->pass == 2) {
3378 if (cpi->gfu_boost <= 400) {
3379 cpi->mb.zbin_mode_boost_enabled = 0;
3380 }
3381 }
3382
3383 /* Current default encoder behaviour for the altref sign bias */
3384 if (cpi->source_alt_ref_active) {
3385 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3386 } else {
3387 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3388 }
3389
3390 /* Check to see if a key frame is signaled
3391 * For two pass with auto key frame enabled cm->frame_type may already
3392 * be set, but not for one pass.
3393 */
3394 if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) ||
3395 (cpi->oxcf.auto_key &&
3396 (cpi->frames_since_key % cpi->key_frame_frequency == 0))) {
3397 /* Key frame from VFW/auto-keyframe/first frame */
3398 cm->frame_type = KEY_FRAME;
3399 #if CONFIG_TEMPORAL_DENOISING
3400 if (cpi->oxcf.noise_sensitivity == 4) {
3401 // For adaptive mode, reset denoiser to normal mode on key frame.
3402 vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV);
3403 }
3404 #endif
3405 }
3406
3407 #if CONFIG_MULTI_RES_ENCODING
3408 if (cpi->oxcf.mr_total_resolutions > 1) {
3409 LOWER_RES_FRAME_INFO *low_res_frame_info =
3410 (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
3411
3412 if (cpi->oxcf.mr_encoder_id) {
3413 // Check if lower resolution is available for motion vector reuse.
3414 if (cm->frame_type != KEY_FRAME) {
3415 cpi->mr_low_res_mv_avail = 1;
3416 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3417
3418 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3419 cpi->mr_low_res_mv_avail &=
3420 (cpi->current_ref_frames[LAST_FRAME] ==
3421 low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3422
3423 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3424 cpi->mr_low_res_mv_avail &=
3425 (cpi->current_ref_frames[GOLDEN_FRAME] ==
3426 low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3427
3428 // Don't use altref to determine whether low res is available.
3429 // TODO (marpan): Should we make this type of condition on a
3430 // per-reference frame basis?
3431 /*
3432 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3433 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3434 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3435 */
3436 }
3437 // Disable motion vector reuse (i.e., disable any usage of the low_res)
3438 // if the previous lower stream is skipped/disabled.
3439 if (low_res_frame_info->skip_encoding_prev_stream) {
3440 cpi->mr_low_res_mv_avail = 0;
3441 }
3442 }
    // This stream is not skipped (i.e., it is being encoded), so set this
    // skip flag to 0. This is needed for the next stream (i.e., the next
    // frame to be encoded).
3446 low_res_frame_info->skip_encoding_prev_stream = 0;
3447
3448 // On a key frame: For the lowest resolution, keep track of the key frame
3449 // counter value. For the higher resolutions, reset the current video
3450 // frame counter to that of the lowest resolution.
    // This is done to handle the case where we may stop/start encoding
3452 // higher layer(s). The restart-encoding of higher layer is only signaled
3453 // by a key frame for now.
3454 // TODO (marpan): Add flag to indicate restart-encoding of higher layer.
3455 if (cm->frame_type == KEY_FRAME) {
3456 if (cpi->oxcf.mr_encoder_id) {
3457 // If the initial starting value of the buffer level is zero (this can
3458 // happen because we may have not started encoding this higher stream),
3459 // then reset it to non-zero value based on |starting_buffer_level|.
3460 if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) {
3461 unsigned int i;
3462 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
3463 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
3464 for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
3465 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3466 lc->bits_off_target = lc->starting_buffer_level;
3467 lc->buffer_level = lc->starting_buffer_level;
3468 }
3469 }
3470 cpi->common.current_video_frame =
3471 low_res_frame_info->key_frame_counter_value;
3472 } else {
3473 low_res_frame_info->key_frame_counter_value =
3474 cpi->common.current_video_frame;
3475 }
3476 }
3477 }
3478 #endif
3479
3480 // Find the reference frame closest to the current frame.
3481 cpi->closest_reference_frame = LAST_FRAME;
3482 if (cm->frame_type != KEY_FRAME) {
3483 int i;
3484 MV_REFERENCE_FRAME closest_ref = INTRA_FRAME;
3485 if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
3486 closest_ref = LAST_FRAME;
3487 } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
3488 closest_ref = GOLDEN_FRAME;
3489 } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
3490 closest_ref = ALTREF_FRAME;
3491 }
3492 for (i = 1; i <= 3; ++i) {
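      /* Map the reference index (1 = LAST, 2 = GOLDEN, 3 = ALTREF) to its
       * bit in ref_frame_flags; the altref flag has bit value 4, hence the
       * special case for i == 3.
       */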
3493 vpx_ref_frame_type_t ref_frame_type =
3494 (vpx_ref_frame_type_t)((i == 3) ? 4 : i);
3495 if (cpi->ref_frame_flags & ref_frame_type) {
3496 if ((cm->current_video_frame - cpi->current_ref_frames[i]) <
3497 (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) {
3498 closest_ref = i;
3499 }
3500 }
3501 }
3502 cpi->closest_reference_frame = closest_ref;
3503 }
3504
3505 /* Set various flags etc to special state if it is a key frame */
3506 if (cm->frame_type == KEY_FRAME) {
3507 int i;
3508
3509 // Set the loop filter deltas and segmentation map update
3510 setup_features(cpi);
3511
3512 /* The alternate reference frame cannot be active for a key frame */
3513 cpi->source_alt_ref_active = 0;
3514
3515 /* Reset the RD threshold multipliers to default of * 1 (128) */
3516 for (i = 0; i < MAX_MODES; ++i) {
3517 cpi->mb.rd_thresh_mult[i] = 128;
3518 }
3519
3520 // Reset the zero_last counter to 0 on key frame.
3521 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3522 memset(cpi->consec_zero_last_mvbias, 0,
3523 (cpi->common.mb_rows * cpi->common.mb_cols));
3524 }
3525
3526 #if 0
3527 /* Experimental code for lagged compress and one pass
3528 * Initialise one_pass GF frames stats
3529 * Update stats used for GF selection
3530 */
3531 {
3532 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3533
3534 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3535 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3536 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3537 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3538 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3539 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3540 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3541 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3542 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3543 }
3544 #endif
3545
3546 update_rd_ref_frame_probs(cpi);
3547
3548 if (vp8_check_drop_buffer(cpi)) {
3549 return;
3550 }
3551
3552 /* Decide how big to make the frame */
3553 if (!vp8_pick_frame_size(cpi)) {
    /* TODO: the two drop-frame/return code paths could be put together. */
3555 #if CONFIG_MULTI_RES_ENCODING
3556 vp8_store_drop_frame_info(cpi);
3557 #endif
3558 cm->current_video_frame++;
3559 cpi->frames_since_key++;
3560 cpi->ext_refresh_frame_flags_pending = 0;
3561 // We advance the temporal pattern for dropped frames.
3562 cpi->temporal_pattern_counter++;
3563 return;
3564 }
3565
3566 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3567 * This has a knock on effect on active best quality as well.
3568 * For CBR if the buffer reaches its maximum level then we can no longer
3569 * save up bits for later frames so we might as well use them up
3570 * on the current frame.
3571 */
3572 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3573 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) &&
3574 cpi->buffered_mode) {
3575 /* Max adjustment is 1/4 */
3576 int Adjustment = cpi->active_worst_quality / 4;
3577
3578 if (Adjustment) {
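      /* Scale the adjustment (at most 1/4 of active_worst_quality) by how far
       * the buffer level sits above the optimal level relative to the
       * (maximum - optimal) range, so the full reduction only applies when
       * the buffer is essentially full.
       */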
3579 int buff_lvl_step;
3580
3581 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) {
3582 buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size -
3583 cpi->oxcf.optimal_buffer_level) /
3584 Adjustment);
3585
3586 if (buff_lvl_step) {
3587 Adjustment =
3588 (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) /
3589 buff_lvl_step);
3590 } else {
3591 Adjustment = 0;
3592 }
3593 }
3594
3595 cpi->active_worst_quality -= Adjustment;
3596
3597 if (cpi->active_worst_quality < cpi->active_best_quality) {
3598 cpi->active_worst_quality = cpi->active_best_quality;
3599 }
3600 }
3601 }
3602
3603 /* Set an active best quality and if necessary active worst quality
3604 * There is some odd behavior for one pass here that needs attention.
3605 */
3606 if ((cpi->pass == 2) || (cpi->ni_frames > 150)) {
3607 vpx_clear_system_state();
3608
3609 Q = cpi->active_worst_quality;
3610
3611 if (cm->frame_type == KEY_FRAME) {
3612 if (cpi->pass == 2) {
3613 if (cpi->gfu_boost > 600) {
3614 cpi->active_best_quality = kf_low_motion_minq[Q];
3615 } else {
3616 cpi->active_best_quality = kf_high_motion_minq[Q];
3617 }
3618
3619 /* Special case for key frames forced because we have reached
3620 * the maximum key frame interval. Here force the Q to a range
3621 * based on the ambient Q to reduce the risk of popping
3622 */
3623 if (cpi->this_key_frame_forced) {
3624 if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) {
3625 cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8;
3626 } else if (cpi->active_best_quality < (cpi->avg_frame_qindex >> 2)) {
3627 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3628 }
3629 }
3630 }
3631 /* One pass more conservative */
3632 else {
3633 cpi->active_best_quality = kf_high_motion_minq[Q];
3634 }
3635 }
3636
3637 else if (cpi->oxcf.number_of_layers == 1 &&
3638 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) {
3639 /* Use the lower of cpi->active_worst_quality and recent
3640 * average Q as basis for GF/ARF Q limit unless last frame was
3641 * a key frame.
3642 */
3643 if ((cpi->frames_since_key > 1) &&
3644 (cpi->avg_frame_qindex < cpi->active_worst_quality)) {
3645 Q = cpi->avg_frame_qindex;
3646 }
3647
3648 /* For constrained quality don't allow Q less than the cq level */
3649 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3650 (Q < cpi->cq_target_quality)) {
3651 Q = cpi->cq_target_quality;
3652 }
3653
3654 if (cpi->pass == 2) {
3655 if (cpi->gfu_boost > 1000) {
3656 cpi->active_best_quality = gf_low_motion_minq[Q];
3657 } else if (cpi->gfu_boost < 400) {
3658 cpi->active_best_quality = gf_high_motion_minq[Q];
3659 } else {
3660 cpi->active_best_quality = gf_mid_motion_minq[Q];
3661 }
3662
3663 /* Constrained quality use slightly lower active best. */
3664 if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3665 cpi->active_best_quality = cpi->active_best_quality * 15 / 16;
3666 }
3667 }
3668 /* One pass more conservative */
3669 else {
3670 cpi->active_best_quality = gf_high_motion_minq[Q];
3671 }
3672 } else {
3673 cpi->active_best_quality = inter_minq[Q];
3674
3675 /* For the constant/constrained quality mode we don't want
3676 * q to fall below the cq level.
3677 */
3678 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3679 (cpi->active_best_quality < cpi->cq_target_quality)) {
3680 /* If we are strongly undershooting the target rate in the last
3681 * frames then use the user passed in cq value not the auto
3682 * cq value.
3683 */
3684 if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) {
3685 cpi->active_best_quality = cpi->oxcf.cq_level;
3686 } else {
3687 cpi->active_best_quality = cpi->cq_target_quality;
3688 }
3689 }
3690 }
3691
    /* If CBR and the buffer is getting full then it is reasonable to allow
     * higher quality on the frames to prevent bits just going to waste.
     */
3695 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
      /* Note that the use of >= here eliminates the risk of a divide
       * by 0 error in the else if clause.
       */
3699 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) {
3700 cpi->active_best_quality = cpi->best_quality;
3701
3702 } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) {
3703 int Fraction =
3704 (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
3705 (cpi->oxcf.maximum_buffer_size -
3706 cpi->oxcf.optimal_buffer_level));
3707 int min_qadjustment =
3708 ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;
3709
3710 cpi->active_best_quality -= min_qadjustment;
3711 }
3712 }
3713 }
3714 /* Make sure constrained quality mode limits are adhered to for the first
3715 * few frames of one pass encodes
3716 */
3717 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
3718 if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame ||
3719 cpi->common.refresh_alt_ref_frame) {
3720 cpi->active_best_quality = cpi->best_quality;
3721 } else if (cpi->active_best_quality < cpi->cq_target_quality) {
3722 cpi->active_best_quality = cpi->cq_target_quality;
3723 }
3724 }
3725
3726 /* Clip the active best and worst quality values to limits */
3727 if (cpi->active_worst_quality > cpi->worst_quality) {
3728 cpi->active_worst_quality = cpi->worst_quality;
3729 }
3730
3731 if (cpi->active_best_quality < cpi->best_quality) {
3732 cpi->active_best_quality = cpi->best_quality;
3733 }
3734
3735 if (cpi->active_worst_quality < cpi->active_best_quality) {
3736 cpi->active_worst_quality = cpi->active_best_quality;
3737 }
3738
3739 /* Determine initial Q to try */
3740 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3741
3742 #if !CONFIG_REALTIME_ONLY
3743
3744 /* Set highest allowed value for Zbin over quant */
3745 if (cm->frame_type == KEY_FRAME) {
3746 zbin_oq_high = 0;
3747 } else if ((cpi->oxcf.number_of_layers == 1) &&
3748 ((cm->refresh_alt_ref_frame ||
3749 (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) {
3750 zbin_oq_high = 16;
3751 } else {
3752 zbin_oq_high = ZBIN_OQ_MAX;
3753 }
3754 #endif
3755
3756 compute_skin_map(cpi);
3757
3758 /* Setup background Q adjustment for error resilient mode.
3759 * For multi-layer encodes only enable this for the base layer.
3760 */
3761 if (cpi->cyclic_refresh_mode_enabled) {
3762 // Special case for screen_content_mode with golden frame updates.
3763 int disable_cr_gf =
3764 (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame);
3765 if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) {
3766 cyclic_background_refresh(cpi, Q, 0);
3767 } else {
3768 disable_segmentation(cpi);
3769 }
3770 }
3771
3772 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
3773 &frame_over_shoot_limit);
3774
3775 #if !CONFIG_REALTIME_ONLY
3776 /* Limit Q range for the adaptive loop. */
3777 bottom_index = cpi->active_best_quality;
3778 top_index = cpi->active_worst_quality;
3779 q_low = cpi->active_best_quality;
3780 q_high = cpi->active_worst_quality;
3781 #endif
3782
3783 vp8_save_coding_context(cpi);
3784
3785 scale_and_extend_source(cpi->un_scaled_source, cpi);
3786
3787 #if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC
3788 // Option to apply spatial blur under the aggressive or adaptive
3789 // (temporal denoising) mode.
3790 if (cpi->oxcf.noise_sensitivity >= 3) {
3791 if (cpi->denoiser.denoise_pars.spatial_blur != 0) {
3792 vp8_de_noise(cm, cpi->Source, cpi->denoiser.denoise_pars.spatial_blur, 1);
3793 }
3794 }
3795 #endif
3796
3797 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3798
3799 if (cpi->oxcf.noise_sensitivity > 0) {
3800 unsigned char *src;
3801 int l = 0;
3802
3803 switch (cpi->oxcf.noise_sensitivity) {
3804 case 1: l = 20; break;
3805 case 2: l = 40; break;
3806 case 3: l = 60; break;
3807 case 4: l = 80; break;
3808 case 5: l = 100; break;
3809 case 6: l = 150; break;
3810 }
3811
3812 if (cm->frame_type == KEY_FRAME) {
3813 vp8_de_noise(cm, cpi->Source, l, 1);
3814 } else {
3815 vp8_de_noise(cm, cpi->Source, l, 1);
3816
3817 src = cpi->Source->y_buffer;
3818
3819 if (cpi->Source->y_stride < 0) {
3820 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3821 }
3822 }
3823 }
3824
3825 #endif
3826
3827 #ifdef OUTPUT_YUV_SRC
3828 vpx_write_yuv_frame(yuv_file, cpi->Source);
3829 #endif
3830
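
  /* Main recode loop: quantize with the current Q, encode the frame and,
   * outside of realtime-only builds, adjust Q (and zbin_over_quant) and
   * re-encode until the projected frame size falls within the computed
   * under/overshoot limits or no further adjustment is possible.
   */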
3831 do {
3832 vpx_clear_system_state();
3833
3834 vp8_set_quantizer(cpi, Q);
3835
3836 /* setup skip prob for costing in mode/mv decision */
3837 if (cpi->common.mb_no_coeff_skip) {
3838 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3839
3840 if (cm->frame_type != KEY_FRAME) {
3841 if (cpi->common.refresh_alt_ref_frame) {
3842 if (cpi->last_skip_false_probs[2] != 0) {
3843 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3844 }
3845
3846 /*
3847 if(cpi->last_skip_false_probs[2]!=0 && abs(Q-
3848 cpi->last_skip_probs_q[2])<=16 )
3849 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3850 else if (cpi->last_skip_false_probs[2]!=0)
3851 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] +
3852 cpi->prob_skip_false ) / 2;
3853 */
3854 } else if (cpi->common.refresh_golden_frame) {
3855 if (cpi->last_skip_false_probs[1] != 0) {
3856 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3857 }
3858
3859 /*
3860 if(cpi->last_skip_false_probs[1]!=0 && abs(Q-
3861 cpi->last_skip_probs_q[1])<=16 )
3862 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3863 else if (cpi->last_skip_false_probs[1]!=0)
3864 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] +
3865 cpi->prob_skip_false ) / 2;
3866 */
3867 } else {
3868 if (cpi->last_skip_false_probs[0] != 0) {
3869 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3870 }
3871
3872 /*
3873 if(cpi->last_skip_false_probs[0]!=0 && abs(Q-
3874 cpi->last_skip_probs_q[0])<=16 )
3875 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3876 else if(cpi->last_skip_false_probs[0]!=0)
3877 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] +
3878 cpi->prob_skip_false ) / 2;
3879 */
3880 }
3881
        /* As this is for a cost estimate, make sure it does not
         * go to an extreme either way.
         */
3885 if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5;
3886
3887 if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250;
3888
3889 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) {
3890 cpi->prob_skip_false = 1;
3891 }
3892 }
3893
3894 #if 0
3895
3896 if (cpi->pass != 1)
3897 {
3898 FILE *f = fopen("skip.stt", "a");
3899 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3900 fclose(f);
3901 }
3902
3903 #endif
3904 }
3905
3906 if (cm->frame_type == KEY_FRAME) {
3907 if (resize_key_frame(cpi)) {
3908 /* If the frame size has changed, need to reset Q, quantizer,
3909 * and background refresh.
3910 */
3911 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3912 if (cpi->cyclic_refresh_mode_enabled) {
3913 if (cpi->current_layer == 0) {
3914 cyclic_background_refresh(cpi, Q, 0);
3915 } else {
3916 disable_segmentation(cpi);
3917 }
3918 }
3919 // Reset the zero_last counter to 0 on key frame.
3920 memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3921 memset(cpi->consec_zero_last_mvbias, 0,
3922 (cpi->common.mb_rows * cpi->common.mb_cols));
3923 vp8_set_quantizer(cpi, Q);
3924 }
3925
3926 vp8_setup_key_frame(cpi);
3927 }
3928
3929 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3930 {
3931 if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0;
3932
3933 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
3934 if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1;
3935 }
3936
3937 if (cm->refresh_entropy_probs == 0) {
3938 /* save a copy for later refresh */
3939 memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
3940 }
3941
3942 vp8_update_coef_context(cpi);
3943
3944 vp8_update_coef_probs(cpi);
3945
3946 /* transform / motion compensation build reconstruction frame
3947 * +pack coef partitions
3948 */
3949 vp8_encode_frame(cpi);
3950
3951 /* cpi->projected_frame_size is not needed for RT mode */
3952 }
3953 #else
3954 /* transform / motion compensation build reconstruction frame */
3955 vp8_encode_frame(cpi);
3956
3957 if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER &&
3958 cpi->rt_drop_recode_on_overshoot == 1) {
3959 if (vp8_drop_encodedframe_overshoot(cpi, Q)) {
3960 vpx_clear_system_state();
3961 return;
3962 }
3963 if (cm->frame_type != KEY_FRAME)
3964 cpi->last_pred_err_mb =
3965 (int)(cpi->mb.prediction_error / cpi->common.MBs);
3966 }
3967
3968 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
3969 cpi->projected_frame_size =
3970 (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
3971 #endif
3972 vpx_clear_system_state();
3973
3974 /* Test to see if the stats generated for this frame indicate that
3975 * we should have coded a key frame (assuming that we didn't)!
3976 */
3977
3978 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME &&
3979 cpi->compressor_speed != 2) {
3980 #if !CONFIG_REALTIME_ONLY
3981 if (decide_key_frame(cpi)) {
3982 /* Reset all our sizing numbers and recode */
3983 cm->frame_type = KEY_FRAME;
3984
3985 vp8_pick_frame_size(cpi);
3986
3987 /* Clear the Alt reference frame active flag when we have
3988 * a key frame
3989 */
3990 cpi->source_alt_ref_active = 0;
3991
3992 // Set the loop filter deltas and segmentation map update
3993 setup_features(cpi);
3994
3995 vp8_restore_coding_context(cpi);
3996
3997 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3998
3999 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit,
4000 &frame_over_shoot_limit);
4001
4002 /* Limit Q range for the adaptive loop. */
4003 bottom_index = cpi->active_best_quality;
4004 top_index = cpi->active_worst_quality;
4005 q_low = cpi->active_best_quality;
4006 q_high = cpi->active_worst_quality;
4007
4008 Loop = 1;
4009
4010 continue;
4011 }
4012 #endif
4013 }
4014
4015 vpx_clear_system_state();
4016
4017 if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
4018
    /* Are we overshooting and up against the limit of active max Q? */
4020 if (!cpi->rt_always_update_correction_factor &&
4021 ((cpi->pass != 2) ||
4022 (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4023 (Q == cpi->active_worst_quality) &&
4024 (cpi->active_worst_quality < cpi->worst_quality) &&
4025 (cpi->projected_frame_size > frame_over_shoot_limit)) {
4026 int over_size_percent =
4027 ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) /
4028 frame_over_shoot_limit;
4029
4030 /* If so is there any scope for relaxing it */
4031 while ((cpi->active_worst_quality < cpi->worst_quality) &&
4032 (over_size_percent > 0)) {
4033 cpi->active_worst_quality++;
4034 /* Assume 1 qstep = about 4% on frame size. */
4035 over_size_percent = (int)(over_size_percent * 0.96);
4036 }
4037 #if !CONFIG_REALTIME_ONLY
4038 top_index = cpi->active_worst_quality;
4039 #endif // !CONFIG_REALTIME_ONLY
      /* If we have updated the active max Q do not call
       * vp8_update_rate_correction_factors() in this loop iteration.
       */
4043 active_worst_qchanged = 1;
4044 } else {
4045 active_worst_qchanged = 0;
4046 }
4047
4048 #if CONFIG_REALTIME_ONLY
4049 Loop = 0;
4050 #else
4051 /* Special case handling for forced key frames */
4052 if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
4053 int last_q = Q;
4054 int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4055
4056 /* The key frame is not good enough */
4057 if (kf_err > ((cpi->ambient_err * 7) >> 3)) {
4058 /* Lower q_high */
4059 q_high = (Q > q_low) ? (Q - 1) : q_low;
4060
4061 /* Adjust Q */
4062 Q = (q_high + q_low) >> 1;
4063 }
4064 /* The key frame is much better than the previous frame */
4065 else if (kf_err < (cpi->ambient_err >> 1)) {
4066 /* Raise q_low */
4067 q_low = (Q < q_high) ? (Q + 1) : q_high;
4068
4069 /* Adjust Q */
4070 Q = (q_high + q_low + 1) >> 1;
4071 }
4072
4073 /* Clamp Q to upper and lower limits: */
4074 if (Q > q_high) {
4075 Q = q_high;
4076 } else if (Q < q_low) {
4077 Q = q_low;
4078 }
4079
4080 Loop = Q != last_q;
4081 }
4082
4083 /* Is the projected frame size out of range and are we allowed
4084 * to attempt to recode.
4085 */
4086 else if (recode_loop_test(cpi, frame_over_shoot_limit,
4087 frame_under_shoot_limit, Q, top_index,
4088 bottom_index)) {
4089 int last_q = Q;
4090 int Retries = 0;
4091
4092 /* Frame size out of permitted range. Update correction factor
4093 * & compute new Q to try...
4094 */
4095
4096 /* Frame is too large */
4097 if (cpi->projected_frame_size > cpi->this_frame_target) {
        /* Raise q_low to at least the current value */
4099 q_low = (Q < q_high) ? (Q + 1) : q_high;
4100
4101 /* If we are using over quant do the same for zbin_oq_low */
4102 if (cpi->mb.zbin_over_quant > 0) {
4103 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4104 ? (cpi->mb.zbin_over_quant + 1)
4105 : zbin_oq_high;
4106 }
4107
4108 if (undershoot_seen) {
4109 /* Update rate_correction_factor unless
4110 * cpi->active_worst_quality has changed.
4111 */
4112 if (!active_worst_qchanged) {
4113 vp8_update_rate_correction_factors(cpi, 1);
4114 }
4115
4116 Q = (q_high + q_low + 1) / 2;
4117
4118 /* Adjust cpi->zbin_over_quant (only allowed when Q
4119 * is max)
4120 */
4121 if (Q < MAXQ) {
4122 cpi->mb.zbin_over_quant = 0;
4123 } else {
4124 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high)
4125 ? (cpi->mb.zbin_over_quant + 1)
4126 : zbin_oq_high;
4127 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4128 }
4129 } else {
4130 /* Update rate_correction_factor unless
4131 * cpi->active_worst_quality has changed.
4132 */
4133 if (!active_worst_qchanged) {
4134 vp8_update_rate_correction_factors(cpi, 0);
4135 }
4136
4137 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4138
4139 while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4140 (Retries < 10)) {
4141 vp8_update_rate_correction_factors(cpi, 0);
4142 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4143 Retries++;
4144 }
4145 }
4146
4147 overshoot_seen = 1;
4148 }
4149 /* Frame is too small */
4150 else {
4151 if (cpi->mb.zbin_over_quant == 0) {
4152 /* Lower q_high if not using over quant */
4153 q_high = (Q > q_low) ? (Q - 1) : q_low;
4154 } else {
4155 /* else lower zbin_oq_high */
4156 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low)
4157 ? (cpi->mb.zbin_over_quant - 1)
4158 : zbin_oq_low;
4159 }
4160
4161 if (overshoot_seen) {
4162 /* Update rate_correction_factor unless
4163 * cpi->active_worst_quality has changed.
4164 */
4165 if (!active_worst_qchanged) {
4166 vp8_update_rate_correction_factors(cpi, 1);
4167 }
4168
4169 Q = (q_high + q_low) / 2;
4170
4171 /* Adjust cpi->zbin_over_quant (only allowed when Q
4172 * is max)
4173 */
4174 if (Q < MAXQ) {
4175 cpi->mb.zbin_over_quant = 0;
4176 } else {
4177 cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2;
4178 }
4179 } else {
4180 /* Update rate_correction_factor unless
4181 * cpi->active_worst_quality has changed.
4182 */
4183 if (!active_worst_qchanged) {
4184 vp8_update_rate_correction_factors(cpi, 0);
4185 }
4186
4187 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4188
          /* Special case reset for q_low for constrained quality.
           * This should only trigger where there is very substantial
           * undershoot on a frame and the auto cq level is above
           * the user passed in value.
           */
4194 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4195 (Q < q_low)) {
4196 q_low = Q;
4197 }
4198
4199 while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4200 (Retries < 10)) {
4201 vp8_update_rate_correction_factors(cpi, 0);
4202 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4203 Retries++;
4204 }
4205 }
4206
4207 undershoot_seen = 1;
4208 }
4209
4210 /* Clamp Q to upper and lower limits: */
4211 if (Q > q_high) {
4212 Q = q_high;
4213 } else if (Q < q_low) {
4214 Q = q_low;
4215 }
4216
4217 /* Clamp cpi->zbin_over_quant */
4218 cpi->mb.zbin_over_quant =
4219 (cpi->mb.zbin_over_quant < zbin_oq_low) ? zbin_oq_low
4220 : (cpi->mb.zbin_over_quant > zbin_oq_high) ? zbin_oq_high
4221 : cpi->mb.zbin_over_quant;
4222
4223 Loop = Q != last_q;
4224 } else {
4225 Loop = 0;
4226 }
4227 #endif // CONFIG_REALTIME_ONLY
4228
4229 if (cpi->is_src_frame_alt_ref) Loop = 0;
4230
4231 if (Loop == 1) {
4232 vp8_restore_coding_context(cpi);
4233 #if CONFIG_INTERNAL_STATS
4234 cpi->tot_recode_hits++;
4235 #endif
4236 }
4237 } while (Loop == 1);
4238
4239 #if defined(DROP_UNCODED_FRAMES)
4240 /* if there are no coded macroblocks at all drop this frame */
4241 if (cpi->common.MBs == cpi->mb.skip_true_count &&
4242 (cpi->drop_frame_count & 7) != 7 && cm->frame_type != KEY_FRAME) {
4243 cpi->common.current_video_frame++;
4244 cpi->frames_since_key++;
4245 cpi->drop_frame_count++;
4246 cpi->ext_refresh_frame_flags_pending = 0;
4247 // We advance the temporal pattern for dropped frames.
4248 cpi->temporal_pattern_counter++;
4249 return;
4250 }
4251 cpi->drop_frame_count = 0;
4252 #endif
4253
4254 #if 0
4255 /* Experimental code for lagged and one pass
4256 * Update stats used for one pass GF selection
4257 */
4258 {
4259 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4260 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4261 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4262 }
4263 #endif
4264
4265 /* Special case code to reduce pulsing when key frames are forced at a
4266 * fixed interval. Note the reconstruction error if it is the frame before
4267 * the force key frame
4268 */
4269 if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
4270 cpi->ambient_err =
4271 vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]);
4272 }
4273
  /* This frame's MVs are saved and will be used in the next frame's MV
   * predictor. Last frame has one more line (added at the bottom) and one
   * more column (added at the right) than cm->mip. The edge elements are
   * initialized to 0.
   */
4278 #if CONFIG_MULTI_RES_ENCODING
4279 if (!cpi->oxcf.mr_encoder_id && cm->show_frame)
4280 #else
4281 if (cm->show_frame) /* do not save for altref frame */
4282 #endif
4283 {
4284 int mb_row;
4285 int mb_col;
4286 /* Point to beginning of allocated MODE_INFO arrays. */
4287 MODE_INFO *tmp = cm->mip;
4288
4289 if (cm->frame_type != KEY_FRAME) {
4290 for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
4291 for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
4292 if (tmp->mbmi.ref_frame != INTRA_FRAME) {
4293 cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
4294 tmp->mbmi.mv.as_int;
4295 }
4296
4297 cpi->lf_ref_frame_sign_bias[mb_col +
4298 mb_row * (cm->mode_info_stride + 1)] =
4299 cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4300 cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] =
4301 tmp->mbmi.ref_frame;
4302 tmp++;
4303 }
4304 }
4305 }
4306 }
4307
4308 /* Count last ref frame 0,0 usage on current encoded frame. */
4309 {
4310 int mb_row;
4311 int mb_col;
4312 /* Point to beginning of MODE_INFO arrays. */
4313 MODE_INFO *tmp = cm->mi;
4314
4315 cpi->zeromv_count = 0;
4316
4317 if (cm->frame_type != KEY_FRAME) {
4318 for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
4319 for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
4320 if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) {
4321 cpi->zeromv_count++;
4322 }
4323 tmp++;
4324 }
4325 tmp++;
4326 }
4327 }
4328 }
4329
4330 #if CONFIG_MULTI_RES_ENCODING
4331 vp8_cal_dissimilarity(cpi);
4332 #endif
4333
4334 /* Update the GF usage maps.
4335 * This is done after completing the compression of a frame when all
4336 * modes etc. are finalized but before loop filter
4337 */
4338 if (cpi->oxcf.number_of_layers == 1) {
4339 vp8_update_gf_usage_maps(cpi, cm, &cpi->mb);
4340 }
4341
4342 if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1;
4343
4344 #if 0
4345 {
4346 FILE *f = fopen("gfactive.stt", "a");
4347 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4348 fclose(f);
4349 }
4350 #endif
4351
4352 /* For inter frames the current default behavior is that when
4353 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4354 * This is purely an encoder decision at present.
4355 * Avoid this behavior when refresh flags are set by the user.
4356 */
4357 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame &&
4358 !cpi->ext_refresh_frame_flags_pending) {
4359 cm->copy_buffer_to_arf = 2;
4360 } else {
4361 cm->copy_buffer_to_arf = 0;
4362 }
4363
4364 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4365
4366 #if CONFIG_TEMPORAL_DENOISING
  // Get some measure of the amount of noise, by measuring the (partial) mse
  // between source and denoised buffer, for the y channel. Partial refers to
  // computing the sse for a sub-sample of the frame (i.e., skip x blocks
  // along row/column), and only for blocks in that set that are consecutive
  // ZEROMV_LAST mode. Do this every ~8 frames, to further reduce complexity.
  // TODO(marpan): Keep this for now for the case
  // cpi->oxcf.noise_sensitivity < 4, should be removed in favor of the
  // process_denoiser_mode_change() function below.
4377 if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 &&
4378 !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 &&
4379 cm->frame_type != KEY_FRAME) {
4380 cpi->mse_source_denoised = measure_square_diff_partial(
4381 &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi);
4382 }
4383
4384 // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse
4385 // of source diff (between current and previous frame), and determine if we
4386 // should switch the denoiser mode. Sampling refers to computing the mse for
4387 // a sub-sample of the frame (i.e., skip x blocks along row/column), and
4388 // only for blocks in that set that have used ZEROMV LAST, along with some
4389 // constraint on the sum diff between blocks. This process is called every
4390 // ~8 frames, to further reduce complexity.
4391 if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode &&
4392 cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) {
4393 process_denoiser_mode_change(cpi);
4394 }
4395 #endif
4396
4397 #ifdef OUTPUT_YUV_SKINMAP
4398 if (cpi->common.current_video_frame > 1) {
4399 vp8_compute_skin_map(cpi, yuv_skinmap_file);
4400 }
4401 #endif
4402
4403 #if CONFIG_MULTITHREAD
4404 if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
4405 /* start loopfilter in separate thread */
4406 vp8_sem_post(&cpi->h_event_start_lpf);
4407 cpi->b_lpf_running = 1;
4408 /* wait for the filter_level to be picked so that we can continue with
4409 * stream packing */
4410 vp8_sem_wait(&cpi->h_event_end_lpf);
4411 } else
4412 #endif
4413 {
4414 vp8_loopfilter_frame(cpi, cm);
4415 }
4416
4417 update_reference_frames(cpi);
4418
4419 #ifdef OUTPUT_YUV_DENOISED
4420 vpx_write_yuv_frame(yuv_denoised_file,
4421 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4422 #endif
4423
4424 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4425 if (cpi->oxcf.error_resilient_mode) {
4426 cm->refresh_entropy_probs = 0;
4427 }
4428 #endif
4429
4430 /* build the bitstream */
4431 vp8_pack_bitstream(cpi, dest, dest_end, size);
4432
4433 /* Move storing frame_type out of the above loop since it is also
4434 * needed in motion search besides loopfilter */
4435 cm->last_frame_type = cm->frame_type;
4436
4437 /* Update rate control heuristics */
4438 cpi->total_byte_count += (*size);
4439 cpi->projected_frame_size = (int)(*size) << 3;
4440
4441 if (cpi->oxcf.number_of_layers > 1) {
4442 unsigned int i;
4443 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4444 cpi->layer_context[i].total_byte_count += (*size);
4445 }
4446 }
4447
4448 if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2);
4449
4450 cpi->last_q[cm->frame_type] = cm->base_qindex;
4451
4452 if (cm->frame_type == KEY_FRAME) {
4453 vp8_adjust_key_frame_context(cpi);
4454 }
4455
4456 /* Keep a record of ambient average Q. */
4457 if (cm->frame_type != KEY_FRAME) {
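    /* Exponential moving average: 3/4 previous average, 1/4 current frame Q,
     * with +2 for rounding before the shift.
     */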
4458 cpi->avg_frame_qindex =
4459 (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
4460 }
4461
4462 /* Keep a record from which we can calculate the average Q excluding
4463 * GF updates and key frames
4464 */
4465 if ((cm->frame_type != KEY_FRAME) &&
4466 ((cpi->oxcf.number_of_layers > 1) ||
4467 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) {
4468 cpi->ni_frames++;
4469
4470 /* Calculate the average Q for normal inter frames (not key or GFU
4471 * frames).
4472 */
4473 if (cpi->pass == 2) {
4474 cpi->ni_tot_qi += Q;
4475 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4476 } else {
4477 /* Damp value for first few frames */
4478 if (cpi->ni_frames > 150) {
4479 cpi->ni_tot_qi += Q;
4480 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4481 }
4482 /* For one pass, early in the clip ... average the current frame Q
4483 * value with the worstq entered by the user as a dampening measure
4484 */
4485 else {
4486 cpi->ni_tot_qi += Q;
4487 cpi->ni_av_qi =
4488 ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4489 }
4490
4491 /* If the average Q is higher than what was used in the last
4492 * frame (after going through the recode loop to keep the frame
4493 * size within range) then use the last frame value - 1. The -1
4494 * is designed to stop Q and hence the data rate, from
4495 * progressively falling away during difficult sections, but at
4496 * the same time reduce the number of iterations around the
4497 * recode loop.
4498 */
4499 if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1;
4500 }
4501 }
4502
4503 /* Update the buffer level variable. */
4504 /* Non-viewable frames are a special case and are treated as pure overhead. */
4505 if (!cm->show_frame) {
4506 cpi->bits_off_target -= cpi->projected_frame_size;
4507 } else {
4508 cpi->bits_off_target +=
4509 cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4510 }
4511
4512 /* Clip the buffer level to the maximum specified buffer size */
4513 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
4514 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4515 }
4516
4517 // Don't let the buffer level go below some threshold, given here
4518 // by -|maximum_buffer_size|. For now we only do this for
4519 // screen content input.
4520 if (cpi->oxcf.screen_content_mode &&
4521 cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) {
4522 cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size;
4523 }
4524
4525 /* Rolling monitors of whether we are over or underspending used to
4526 * help regulate min and Max Q in two pass.
4527 */
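  /* Both pairs are exponential moving averages, assuming the usual
   * round-to-nearest definition of ROUND64_POWER_OF_TWO:
   *   short term: new_avg = (3 * old_avg + sample + 2) >> 2   (1/4 weight)
   *   long term:  new_avg = (31 * old_avg + sample + 16) >> 5 (1/32 weight)
   */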
4528 cpi->rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4529 (int64_t)cpi->rolling_target_bits * 3 + cpi->this_frame_target, 2);
4530 cpi->rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4531 (int64_t)cpi->rolling_actual_bits * 3 + cpi->projected_frame_size, 2);
4532 cpi->long_rolling_target_bits = (int)ROUND64_POWER_OF_TWO(
4533 (int64_t)cpi->long_rolling_target_bits * 31 + cpi->this_frame_target, 5);
4534 cpi->long_rolling_actual_bits = (int)ROUND64_POWER_OF_TWO(
4535 (int64_t)cpi->long_rolling_actual_bits * 31 + cpi->projected_frame_size,
4536 5);
4537
4538 /* Actual bits spent */
4539 cpi->total_actual_bits += cpi->projected_frame_size;
4540
4541 #if 0 && CONFIG_INTERNAL_STATS
4542 /* Debug stats */
4543 cpi->total_target_vs_actual +=
4544 (cpi->this_frame_target - cpi->projected_frame_size);
4545 #endif
4546
4547 cpi->buffer_level = cpi->bits_off_target;
4548
4549 /* Propagate values to higher temporal layers */
4550 if (cpi->oxcf.number_of_layers > 1) {
4551 unsigned int i;
4552
4553 for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
4554 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4555 int bits_off_for_this_layer = (int)round(
4556 lc->target_bandwidth / lc->framerate - cpi->projected_frame_size);
4557
4558 lc->bits_off_target += bits_off_for_this_layer;
4559
4560 /* Clip buffer level to maximum buffer size for the layer */
4561 if (lc->bits_off_target > lc->maximum_buffer_size) {
4562 lc->bits_off_target = lc->maximum_buffer_size;
4563 }
4564
4565 lc->total_actual_bits += cpi->projected_frame_size;
4566 lc->total_target_vs_actual += bits_off_for_this_layer;
4567 lc->buffer_level = lc->bits_off_target;
4568 }
4569 }
4570
4571 /* Update bits left to the kf and gf groups to account for overshoot
4572 * or undershoot on these frames
4573 */
4574 if (cm->frame_type == KEY_FRAME) {
4575 cpi->twopass.kf_group_bits +=
4576 cpi->this_frame_target - cpi->projected_frame_size;
4577
4578 if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0;
4579 } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) {
4580 cpi->twopass.gf_group_bits +=
4581 cpi->this_frame_target - cpi->projected_frame_size;
4582
4583 if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0;
4584 }
4585
4586 if (cm->frame_type != KEY_FRAME) {
4587 if (cpi->common.refresh_alt_ref_frame) {
4588 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4589 cpi->last_skip_probs_q[2] = cm->base_qindex;
4590 } else if (cpi->common.refresh_golden_frame) {
4591 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4592 cpi->last_skip_probs_q[1] = cm->base_qindex;
4593 } else {
4594 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4595 cpi->last_skip_probs_q[0] = cm->base_qindex;
4596
4597 /* update the baseline */
4598 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4599 }
4600 }
4601
4602 #if 0 && CONFIG_INTERNAL_STATS
4603 {
4604 FILE *f = fopen("tmp.stt", "a");
4605
4606 vpx_clear_system_state();
4607
4608 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4609 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4610 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4611 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4612 cpi->common.current_video_frame, cpi->this_frame_target,
4613 cpi->projected_frame_size,
4614 (cpi->projected_frame_size - cpi->this_frame_target),
4615 cpi->total_target_vs_actual,
4616 cpi->buffer_level,
4617 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4618 cpi->total_actual_bits, cm->base_qindex,
4619 cpi->active_best_quality, cpi->active_worst_quality,
4620 cpi->ni_av_qi, cpi->cq_target_quality,
4621 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4622 cm->frame_type, cpi->gfu_boost,
4623 cpi->twopass.est_max_qcorrection_factor,
4624 cpi->twopass.bits_left,
4625 cpi->twopass.total_left_stats.coded_error,
4626 (double)cpi->twopass.bits_left /
4627 cpi->twopass.total_left_stats.coded_error,
4628 cpi->tot_recode_hits);
4629 else
4630 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4631 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4632 "%8.2lf %"PRId64" %10.3lf %8d\n",
4633 cpi->common.current_video_frame, cpi->this_frame_target,
4634 cpi->projected_frame_size,
4635 (cpi->projected_frame_size - cpi->this_frame_target),
4636 cpi->total_target_vs_actual,
4637 cpi->buffer_level,
4638 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4639 cpi->total_actual_bits, cm->base_qindex,
4640 cpi->active_best_quality, cpi->active_worst_quality,
4641 cpi->ni_av_qi, cpi->cq_target_quality,
4642 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4643 cm->frame_type, cpi->gfu_boost,
4644 cpi->twopass.est_max_qcorrection_factor,
4645 cpi->twopass.bits_left,
4646 cpi->twopass.total_left_stats.coded_error,
4647 cpi->tot_recode_hits);
4648
4649 fclose(f);
4650
4651 {
4652 FILE *fmodes = fopen("Modes.stt", "a");
4653
4654 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4655 cpi->common.current_video_frame,
4656 cm->frame_type, cm->refresh_golden_frame,
4657 cm->refresh_alt_ref_frame);
4658
4659 fprintf(fmodes, "\n");
4660
4661 fclose(fmodes);
4662 }
4663 }
4664
4665 #endif
4666
4667 cpi->ext_refresh_frame_flags_pending = 0;
4668
4669 if (cm->refresh_golden_frame == 1) {
4670 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4671 } else {
4672 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN;
4673 }
4674
4675 if (cm->refresh_alt_ref_frame == 1) {
4676 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4677 } else {
4678 cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF;
4679 }
4680
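  /* Track which reference buffers now hold identical content; any duplicate
   * reference is masked out of ref_frame_flags below.
   */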
4681 if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */
4682 cpi->gold_is_last = 1;
4683 } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) {
4684 /* 1 refreshed but not the other */
4685 cpi->gold_is_last = 0;
4686 }
4687
4688 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */
4689 cpi->alt_is_last = 1;
4690 } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) {
4691 /* 1 refreshed but not the other */
4692 cpi->alt_is_last = 0;
4693 }
4694
4695 if (cm->refresh_alt_ref_frame &
4696 cm->refresh_golden_frame) { /* both refreshed */
4697 cpi->gold_is_alt = 1;
4698 } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) {
4699 /* 1 refreshed but not the other */
4700 cpi->gold_is_alt = 0;
4701 }
4702
4703 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4704
4705 if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4706
4707 if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4708
4709 if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4710
4711 if (!cpi->oxcf.error_resilient_mode) {
4712 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame &&
4713 (cm->frame_type != KEY_FRAME)) {
4714 /* Update the alternate reference frame stats as appropriate. */
4715 update_alt_ref_frame_stats(cpi);
4716 } else {
4717 /* Update the Golden frame stats as appropriate. */
4718 update_golden_frame_stats(cpi);
4719 }
4720 }
4721
4722 if (cm->frame_type == KEY_FRAME) {
4723 /* Tell the caller that the frame was coded as a key frame */
4724 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4725
4726 /* As this frame is a key frame the next defaults to an inter frame. */
4727 cm->frame_type = INTER_FRAME;
4728
4729 cpi->last_frame_percent_intra = 100;
4730 } else {
4731 *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY;
4732
4733 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4734 }
4735
4736 /* Clear the one shot update flags for segmentation map and mode/ref
4737 * loop filter deltas.
4738 */
4739 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4740 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4741 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4742
4743 /* Don't increment frame counters if this was an altref buffer update
4744 * not a real frame
4745 */
4746 if (cm->show_frame) {
4747 cm->current_video_frame++;
4748 cpi->frames_since_key++;
4749 cpi->temporal_pattern_counter++;
4750 }
4751
4752 #if 0
4753 {
4754 char filename[512];
4755 FILE *recon_file;
4756 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4757 recon_file = fopen(filename, "wb");
4758 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4759 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4760 fclose(recon_file);
4761 }
4762 #endif
4763
4764 /* DEBUG */
4765 /* vpx_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4766 }
4767 #if !CONFIG_REALTIME_ONLY
static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest,
                        unsigned char *dest_end, unsigned int *frame_flags) {
4770 if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi);
4771
4772 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4773 cpi->twopass.bits_left -= 8 * (int)(*size);
4774
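  /* For every frame except the alt ref, credit back this frame's share of
   * the two-pass minimum section rate so that bits_left only tracks spend
   * above that floor.
   */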
4775 if (!cpi->common.refresh_alt_ref_frame) {
4776 double two_pass_min_rate =
4777 (double)(cpi->oxcf.target_bandwidth *
4778 cpi->oxcf.two_pass_vbrmin_section / 100);
4779 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4780 }
4781 }
4782 #endif
4783
int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags,
                          YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
                          int64_t end_time) {
4787 struct vpx_usec_timer timer;
4788 int res = 0;
4789
4790 vpx_usec_timer_start(&timer);
4791
4792 /* Reinit the lookahead buffer if the frame size changes */
4793 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) {
4794 assert(cpi->oxcf.lag_in_frames < 2);
4795 dealloc_raw_frame_buffers(cpi);
4796 alloc_raw_frame_buffers(cpi);
4797 }
4798
4799 if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags,
4800 cpi->active_map_enabled ? cpi->active_map : NULL)) {
4801 res = -1;
4802 }
4803 vpx_usec_timer_mark(&timer);
4804 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4805
4806 return res;
4807 }
4808
static int frame_is_reference(const VP8_COMP *cpi) {
4810 const VP8_COMMON *cm = &cpi->common;
4811 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4812
4813 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame ||
4814 cm->refresh_golden_frame || cm->refresh_alt_ref_frame ||
4815 cm->copy_buffer_to_gf || cm->copy_buffer_to_arf ||
4816 cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update ||
4817 xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4818 }
4819
int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
                            size_t *size, unsigned char *dest,
                            unsigned char *dest_end, int64_t *time_stamp,
                            int64_t *time_end, int flush) {
4824 VP8_COMMON *cm;
4825 struct vpx_usec_timer tsctimer;
4826 struct vpx_usec_timer ticktimer;
4827 struct vpx_usec_timer cmptimer;
4828 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4829
4830 if (!cpi) return -1;
4831
4832 cm = &cpi->common;
4833
4834 vpx_usec_timer_start(&cmptimer);
4835
4836 cpi->source = NULL;
4837
4838 #if !CONFIG_REALTIME_ONLY
4839 /* Should we code an alternate reference frame */
4840 if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate &&
4841 cpi->source_alt_ref_pending) {
4842 if ((cpi->source = vp8_lookahead_peek(
4843 cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) {
4844 cpi->alt_ref_source = cpi->source;
4845 if (cpi->oxcf.arnr_max_frames > 0) {
4846 vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due);
4847 force_src_buffer = &cpi->alt_ref_buffer;
4848 }
4849 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4850 cm->refresh_alt_ref_frame = 1;
4851 cm->refresh_golden_frame = 0;
4852 cm->refresh_last_frame = 0;
4853 cm->show_frame = 0;
4854 /* Clear Pending alt Ref flag. */
4855 cpi->source_alt_ref_pending = 0;
4856 cpi->is_src_frame_alt_ref = 0;
4857 }
4858 }
4859 #endif
4860
4861 if (!cpi->source) {
4862 /* Read last frame source if we are encoding first pass. */
    if (cpi->pass == 1 && cm->current_video_frame > 0) {
      if ((cpi->last_source =
               vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) {
        return -1;
      }
    }

    if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) {
      cm->show_frame = 1;

      cpi->is_src_frame_alt_ref =
          cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source);

      if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL;
    }
  }

  if (cpi->source) {
    cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
    cpi->un_scaled_source = cpi->Source;
    *time_stamp = cpi->source->ts_start;
    *time_end = cpi->source->ts_end;
    *frame_flags = cpi->source->flags;

    if (cpi->pass == 1 && cm->current_video_frame > 0) {
      cpi->last_frame_unscaled_source = &cpi->last_source->img;
    }
  } else {
    *size = 0;
#if !CONFIG_REALTIME_ONLY

    if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
      vp8_end_first_pass(cpi); /* get last stats packet */
      cpi->twopass.first_pass_done = 1;
    }

#endif

    return -1;
  }

  if (cpi->source->ts_start < cpi->first_time_stamp_ever) {
    cpi->first_time_stamp_ever = cpi->source->ts_start;
    cpi->last_end_time_stamp_seen = cpi->source->ts_start;
  }

  /* adjust frame rates based on timestamps given */
  if (cm->show_frame) {
    int64_t this_duration;
    int step = 0;

    if (cpi->source->ts_start == cpi->first_time_stamp_ever) {
      this_duration = cpi->source->ts_end - cpi->source->ts_start;
      step = 1;
    } else {
      int64_t last_duration;

      this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
      last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
      // Cap this to avoid overflow of (this_duration - last_duration) * 10
      this_duration = VPXMIN(this_duration, INT64_MAX / 10);
      /* do a step update if the duration changes by at least 10% */
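      /* For example (illustrative numbers, not from the source): moving from
       * a 30 fps to a 25 fps cadence gives last_duration = 333333 and
       * this_duration = 400000 ticks, so (66667 * 10) / 333333 == 2 and a
       * step update is triggered.
       */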
      if (last_duration) {
        step = (int)(((this_duration - last_duration) * 10 / last_duration));
      }
    }

    if (this_duration) {
      if (step) {
        cpi->ref_framerate = 10000000.0 / this_duration;
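        /* Timestamps at this level are in units where 10,000,000 == one
         * second, so e.g. a duration of 333,333 ticks corresponds to a frame
         * rate of roughly 30 fps.
         */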
      } else {
        double avg_duration, interval;

        /* Average this frame's rate into the last second's average
         * frame rate. If we haven't seen 1 second yet, then average
         * over the whole interval seen.
         */
        interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever);
        if (interval > 10000000.0) interval = 10000000;

        avg_duration = 10000000.0 / cpi->ref_framerate;
        avg_duration *= (interval - avg_duration + this_duration);
        avg_duration /= interval;
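        /* The two lines above are equivalent to
         *   avg_duration += avg_duration * (this_duration - avg_duration) / interval;
         * i.e. an exponentially weighted average whose per-frame weight is
         * avg_duration / interval (about 1/30 for a 30 fps stream once a full
         * second has been observed).
         */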

        cpi->ref_framerate = 10000000.0 / avg_duration;
      }
#if CONFIG_MULTI_RES_ENCODING
      if (cpi->oxcf.mr_total_resolutions > 1) {
        LOWER_RES_FRAME_INFO *low_res_frame_info =
            (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info;
        // Frame rate should be the same for all spatial layers in
        // multi-res-encoding (simulcast), so we constrain the frame rate for
        // higher layers to be that of the lowest resolution. This is needed
        // because the application may decide to skip encoding a high layer
        // and then start again, in which case a big jump in time-stamps will
        // be received for that high layer, which will yield an incorrect
        // frame rate (from the time-stamp adjustment in the above calculation).
        if (cpi->oxcf.mr_encoder_id) {
          if (!low_res_frame_info->skip_encoding_base_stream)
            cpi->ref_framerate = low_res_frame_info->low_res_framerate;
        } else {
          // Keep track of frame rate for lowest resolution.
          low_res_frame_info->low_res_framerate = cpi->ref_framerate;
          // The base stream is being encoded so set skip flag to 0.
          low_res_frame_info->skip_encoding_base_stream = 0;
        }
      }
#endif
      if (cpi->oxcf.number_of_layers > 1) {
        unsigned int i;

        /* Update frame rates for each layer */
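        /* As an illustrative (assumed) example: with three temporal layers
         * and rate_decimator = {4, 2, 1}, a 30 fps source yields per-layer
         * frame rates of 7.5, 15 and 30 fps respectively.
         */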
        assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
        for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS;
             ++i) {
          LAYER_CONTEXT *lc = &cpi->layer_context[i];
          lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i];
        }
      } else {
        vp8_new_framerate(cpi, cpi->ref_framerate);
      }
    }

    cpi->last_time_stamp_seen = cpi->source->ts_start;
    cpi->last_end_time_stamp_seen = cpi->source->ts_end;
  }

  if (cpi->oxcf.number_of_layers > 1) {
    int layer;

    vp8_update_layer_contexts(cpi);

    /* Restore layer specific context & set frame rate */
    if (cpi->temporal_layer_id >= 0) {
      layer = cpi->temporal_layer_id;
    } else {
      layer =
          cpi->oxcf
              .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
    }
    vp8_restore_layer_context(cpi, layer);
    vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
  }

  if (cpi->compressor_speed == 2) {
    vpx_usec_timer_start(&tsctimer);
    vpx_usec_timer_start(&ticktimer);
  }

  cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    int i;
    const int num_part = (1 << cm->multi_token_partition);
    /* the available bytes in dest */
    const unsigned long dest_size = dest_end - dest;
    const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
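    /* As a worked example (numbers assumed for illustration): with
     * dest_size = 100000 and num_part = 4, the control partition is given
     * 100000 / 10 = 10000 bytes and each token partition gets
     * (100000 * 9) / (10 * 4) = 22500 bytes.
     */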

    unsigned char *dp = dest;

    cpi->partition_d[0] = dp;
    dp += dest_size / 10; /* reserve 1/10 for control partition */
    cpi->partition_d_end[0] = dp;

    for (i = 0; i < num_part; ++i) {
      cpi->partition_d[i + 1] = dp;
      dp += tok_part_buff_size;
      cpi->partition_d_end[i + 1] = dp;
    }
  }
#endif

  /* start with a 0 size frame */
  *size = 0;

  /* Clear down mmx registers */
  vpx_clear_system_state();

  cm->frame_type = INTER_FRAME;
  cm->frame_flags = *frame_flags;

#if 0

    if (cm->refresh_alt_ref_frame)
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 0;
    }
    else
    {
        cm->refresh_golden_frame = 0;
        cm->refresh_last_frame = 1;
    }

#endif
  /* find a free buffer for the new frame */
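  /* cm->yv12_fb[i].flags records which reference frames currently point at
   * buffer i, so a buffer whose flags are zero is not in use and can hold the
   * new frame.
   */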
  {
    int i = 0;
    for (; i < NUM_YV12_BUFFERS; ++i) {
      if (!cm->yv12_fb[i].flags) {
        cm->new_fb_idx = i;
        break;
      }
    }

    assert(i < NUM_YV12_BUFFERS);
  }
  switch (cpi->pass) {
#if !CONFIG_REALTIME_ONLY
    case 1: Pass1Encode(cpi); break;
    case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break;
#endif  // !CONFIG_REALTIME_ONLY
    default:
      encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
      break;
  }

  if (cpi->compressor_speed == 2) {
    unsigned int duration, duration2;
    vpx_usec_timer_mark(&tsctimer);
    vpx_usec_timer_mark(&ticktimer);

    duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
    duration2 = (unsigned int)((double)duration / 2);

    if (cm->frame_type != KEY_FRAME) {
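      /* Running average of the per-frame encode time with a weight of 1/8 on
       * the newest sample: avg = (7 * avg + duration) / 8.
       */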
      if (cpi->avg_encode_time == 0) {
        cpi->avg_encode_time = duration;
      } else {
        cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
      }
    }

    if (duration2) {
      {
        if (cpi->avg_pick_mode_time == 0) {
          cpi->avg_pick_mode_time = duration2;
        } else {
          cpi->avg_pick_mode_time =
              (7 * cpi->avg_pick_mode_time + duration2) >> 3;
        }
      }
    }
  }

  if (cm->refresh_entropy_probs == 0) {
    memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
  }

  /* Save the contexts separately for alt ref, gold and last. */
  /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
  if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));

  if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));

  /* If this frame was dropped (*size == 0), keep the refresh requests pending
   * so they are honored on subsequent frames; otherwise return to the normal
   * state.
   */
  if (*size > 0) {
    cpi->droppable = !frame_is_reference(cpi);

    /* return to normal state */
    cm->refresh_entropy_probs = 1;
    cm->refresh_alt_ref_frame = 0;
    cm->refresh_golden_frame = 0;
    cm->refresh_last_frame = 1;
    cm->frame_type = INTER_FRAME;
  }

  /* Save layer specific state */
  if (cpi->oxcf.number_of_layers > 1) vp8_save_layer_context(cpi);

  vpx_usec_timer_mark(&cmptimer);
  cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);

#if CONFIG_MULTITHREAD
  /* wait for the loop filter thread to finish */
  if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) && cpi->b_lpf_running) {
    vp8_sem_wait(&cpi->h_event_end_lpf);
    cpi->b_lpf_running = 0;
  }
#endif

  if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) {
    generate_psnr_packet(cpi);
  }

#if CONFIG_INTERNAL_STATS

  if (cpi->pass != 1) {
    cpi->bytes += *size;

    if (cm->show_frame) {
      cpi->common.show_frame_mi = cpi->common.mi;
      cpi->count++;

      if (cpi->b_calculate_psnr) {
        uint64_t ye, ue, ve;
        double frame_psnr;
        YV12_BUFFER_CONFIG *orig = cpi->Source;
        YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
        unsigned int y_width = cpi->common.Width;
        unsigned int y_height = cpi->common.Height;
        unsigned int uv_width = (y_width + 1) / 2;
        unsigned int uv_height = (y_height + 1) / 2;
        int y_samples = y_height * y_width;
        int uv_samples = uv_height * uv_width;
        int t_samples = y_samples + 2 * uv_samples;
        double sq_error;

        ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer,
                              recon->y_stride, y_width, y_height);

        ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer,
                              recon->uv_stride, uv_width, uv_height);

        ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer,
                              recon->uv_stride, uv_width, uv_height);

        sq_error = (double)(ye + ue + ve);

        frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
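        /* vpx_sse_to_psnr(samples, peak, sse) returns
         * 10 * log10(samples * peak * peak / sse), clamped to an upper bound,
         * so frame_psnr is the combined PSNR over all three planes.
         */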

        cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
        cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
        cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
        cpi->total_sq_error += sq_error;
        cpi->total += frame_psnr;
#if CONFIG_POSTPROC
        {
          YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
          double sq_error2;
          double frame_psnr2, frame_ssim2 = 0;
          double weight = 0;

          vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer,
                      cm->filter_level * 10 / 6);
          vpx_clear_system_state();

          ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer,
                                pp->y_stride, y_width, y_height);

          ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer,
                                pp->uv_stride, uv_width, uv_height);

          ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer,
                                pp->uv_stride, uv_width, uv_height);

          sq_error2 = (double)(ye + ue + ve);

          frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);

          cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
          cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
          cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
          cpi->total_sq_error2 += sq_error2;
          cpi->totalp += frame_psnr2;

          frame_ssim2 =
              vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight);

          cpi->summed_quality += frame_ssim2 * weight;
          cpi->summed_weights += weight;

          if (cpi->oxcf.number_of_layers > 1) {
            unsigned int i;

            for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
              cpi->frames_in_layer[i]++;

              cpi->bytes_in_layer[i] += *size;
              cpi->sum_psnr[i] += frame_psnr;
              cpi->sum_psnr_p[i] += frame_psnr2;
              cpi->total_error2[i] += sq_error;
              cpi->total_error2_p[i] += sq_error2;
              cpi->sum_ssim[i] += frame_ssim2 * weight;
              cpi->sum_weights[i] += weight;
            }
          }
        }
#endif
      }
    }
  }

#if 0

    if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
    {
        skiptruecount += cpi->skip_true_count;
        skipfalsecount += cpi->skip_false_count;
    }

#endif
#if 0

    if (cpi->pass != 1)
    {
        FILE *f = fopen("skip.stt", "a");
        fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);

        if (cpi->is_src_frame_alt_ref == 1)
            fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count, *size);

        fclose(f);
    }

#endif
#endif

  return 0;
}

int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest,
                              vp8_ppflags_t *flags) {
  if (cpi->common.refresh_alt_ref_frame) {
    return -1;
  } else {
    int ret;

#if CONFIG_POSTPROC
    cpi->common.show_frame_mi = cpi->common.mi;
    ret = vp8_post_proc_frame(&cpi->common, dest, flags);
#else
    (void)flags;

    if (cpi->common.frame_to_show) {
      *dest = *cpi->common.frame_to_show;
      dest->y_width = cpi->common.Width;
      dest->y_height = cpi->common.Height;
      dest->uv_height = cpi->common.Height / 2;
      ret = 0;
    } else {
      ret = -1;
    }

#endif
    vpx_clear_system_state();
    return ret;
  }
}

int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                   unsigned int cols, int delta_q[4], int delta_lf[4],
                   unsigned int threshold[4]) {
  signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
  int internal_delta_q[MAX_MB_SEGMENTS];
  const int range = 63;
  int i;

  // Check that the number of rows and columns matches the frame's macroblock
  // grid.
  if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) {
    return -1;
  }

  for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
    // Note abs() alone can't be used as the behavior of abs(INT_MIN) is
    // undefined.
    if (delta_q[i] > range || delta_q[i] < -range || delta_lf[i] > range ||
        delta_lf[i] < -range) {
      return -1;
    }
  }

  // Disable segmentation if no map is provided, or if all deltas and
  // thresholds are zero.
  if (!map || (delta_q[0] == 0 && delta_q[1] == 0 && delta_q[2] == 0 &&
               delta_q[3] == 0 && delta_lf[0] == 0 && delta_lf[1] == 0 &&
               delta_lf[2] == 0 && delta_lf[3] == 0 && threshold[0] == 0 &&
               threshold[1] == 0 && threshold[2] == 0 && threshold[3] == 0)) {
    disable_segmentation(cpi);
    return 0;
  }

  // Translate the external delta q values to internal values.
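  // q_trans[] maps the user-visible quantizer scale onto the internal
  // quantizer index, so the sign of each delta is preserved while its
  // magnitude is converted to internal units.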
  for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
    internal_delta_q[i] =
        (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
  }

  /* Set the segmentation Map */
  set_segmentation_map(cpi, map);

  /* Activate segmentation. */
  enable_segmentation(cpi);

  /* Set up the quant segment data */
  feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
  feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
  feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
  feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];

  /* Set up the loop filter segment data */
  feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
  feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
  feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
  feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];

  cpi->segment_encode_breakout[0] = threshold[0];
  cpi->segment_encode_breakout[1] = threshold[1];
  cpi->segment_encode_breakout[2] = threshold[2];
  cpi->segment_encode_breakout[3] = threshold[3];

  /* Initialise the feature data structure */
  set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);

  if (threshold[0] != 0 || threshold[1] != 0 || threshold[2] != 0 ||
      threshold[3] != 0)
    cpi->use_roi_static_threshold = 1;
  cpi->cyclic_refresh_mode_enabled = 0;

  return 0;
}

int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
                       unsigned int cols) {
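  /* The active map holds one byte per macroblock, so its dimensions must
   * match the frame's mb_rows x mb_cols layout exactly; a NULL map simply
   * disables the feature.
   */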
  if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) {
    if (map) {
      memcpy(cpi->active_map, map, rows * cols);
      cpi->active_map_enabled = 1;
    } else {
      cpi->active_map_enabled = 0;
    }

    return 0;
  } else {
    return -1;
  }
}

int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING_MODE horiz_mode,
                          VPX_SCALING_MODE vert_mode) {
  if (horiz_mode <= VP8E_ONETWO) {
    cpi->common.horiz_scale = horiz_mode;
  } else {
    return -1;
  }

  if (vert_mode <= VP8E_ONETWO) {
    cpi->common.vert_scale = vert_mode;
  } else {
    return -1;
  }

  return 0;
}

int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
  int i, j;
  int Total = 0;

  unsigned char *src = source->y_buffer;
  unsigned char *dst = dest->y_buffer;

  /* Loop through the Y plane of the raw and reconstructed frames, summing the
   * squared differences.
   */
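  /* vpx_mse16x16() returns the summed squared error of one 16x16 block, so
   * accumulating it over every block gives the total Y-plane SSE. The loops
   * step in 16-pixel strides, which assumes macroblock-aligned dimensions.
   */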
  for (i = 0; i < source->y_height; i += 16) {
    for (j = 0; j < source->y_width; j += 16) {
      unsigned int sse;
      Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
                            &sse);
    }

    src += 16 * source->y_stride;
    dst += 16 * dest->y_stride;
  }

  return Total;
}

int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; }