xref: /aosp_15_r20/external/libaom/av1/encoder/pass2_strategy.c (revision 77c1e3ccc04c968bd2bc212e87364f250e820521)
1 /*
2  * Copyright (c) 2019, Alliance for Open Media. All rights reserved.
3  *
4  * This source code is subject to the terms of the BSD 2 Clause License and
5  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6  * was not distributed with this source code in the LICENSE file, you can
7  * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8  * Media Patent License 1.0 was not distributed with this source code in the
9  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10  */
11 
12 /*!\defgroup gf_group_algo Golden Frame Group
13  * \ingroup high_level_algo
14  * Algorithms regarding determining the length of GF groups and defining GF
15  * group structures.
16  * @{
17  */
18 /*! @} - end defgroup gf_group_algo */
19 
20 #include <assert.h>
21 #include <limits.h>
22 #include <stdint.h>
23 
24 #include "aom_dsp/aom_dsp_common.h"
25 #include "aom_mem/aom_mem.h"
26 #include "config/aom_config.h"
27 #include "config/aom_scale_rtcd.h"
28 
29 #include "aom/aom_codec.h"
30 #include "aom/aom_encoder.h"
31 
32 #include "av1/common/av1_common_int.h"
33 
34 #include "av1/encoder/encoder.h"
35 #include "av1/encoder/firstpass.h"
36 #include "av1/encoder/gop_structure.h"
37 #include "av1/encoder/pass2_strategy.h"
38 #include "av1/encoder/ratectrl.h"
39 #include "av1/encoder/rc_utils.h"
40 #include "av1/encoder/temporal_filter.h"
41 #if CONFIG_THREE_PASS
42 #include "av1/encoder/thirdpass.h"
43 #endif
44 #include "av1/encoder/tpl_model.h"
45 #include "av1/encoder/encode_strategy.h"
46 
47 #define DEFAULT_KF_BOOST 2300
48 #define DEFAULT_GF_BOOST 2000
49 #define GROUP_ADAPTIVE_MAXQ 1
50 
51 static void init_gf_stats(GF_GROUP_STATS *gf_stats);
52 #if CONFIG_THREE_PASS
53 static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params,
54                                  int is_final_pass);
55 #endif
56 
57 // Calculate an active area of the image that discounts formatting
58 // bars and partially discounts other 0 energy areas.
59 #define MIN_ACTIVE_AREA 0.5
60 #define MAX_ACTIVE_AREA 1.0
61 static double calculate_active_area(const FRAME_INFO *frame_info,
62                                     const FIRSTPASS_STATS *this_frame) {
63   const double active_pct =
64       1.0 -
65       ((this_frame->intra_skip_pct / 2) +
66        ((this_frame->inactive_zone_rows * 2) / (double)frame_info->mb_rows));
67   return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
68 }
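// Illustrative example (hypothetical numbers): for a letter-boxed clip where
// the black bars account for 25% of the mb rows and intra_skip_pct is
// negligible, active_pct = 1.0 - (0.25 * 2) = 0.5, i.e. half of the frame is
// treated as active; the result is then clamped to
// [MIN_ACTIVE_AREA, MAX_ACTIVE_AREA].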
69 
70 // Calculate a modified Error used in distributing bits between easier and
71 // harder frames.
72 #define ACT_AREA_CORRECTION 0.5
73 static double calculate_modified_err_new(const FRAME_INFO *frame_info,
74                                          const FIRSTPASS_STATS *total_stats,
75                                          const FIRSTPASS_STATS *this_stats,
76                                          int vbrbias, double modified_error_min,
77                                          double modified_error_max) {
78   if (total_stats == NULL) {
79     return 0;
80   }
81   const double av_weight = total_stats->weight / total_stats->count;
82   const double av_err =
83       (total_stats->coded_error * av_weight) / total_stats->count;
84   double modified_error =
85       av_err * pow(this_stats->coded_error * this_stats->weight /
86                        DOUBLE_DIVIDE_CHECK(av_err),
87                    vbrbias / 100.0);
88 
89   // Correction for active area. Frames with a reduced active area
90   // (eg due to formatting bars) have a higher error per mb for the
91   // remaining active MBs. The correction here assumes that coding
92   // 0.5N blocks of complexity 2X is a little easier than coding N
93   // blocks of complexity X.
94   modified_error *=
95       pow(calculate_active_area(frame_info, this_stats), ACT_AREA_CORRECTION);
96 
97   return fclamp(modified_error, modified_error_min, modified_error_max);
98 }
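// Illustrative behaviour of the formula above (hypothetical numbers): with
// vbrbias == 100 the exponent is 1.0, so each frame keeps its raw weighted
// coded error. With a smaller vbrbias, e.g. 50, a frame whose weighted error
// is 4x the clip average is credited with only sqrt(4) = 2x the average
// (before the active-area correction), pulling the bit allocation toward a
// more even spread across easy and hard frames.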
99 
100 static double calculate_modified_err(const FRAME_INFO *frame_info,
101                                      const TWO_PASS *twopass,
102                                      const AV1EncoderConfig *oxcf,
103                                      const FIRSTPASS_STATS *this_frame) {
104   const FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;
105   return calculate_modified_err_new(
106       frame_info, total_stats, this_frame, oxcf->rc_cfg.vbrbias,
107       twopass->modified_error_min, twopass->modified_error_max);
108 }
109 
110 // Resets the first pass stats read position to the given location in the
111 // stats buffer.
112 static void reset_fpf_position(TWO_PASS_FRAME *p_frame,
113                                const FIRSTPASS_STATS *position) {
114   p_frame->stats_in = position;
115 }
116 
117 static int input_stats(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
118                        FIRSTPASS_STATS *fps) {
119   if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;
120 
121   *fps = *p_frame->stats_in;
122   ++p_frame->stats_in;
123   return 1;
124 }
125 
126 static int input_stats_lap(TWO_PASS *p, TWO_PASS_FRAME *p_frame,
127                            FIRSTPASS_STATS *fps) {
128   if (p_frame->stats_in >= p->stats_buf_ctx->stats_in_end) return EOF;
129 
130   *fps = *p_frame->stats_in;
131   /* Move old stats[0] out to make room for the next frame's stats. */
132   memmove(p->frame_stats_arr[0], p->frame_stats_arr[1],
133           (p->stats_buf_ctx->stats_in_end - p_frame->stats_in - 1) *
134               sizeof(FIRSTPASS_STATS));
135   p->stats_buf_ctx->stats_in_end--;
136   return 1;
137 }
138 
139 // Read frame stats at an offset from the current position.
140 static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p,
141                                                const TWO_PASS_FRAME *p_frame,
142                                                int offset) {
143   if ((offset >= 0 &&
144        p_frame->stats_in + offset >= p->stats_buf_ctx->stats_in_end) ||
145       (offset < 0 &&
146        p_frame->stats_in + offset < p->stats_buf_ctx->stats_in_start)) {
147     return NULL;
148   }
149 
150   return &p_frame->stats_in[offset];
151 }
152 
153 // This function returns the maximum target rate per frame.
154 static int frame_max_bits(const RATE_CONTROL *rc,
155                           const AV1EncoderConfig *oxcf) {
156   int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
157                       (int64_t)oxcf->rc_cfg.vbrmax_section) /
158                      100;
159   if (max_bits < 0)
160     max_bits = 0;
161   else if (max_bits > rc->max_frame_bandwidth)
162     max_bits = rc->max_frame_bandwidth;
163 
164   return (int)max_bits;
165 }
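// Illustrative example (hypothetical numbers): with avg_frame_bandwidth of
// 100000 bits and vbrmax_section of 150 (percent), max_bits starts at
// 150000 bits and is then capped by rc->max_frame_bandwidth.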
166 
167 // Based on history adjust expectations of bits per macroblock.
168 static void twopass_update_bpm_factor(AV1_COMP *cpi, int rate_err_tol) {
169   TWO_PASS *const twopass = &cpi->ppi->twopass;
170   const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
171 
172   // Based on recent history adjust expectations of bits per macroblock.
173   double rate_err_factor = 1.0;
174   const double adj_limit = AOMMAX(0.2, (double)(100 - rate_err_tol) / 200.0);
175   const double min_fac = 1.0 - adj_limit;
176   const double max_fac = 1.0 + adj_limit;
177 
178 #if CONFIG_THREE_PASS
179   if (cpi->third_pass_ctx && cpi->third_pass_ctx->frame_info_count > 0) {
180     int64_t actual_bits = 0;
181     int64_t target_bits = 0;
182     double factor = 0.0;
183     int count = 0;
184     for (int i = 0; i < cpi->third_pass_ctx->frame_info_count; i++) {
185       actual_bits += cpi->third_pass_ctx->frame_info[i].actual_bits;
186       target_bits += cpi->third_pass_ctx->frame_info[i].bits_allocated;
187       factor += cpi->third_pass_ctx->frame_info[i].bpm_factor;
188       count++;
189     }
190 
191     if (count == 0) {
192       factor = 1.0;
193     } else {
194       factor /= (double)count;
195     }
196 
197     factor *= (double)actual_bits / DOUBLE_DIVIDE_CHECK((double)target_bits);
198 
199     if ((twopass->bpm_factor <= 1 && factor < twopass->bpm_factor) ||
200         (twopass->bpm_factor >= 1 && factor > twopass->bpm_factor)) {
201       twopass->bpm_factor = factor;
202       twopass->bpm_factor =
203           AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
204     }
205   }
206 #endif  // CONFIG_THREE_PASS
207 
208   int err_estimate = p_rc->rate_error_estimate;
209   int64_t total_actual_bits = p_rc->total_actual_bits;
210   double rolling_arf_group_actual_bits =
211       (double)twopass->rolling_arf_group_actual_bits;
212   double rolling_arf_group_target_bits =
213       (double)twopass->rolling_arf_group_target_bits;
214 
215 #if CONFIG_FPMT_TEST
216   const int is_parallel_frame =
217       cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 ? 1 : 0;
218   const int simulate_parallel_frame =
219       cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE
220           ? is_parallel_frame
221           : 0;
222   total_actual_bits = simulate_parallel_frame ? p_rc->temp_total_actual_bits
223                                               : p_rc->total_actual_bits;
224   rolling_arf_group_target_bits =
225       (double)(simulate_parallel_frame
226                    ? p_rc->temp_rolling_arf_group_target_bits
227                    : twopass->rolling_arf_group_target_bits);
228   rolling_arf_group_actual_bits =
229       (double)(simulate_parallel_frame
230                    ? p_rc->temp_rolling_arf_group_actual_bits
231                    : twopass->rolling_arf_group_actual_bits);
232   err_estimate = simulate_parallel_frame ? p_rc->temp_rate_error_estimate
233                                          : p_rc->rate_error_estimate;
234 #endif
235 
236   if ((p_rc->bits_off_target && total_actual_bits > 0) &&
237       (rolling_arf_group_target_bits >= 1.0)) {
238     if (rolling_arf_group_actual_bits > rolling_arf_group_target_bits) {
239       double error_fraction =
240           (rolling_arf_group_actual_bits - rolling_arf_group_target_bits) /
241           rolling_arf_group_target_bits;
242       error_fraction = (error_fraction > 1.0) ? 1.0 : error_fraction;
243       rate_err_factor = 1.0 + error_fraction;
244     } else {
245       double error_fraction =
246           (rolling_arf_group_target_bits - rolling_arf_group_actual_bits) /
247           rolling_arf_group_target_bits;
248       rate_err_factor = 1.0 - error_fraction;
249     }
250 
251     rate_err_factor = AOMMAX(min_fac, AOMMIN(max_fac, rate_err_factor));
252   }
253 
254   // Is the rate control trending in the right direction? Only make
255   // an adjustment if things are getting worse.
256   if ((rate_err_factor < 1.0 && err_estimate >= 0) ||
257       (rate_err_factor > 1.0 && err_estimate <= 0)) {
258     twopass->bpm_factor *= rate_err_factor;
259     twopass->bpm_factor = AOMMAX(min_fac, AOMMIN(max_fac, twopass->bpm_factor));
260   }
261 }
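// Illustrative behaviour of the adjustment above (hypothetical numbers): if
// the previous ARF group used 10% more bits than its target, rate_err_factor
// is about 1.1. bpm_factor is scaled by that amount only when the clip-level
// rate error estimate agrees that things are getting worse, and the result is
// kept within [1 - adj_limit, 1 + adj_limit].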
262 
263 static const double q_div_term[(QINDEX_RANGE >> 4) + 1] = {
264   18.0, 30.0, 38.0, 44.0, 47.0, 50.0, 52.0, 54.0, 56.0,
265   58.0, 60.0, 62.0, 64.0, 66.0, 68.0, 70.0, 72.0
266 };
267 
268 #define EPMB_SCALER 1250000
269 static double calc_correction_factor(double err_per_mb, int q) {
270   double power_term = 0.90;
271   const int index = q >> 4;
272   const double divisor =
273       q_div_term[index] +
274       (((q_div_term[index + 1] - q_div_term[index]) * (q % 16)) / 16.0);
275   double error_term = EPMB_SCALER * pow(err_per_mb, power_term);
276   return error_term / divisor;
277 }
278 
279 // Similar to find_qindex_by_rate() function in ratectrl.c, but includes
280 // calculation of a correction_factor.
281 static int find_qindex_by_rate_with_correction(uint64_t desired_bits_per_mb,
282                                                aom_bit_depth_t bit_depth,
283                                                double error_per_mb,
284                                                double group_weight_factor,
285                                                int best_qindex,
286                                                int worst_qindex) {
287   assert(best_qindex <= worst_qindex);
288   int low = best_qindex;
289   int high = worst_qindex;
290 
291   while (low < high) {
292     const int mid = (low + high) >> 1;
293     const double q_factor = calc_correction_factor(error_per_mb, mid);
294     const double q = av1_convert_qindex_to_q(mid, bit_depth);
295     const uint64_t mid_bits_per_mb =
296         (uint64_t)((q_factor * group_weight_factor) / q);
297 
298     if (mid_bits_per_mb > desired_bits_per_mb) {
299       low = mid + 1;
300     } else {
301       high = mid;
302     }
303   }
304   return low;
305 }
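// The search above assumes the modelled bits per mb,
// calc_correction_factor(err, q) * group_weight_factor / q, is non-increasing
// in q. Under that assumption it returns the lowest qindex in
// [best_qindex, worst_qindex] whose predicted rate does not exceed
// desired_bits_per_mb, or worst_qindex if no such qindex exists.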
306 
307 /*!\brief Choose a target maximum Q for a group of frames
308  *
309  * \ingroup rate_control
310  *
311  * This function is used to estimate a suitable maximum Q for a
312  * group of frames. Initially it is called to get a crude estimate
313  * for the whole clip. It is then called for each ARF/GF group to get
314  * a revised estimate for that group.
315  *
316  * \param[in]    cpi                 Top-level encoder structure
317  * \param[in]    av_frame_err        The average per frame coded error score
318  *                                   for frames making up this section/group.
319  * \param[in]    inactive_zone       Used to mask off or ignore part of the
320  *                                   frame. The most common use case is where
321  *                                   a wide format video (e.g. 16:9) is
322  *                                   letter-boxed into a more square format.
323  *                                   Here we want to ignore the bands at the
324  *                                   top and bottom.
325  * \param[in]    av_target_bandwidth The target bits per frame
326  *
327  * \return The maximum Q for frames in the group.
328  */
329 static int get_twopass_worst_quality(AV1_COMP *cpi, const double av_frame_err,
330                                      double inactive_zone,
331                                      int av_target_bandwidth) {
332   const RATE_CONTROL *const rc = &cpi->rc;
333   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
334   const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
335   inactive_zone = fclamp(inactive_zone, 0.0, 0.9999);
336 
337   if (av_target_bandwidth <= 0) {
338     return rc->worst_quality;  // Highest value allowed
339   } else {
340     const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
341                             ? cpi->initial_mbs
342                             : cpi->common.mi_params.MBs;
343     const int active_mbs = AOMMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
344     const double av_err_per_mb = av_frame_err / (1.0 - inactive_zone);
345     const uint64_t target_norm_bits_per_mb =
346         ((uint64_t)av_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
347     int rate_err_tol = AOMMIN(rc_cfg->under_shoot_pct, rc_cfg->over_shoot_pct);
348     const double size_factor =
349         (active_mbs < 500) ? 0.925 : ((active_mbs > 3000) ? 1.05 : 1.0);
350     const double speed_factor =
351         AOMMIN(1.02, (0.975 + (0.005 * cpi->oxcf.speed)));
352 
353     // Update bpm correction factor based on previous GOP rate error.
354     twopass_update_bpm_factor(cpi, rate_err_tol);
355 
356     // Try and pick a max Q that will be high enough to encode the
357     // content at the given rate.
358     int q = find_qindex_by_rate_with_correction(
359         target_norm_bits_per_mb, cpi->common.seq_params->bit_depth,
360         av_err_per_mb,
361         cpi->ppi->twopass.bpm_factor * speed_factor * size_factor,
362         rc->best_quality, rc->worst_quality);
363 
364     // Restriction on active max q for constrained quality mode.
365     if (rc_cfg->mode == AOM_CQ) q = AOMMAX(q, rc_cfg->cq_level);
366     return q;
367   }
368 }
369 
370 #define INTRA_PART 0.005
371 #define DEFAULT_DECAY_LIMIT 0.75
372 #define LOW_SR_DIFF_TRHESH 0.01
373 #define NCOUNT_FRAME_II_THRESH 5.0
374 #define LOW_CODED_ERR_PER_MB 0.01
375 
376 /* This function considers how the quality of prediction may be deteriorating
377  * with distance. It compares the coded error for the last frame and the
378  * second reference frame (usually two frames old) and also applies a factor
379  * based on the extent of INTRA coding.
380  *
381  * The decay factor is then used to reduce the contribution of frames further
382  * from the alt-ref or golden frame, to the bitrate boost calculation for that
383  * alt-ref or golden frame.
384  */
385 static double get_sr_decay_rate(const FIRSTPASS_STATS *frame) {
386   double sr_diff = (frame->sr_coded_error - frame->coded_error);
387   double sr_decay = 1.0;
388   double modified_pct_inter;
389   double modified_pcnt_intra;
390 
391   modified_pct_inter = frame->pcnt_inter;
392   if ((frame->coded_error > LOW_CODED_ERR_PER_MB) &&
393       ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
394        (double)NCOUNT_FRAME_II_THRESH)) {
395     modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral;
396   }
397   modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
398 
399   if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
400     double sr_diff_part = ((sr_diff * 0.25) / frame->intra_error);
401     sr_decay = 1.0 - sr_diff_part - (INTRA_PART * modified_pcnt_intra);
402   }
403   return AOMMAX(sr_decay, DEFAULT_DECAY_LIMIT);
404 }
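// Illustrative example (hypothetical numbers): coded_error = 100,
// sr_coded_error = 120 and intra_error = 400 give
// sr_diff_part = (20 * 0.25) / 400 = 0.0125; with 10% effective intra coding
// sr_decay = 1.0 - 0.0125 - (0.005 * 10) = 0.9375, which is above
// DEFAULT_DECAY_LIMIT and is therefore returned unchanged.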
405 
406 // This function gives an estimate of how badly we believe the prediction
407 // quality is decaying from frame to frame.
408 static double get_zero_motion_factor(const FIRSTPASS_STATS *frame) {
409   const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
410   double sr_decay = get_sr_decay_rate(frame);
411   return AOMMIN(sr_decay, zero_motion_pct);
412 }
413 
414 #define DEFAULT_ZM_FACTOR 0.5
415 static double get_prediction_decay_rate(const FIRSTPASS_STATS *frame_stats) {
416   const double sr_decay_rate = get_sr_decay_rate(frame_stats);
417   double zero_motion_factor =
418       DEFAULT_ZM_FACTOR * (frame_stats->pcnt_inter - frame_stats->pcnt_motion);
419 
420   // Clamp value to range 0.0 to 1.0
421   // This should happen anyway if input values are sensibly clamped but checked
422   // here just in case.
423   if (zero_motion_factor > 1.0)
424     zero_motion_factor = 1.0;
425   else if (zero_motion_factor < 0.0)
426     zero_motion_factor = 0.0;
427 
428   return AOMMAX(zero_motion_factor,
429                 (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
430 }
431 
432 // Function to test for a condition where a complex transition is followed
433 // by a static section. For example in slide shows where there is a fade
434 // between slides. This is to help with more optimal kf and gf positioning.
435 static int detect_transition_to_still(const FIRSTPASS_INFO *firstpass_info,
436                                       int next_stats_index,
437                                       const int min_gf_interval,
438                                       const int frame_interval,
439                                       const int still_interval,
440                                       const double loop_decay_rate,
441                                       const double last_decay_rate) {
442   // Break clause to detect very still sections after motion
443   // For example a static image after a fade or other transition
444   // instead of a clean scene cut.
445   if (frame_interval > min_gf_interval && loop_decay_rate >= 0.999 &&
446       last_decay_rate < 0.9) {
447     int stats_left =
448         av1_firstpass_info_future_count(firstpass_info, next_stats_index);
449     if (stats_left >= still_interval) {
450       int j;
451       // Look ahead a few frames to see if static condition persists...
452       for (j = 0; j < still_interval; ++j) {
453         const FIRSTPASS_STATS *stats =
454             av1_firstpass_info_peek(firstpass_info, next_stats_index + j);
455         if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
456       }
457       // Only if it does do we signal a transition to still.
458       return j == still_interval;
459     }
460   }
461   return 0;
462 }
463 
464 // This function detects a flash through the high relative pcnt_second_ref
465 // score in the frame following a flash frame. The offset passed in should
466 // reflect this.
467 static int detect_flash(const TWO_PASS *twopass,
468                         const TWO_PASS_FRAME *twopass_frame, const int offset) {
469   const FIRSTPASS_STATS *const next_frame =
470       read_frame_stats(twopass, twopass_frame, offset);
471 
472   // What we are looking for here is a situation where there is a
473   // brief break in prediction (such as a flash) but subsequent frames
474   // are reasonably well predicted by an earlier (pre flash) frame.
475   // The recovery after a flash is indicated by a high pcnt_second_ref
476   // compared to pcnt_inter.
477   return next_frame != NULL &&
478          next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
479          next_frame->pcnt_second_ref >= 0.5;
480 }
481 
482 // Update the motion related elements to the GF arf boost calculation.
483 static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
484                                           GF_GROUP_STATS *gf_stats, double f_w,
485                                           double f_h) {
486   const double pct = stats->pcnt_motion;
487 
488   // Accumulate Motion In/Out of frame stats.
489   gf_stats->this_frame_mv_in_out = stats->mv_in_out_count * pct;
490   gf_stats->mv_in_out_accumulator += gf_stats->this_frame_mv_in_out;
491   gf_stats->abs_mv_in_out_accumulator += fabs(gf_stats->this_frame_mv_in_out);
492 
493   // Accumulate a measure of how uniform (or conversely how random) the motion
494   // field is (a ratio of abs(mv) / mv).
495   if (pct > 0.05) {
496     const double mvr_ratio =
497         fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
498     const double mvc_ratio =
499         fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
500 
501     gf_stats->mv_ratio_accumulator +=
502         pct *
503         (mvr_ratio < stats->mvr_abs * f_h ? mvr_ratio : stats->mvr_abs * f_h);
504     gf_stats->mv_ratio_accumulator +=
505         pct *
506         (mvc_ratio < stats->mvc_abs * f_w ? mvc_ratio : stats->mvc_abs * f_w);
507   }
508 }
509 
510 static void accumulate_this_frame_stats(const FIRSTPASS_STATS *stats,
511                                         const double mod_frame_err,
512                                         GF_GROUP_STATS *gf_stats) {
513   gf_stats->gf_group_err += mod_frame_err;
514 #if GROUP_ADAPTIVE_MAXQ
515   gf_stats->gf_group_raw_error += stats->coded_error;
516 #endif
517   gf_stats->gf_group_skip_pct += stats->intra_skip_pct;
518   gf_stats->gf_group_inactive_zone_rows += stats->inactive_zone_rows;
519 }
520 
521 static void accumulate_next_frame_stats(const FIRSTPASS_STATS *stats,
522                                         const int flash_detected,
523                                         const int frames_since_key,
524                                         const int cur_idx,
525                                         GF_GROUP_STATS *gf_stats, int f_w,
526                                         int f_h) {
527   accumulate_frame_motion_stats(stats, gf_stats, f_w, f_h);
528   // sum up the metric values of current gf group
529   gf_stats->avg_sr_coded_error += stats->sr_coded_error;
530   gf_stats->avg_pcnt_second_ref += stats->pcnt_second_ref;
531   gf_stats->avg_new_mv_count += stats->new_mv_count;
532   gf_stats->avg_wavelet_energy += stats->frame_avg_wavelet_energy;
533   if (fabs(stats->raw_error_stdev) > 0.000001) {
534     gf_stats->non_zero_stdev_count++;
535     gf_stats->avg_raw_err_stdev += stats->raw_error_stdev;
536   }
537 
538   // Accumulate the effect of prediction quality decay
539   if (!flash_detected) {
540     gf_stats->last_loop_decay_rate = gf_stats->loop_decay_rate;
541     gf_stats->loop_decay_rate = get_prediction_decay_rate(stats);
542 
543     gf_stats->decay_accumulator =
544         gf_stats->decay_accumulator * gf_stats->loop_decay_rate;
545 
546     // Monitor for static sections.
547     if ((frames_since_key + cur_idx - 1) > 1) {
548       gf_stats->zero_motion_accumulator = AOMMIN(
549           gf_stats->zero_motion_accumulator, get_zero_motion_factor(stats));
550     }
551   }
552 }
553 
554 static void average_gf_stats(const int total_frame, GF_GROUP_STATS *gf_stats) {
555   if (total_frame) {
556     gf_stats->avg_sr_coded_error /= total_frame;
557     gf_stats->avg_pcnt_second_ref /= total_frame;
558     gf_stats->avg_new_mv_count /= total_frame;
559     gf_stats->avg_wavelet_energy /= total_frame;
560   }
561 
562   if (gf_stats->non_zero_stdev_count)
563     gf_stats->avg_raw_err_stdev /= gf_stats->non_zero_stdev_count;
564 }
565 
566 #define BOOST_FACTOR 12.5
567 static double baseline_err_per_mb(const FRAME_INFO *frame_info) {
568   unsigned int screen_area = frame_info->frame_height * frame_info->frame_width;
569 
570   // Use a different error per mb factor for calculating boost for
571   //  different formats.
572   if (screen_area <= 640 * 360) {
573     return 500.0;
574   } else {
575     return 1000.0;
576   }
577 }
578 
579 static double calc_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
580                                const FRAME_INFO *frame_info,
581                                const FIRSTPASS_STATS *this_frame,
582                                double this_frame_mv_in_out, double max_boost) {
583   double frame_boost;
584   const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
585                                             frame_info->bit_depth);
586   const double boost_q_correction = AOMMIN((0.5 + (lq * 0.015)), 1.5);
587   const double active_area = calculate_active_area(frame_info, this_frame);
588 
589   // Underlying boost factor is based on inter error ratio.
590   frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
591                        this_frame->intra_error * active_area) /
592                 DOUBLE_DIVIDE_CHECK(this_frame->coded_error);
593   frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction;
594 
595   // Increase boost for frames where new data enters the frame (e.g. zoom out).
596   // Slightly reduce boost if there is a net balance of motion out of the frame
597   // (zoom in). The range for this_frame_mv_in_out is -1.0 to +1.0.
598   if (this_frame_mv_in_out > 0.0)
599     frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
600   // In the extreme case the boost is halved.
601   else
602     frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
603 
604   return AOMMIN(frame_boost, max_boost * boost_q_correction);
605 }
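// Illustrative example (hypothetical numbers): with an active area of 1.0,
// intra_error = 4000, coded_error = 400 and boost_q_correction = 1.0, the
// underlying ratio is 4000 / 400 = 10, giving frame_boost = 10 * 12.5 = 125
// before the motion in/out adjustment; the result is then capped at
// max_boost * boost_q_correction (max_boost is GF_MAX_BOOST when called from
// av1_calc_arf_boost()).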
606 
607 static double calc_kf_frame_boost(const PRIMARY_RATE_CONTROL *p_rc,
608                                   const FRAME_INFO *frame_info,
609                                   const FIRSTPASS_STATS *this_frame,
610                                   double *sr_accumulator, double max_boost) {
611   double frame_boost;
612   const double lq = av1_convert_qindex_to_q(p_rc->avg_frame_qindex[INTER_FRAME],
613                                             frame_info->bit_depth);
614   const double boost_q_correction = AOMMIN((0.50 + (lq * 0.015)), 2.00);
615   const double active_area = calculate_active_area(frame_info, this_frame);
616 
617   // Underlying boost factor is based on inter error ratio.
618   frame_boost = AOMMAX(baseline_err_per_mb(frame_info) * active_area,
619                        this_frame->intra_error * active_area) /
620                 DOUBLE_DIVIDE_CHECK(
621                     (this_frame->coded_error + *sr_accumulator) * active_area);
622 
623   // Update the accumulator for second ref error difference.
624   // This is intended to give an indication of how much the coded error is
625   // increasing over time.
626   *sr_accumulator += (this_frame->sr_coded_error - this_frame->coded_error);
627   *sr_accumulator = AOMMAX(0.0, *sr_accumulator);
628 
629   // Q correction and scaling
630   // The 40.0 value here is an experimentally derived baseline minimum.
631   // This value is in line with the minimum per frame boost in the alt_ref
632   // boost calculation.
633   frame_boost = ((frame_boost + 40.0) * boost_q_correction);
634 
635   return AOMMIN(frame_boost, max_boost * boost_q_correction);
636 }
637 
638 static int get_projected_gfu_boost(const PRIMARY_RATE_CONTROL *p_rc,
639                                    int gfu_boost, int frames_to_project,
640                                    int num_stats_used_for_gfu_boost) {
641   /*
642    * If frames_to_project is equal to num_stats_used_for_gfu_boost,
643    * it means that gfu_boost was calculated over frames_to_project to
644    * begin with (i.e. all required stats were available), so return
645    * the original boost.
646    */
647   if (num_stats_used_for_gfu_boost >= frames_to_project) return gfu_boost;
648 
649   double min_boost_factor = sqrt(p_rc->baseline_gf_interval);
650   // Get the current tpl factor (number of frames = frames_to_project).
651   double tpl_factor = av1_get_gfu_boost_projection_factor(
652       min_boost_factor, MAX_GFUBOOST_FACTOR, frames_to_project);
653   // Get the tpl factor when number of frames = num_stats_used_for_prior_boost.
654   double tpl_factor_num_stats = av1_get_gfu_boost_projection_factor(
655       min_boost_factor, MAX_GFUBOOST_FACTOR, num_stats_used_for_gfu_boost);
656   int projected_gfu_boost =
657       (int)rint((tpl_factor * gfu_boost) / tpl_factor_num_stats);
658   return projected_gfu_boost;
659 }
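// Illustrative behaviour of the projection above: if the boost was
// accumulated over only half of the frames the group will actually span
// (e.g. because look-ahead stats ran out), it is scaled up by the ratio of
// the projection factors for the full and observed frame counts rather than
// by a plain 2x, so short observation windows are not over-extrapolated.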
660 
661 #define GF_MAX_BOOST 90.0
662 #define GF_MIN_BOOST 50
663 #define MIN_DECAY_FACTOR 0.01
664 int av1_calc_arf_boost(const TWO_PASS *twopass,
665                        const TWO_PASS_FRAME *twopass_frame,
666                        const PRIMARY_RATE_CONTROL *p_rc, FRAME_INFO *frame_info,
667                        int offset, int f_frames, int b_frames,
668                        int *num_fpstats_used, int *num_fpstats_required,
669                        int project_gfu_boost) {
670   int i;
671   GF_GROUP_STATS gf_stats;
672   init_gf_stats(&gf_stats);
673   double boost_score = (double)NORMAL_BOOST;
674   int arf_boost;
675   int flash_detected = 0;
676   if (num_fpstats_used) *num_fpstats_used = 0;
677 
678   // Search forward from the proposed arf/next gf position.
679   for (i = 0; i < f_frames; ++i) {
680     const FIRSTPASS_STATS *this_frame =
681         read_frame_stats(twopass, twopass_frame, i + offset);
682     if (this_frame == NULL) break;
683 
684     // Update the motion related elements to the boost calculation.
685     accumulate_frame_motion_stats(this_frame, &gf_stats,
686                                   frame_info->frame_width,
687                                   frame_info->frame_height);
688 
689     // We want to discount the flash frame itself and the recovery
690     // frame that follows as both will have poor scores.
691     flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
692                      detect_flash(twopass, twopass_frame, i + offset + 1);
693 
694     // Accumulate the effect of prediction quality decay.
695     if (!flash_detected) {
696       gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
697       gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
698                                        ? MIN_DECAY_FACTOR
699                                        : gf_stats.decay_accumulator;
700     }
701 
702     boost_score +=
703         gf_stats.decay_accumulator *
704         calc_frame_boost(p_rc, frame_info, this_frame,
705                          gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
706     if (num_fpstats_used) (*num_fpstats_used)++;
707   }
708 
709   arf_boost = (int)boost_score;
710 
711   // Reset for backward looking loop.
712   boost_score = 0.0;
713   init_gf_stats(&gf_stats);
714   // Search backward towards last gf position.
715   for (i = -1; i >= -b_frames; --i) {
716     const FIRSTPASS_STATS *this_frame =
717         read_frame_stats(twopass, twopass_frame, i + offset);
718     if (this_frame == NULL) break;
719 
720     // Update the motion related elements to the boost calculation.
721     accumulate_frame_motion_stats(this_frame, &gf_stats,
722                                   frame_info->frame_width,
723                                   frame_info->frame_height);
724 
725     // We want to discount the flash frame itself and the recovery
726     // frame that follows as both will have poor scores.
727     flash_detected = detect_flash(twopass, twopass_frame, i + offset) ||
728                      detect_flash(twopass, twopass_frame, i + offset + 1);
729 
730     // Cumulative effect of prediction quality decay.
731     if (!flash_detected) {
732       gf_stats.decay_accumulator *= get_prediction_decay_rate(this_frame);
733       gf_stats.decay_accumulator = gf_stats.decay_accumulator < MIN_DECAY_FACTOR
734                                        ? MIN_DECAY_FACTOR
735                                        : gf_stats.decay_accumulator;
736     }
737 
738     boost_score +=
739         gf_stats.decay_accumulator *
740         calc_frame_boost(p_rc, frame_info, this_frame,
741                          gf_stats.this_frame_mv_in_out, GF_MAX_BOOST);
742     if (num_fpstats_used) (*num_fpstats_used)++;
743   }
744   arf_boost += (int)boost_score;
745 
746   if (project_gfu_boost) {
747     assert(num_fpstats_required != NULL);
748     assert(num_fpstats_used != NULL);
749     *num_fpstats_required = f_frames + b_frames;
750     arf_boost = get_projected_gfu_boost(p_rc, arf_boost, *num_fpstats_required,
751                                         *num_fpstats_used);
752   }
753 
754   if (arf_boost < ((b_frames + f_frames) * GF_MIN_BOOST))
755     arf_boost = ((b_frames + f_frames) * GF_MIN_BOOST);
756 
757   return arf_boost;
758 }
759 
760 // Calculate a section intra ratio used in setting max loop filter.
761 static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin,
762                                          const FIRSTPASS_STATS *end,
763                                          int section_length) {
764   const FIRSTPASS_STATS *s = begin;
765   double intra_error = 0.0;
766   double coded_error = 0.0;
767   int i = 0;
768 
769   while (s < end && i < section_length) {
770     intra_error += s->intra_error;
771     coded_error += s->coded_error;
772     ++s;
773     ++i;
774   }
775 
776   return (int)(intra_error / DOUBLE_DIVIDE_CHECK(coded_error));
777 }
778 
779 /*!\brief Calculates the bit target for this GF/ARF group
780  *
781  * \ingroup rate_control
782  *
783  * Calculates the total bits to allocate in this GF/ARF group.
784  *
785  * \param[in]    cpi              Top-level encoder structure
786  * \param[in]    gf_group_err     Cumulative coded error score for the
787  *                                frames making up this group.
788  *
789  * \return The target total number of bits for this GF/ARF group.
790  */
791 static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
792                                              double gf_group_err) {
793   const RATE_CONTROL *const rc = &cpi->rc;
794   const PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
795   const TWO_PASS *const twopass = &cpi->ppi->twopass;
796   const int max_bits = frame_max_bits(rc, &cpi->oxcf);
797   int64_t total_group_bits;
798 
799   // Calculate the bits to be allocated to the group as a whole.
800   if ((twopass->kf_group_bits > 0) && (twopass->kf_group_error_left > 0)) {
801     total_group_bits = (int64_t)(twopass->kf_group_bits *
802                                  (gf_group_err / twopass->kf_group_error_left));
803   } else {
804     total_group_bits = 0;
805   }
806 
807   // Clamp odd edge cases.
808   total_group_bits = (total_group_bits < 0) ? 0
809                      : (total_group_bits > twopass->kf_group_bits)
810                          ? twopass->kf_group_bits
811                          : total_group_bits;
812 
813   // Clip based on user supplied data rate variability limit.
814   if (total_group_bits > (int64_t)max_bits * p_rc->baseline_gf_interval)
815     total_group_bits = (int64_t)max_bits * p_rc->baseline_gf_interval;
816 
817   return total_group_bits;
818 }
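// Illustrative example (hypothetical numbers): if the remaining KF group
// budget is 1,000,000 bits and this GF group accounts for 30% of the
// remaining modified error (kf_group_error_left), it is initially assigned
// 300,000 bits, then clamped to the KF group budget and to
// max_bits * baseline_gf_interval.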
819 
820 // Calculate the number of bits to assign to boosted frames in a group.
821 static int calculate_boost_bits(int frame_count, int boost,
822                                 int64_t total_group_bits) {
823   int allocation_chunks;
824 
825   // return 0 for invalid inputs (could arise e.g. through rounding errors)
826   if (!boost || (total_group_bits <= 0)) return 0;
827 
828   if (frame_count <= 0) return (int)(AOMMIN(total_group_bits, INT_MAX));
829 
830   allocation_chunks = (frame_count * 100) + boost;
831 
832   // Prevent overflow.
833   if (boost > 1023) {
834     int divisor = boost >> 10;
835     boost /= divisor;
836     allocation_chunks /= divisor;
837   }
838 
839   // Calculate the number of extra bits for use in the boosted frame or frames.
840   return AOMMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
841                 0);
842 }
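// Illustrative example (hypothetical numbers): frame_count = 15 and
// boost = 300 give allocation_chunks = 1500 + 300 = 1800, so the boosted
// frame receives 300 / 1800 = 1/6 of total_group_bits as extra allocation.
// calculate_boost_factor() below performs the inverse mapping from a bit
// budget back to an equivalent boost value.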
843 
844 // Calculate the boost factor based on the number of bits assigned, i.e. the
845 // inverse of calculate_boost_bits().
846 static int calculate_boost_factor(int frame_count, int bits,
847                                   int64_t total_group_bits) {
848   return (int)(100.0 * frame_count * bits / (total_group_bits - bits));
849 }
850 
851 // Reduce the number of bits assigned to keyframe or arf if necessary, to
852 // prevent bitrate spikes that may break level constraints.
853 // frame_type: 0: keyframe; 1: arf.
854 static int adjust_boost_bits_for_target_level(const AV1_COMP *const cpi,
855                                               RATE_CONTROL *const rc,
856                                               int bits_assigned,
857                                               int64_t group_bits,
858                                               int frame_type) {
859   const AV1_COMMON *const cm = &cpi->common;
860   const SequenceHeader *const seq_params = cm->seq_params;
861   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
862   const int temporal_layer_id = cm->temporal_layer_id;
863   const int spatial_layer_id = cm->spatial_layer_id;
864   for (int index = 0; index < seq_params->operating_points_cnt_minus_1 + 1;
865        ++index) {
866     if (!is_in_operating_point(seq_params->operating_point_idc[index],
867                                temporal_layer_id, spatial_layer_id)) {
868       continue;
869     }
870 
871     const AV1_LEVEL target_level =
872         cpi->ppi->level_params.target_seq_level_idx[index];
873     if (target_level >= SEQ_LEVELS) continue;
874 
875     assert(is_valid_seq_level_idx(target_level));
876 
877     const double level_bitrate_limit = av1_get_max_bitrate_for_level(
878         target_level, seq_params->tier[0], seq_params->profile);
879     const int target_bits_per_frame =
880         (int)(level_bitrate_limit / cpi->framerate);
881     if (frame_type == 0) {
882       // Maximum bits for keyframe is 8 times the target_bits_per_frame.
883       const int level_enforced_max_kf_bits = target_bits_per_frame * 8;
884       if (bits_assigned > level_enforced_max_kf_bits) {
885         const int frames = rc->frames_to_key - 1;
886         p_rc->kf_boost = calculate_boost_factor(
887             frames, level_enforced_max_kf_bits, group_bits);
888         bits_assigned =
889             calculate_boost_bits(frames, p_rc->kf_boost, group_bits);
890       }
891     } else if (frame_type == 1) {
892       // Maximum bits for arf is 4 times the target_bits_per_frame.
893       const int level_enforced_max_arf_bits = target_bits_per_frame * 4;
894       if (bits_assigned > level_enforced_max_arf_bits) {
895         p_rc->gfu_boost =
896             calculate_boost_factor(p_rc->baseline_gf_interval,
897                                    level_enforced_max_arf_bits, group_bits);
898         bits_assigned = calculate_boost_bits(p_rc->baseline_gf_interval,
899                                              p_rc->gfu_boost, group_bits);
900       }
901     } else {
902       assert(0);
903     }
904   }
905 
906   return bits_assigned;
907 }
908 
909 // Allocate bits to each frame in a GF / ARF group
910 static void allocate_gf_group_bits(GF_GROUP *gf_group,
911                                    PRIMARY_RATE_CONTROL *const p_rc,
912                                    RATE_CONTROL *const rc,
913                                    int64_t gf_group_bits, int gf_arf_bits,
914                                    int key_frame, int use_arf) {
915   static const double layer_fraction[MAX_ARF_LAYERS + 1] = { 1.0,  0.70, 0.55,
916                                                              0.60, 0.60, 1.0,
917                                                              1.0 };
918   int64_t total_group_bits = gf_group_bits;
919   int base_frame_bits;
920   const int gf_group_size = gf_group->size;
921   int layer_frames[MAX_ARF_LAYERS + 1] = { 0 };
922 
923   // For key frames the frame target rate is already set and it
924   // is also the golden frame.
925   // === [frame_index == 0] ===
926   int frame_index = !!key_frame;
927 
928   // Subtract the extra bits set aside for ARF frames from the Group Total
929   if (use_arf) total_group_bits -= gf_arf_bits;
930 
931   int num_frames =
932       AOMMAX(1, p_rc->baseline_gf_interval - (rc->frames_since_key == 0));
933   base_frame_bits = (int)(total_group_bits / num_frames);
934 
935   // Check the number of frames in each layer in case we have a
936   // non standard group length.
937   int max_arf_layer = gf_group->max_layer_depth - 1;
938   for (int idx = frame_index; idx < gf_group_size; ++idx) {
939     if ((gf_group->update_type[idx] == ARF_UPDATE) ||
940         (gf_group->update_type[idx] == INTNL_ARF_UPDATE)) {
941       layer_frames[gf_group->layer_depth[idx]]++;
942     }
943   }
944 
945   // Allocate extra bits to each ARF layer
946   int i;
947   int layer_extra_bits[MAX_ARF_LAYERS + 1] = { 0 };
948   assert(max_arf_layer <= MAX_ARF_LAYERS);
949   for (i = 1; i <= max_arf_layer; ++i) {
950     double fraction = (i == max_arf_layer) ? 1.0 : layer_fraction[i];
951     layer_extra_bits[i] =
952         (int)((gf_arf_bits * fraction) / AOMMAX(1, layer_frames[i]));
953     gf_arf_bits -= (int)(gf_arf_bits * fraction);
954   }
955 
956   // Now combine ARF layer and baseline bits to give total bits for each frame.
957   int arf_extra_bits;
958   for (int idx = frame_index; idx < gf_group_size; ++idx) {
959     switch (gf_group->update_type[idx]) {
960       case ARF_UPDATE:
961       case INTNL_ARF_UPDATE:
962         arf_extra_bits = layer_extra_bits[gf_group->layer_depth[idx]];
963         gf_group->bit_allocation[idx] =
964             (base_frame_bits > INT_MAX - arf_extra_bits)
965                 ? INT_MAX
966                 : (base_frame_bits + arf_extra_bits);
967         break;
968       case INTNL_OVERLAY_UPDATE:
969       case OVERLAY_UPDATE: gf_group->bit_allocation[idx] = 0; break;
970       default: gf_group->bit_allocation[idx] = base_frame_bits; break;
971     }
972   }
973 
974   // Set the frame following the current GOP to a 0 bit allocation. For ARF
975   // groups, this next frame is an overlay frame, which is the first frame of
976   // the next GOP. For a GF group, the next GOP will overwrite the rate
977   // allocation. Setting this frame to use 0 bits (out of the current GOP
978   // budget) simplifies the logic in reference frame management.
979   if (gf_group_size < MAX_STATIC_GF_GROUP_LENGTH)
980     gf_group->bit_allocation[gf_group_size] = 0;
981 }
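// Illustrative example of the ARF layer split above (hypothetical numbers):
// with gf_arf_bits = 10000 and max_arf_layer = 3, layer 1 is given 70%
// (7000 bits, divided among its frames), layer 2 gets 55% of the remaining
// 3000 (1650 bits), and the final layer takes everything left (1350 bits).
// ARF frames then receive base_frame_bits plus their layer share, regular
// frames receive base_frame_bits, and overlay frames receive 0.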
982 
983 // Returns true if KF group and GF group both are almost completely static.
984 static inline int is_almost_static(double gf_zero_motion, int kf_zero_motion,
985                                    int is_lap_enabled) {
986   if (is_lap_enabled) {
987     /*
988      * When LAP is enabled, kf_zero_motion is not reliable, so use a strict
989      * constraint on gf_zero_motion.
990      */
991     return (gf_zero_motion >= 0.999);
992   } else {
993     return (gf_zero_motion >= 0.995) &&
994            (kf_zero_motion >= STATIC_KF_GROUP_THRESH);
995   }
996 }
997 
998 #define ARF_ABS_ZOOM_THRESH 4.4
999 static inline int detect_gf_cut(AV1_COMP *cpi, int frame_index, int cur_start,
1000                                 int flash_detected, int active_max_gf_interval,
1001                                 int active_min_gf_interval,
1002                                 GF_GROUP_STATS *gf_stats) {
1003   RATE_CONTROL *const rc = &cpi->rc;
1004   TWO_PASS *const twopass = &cpi->ppi->twopass;
1005   AV1_COMMON *const cm = &cpi->common;
1006   // Motion breakout threshold for loop below depends on image size.
1007   const double mv_ratio_accumulator_thresh = (cm->height + cm->width) / 4.0;
1008 
1009   if (!flash_detected) {
1010     // Break clause to detect very still sections after motion. For example,
1011     // a static image after a fade or other transition.
1012 
1013     // TODO(angiebird): This is a temporary change, we will avoid using
1014     // twopass_frame.stats_in in the follow-up CL
1015     int index = (int)(cpi->twopass_frame.stats_in -
1016                       twopass->stats_buf_ctx->stats_in_start);
1017     if (detect_transition_to_still(&twopass->firstpass_info, index,
1018                                    rc->min_gf_interval, frame_index - cur_start,
1019                                    5, gf_stats->loop_decay_rate,
1020                                    gf_stats->last_loop_decay_rate)) {
1021       return 1;
1022     }
1023   }
1024 
1025   // Some conditions to breakout after min interval.
1026   if (frame_index - cur_start >= active_min_gf_interval &&
1027       // If possible don't break very close to a kf
1028       (rc->frames_to_key - frame_index >= rc->min_gf_interval) &&
1029       ((frame_index - cur_start) & 0x01) && !flash_detected &&
1030       (gf_stats->mv_ratio_accumulator > mv_ratio_accumulator_thresh ||
1031        gf_stats->abs_mv_in_out_accumulator > ARF_ABS_ZOOM_THRESH)) {
1032     return 1;
1033   }
1034 
1035   // If almost totally static, we will not use the max GF length later,
1036   // so we can continue for more frames.
1037   if (((frame_index - cur_start) >= active_max_gf_interval + 1) &&
1038       !is_almost_static(gf_stats->zero_motion_accumulator,
1039                         twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled)) {
1040     return 1;
1041   }
1042   return 0;
1043 }
1044 
1045 static int is_shorter_gf_interval_better(
1046     AV1_COMP *cpi, const EncodeFrameParams *frame_params) {
1047   const RATE_CONTROL *const rc = &cpi->rc;
1048   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
1049   int gop_length_decision_method = cpi->sf.tpl_sf.gop_length_decision_method;
1050   int shorten_gf_interval;
1051 
1052   av1_tpl_preload_rc_estimate(cpi, frame_params);
1053 
1054   if (gop_length_decision_method == 2) {
1055     // GF group length is decided based on GF boost and tpl stats of ARFs from
1056     // base layer, (base+1) layer.
1057     shorten_gf_interval =
1058         (p_rc->gfu_boost <
1059          p_rc->num_stats_used_for_gfu_boost * GF_MIN_BOOST * 1.4) &&
1060         !av1_tpl_setup_stats(cpi, 3, frame_params);
1061   } else {
1062     int do_complete_tpl = 1;
1063     GF_GROUP *const gf_group = &cpi->ppi->gf_group;
1064     int is_temporal_filter_enabled =
1065         (rc->frames_since_key > 0 && gf_group->arf_index > -1);
1066 
1067     if (gop_length_decision_method == 1) {
1068       // Check if tpl stats of ARFs from base layer, (base+1) layer,
1069       // (base+2) layer can decide the GF group length.
1070       int gop_length_eval = av1_tpl_setup_stats(cpi, 2, frame_params);
1071 
1072       if (gop_length_eval != 2) {
1073         do_complete_tpl = 0;
1074         shorten_gf_interval = !gop_length_eval;
1075       }
1076     }
1077 
1078     if (do_complete_tpl) {
1079       // Decide GF group length based on complete tpl stats.
1080       shorten_gf_interval = !av1_tpl_setup_stats(cpi, 1, frame_params);
1081       // Tpl stats is reused when the ARF is temporally filtered and GF
1082       // interval is not shortened.
1083       if (is_temporal_filter_enabled && !shorten_gf_interval) {
1084         cpi->skip_tpl_setup_stats = 1;
1085 #if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
1086         assert(cpi->gf_frame_index == 0);
1087         av1_vbr_rc_update_q_index_list(&cpi->vbr_rc_info, &cpi->ppi->tpl_data,
1088                                        gf_group,
1089                                        cpi->common.seq_params->bit_depth);
1090 #endif  // CONFIG_BITRATE_ACCURACY
1091       }
1092     }
1093   }
1094   return shorten_gf_interval;
1095 }
1096 
1097 #define MIN_SHRINK_LEN 6  // the minimum length of gf if we are shrinking
1098 #define SMOOTH_FILT_LEN 7
1099 #define HALF_FILT_LEN (SMOOTH_FILT_LEN / 2)
1100 #define WINDOW_SIZE 7
1101 #define HALF_WIN (WINDOW_SIZE / 2)
1102 
1103 // Smooth filter intra_error and coded_error in firstpass stats.
1104 // If stats[i].is_flash==1, the ith element should not be used in the filtering.
1105 static void smooth_filter_stats(const FIRSTPASS_STATS *stats, int start_idx,
1106                                 int last_idx, double *filt_intra_err,
1107                                 double *filt_coded_err) {
1108   // A 7-tap gaussian smooth filter
1109   static const double smooth_filt[SMOOTH_FILT_LEN] = { 0.006, 0.061, 0.242,
1110                                                        0.383, 0.242, 0.061,
1111                                                        0.006 };
1112   int i, j;
1113   for (i = start_idx; i <= last_idx; i++) {
1114     double total_wt = 0;
1115     for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1116       int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1117       if (stats[idx].is_flash) continue;
1118 
1119       filt_intra_err[i] +=
1120           smooth_filt[j + HALF_FILT_LEN] * stats[idx].intra_error;
1121       total_wt += smooth_filt[j + HALF_FILT_LEN];
1122     }
1123     if (total_wt > 0.01) {
1124       filt_intra_err[i] /= total_wt;
1125     } else {
1126       filt_intra_err[i] = stats[i].intra_error;
1127     }
1128   }
1129   for (i = start_idx; i <= last_idx; i++) {
1130     double total_wt = 0;
1131     for (j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
1132       int idx = AOMMIN(AOMMAX(i + j, start_idx), last_idx);
1133       // Coded error involves idx and idx - 1.
1134       if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
1135 
1136       filt_coded_err[i] +=
1137           smooth_filt[j + HALF_FILT_LEN] * stats[idx].coded_error;
1138       total_wt += smooth_filt[j + HALF_FILT_LEN];
1139     }
1140     if (total_wt > 0.01) {
1141       filt_coded_err[i] /= total_wt;
1142     } else {
1143       filt_coded_err[i] = stats[i].coded_error;
1144     }
1145   }
1146 }
1147 
1148 // Calculate gradient
1149 static void get_gradient(const double *values, int start, int last,
1150                          double *grad) {
1151   if (start == last) {
1152     grad[start] = 0;
1153     return;
1154   }
1155   for (int i = start; i <= last; i++) {
1156     int prev = AOMMAX(i - 1, start);
1157     int next = AOMMIN(i + 1, last);
1158     grad[i] = (values[next] - values[prev]) / (next - prev);
1159   }
1160 }
1161 
1162 static int find_next_scenecut(const FIRSTPASS_STATS *const stats_start,
1163                               int first, int last) {
1164   // Identify unstable areas caused by scenecuts.
1165   // Find the max and 2nd max coded error, and the average over the remaining
1166   // frames. If there is only one frame that yields a huge coded error, it is
1167   // likely a scenecut.
1168   double this_ratio, max_prev_ratio, max_next_ratio, max_prev_coded,
1169       max_next_coded;
1170 
1171   if (last - first == 0) return -1;
1172 
1173   for (int i = first; i <= last; i++) {
1174     if (stats_start[i].is_flash || (i > 0 && stats_start[i - 1].is_flash))
1175       continue;
1176     double temp_intra = AOMMAX(stats_start[i].intra_error, 0.01);
1177     this_ratio = stats_start[i].coded_error / temp_intra;
1178     // find the avg ratio in the preceding neighborhood
1179     max_prev_ratio = 0;
1180     max_prev_coded = 0;
1181     for (int j = AOMMAX(first, i - HALF_WIN); j < i; j++) {
1182       if (stats_start[j].is_flash || (j > 0 && stats_start[j - 1].is_flash))
1183         continue;
1184       temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1185       double temp_ratio = stats_start[j].coded_error / temp_intra;
1186       if (temp_ratio > max_prev_ratio) {
1187         max_prev_ratio = temp_ratio;
1188       }
1189       if (stats_start[j].coded_error > max_prev_coded) {
1190         max_prev_coded = stats_start[j].coded_error;
1191       }
1192     }
1193     // find the max coded/intra ratio and coded error in the following window
1194     max_next_ratio = 0;
1195     max_next_coded = 0;
1196     for (int j = i + 1; j <= AOMMIN(i + HALF_WIN, last); j++) {
1197       if (stats_start[j].is_flash || (j > 0 && stats_start[j - 1].is_flash))
1198         continue;
1199       temp_intra = AOMMAX(stats_start[j].intra_error, 0.01);
1200       double temp_ratio = stats_start[j].coded_error / temp_intra;
1201       if (temp_ratio > max_next_ratio) {
1202         max_next_ratio = temp_ratio;
1203       }
1204       if (stats_start[j].coded_error > max_next_coded) {
1205         max_next_coded = stats_start[j].coded_error;
1206       }
1207     }
1208 
1209     if (max_prev_ratio < 0.001 && max_next_ratio < 0.001) {
1210       // the ratios are very small, only check a small fixed threshold
1211       if (this_ratio < 0.02) continue;
1212     } else {
1213       // check if this frame has a larger ratio than the neighborhood
1214       double max_sr = stats_start[i].sr_coded_error;
1215       if (i < last) max_sr = AOMMAX(max_sr, stats_start[i + 1].sr_coded_error);
1216       double max_sr_fr_ratio =
1217           max_sr / AOMMAX(stats_start[i].coded_error, 0.01);
1218 
1219       if (max_sr_fr_ratio > 1.2) continue;
1220       if (this_ratio < 2 * AOMMAX(max_prev_ratio, max_next_ratio) &&
1221           stats_start[i].coded_error <
1222               2 * AOMMAX(max_prev_coded, max_next_coded)) {
1223         continue;
1224       }
1225     }
1226     return i;
1227   }
1228   return -1;
1229 }
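
// Summary of the decision above (an interpretive sketch of the thresholds):
// frame i is reported as a scenecut when its second-reference error is not
// much larger than its first-reference error (max_sr_fr_ratio <= 1.2, ruling
// out short flashes), and either its coded/intra ratio is at least twice the
// largest ratio in the HALF_WIN frames before or after it, or its coded error
// is at least twice the largest coded error in those windows. When both
// neighborhood maxima are near zero, those comparisons are replaced by a
// fixed threshold: the frame is reported if its coded/intra ratio >= 0.02.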
1230 
1231 // Remove the region with index next_region.
1232 // parameter merge: 0: merge with previous; 1: merge with next; 2:
1233 // merge with both, take type from previous if possible
1234 // After removing, next_region will be the index of the next region.
1235 static void remove_region(int merge, REGIONS *regions, int *num_regions,
1236                           int *next_region) {
1237   int k = *next_region;
1238   assert(k < *num_regions);
1239   if (*num_regions == 1) {
1240     *num_regions = 0;
1241     return;
1242   }
1243   if (k == 0) {
1244     merge = 1;
1245   } else if (k == *num_regions - 1) {
1246     merge = 0;
1247   }
1248   int num_merge = (merge == 2) ? 2 : 1;
1249   switch (merge) {
1250     case 0:
1251       regions[k - 1].last = regions[k].last;
1252       *next_region = k;
1253       break;
1254     case 1:
1255       regions[k + 1].start = regions[k].start;
1256       *next_region = k + 1;
1257       break;
1258     case 2:
1259       regions[k - 1].last = regions[k + 1].last;
1260       *next_region = k;
1261       break;
1262     default: assert(0);
1263   }
1264   *num_regions -= num_merge;
1265   for (k = *next_region - (merge == 1); k < *num_regions; k++) {
1266     regions[k] = regions[k + num_merge];
1267   }
1268 }
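
// Worked example (illustrative): with regions A = [0, 9], B = [10, 14] and
// C = [15, 30], *num_regions == 3 and *next_region == 1 (B):
//   merge == 0: B folds into A -> A = [0, 14], C = [15, 30];
//               *num_regions == 2, *next_region == 1 (now C).
//   merge == 1: B folds into C -> A = [0, 9], C = [10, 30];
//               *num_regions == 2, *next_region == 2 (one past the merged C).
//   merge == 2: B and C fold into A -> A = [0, 30];
//               *num_regions == 1, *next_region == 1 (one past the merged A).
// In each case *next_region indexes the first region after the merge result.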
1269 
1270 // Insert a region of the given type into the region at index cur_region_idx.
1271 // Both start and last must lie within the current region. After insertion,
1272 // cur_region_idx points to the last region split from the original one.
1273 static void insert_region(int start, int last, REGION_TYPES type,
1274                           REGIONS *regions, int *num_regions,
1275                           int *cur_region_idx) {
1276   int k = *cur_region_idx;
1277   REGION_TYPES this_region_type = regions[k].type;
1278   int this_region_last = regions[k].last;
1279   int num_add = (start != regions[k].start) + (last != regions[k].last);
1280   // move the following regions further to the back
1281   for (int r = *num_regions - 1; r > k; r--) {
1282     regions[r + num_add] = regions[r];
1283   }
1284   *num_regions += num_add;
1285   if (start > regions[k].start) {
1286     regions[k].last = start - 1;
1287     k++;
1288     regions[k].start = start;
1289   }
1290   regions[k].type = type;
1291   if (last < this_region_last) {
1292     regions[k].last = last;
1293     k++;
1294     regions[k].start = last + 1;
1295     regions[k].last = this_region_last;
1296     regions[k].type = this_region_type;
1297   } else {
1298     regions[k].last = this_region_last;
1299   }
1300   *cur_region_idx = k;
1301 }
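
// Worked example (illustrative): if regions[k] covers [10, 30] with type
// STABLE_REGION and insert_region(15, 20, BLENDING_REGION, ...) is called
// with *cur_region_idx == k, the region is split into three pieces:
//   [10, 14] STABLE_REGION, [15, 20] BLENDING_REGION, [21, 30] STABLE_REGION
// *num_regions grows by 2 and *cur_region_idx advances to the last piece.
// If start or last coincides with the original boundaries, fewer pieces are
// created (num_add is then 1 or 0).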
1302 
1303 // Get the average of stats inside a region.
1304 static void analyze_region(const FIRSTPASS_STATS *stats, int k,
1305                            REGIONS *regions) {
1306   int i;
1307   regions[k].avg_cor_coeff = 0;
1308   regions[k].avg_sr_fr_ratio = 0;
1309   regions[k].avg_intra_err = 0;
1310   regions[k].avg_coded_err = 0;
  regions[k].avg_noise_var = 0;
1311 
1312   int check_first_sr = (k != 0);
1313 
1314   for (i = regions[k].start; i <= regions[k].last; i++) {
1315     if (i > regions[k].start || check_first_sr) {
1316       double num_frames =
1317           (double)(regions[k].last - regions[k].start + check_first_sr);
1318       double max_coded_error =
1319           AOMMAX(stats[i].coded_error, stats[i - 1].coded_error);
1320       double this_ratio =
1321           stats[i].sr_coded_error / AOMMAX(max_coded_error, 0.001);
1322       regions[k].avg_sr_fr_ratio += this_ratio / num_frames;
1323     }
1324 
1325     regions[k].avg_intra_err +=
1326         stats[i].intra_error / (double)(regions[k].last - regions[k].start + 1);
1327     regions[k].avg_coded_err +=
1328         stats[i].coded_error / (double)(regions[k].last - regions[k].start + 1);
1329 
1330     regions[k].avg_cor_coeff +=
1331         AOMMAX(stats[i].cor_coeff, 0.001) /
1332         (double)(regions[k].last - regions[k].start + 1);
1333     regions[k].avg_noise_var +=
1334         AOMMAX(stats[i].noise_var, 0.001) /
1335         (double)(regions[k].last - regions[k].start + 1);
1336   }
1337 }
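
// Note (descriptive): avg_sr_fr_ratio is the mean, over the frames counted,
// of sr_coded_error / MAX(coded_error[i], coded_error[i - 1]), i.e. how the
// second-reference error compares with the larger of the two most recent
// first-reference errors. The first frame of region 0 is excluded from this
// average (check_first_sr == 0 when k == 0) because its stats[i - 1] would
// fall outside the analyzed frames.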
1338 
1339 // Calculate the stats of every region.
1340 static void get_region_stats(const FIRSTPASS_STATS *stats, REGIONS *regions,
1341                              int num_regions) {
1342   for (int k = 0; k < num_regions; k++) {
1343     analyze_region(stats, k, regions);
1344   }
1345 }
1346 
1347 // Find tentative stable regions
1348 static int find_stable_regions(const FIRSTPASS_STATS *stats,
1349                                const double *grad_coded, int this_start,
1350                                int this_last, REGIONS *regions) {
1351   int i, j, k = 0;
1352   regions[k].start = this_start;
1353   for (i = this_start; i <= this_last; i++) {
1354     // Check mean and variance of stats in a window
1355     double mean_intra = 0.001, var_intra = 0.001;
1356     double mean_coded = 0.001, var_coded = 0.001;
1357     int count = 0;
1358     for (j = -HALF_WIN; j <= HALF_WIN; j++) {
1359       int idx = AOMMIN(AOMMAX(i + j, this_start), this_last);
1360       if (stats[idx].is_flash || (idx > 0 && stats[idx - 1].is_flash)) continue;
1361       mean_intra += stats[idx].intra_error;
1362       var_intra += stats[idx].intra_error * stats[idx].intra_error;
1363       mean_coded += stats[idx].coded_error;
1364       var_coded += stats[idx].coded_error * stats[idx].coded_error;
1365       count++;
1366     }
1367 
1368     REGION_TYPES cur_type;
1369     if (count > 0) {
1370       mean_intra /= (double)count;
1371       var_intra /= (double)count;
1372       mean_coded /= (double)count;
1373       var_coded /= (double)count;
1374       int is_intra_stable = (var_intra / (mean_intra * mean_intra) < 1.03);
1375       int is_coded_stable = (var_coded / (mean_coded * mean_coded) < 1.04 &&
1376                              fabs(grad_coded[i]) / mean_coded < 0.05) ||
1377                             mean_coded / mean_intra < 0.05;
1378       int is_coded_small = mean_coded < 0.5 * mean_intra;
1379       cur_type = (is_intra_stable && is_coded_stable && is_coded_small)
1380                      ? STABLE_REGION
1381                      : HIGH_VAR_REGION;
1382     } else {
1383       cur_type = HIGH_VAR_REGION;
1384     }
1385 
1386     // mark a new region if type changes
1387     if (i == regions[k].start) {
1388       // first frame in the region
1389       regions[k].type = cur_type;
1390     } else if (cur_type != regions[k].type) {
1391       // Append a new region
1392       regions[k].last = i - 1;
1393       regions[k + 1].start = i;
1394       regions[k + 1].type = cur_type;
1395       k++;
1396     }
1397   }
1398   regions[k].last = this_last;
1399   return k + 1;
1400 }
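
// Note on the thresholds above (an interpretive sketch): var_intra and
// var_coded accumulate the window's mean of squares (plus a 0.001 seed), not
// a true variance, so var / (mean * mean) is approximately
//   E[x^2] / E[x]^2 = 1 + CV^2,
// where CV is the coefficient of variation within the window. The 1.03 and
// 1.04 limits therefore admit windows whose relative deviation is roughly
// below 17% and 20% respectively. A frame is marked STABLE_REGION only if, in
// addition, the smoothed coded-error gradient is small relative to its mean
// (or coded error is tiny relative to intra error) and the mean coded error
// is below half of the mean intra error.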
1401 
1402 // Clean up regions that should be removed or merged.
1403 static void cleanup_regions(REGIONS *regions, int *num_regions) {
1404   int k = 0;
1405   while (k < *num_regions) {
1406     if ((k > 0 && regions[k - 1].type == regions[k].type &&
1407          regions[k].type != SCENECUT_REGION) ||
1408         regions[k].last < regions[k].start) {
1409       remove_region(0, regions, num_regions, &k);
1410     } else {
1411       k++;
1412     }
1413   }
1414 }
1415 
1416 // Remove regions of the given type that are shorter than the given length,
1417 // merging each with its neighboring regions.
1418 static void remove_short_regions(REGIONS *regions, int *num_regions,
1419                                  REGION_TYPES type, int length) {
1420   int k = 0;
1421   while (k < *num_regions && (*num_regions) > 1) {
1422     if ((regions[k].last - regions[k].start + 1 < length &&
1423          regions[k].type == type)) {
1424       // merge current region with the previous and next regions
1425       remove_region(2, regions, num_regions, &k);
1426     } else {
1427       k++;
1428     }
1429   }
1430   cleanup_regions(regions, num_regions);
1431 }
1432 
1433 static void adjust_unstable_region_bounds(const FIRSTPASS_STATS *stats,
1434                                           REGIONS *regions, int *num_regions) {
1435   int i, j, k;
1436   // Remove regions that are too short. Likely noise.
1437   remove_short_regions(regions, num_regions, STABLE_REGION, HALF_WIN);
1438   remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
1439 
1440   get_region_stats(stats, regions, *num_regions);
1441 
1442   // Adjust region boundaries. The thresholds are empirically obtained, but
1443   // overall the performance is not very sensitive to small changes to them.
1444   for (k = 0; k < *num_regions; k++) {
1445     if (regions[k].type == STABLE_REGION) continue;
1446     if (k > 0) {
1447       // Adjust previous boundary.
1448       // First find the average intra/coded error in the previous
1449       // neighborhood.
1450       double avg_intra_err = 0;
1451       const int starti = AOMMAX(regions[k - 1].last - WINDOW_SIZE + 1,
1452                                 regions[k - 1].start + 1);
1453       const int lasti = regions[k - 1].last;
1454       int counti = 0;
1455       for (i = starti; i <= lasti; i++) {
1456         avg_intra_err += stats[i].intra_error;
1457         counti++;
1458       }
1459       if (counti > 0) {
1460         avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
1461         int count_coded = 0, count_grad = 0;
1462         for (j = lasti + 1; j <= regions[k].last; j++) {
1463           const int intra_close =
1464               fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
1465           const int coded_small = stats[j].coded_error / avg_intra_err < 0.1;
1466           const int coeff_close = stats[j].cor_coeff > 0.995;
1467           if (!coeff_close || !coded_small) count_coded--;
1468           if (intra_close && count_coded >= 0 && count_grad >= 0) {
1469             // this frame probably belongs to the previous stable region
1470             regions[k - 1].last = j;
1471             regions[k].start = j + 1;
1472           } else {
1473             break;
1474           }
1475         }
1476       }
1477     }  // if k > 0
1478     if (k < *num_regions - 1) {
1479       // Adjust next boundary.
1480       // First find the average intra/coded error in the next neighborhood.
1481       double avg_intra_err = 0;
1482       const int starti = regions[k + 1].start;
1483       const int lasti = AOMMIN(regions[k + 1].last - 1,
1484                                regions[k + 1].start + WINDOW_SIZE - 1);
1485       int counti = 0;
1486       for (i = starti; i <= lasti; i++) {
1487         avg_intra_err += stats[i].intra_error;
1488         counti++;
1489       }
1490       if (counti > 0) {
1491         avg_intra_err = AOMMAX(avg_intra_err / (double)counti, 0.001);
1492         // At the boundary, coded error is large, but still the frame is stable
1493         int count_coded = 1, count_grad = 1;
1494         for (j = starti - 1; j >= regions[k].start; j--) {
1495           const int intra_close =
1496               fabs(stats[j].intra_error - avg_intra_err) / avg_intra_err < 0.1;
1497           const int coded_small =
1498               stats[j + 1].coded_error / avg_intra_err < 0.1;
1499           const int coeff_close = stats[j].cor_coeff > 0.995;
1500           if (!coeff_close || !coded_small) count_coded--;
1501           if (intra_close && count_coded >= 0 && count_grad >= 0) {
1502             // this frame probably belongs to the next stable region
1503             regions[k + 1].start = j;
1504             regions[k].last = j - 1;
1505           } else {
1506             break;
1507           }
1508         }
1509       }
1510     }  // if k < *num_regions - 1
1511   }    // end of loop over all regions
1512 
1513   cleanup_regions(regions, num_regions);
1514   remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
1515   get_region_stats(stats, regions, *num_regions);
1516 
1517   // If a stable region has a higher error than its neighboring high var
1518   // regions, or a lower average correlation, then it should be merged with
1519   // them.
1520   k = 0;
1521   while (k < *num_regions && (*num_regions) > 1) {
1522     if (regions[k].type == STABLE_REGION &&
1523         (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
1524         ((k > 0 &&  // previous regions
1525           (regions[k].avg_coded_err > regions[k - 1].avg_coded_err * 1.01 ||
1526            regions[k].avg_cor_coeff < regions[k - 1].avg_cor_coeff * 0.999)) &&
1527          (k < *num_regions - 1 &&  // next region
1528           (regions[k].avg_coded_err > regions[k + 1].avg_coded_err * 1.01 ||
1529            regions[k].avg_cor_coeff < regions[k + 1].avg_cor_coeff * 0.999)))) {
1530       // merge current region with the previous and next regions
1531       remove_region(2, regions, num_regions, &k);
1532       analyze_region(stats, k - 1, regions);
1533     } else if (regions[k].type == HIGH_VAR_REGION &&
1534                (regions[k].last - regions[k].start + 1) < 2 * WINDOW_SIZE &&
1535                ((k > 0 &&  // previous regions
1536                  (regions[k].avg_coded_err <
1537                       regions[k - 1].avg_coded_err * 0.99 ||
1538                   regions[k].avg_cor_coeff >
1539                       regions[k - 1].avg_cor_coeff * 1.001)) &&
1540                 (k < *num_regions - 1 &&  // next region
1541                  (regions[k].avg_coded_err <
1542                       regions[k + 1].avg_coded_err * 0.99 ||
1543                   regions[k].avg_cor_coeff >
1544                       regions[k + 1].avg_cor_coeff * 1.001)))) {
1545       // merge current region with the previous and next regions
1546       remove_region(2, regions, num_regions, &k);
1547       analyze_region(stats, k - 1, regions);
1548     } else {
1549       k++;
1550     }
1551   }
1552 
1553   remove_short_regions(regions, num_regions, STABLE_REGION, WINDOW_SIZE);
1554   remove_short_regions(regions, num_regions, HIGH_VAR_REGION, HALF_WIN);
1555 }
1556 
1557 // Identify blending regions.
1558 static void find_blending_regions(const FIRSTPASS_STATS *stats,
1559                                   REGIONS *regions, int *num_regions) {
1560   int i, k = 0;
1561   // Blending regions will have a large content change, and therefore a
1562   // large, consistent change in intra error.
1563   int count_stable = 0;
1564   while (k < *num_regions) {
1565     if (regions[k].type == STABLE_REGION) {
1566       k++;
1567       count_stable++;
1568       continue;
1569     }
1570     int dir = 0;
1571     int start = 0, last;
1572     for (i = regions[k].start; i <= regions[k].last; i++) {
1573       // First mark regions with a consistent, large change in intra error.
1574       if (k == 0 && i == regions[k].start) continue;
1575       if (stats[i].is_flash || (i > 0 && stats[i - 1].is_flash)) continue;
1576       double grad = stats[i].intra_error - stats[i - 1].intra_error;
1577       int large_change = fabs(grad) / AOMMAX(stats[i].intra_error, 0.01) > 0.05;
1578       int this_dir = 0;
1579       if (large_change) {
1580         this_dir = (grad > 0) ? 1 : -1;
1581       }
1582       // the current trend continues
1583       if (dir == this_dir) continue;
1584       if (dir != 0) {
1585         // Mark the end of a new large change group and add it
1586         last = i - 1;
1587         insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
1588       }
1589       dir = this_dir;
1590       if (k == 0 && i == regions[k].start + 1) {
1591         start = i - 1;
1592       } else {
1593         start = i;
1594       }
1595     }
1596     if (dir != 0) {
1597       last = regions[k].last;
1598       insert_region(start, last, BLENDING_REGION, regions, num_regions, &k);
1599     }
1600     k++;
1601   }
1602 
1603   // If the blending region has very low correlation, mark it as high variance
1604   // since we probably cannot benefit from it anyway.
1605   get_region_stats(stats, regions, *num_regions);
1606   for (k = 0; k < *num_regions; k++) {
1607     if (regions[k].type != BLENDING_REGION) continue;
1608     if (regions[k].last == regions[k].start || regions[k].avg_cor_coeff < 0.6 ||
1609         count_stable == 0)
1610       regions[k].type = HIGH_VAR_REGION;
1611   }
1612   get_region_stats(stats, regions, *num_regions);
1613 
1614   // It is possible for blending to result in a "dip" in intra error (first
1615   // decrease then increase). Therefore we need to find the dip and combine the
1616   // two regions.
1617   k = 1;
1618   while (k < *num_regions) {
1619     if (k < *num_regions - 1 && regions[k].type == HIGH_VAR_REGION) {
1620       // Check if this short high variance region is actually in the middle of
1621       // a blending region.
1622       if (regions[k - 1].type == BLENDING_REGION &&
1623           regions[k + 1].type == BLENDING_REGION &&
1624           regions[k].last - regions[k].start < 3) {
1625         int prev_dir = (stats[regions[k - 1].last].intra_error -
1626                         stats[regions[k - 1].last - 1].intra_error) > 0
1627                            ? 1
1628                            : -1;
1629         int next_dir = (stats[regions[k + 1].last].intra_error -
1630                         stats[regions[k + 1].last - 1].intra_error) > 0
1631                            ? 1
1632                            : -1;
1633         if (prev_dir < 0 && next_dir > 0) {
1634           // This is possibly a mid region of blending. Check the ratios
1635           double ratio_thres = AOMMIN(regions[k - 1].avg_sr_fr_ratio,
1636                                       regions[k + 1].avg_sr_fr_ratio) *
1637                                0.95;
1638           if (regions[k].avg_sr_fr_ratio > ratio_thres) {
1639             regions[k].type = BLENDING_REGION;
1640             remove_region(2, regions, num_regions, &k);
1641             analyze_region(stats, k - 1, regions);
1642             continue;
1643           }
1644         }
1645       }
1646     }
1647     // Check if we have a pair of consecutive blending regions.
1648     if (regions[k - 1].type == BLENDING_REGION &&
1649         regions[k].type == BLENDING_REGION) {
1650       int prev_dir = (stats[regions[k - 1].last].intra_error -
1651                       stats[regions[k - 1].last - 1].intra_error) > 0
1652                          ? 1
1653                          : -1;
1654       int next_dir = (stats[regions[k].last].intra_error -
1655                       stats[regions[k].last - 1].intra_error) > 0
1656                          ? 1
1657                          : -1;
1658 
1659       // if both are too short, no need to check
1660       int total_length = regions[k].last - regions[k - 1].start + 1;
1661       if (total_length < 4) {
1662         regions[k - 1].type = HIGH_VAR_REGION;
1663         k++;
1664         continue;
1665       }
1666 
1667       int to_merge = 0;
1668       if (prev_dir < 0 && next_dir > 0) {
1669         // In this case we check the last frame in the previous region.
1670         double prev_length =
1671             (double)(regions[k - 1].last - regions[k - 1].start + 1);
1672         double last_ratio, ratio_thres;
1673         if (prev_length < 2.01) {
1674           // if the previous region is very short
1675           double max_coded_error =
1676               AOMMAX(stats[regions[k - 1].last].coded_error,
1677                      stats[regions[k - 1].last - 1].coded_error);
1678           last_ratio = stats[regions[k - 1].last].sr_coded_error /
1679                        AOMMAX(max_coded_error, 0.001);
1680           ratio_thres = regions[k].avg_sr_fr_ratio * 0.95;
1681         } else {
1682           double max_coded_error =
1683               AOMMAX(stats[regions[k - 1].last].coded_error,
1684                      stats[regions[k - 1].last - 1].coded_error);
1685           last_ratio = stats[regions[k - 1].last].sr_coded_error /
1686                        AOMMAX(max_coded_error, 0.001);
1687           double prev_ratio =
1688               (regions[k - 1].avg_sr_fr_ratio * prev_length - last_ratio) /
1689               (prev_length - 1.0);
1690           ratio_thres = AOMMIN(prev_ratio, regions[k].avg_sr_fr_ratio) * 0.95;
1691         }
1692         if (last_ratio > ratio_thres) {
1693           to_merge = 1;
1694         }
1695       }
1696 
1697       if (to_merge) {
1698         remove_region(0, regions, num_regions, &k);
1699         analyze_region(stats, k - 1, regions);
1700         continue;
1701       } else {
1702         // These are possibly two separate blending regions. Mark the boundary
1703         // frame as HIGH_VAR_REGION to separate the two.
1704         int prev_k = k - 1;
1705         insert_region(regions[prev_k].last, regions[prev_k].last,
1706                       HIGH_VAR_REGION, regions, num_regions, &prev_k);
1707         analyze_region(stats, prev_k, regions);
1708         k = prev_k + 1;
1709         analyze_region(stats, k, regions);
1710       }
1711     }
1712     k++;
1713   }
1714   cleanup_regions(regions, num_regions);
1715 }
1716 
1717 // Clean up decisions for blending regions: remove blending regions that are
1718 // too short. Also, if a very short high var region sits between a blending
1719 // region and a stable region, just merge it with one of them.
1720 static void cleanup_blendings(REGIONS *regions, int *num_regions) {
1721   int k = 0;
1722   while (k < *num_regions && *num_regions > 1) {
1723     int is_short_blending = regions[k].type == BLENDING_REGION &&
1724                             regions[k].last - regions[k].start + 1 < 5;
1725     int is_short_hv = regions[k].type == HIGH_VAR_REGION &&
1726                       regions[k].last - regions[k].start + 1 < 5;
1727     int has_stable_neighbor =
1728         ((k > 0 && regions[k - 1].type == STABLE_REGION) ||
1729          (k < *num_regions - 1 && regions[k + 1].type == STABLE_REGION));
1730     int has_blend_neighbor =
1731         ((k > 0 && regions[k - 1].type == BLENDING_REGION) ||
1732          (k < *num_regions - 1 && regions[k + 1].type == BLENDING_REGION));
1733     int total_neighbors = (k > 0) + (k < *num_regions - 1);
1734 
1735     if (is_short_blending ||
1736         (is_short_hv &&
1737          has_stable_neighbor + has_blend_neighbor >= total_neighbors)) {
1738       // Remove this region. Try to determine whether to combine it with the
1739       // previous or next region.
1740       int merge;
1741       double prev_diff =
1742           (k > 0)
1743               ? fabs(regions[k].avg_cor_coeff - regions[k - 1].avg_cor_coeff)
1744               : 1;
1745       double next_diff =
1746           (k < *num_regions - 1)
1747               ? fabs(regions[k].avg_cor_coeff - regions[k + 1].avg_cor_coeff)
1748               : 1;
1749       // merge == 0 means to merge with previous, 1 means to merge with next
1750       merge = prev_diff > next_diff;
1751       remove_region(merge, regions, num_regions, &k);
1752     } else {
1753       k++;
1754     }
1755   }
1756   cleanup_regions(regions, num_regions);
1757 }
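
// Worked example (illustrative): suppose the short region being removed has
// avg_cor_coeff == 0.90, its previous neighbor 0.97 and its next neighbor
// 0.91. Then prev_diff == 0.07 > next_diff == 0.01, so merge == 1 and the
// region is folded into the following region, i.e. the neighbor whose average
// correlation coefficient is closer to its own.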
1758 
1759 static void free_firstpass_stats_buffers(REGIONS *temp_regions,
1760                                          double *filt_intra_err,
1761                                          double *filt_coded_err,
1762                                          double *grad_coded) {
1763   aom_free(temp_regions);
1764   aom_free(filt_intra_err);
1765   aom_free(filt_coded_err);
1766   aom_free(grad_coded);
1767 }
1768 
1769 // Identify stable and unstable regions from first pass stats.
1770 // stats_start points to the first frame to analyze.
1771 // |offset| is the offset from the current frame to the frame stats_start is
1772 // pointing to.
1773 // Returns 0 on success, -1 on memory allocation failure.
1774 static int identify_regions(const FIRSTPASS_STATS *const stats_start,
1775                             int total_frames, int offset, REGIONS *regions,
1776                             int *total_regions) {
1777   int k;
1778   if (total_frames <= 1) return 0;
1779 
1780   // store the initial decisions
1781   REGIONS *temp_regions =
1782       (REGIONS *)aom_malloc(total_frames * sizeof(temp_regions[0]));
1783   // buffers for filtered stats
1784   double *filt_intra_err =
1785       (double *)aom_calloc(total_frames, sizeof(*filt_intra_err));
1786   double *filt_coded_err =
1787       (double *)aom_calloc(total_frames, sizeof(*filt_coded_err));
1788   double *grad_coded = (double *)aom_calloc(total_frames, sizeof(*grad_coded));
1789   if (!(temp_regions && filt_intra_err && filt_coded_err && grad_coded)) {
1790     free_firstpass_stats_buffers(temp_regions, filt_intra_err, filt_coded_err,
1791                                  grad_coded);
1792     return -1;
1793   }
1794   av1_zero_array(temp_regions, total_frames);
1795 
1796   int cur_region = 0, this_start = 0, this_last;
1797 
1798   int next_scenecut = -1;
1799   do {
1800     // first get the obvious scenecuts
1801     next_scenecut =
1802         find_next_scenecut(stats_start, this_start, total_frames - 1);
1803     this_last = (next_scenecut >= 0) ? (next_scenecut - 1) : total_frames - 1;
1804 
1805     // low-pass filter the needed stats
1806     smooth_filter_stats(stats_start, this_start, this_last, filt_intra_err,
1807                         filt_coded_err);
1808     get_gradient(filt_coded_err, this_start, this_last, grad_coded);
1809 
1810     // find tentative stable regions and unstable regions
1811     int num_regions = find_stable_regions(stats_start, grad_coded, this_start,
1812                                           this_last, temp_regions);
1813 
1814     adjust_unstable_region_bounds(stats_start, temp_regions, &num_regions);
1815 
1816     get_region_stats(stats_start, temp_regions, num_regions);
1817 
1818     // Try to identify blending regions in the unstable regions
1819     find_blending_regions(stats_start, temp_regions, &num_regions);
1820     cleanup_blendings(temp_regions, &num_regions);
1821 
1822     // The flash points should all be considered high variance points
1823     k = 0;
1824     while (k < num_regions) {
1825       if (temp_regions[k].type != STABLE_REGION) {
1826         k++;
1827         continue;
1828       }
1829       int start = temp_regions[k].start;
1830       int last = temp_regions[k].last;
1831       for (int i = start; i <= last; i++) {
1832         if (stats_start[i].is_flash) {
1833           insert_region(i, i, HIGH_VAR_REGION, temp_regions, &num_regions, &k);
1834         }
1835       }
1836       k++;
1837     }
1838     cleanup_regions(temp_regions, &num_regions);
1839 
1840     // copy the regions in the scenecut group
1841     for (k = 0; k < num_regions; k++) {
1842       if (temp_regions[k].last < temp_regions[k].start &&
1843           k == num_regions - 1) {
1844         num_regions--;
1845         break;
1846       }
1847       regions[k + cur_region] = temp_regions[k];
1848     }
1849     cur_region += num_regions;
1850 
1851     // add the scenecut region
1852     if (next_scenecut > -1) {
1853       // add the scenecut region, and find the next scenecut
1854       regions[cur_region].type = SCENECUT_REGION;
1855       regions[cur_region].start = next_scenecut;
1856       regions[cur_region].last = next_scenecut;
1857       cur_region++;
1858       this_start = next_scenecut + 1;
1859     }
1860   } while (next_scenecut >= 0);
1861 
1862   *total_regions = cur_region;
1863   get_region_stats(stats_start, regions, *total_regions);
1864 
1865   for (k = 0; k < *total_regions; k++) {
1866     // If scenecuts are very minor, mark them as high variance.
1867     if (regions[k].type != SCENECUT_REGION ||
1868         regions[k].avg_cor_coeff *
1869                 (1 - stats_start[regions[k].start].noise_var /
1870                          regions[k].avg_intra_err) <
1871             0.8) {
1872       continue;
1873     }
1874     regions[k].type = HIGH_VAR_REGION;
1875   }
1876   cleanup_regions(regions, total_regions);
1877   get_region_stats(stats_start, regions, *total_regions);
1878 
1879   for (k = 0; k < *total_regions; k++) {
1880     regions[k].start += offset;
1881     regions[k].last += offset;
1882   }
1883 
1884   free_firstpass_stats_buffers(temp_regions, filt_intra_err, filt_coded_err,
1885                                grad_coded);
1886   return 0;
1887 }
1888 
1889 static int find_regions_index(const REGIONS *regions, int num_regions,
1890                               int frame_idx) {
1891   for (int k = 0; k < num_regions; k++) {
1892     if (regions[k].start <= frame_idx && regions[k].last >= frame_idx) {
1893       return k;
1894     }
1895   }
1896   return -1;
1897 }
1898 
1899 /*!\brief Determine the length of future GF groups.
1900  *
1901  * \ingroup gf_group_algo
1902  * This function decides the GF group lengths of future frames in a batch.
1903  *
1904  * \param[in]    cpi              Top-level encoder structure
1905  * \param[in]    max_gop_length   Maximum length of the GF group
1906  * \param[in]    max_intervals    Maximum number of intervals to decide
1907  *
1908  * \remark Nothing is returned. Instead, cpi->ppi->p_rc.gf_intervals is
1909  * changed to store the decided GF group lengths.
1910  */
1911 static void calculate_gf_length(AV1_COMP *cpi, int max_gop_length,
1912                                 int max_intervals) {
1913   RATE_CONTROL *const rc = &cpi->rc;
1914   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
1915   TWO_PASS *const twopass = &cpi->ppi->twopass;
1916   FIRSTPASS_STATS next_frame;
1917   const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
1918   const FIRSTPASS_STATS *const stats = start_pos - (rc->frames_since_key == 0);
1919 
1920   const int f_w = cpi->common.width;
1921   const int f_h = cpi->common.height;
1922   int i;
1923 
1924   int flash_detected;
1925 
1926   av1_zero(next_frame);
1927 
1928   if (has_no_stats_stage(cpi)) {
1929     for (i = 0; i < MAX_NUM_GF_INTERVALS; i++) {
1930       p_rc->gf_intervals[i] = AOMMIN(rc->max_gf_interval, max_gop_length);
1931     }
1932     p_rc->cur_gf_index = 0;
1933     rc->intervals_till_gf_calculate_due = MAX_NUM_GF_INTERVALS;
1934     return;
1935   }
1936 
1937   // TODO(urvang): Try logic to vary min and max interval based on q.
1938   const int active_min_gf_interval = rc->min_gf_interval;
1939   const int active_max_gf_interval =
1940       AOMMIN(rc->max_gf_interval, max_gop_length);
1941   const int min_shrink_int = AOMMAX(MIN_SHRINK_LEN, active_min_gf_interval);
1942 
1943   i = (rc->frames_since_key == 0);
1944   max_intervals = cpi->ppi->lap_enabled ? 1 : max_intervals;
1945   int count_cuts = 1;
1946   // If cpi->ppi->gf_state.arf_gf_boost_lst is 0, we start with a KF or GF.
1947   int cur_start = -1 + !cpi->ppi->gf_state.arf_gf_boost_lst, cur_last;
1948   int cut_pos[MAX_NUM_GF_INTERVALS + 1] = { -1 };
1949   int cut_here;
1950   GF_GROUP_STATS gf_stats;
1951   init_gf_stats(&gf_stats);
1952   while (count_cuts < max_intervals + 1) {
1953     // reaches next key frame, break here
1954     if (i >= rc->frames_to_key) {
1955       cut_here = 2;
1956     } else if (i - cur_start >= rc->static_scene_max_gf_interval) {
1957       // reached maximum len, but nothing special yet (almost static)
1958       // let's look at the next interval
1959       cut_here = 1;
1960     } else if (EOF == input_stats(twopass, &cpi->twopass_frame, &next_frame)) {
1961       // reaches last frame, break
1962       cut_here = 2;
1963     } else {
1964       // Test for the case where there is a brief flash but the prediction
1965       // quality back to an earlier frame is then restored.
1966       flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);
1967       // TODO(bohanli): remove redundant accumulations here, or unify
1968       // this and the ones in define_gf_group
1969       accumulate_next_frame_stats(&next_frame, flash_detected,
1970                                   rc->frames_since_key, i, &gf_stats, f_w, f_h);
1971 
1972       cut_here = detect_gf_cut(cpi, i, cur_start, flash_detected,
1973                                active_max_gf_interval, active_min_gf_interval,
1974                                &gf_stats);
1975     }
1976     if (cut_here) {
1977       cur_last = i - 1;  // the current last frame in the gf group
1978       int ori_last = cur_last;
1979       // The region frame idx does not start from the same frame as cur_start
1980       // and cur_last. Need to offset them.
1981       int offset = rc->frames_since_key - p_rc->regions_offset;
1982       REGIONS *regions = p_rc->regions;
1983       int num_regions = p_rc->num_regions;
1984 
1985       int scenecut_idx = -1;
1986       // only try shrinking if interval smaller than active_max_gf_interval
1987       if (cur_last - cur_start <= active_max_gf_interval &&
1988           cur_last > cur_start) {
1989         // find the region indices of where the first and last frame belong.
1990         int k_start =
1991             find_regions_index(regions, num_regions, cur_start + offset);
1992         int k_last =
1993             find_regions_index(regions, num_regions, cur_last + offset);
1994         if (cur_start + offset == 0) k_start = 0;
1995 
1996         // See if we have a scenecut in between
1997         for (int r = k_start + 1; r <= k_last; r++) {
1998           if (regions[r].type == SCENECUT_REGION &&
1999               regions[r].last - offset - cur_start > active_min_gf_interval) {
2000             scenecut_idx = r;
2001             break;
2002           }
2003         }
2004 
2005         // if the found scenecut is very close to the end, ignore it.
2006         if (scenecut_idx >= 0 &&
            regions[num_regions - 1].last - regions[scenecut_idx].last < 4) {
2007           scenecut_idx = -1;
2008         }
2009 
2010         if (scenecut_idx != -1) {
2011           // If we have a scenecut, then stop at it.
2012           // TODO(bohanli): add logic here to stop before the scenecut and for
2013           // the next gop start from the scenecut with GF
2014           int is_minor_sc =
2015               (regions[scenecut_idx].avg_cor_coeff *
2016                    (1 - stats[regions[scenecut_idx].start - offset].noise_var /
2017                             regions[scenecut_idx].avg_intra_err) >
2018                0.6);
2019           cur_last = regions[scenecut_idx].last - offset - !is_minor_sc;
2020         } else {
2021           int is_last_analysed = (k_last == num_regions - 1) &&
2022                                  (cur_last + offset == regions[k_last].last);
2023           int not_enough_regions =
2024               k_last - k_start <=
2025               1 + (regions[k_start].type == SCENECUT_REGION);
2026           // if we are very close to the end, then do not shrink since it may
2027           // introduce intervals that are too short
2028           if (!(is_last_analysed && not_enough_regions)) {
2029             const double arf_length_factor = 0.1;
2030             double best_score = 0;
2031             int best_j = -1;
2032             const int first_frame = regions[0].start - offset;
2033             const int last_frame = regions[num_regions - 1].last - offset;
2034             // score of how much the arf helps the whole GOP
2035             double base_score = 0.0;
2036             // Accumulate base_score over frames before the earliest allowed cut.
2037             for (int j = cur_start + 1; j < cur_start + min_shrink_int; j++) {
2038               if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
2039               base_score = (base_score + 1.0) * stats[j].cor_coeff;
2040             }
2041             int met_blending = 0;   // Whether a blending area was seen before
2042             int last_blending = 0;  // Whether the previous frame is blending
2043             for (int j = cur_start + min_shrink_int; j <= cur_last; j++) {
2044               if (stats + j >= twopass->stats_buf_ctx->stats_in_end) break;
2045               base_score = (base_score + 1.0) * stats[j].cor_coeff;
2046               int this_reg =
2047                   find_regions_index(regions, num_regions, j + offset);
2048               if (this_reg < 0) continue;
2049               // A GOP should include at most 1 blending region.
2050               if (regions[this_reg].type == BLENDING_REGION) {
2051                 last_blending = 1;
2052                 if (met_blending) {
2053                   break;
2054                 } else {
2055                   base_score = 0;
2056                   continue;
2057                 }
2058               } else {
2059                 if (last_blending) met_blending = 1;
2060                 last_blending = 0;
2061               }
2062 
2063               // Add a factor for how good the neighborhood is for this
2064               // candidate arf.
2065               double this_score = arf_length_factor * base_score;
2066               double temp_accu_coeff = 1.0;
2067               // following frames
2068               int count_f = 0;
2069               for (int n = j + 1; n <= j + 3 && n <= last_frame; n++) {
2070                 if (stats + n >= twopass->stats_buf_ctx->stats_in_end) break;
2071                 temp_accu_coeff *= stats[n].cor_coeff;
2072                 this_score +=
2073                     temp_accu_coeff *
2074                     sqrt(AOMMAX(0.5,
2075                                 1 - stats[n].noise_var /
2076                                         AOMMAX(stats[n].intra_error, 0.001)));
2077                 count_f++;
2078               }
2079               // preceding frames
2080               temp_accu_coeff = 1.0;
2081               for (int n = j; n > j - 3 * 2 + count_f && n > first_frame; n--) {
2082                 if (stats + n < twopass->stats_buf_ctx->stats_in_start) break;
2083                 temp_accu_coeff *= stats[n].cor_coeff;
2084                 this_score +=
2085                     temp_accu_coeff *
2086                     sqrt(AOMMAX(0.5,
2087                                 1 - stats[n].noise_var /
2088                                         AOMMAX(stats[n].intra_error, 0.001)));
2089               }
2090 
2091               if (this_score > best_score) {
2092                 best_score = this_score;
2093                 best_j = j;
2094               }
2095             }
2096 
2097             // For blending areas, move one more frame in case we missed the
2098             // first blending frame.
2099             int best_reg =
2100                 find_regions_index(regions, num_regions, best_j + offset);
2101             if (best_reg < num_regions - 1 && best_reg > 0) {
2102               if (regions[best_reg - 1].type == BLENDING_REGION &&
2103                   regions[best_reg + 1].type == BLENDING_REGION) {
2104                 if (best_j + offset == regions[best_reg].start &&
2105                     best_j + offset < regions[best_reg].last) {
2106                   best_j += 1;
2107                 } else if (best_j + offset == regions[best_reg].last &&
2108                            best_j + offset > regions[best_reg].start) {
2109                   best_j -= 1;
2110                 }
2111               }
2112             }
2113 
2114             if (cur_last - best_j < 2) best_j = cur_last;
2115             if (best_j > 0 && best_score > 0.1) cur_last = best_j;
2116             // if we cannot find anything, just cut at the original place.
2117           }
2118         }
2119       }
2120       cut_pos[count_cuts] = cur_last;
2121       count_cuts++;
2122 
2123       // reset pointers to the shrunken location
2124       cpi->twopass_frame.stats_in = start_pos + cur_last;
2125       cur_start = cur_last;
2126       int cur_region_idx =
2127           find_regions_index(regions, num_regions, cur_start + 1 + offset);
2128       if (cur_region_idx >= 0)
2129         if (regions[cur_region_idx].type == SCENECUT_REGION) cur_start++;
2130 
2131       i = cur_last;
2132 
2133       if (cut_here > 1 && cur_last == ori_last) break;
2134 
2135       // reset accumulators
2136       init_gf_stats(&gf_stats);
2137     }
2138     ++i;
2139   }
2140 
2141   // save intervals
2142   rc->intervals_till_gf_calculate_due = count_cuts - 1;
2143   for (int n = 1; n < count_cuts; n++) {
2144     p_rc->gf_intervals[n - 1] = cut_pos[n] - cut_pos[n - 1];
2145   }
2146   p_rc->cur_gf_index = 0;
2147   cpi->twopass_frame.stats_in = start_pos;
2148 }
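
// Worked example (illustrative): if the loop above records
// cut_pos = { -1, 15, 31 } with count_cuts == 3, then two GF group lengths
// are stored:
//   p_rc->gf_intervals[0] = 15 - (-1) = 16
//   p_rc->gf_intervals[1] = 31 - 15   = 16
// and rc->intervals_till_gf_calculate_due == 2, so two more GOPs are started
// before the lengths are recomputed.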
2149 
2150 static void correct_frames_to_key(AV1_COMP *cpi) {
2151   int lookahead_size =
2152       (int)av1_lookahead_depth(cpi->ppi->lookahead, cpi->compressor_stage);
2153   if (lookahead_size <
2154       av1_lookahead_pop_sz(cpi->ppi->lookahead, cpi->compressor_stage)) {
2155     assert(
2156         IMPLIES(cpi->oxcf.pass != AOM_RC_ONE_PASS && cpi->ppi->frames_left > 0,
2157                 lookahead_size == cpi->ppi->frames_left));
2158     cpi->rc.frames_to_key = AOMMIN(cpi->rc.frames_to_key, lookahead_size);
2159   } else if (cpi->ppi->frames_left > 0) {
2160     // Correct frames to key based on limit
2161     cpi->rc.frames_to_key =
2162         AOMMIN(cpi->rc.frames_to_key, cpi->ppi->frames_left);
2163   }
2164 }
2165 
2166 /*!\brief Define a GF group in one pass mode when no look ahead stats are
2167  * available.
2168  *
2169  * \ingroup gf_group_algo
2170  * This function defines the structure of a GF group, along with various
2171  * parameters regarding bit-allocation and quality setup in the special
2172  * case of one pass encoding where no lookahead stats are available.
2173  *
2174  * \param[in]    cpi             Top-level encoder structure
2175  *
2176  * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2177  */
2178 static void define_gf_group_pass0(AV1_COMP *cpi) {
2179   RATE_CONTROL *const rc = &cpi->rc;
2180   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2181   GF_GROUP *const gf_group = &cpi->ppi->gf_group;
2182   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2183   const GFConfig *const gf_cfg = &oxcf->gf_cfg;
2184   int target;
2185 
2186   if (oxcf->q_cfg.aq_mode == CYCLIC_REFRESH_AQ) {
2187     av1_cyclic_refresh_set_golden_update(cpi);
2188   } else {
2189     p_rc->baseline_gf_interval = p_rc->gf_intervals[p_rc->cur_gf_index];
2190     rc->intervals_till_gf_calculate_due--;
2191     p_rc->cur_gf_index++;
2192   }
2193 
2194   // correct frames_to_key when lookahead queue is flushing
2195   correct_frames_to_key(cpi);
2196 
2197   if (p_rc->baseline_gf_interval > rc->frames_to_key)
2198     p_rc->baseline_gf_interval = rc->frames_to_key;
2199 
2200   p_rc->gfu_boost = DEFAULT_GF_BOOST;
2201   p_rc->constrained_gf_group =
2202       (p_rc->baseline_gf_interval >= rc->frames_to_key) ? 1 : 0;
2203 
2204   gf_group->max_layer_depth_allowed = oxcf->gf_cfg.gf_max_pyr_height;
2205 
2206   // Rare case where the look-ahead is shorter than the target GOP length, so
2207   // an ARF frame cannot be generated.
2208   if (p_rc->baseline_gf_interval > gf_cfg->lag_in_frames ||
2209       !is_altref_enabled(gf_cfg->lag_in_frames, gf_cfg->enable_auto_arf) ||
2210       p_rc->baseline_gf_interval < rc->min_gf_interval)
2211     gf_group->max_layer_depth_allowed = 0;
2212 
2213   // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
2214   av1_gop_setup_structure(cpi);
2215 
2216   // Allocate bits to each of the frames in the GF group.
2217   // TODO(sarahparker) Extend this to work with pyramid structure.
2218   for (int cur_index = 0; cur_index < gf_group->size; ++cur_index) {
2219     const FRAME_UPDATE_TYPE cur_update_type = gf_group->update_type[cur_index];
2220     if (oxcf->rc_cfg.mode == AOM_CBR) {
2221       if (cur_update_type == KF_UPDATE) {
2222         target = av1_calc_iframe_target_size_one_pass_cbr(cpi);
2223       } else {
2224         target = av1_calc_pframe_target_size_one_pass_cbr(cpi, cur_update_type);
2225       }
2226     } else {
2227       if (cur_update_type == KF_UPDATE) {
2228         target = av1_calc_iframe_target_size_one_pass_vbr(cpi);
2229       } else {
2230         target = av1_calc_pframe_target_size_one_pass_vbr(cpi, cur_update_type);
2231       }
2232     }
2233     gf_group->bit_allocation[cur_index] = target;
2234   }
2235 }
2236 
2237 static inline void set_baseline_gf_interval(PRIMARY_RATE_CONTROL *p_rc,
2238                                             int arf_position) {
2239   p_rc->baseline_gf_interval = arf_position;
2240 }
2241 
2242 // initialize GF_GROUP_STATS
2243 static void init_gf_stats(GF_GROUP_STATS *gf_stats) {
2244   gf_stats->gf_group_err = 0.0;
2245   gf_stats->gf_group_raw_error = 0.0;
2246   gf_stats->gf_group_skip_pct = 0.0;
2247   gf_stats->gf_group_inactive_zone_rows = 0.0;
2248 
2249   gf_stats->mv_ratio_accumulator = 0.0;
2250   gf_stats->decay_accumulator = 1.0;
2251   gf_stats->zero_motion_accumulator = 1.0;
2252   gf_stats->loop_decay_rate = 1.0;
2253   gf_stats->last_loop_decay_rate = 1.0;
2254   gf_stats->this_frame_mv_in_out = 0.0;
2255   gf_stats->mv_in_out_accumulator = 0.0;
2256   gf_stats->abs_mv_in_out_accumulator = 0.0;
2257 
2258   gf_stats->avg_sr_coded_error = 0.0;
2259   gf_stats->avg_pcnt_second_ref = 0.0;
2260   gf_stats->avg_new_mv_count = 0.0;
2261   gf_stats->avg_wavelet_energy = 0.0;
2262   gf_stats->avg_raw_err_stdev = 0.0;
2263   gf_stats->non_zero_stdev_count = 0;
2264 }
2265 
2266 static void accumulate_gop_stats(AV1_COMP *cpi, int is_intra_only, int f_w,
2267                                  int f_h, FIRSTPASS_STATS *next_frame,
2268                                  const FIRSTPASS_STATS *start_pos,
2269                                  GF_GROUP_STATS *gf_stats, int *idx) {
2270   int i, flash_detected;
2271   TWO_PASS *const twopass = &cpi->ppi->twopass;
2272   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2273   RATE_CONTROL *const rc = &cpi->rc;
2274   FRAME_INFO *frame_info = &cpi->frame_info;
2275   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2276 
2277   init_gf_stats(gf_stats);
2278   av1_zero(*next_frame);
2279 
2280   // If this is a key frame or the overlay from a previous arf then
2281   // the error score / cost of this frame has already been accounted for.
2282   i = is_intra_only;
2283   // get the determined gf group length from p_rc->gf_intervals
2284   while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
2285     // read in the next frame
2286     if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break;
2287     // Accumulate error score of frames in this gf group.
2288     double mod_frame_err =
2289         calculate_modified_err(frame_info, twopass, oxcf, next_frame);
2290     // accumulate stats for this frame
2291     accumulate_this_frame_stats(next_frame, mod_frame_err, gf_stats);
2292     ++i;
2293   }
2294 
2295   reset_fpf_position(&cpi->twopass_frame, start_pos);
2296 
2297   i = is_intra_only;
2298   input_stats(twopass, &cpi->twopass_frame, next_frame);
2299   while (i < p_rc->gf_intervals[p_rc->cur_gf_index]) {
2300     // read in the next frame
2301     if (EOF == input_stats(twopass, &cpi->twopass_frame, next_frame)) break;
2302 
2303     // Test for the case where there is a brief flash but the prediction
2304     // quality back to an earlier frame is then restored.
2305     flash_detected = detect_flash(twopass, &cpi->twopass_frame, 0);
2306 
2307     // accumulate stats for next frame
2308     accumulate_next_frame_stats(next_frame, flash_detected,
2309                                 rc->frames_since_key, i, gf_stats, f_w, f_h);
2310 
2311     ++i;
2312   }
2313 
2314   i = p_rc->gf_intervals[p_rc->cur_gf_index];
2315   average_gf_stats(i, gf_stats);
2316 
2317   *idx = i;
2318 }
2319 
2320 static void update_gop_length(RATE_CONTROL *rc, PRIMARY_RATE_CONTROL *p_rc,
2321                               int idx, int is_final_pass) {
2322   if (is_final_pass) {
2323     rc->intervals_till_gf_calculate_due--;
2324     p_rc->cur_gf_index++;
2325   }
2326 
2327   // Was the group length constrained by the requirement for a new KF?
2328   p_rc->constrained_gf_group = (idx >= rc->frames_to_key) ? 1 : 0;
2329 
2330   set_baseline_gf_interval(p_rc, idx);
2331   rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
2332 }
2333 
2334 #define MAX_GF_BOOST 5400
2335 #define REDUCE_GF_LENGTH_THRESH 4
2336 #define REDUCE_GF_LENGTH_TO_KEY_THRESH 9
2337 #define REDUCE_GF_LENGTH_BY 1
2338 static void set_gop_bits_boost(AV1_COMP *cpi, int i, int is_intra_only,
2339                                int is_final_pass, int use_alt_ref,
2340                                int alt_offset, const FIRSTPASS_STATS *start_pos,
2341                                GF_GROUP_STATS *gf_stats) {
2342   // Should we use the alternate reference frame.
2343   AV1_COMMON *const cm = &cpi->common;
2344   RATE_CONTROL *const rc = &cpi->rc;
2345   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2346   TWO_PASS *const twopass = &cpi->ppi->twopass;
2347   GF_GROUP *gf_group = &cpi->ppi->gf_group;
2348   FRAME_INFO *frame_info = &cpi->frame_info;
2349   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2350   const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
2351 
2352   int ext_len = i - is_intra_only;
2353   if (use_alt_ref) {
2354     const int forward_frames = (rc->frames_to_key - i >= ext_len)
2355                                    ? ext_len
2356                                    : AOMMAX(0, rc->frames_to_key - i);
2357 
2358     // Calculate the boost for alt ref.
2359     p_rc->gfu_boost = av1_calc_arf_boost(
2360         twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset,
2361         forward_frames, ext_len, &p_rc->num_stats_used_for_gfu_boost,
2362         &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled);
2363   } else {
2364     reset_fpf_position(&cpi->twopass_frame, start_pos);
2365     p_rc->gfu_boost = AOMMIN(
2366         MAX_GF_BOOST,
2367         av1_calc_arf_boost(
2368             twopass, &cpi->twopass_frame, p_rc, frame_info, alt_offset, ext_len,
2369             0, &p_rc->num_stats_used_for_gfu_boost,
2370             &p_rc->num_stats_required_for_gfu_boost, cpi->ppi->lap_enabled));
2371   }
2372 
2373 #define LAST_ALR_BOOST_FACTOR 0.2f
2374   p_rc->arf_boost_factor = 1.0;
2375   if (use_alt_ref && !is_lossless_requested(rc_cfg)) {
2376     // Reduce the boost of altref in the last gf group
2377     if (rc->frames_to_key - ext_len == REDUCE_GF_LENGTH_BY ||
2378         rc->frames_to_key - ext_len == 0) {
2379       p_rc->arf_boost_factor = LAST_ALR_BOOST_FACTOR;
2380     }
2381   }
2382 
2383   // Reset the file position.
2384   reset_fpf_position(&cpi->twopass_frame, start_pos);
2385   if (cpi->ppi->lap_enabled) {
2386     // Since we don't have enough stats to know the actual error of the
2387     // gf group, we assume error of each frame to be equal to 1 and set
2388     // the error of the group as baseline_gf_interval.
2389     gf_stats->gf_group_err = p_rc->baseline_gf_interval;
2390   }
2391   // Calculate the bits to be allocated to the gf/arf group as a whole
2392   p_rc->gf_group_bits =
2393       calculate_total_gf_group_bits(cpi, gf_stats->gf_group_err);
2394 
2395 #if GROUP_ADAPTIVE_MAXQ
2396   // Calculate an estimate of the maxq needed for the group.
2397   // We are more aggressive about correcting for sections
2398   // where there could be significant overshoot than for easier
2399   // sections where we do not wish to risk creating an overshoot
2400   // of the allocated bit budget.
2401   if ((rc_cfg->mode != AOM_Q) && (p_rc->baseline_gf_interval > 1) &&
2402       is_final_pass) {
2403     const int vbr_group_bits_per_frame =
2404         (int)(p_rc->gf_group_bits / p_rc->baseline_gf_interval);
2405     const double group_av_err =
2406         gf_stats->gf_group_raw_error / p_rc->baseline_gf_interval;
2407     const double group_av_skip_pct =
2408         gf_stats->gf_group_skip_pct / p_rc->baseline_gf_interval;
2409     const double group_av_inactive_zone =
2410         ((gf_stats->gf_group_inactive_zone_rows * 2) /
2411          (p_rc->baseline_gf_interval * (double)cm->mi_params.mb_rows));
2412 
2413     int tmp_q;
2414     tmp_q = get_twopass_worst_quality(
2415         cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
2416         vbr_group_bits_per_frame);
2417     rc->active_worst_quality = AOMMAX(tmp_q, rc->active_worst_quality >> 1);
2418   }
2419 #endif
2420 
2421   // Adjust KF group bits and error remaining.
2422   if (is_final_pass) twopass->kf_group_error_left -= gf_stats->gf_group_err;
2423 
2424   // Reset the file position.
2425   reset_fpf_position(&cpi->twopass_frame, start_pos);
2426 
2427   // Calculate a section intra ratio used in setting max loop filter.
2428   if (rc->frames_since_key != 0) {
2429     twopass->section_intra_rating = calculate_section_intra_ratio(
2430         start_pos, twopass->stats_buf_ctx->stats_in_end,
2431         p_rc->baseline_gf_interval);
2432   }
2433 
2434   av1_gop_bit_allocation(cpi, rc, gf_group, rc->frames_since_key == 0,
2435                          use_alt_ref, p_rc->gf_group_bits);
2436 
2437   // TODO(jingning): Generalize this condition.
2438   if (is_final_pass) {
2439     cpi->ppi->gf_state.arf_gf_boost_lst = use_alt_ref;
2440 
2441     // Reset rolling actual and target bits counters for ARF groups.
2442     twopass->rolling_arf_group_target_bits = 1;
2443     twopass->rolling_arf_group_actual_bits = 1;
2444   }
2445 #if CONFIG_BITRATE_ACCURACY
2446   if (is_final_pass) {
2447     av1_vbr_rc_set_gop_bit_budget(&cpi->vbr_rc_info,
2448                                   p_rc->baseline_gf_interval);
2449   }
2450 #endif
2451 }
2452 
2453 /*!\brief Define a GF group.
2454  *
2455  * \ingroup gf_group_algo
2456  * This function defines the structure of a GF group, along with various
2457  * parameters regarding bit-allocation and quality setup.
2458  *
2459  * \param[in]    cpi             Top-level encoder structure
2460  * \param[in]    frame_params    Structure with frame parameters
2461  * \param[in]    is_final_pass   Whether this is the final pass (non-zero)
2462  *                               for the GF group, or a trial run (zero)
2463  *
2464  * \remark Nothing is returned. Instead, cpi->ppi->gf_group is changed.
2465  */
2466 static void define_gf_group(AV1_COMP *cpi, EncodeFrameParams *frame_params,
2467                             int is_final_pass) {
2468   AV1_COMMON *const cm = &cpi->common;
2469   RATE_CONTROL *const rc = &cpi->rc;
2470   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2471   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2472   TWO_PASS *const twopass = &cpi->ppi->twopass;
2473   FIRSTPASS_STATS next_frame;
2474   const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
2475   GF_GROUP *gf_group = &cpi->ppi->gf_group;
2476   const GFConfig *const gf_cfg = &oxcf->gf_cfg;
2477   const RateControlCfg *const rc_cfg = &oxcf->rc_cfg;
2478   const int f_w = cm->width;
2479   const int f_h = cm->height;
2480   int i;
2481   const int is_intra_only = rc->frames_since_key == 0;
2482 
2483   cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1);
2484 
2485   // Reset the GF group data structures unless this is a key
2486   // frame in which case it will already have been done.
2487   if (!is_intra_only) {
2488     av1_zero(cpi->ppi->gf_group);
2489     cpi->gf_frame_index = 0;
2490   }
2491 
2492   if (has_no_stats_stage(cpi)) {
2493     define_gf_group_pass0(cpi);
2494     return;
2495   }
2496 
2497 #if CONFIG_THREE_PASS
2498   if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
2499     int ret = define_gf_group_pass3(cpi, frame_params, is_final_pass);
2500     if (ret == 0) return;
2501 
2502     av1_free_thirdpass_ctx(cpi->third_pass_ctx);
2503     cpi->third_pass_ctx = NULL;
2504   }
2505 #endif  // CONFIG_THREE_PASS
2506 
2507   // correct frames_to_key when lookahead queue is emptying
2508   if (cpi->ppi->lap_enabled) {
2509     correct_frames_to_key(cpi);
2510   }
2511 
2512   GF_GROUP_STATS gf_stats;
2513   accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame, start_pos,
2514                        &gf_stats, &i);
2515 
2516   const int can_disable_arf = !gf_cfg->gf_min_pyr_height;
2517 
2518   // If this is a key frame or the overlay from a previous arf then
2519   // the error score / cost of this frame has already been accounted for.
2520   const int active_min_gf_interval = rc->min_gf_interval;
2521 
2522   // Disable internal ARFs for "still" gf groups.
2523   //   zero_motion_accumulator: minimum percentage of (0,0) motion;
2524   //   avg_sr_coded_error:      average of the SSE per pixel of each frame;
2525   //   avg_raw_err_stdev:       average of the standard deviation of (0,0)
2526   //                            motion error per block of each frame.
2527   const int can_disable_internal_arfs = gf_cfg->gf_min_pyr_height <= 1;
2528   if (can_disable_internal_arfs &&
2529       gf_stats.zero_motion_accumulator > MIN_ZERO_MOTION &&
2530       gf_stats.avg_sr_coded_error < MAX_SR_CODED_ERROR &&
2531       gf_stats.avg_raw_err_stdev < MAX_RAW_ERR_VAR) {
2532     cpi->ppi->internal_altref_allowed = 0;
2533   }
2534 
2535   int use_alt_ref;
2536   if (can_disable_arf) {
2537     use_alt_ref =
2538         !is_almost_static(gf_stats.zero_motion_accumulator,
2539                           twopass->kf_zeromotion_pct, cpi->ppi->lap_enabled) &&
2540         p_rc->use_arf_in_this_kf_group && (i < gf_cfg->lag_in_frames) &&
2541         (i >= MIN_GF_INTERVAL);
2542   } else {
2543     use_alt_ref = p_rc->use_arf_in_this_kf_group &&
2544                   (i < gf_cfg->lag_in_frames) && (i > 2);
2545   }
2546   if (use_alt_ref) {
2547     gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;
2548   } else {
2549     gf_group->max_layer_depth_allowed = 0;
2550   }
2551 
2552   int alt_offset = 0;
2553   // The length reduction strategy only helps in certain cases: AOM_Q at
2554   // cq_level <= 128 or with internal ARFs disallowed, and never for lossless.
2555   const int allow_gf_length_reduction =
2556       ((rc_cfg->mode == AOM_Q && rc_cfg->cq_level <= 128) ||
2557        !cpi->ppi->internal_altref_allowed) &&
2558       !is_lossless_requested(rc_cfg);
2559 
2560   if (allow_gf_length_reduction && use_alt_ref) {
2561     // Adjust this gf group's length if one of the following conditions is met:
2562     // 1: only one overlay frame is left and this gf group is too long
2563     // 2: the next gf group is too short to have an arf compared to the current gf
2564 
2565     // maximum length of next gf group
2566     const int next_gf_len = rc->frames_to_key - i;
2567     const int single_overlay_left =
2568         next_gf_len == 0 && i > REDUCE_GF_LENGTH_THRESH;
2569     // the next gf group is probably going to have an ARF but it will be
2570     // shorter than this gf
2571     const int unbalanced_gf =
2572         i > REDUCE_GF_LENGTH_TO_KEY_THRESH &&
2573         next_gf_len + 1 < REDUCE_GF_LENGTH_TO_KEY_THRESH &&
2574         next_gf_len + 1 >= rc->min_gf_interval;
2575 
2576     if (single_overlay_left || unbalanced_gf) {
2577       const int roll_back = REDUCE_GF_LENGTH_BY;
2578       // Reduce length only if active_min_gf_interval will be respected later.
2579       if (i - roll_back >= active_min_gf_interval + 1) {
2580         alt_offset = -roll_back;
2581         i -= roll_back;
2582         if (is_final_pass) rc->intervals_till_gf_calculate_due = 0;
2583         p_rc->gf_intervals[p_rc->cur_gf_index] -= roll_back;
2584         reset_fpf_position(&cpi->twopass_frame, start_pos);
2585         accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame,
2586                              start_pos, &gf_stats, &i);
2587       }
2588     }
2589   }
2590 
2591   update_gop_length(rc, p_rc, i, is_final_pass);
2592 
2593   // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
2594   av1_gop_setup_structure(cpi);
2595 
2596   set_gop_bits_boost(cpi, i, is_intra_only, is_final_pass, use_alt_ref,
2597                      alt_offset, start_pos, &gf_stats);
2598 
2599   frame_params->frame_type =
2600       rc->frames_since_key == 0 ? KEY_FRAME : INTER_FRAME;
2601   frame_params->show_frame =
2602       !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
2603         gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
2604 }
2605 
2606 #if CONFIG_THREE_PASS
2607 /*!\brief Define a GF group for the third pass.
2608  *
2609  * \ingroup gf_group_algo
2610  * This function defines the structure of a GF group for the third pass, along
2611  * with various parameters regarding bit-allocation and quality setup based on
2612  * the two-pass bitstream.
2613  * Much of the function still uses the strategies used for the second pass and
2614  * relies on first pass statistics. It is expected that over time these portions
2615  * would be replaced with strategies specific to the third pass.
2616  *
2617  * \param[in]    cpi             Top-level encoder structure
2618  * \param[in]    frame_params    Structure with frame parameters
2619  * \param[in]    is_final_pass   Whether this is the final pass (non-zero)
2620  *                               for the GF group, or a trial run (zero)
2621  *
2622  * \return       0: Success;
2623  *              -1: There are conflicts between the bitstream and the current
2624  *                  config. The values in cpi->ppi->gf_group are also changed.
2625  */
2626 static int define_gf_group_pass3(AV1_COMP *cpi, EncodeFrameParams *frame_params,
2627                                  int is_final_pass) {
2628   if (!cpi->third_pass_ctx) return -1;
2629   AV1_COMMON *const cm = &cpi->common;
2630   RATE_CONTROL *const rc = &cpi->rc;
2631   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2632   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2633   FIRSTPASS_STATS next_frame;
2634   const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
2635   GF_GROUP *gf_group = &cpi->ppi->gf_group;
2636   const GFConfig *const gf_cfg = &oxcf->gf_cfg;
2637   const int f_w = cm->width;
2638   const int f_h = cm->height;
2639   int i;
2640   const int is_intra_only = rc->frames_since_key == 0;
2641 
2642   cpi->ppi->internal_altref_allowed = (gf_cfg->gf_max_pyr_height > 1);
2643 
2644   // Reset the GF group data structures unless this is a key
2645   // frame in which case it will already have been done.
2646   if (!is_intra_only) {
2647     av1_zero(cpi->ppi->gf_group);
2648     cpi->gf_frame_index = 0;
2649   }
2650 
2651   GF_GROUP_STATS gf_stats;
2652   accumulate_gop_stats(cpi, is_intra_only, f_w, f_h, &next_frame, start_pos,
2653                        &gf_stats, &i);
2654 
2655   const int can_disable_arf = !gf_cfg->gf_min_pyr_height;
2656 
2657   // TODO(any): set cpi->ppi->internal_altref_allowed accordingly;
2658 
2659   int use_alt_ref = av1_check_use_arf(cpi->third_pass_ctx);
2660   if (use_alt_ref == 0 && !can_disable_arf) return -1;
2661   if (use_alt_ref) {
2662     gf_group->max_layer_depth_allowed = gf_cfg->gf_max_pyr_height;
2663   } else {
2664     gf_group->max_layer_depth_allowed = 0;
2665   }
2666 
2667   update_gop_length(rc, p_rc, i, is_final_pass);
2668 
2669   // Set up the structure of this Group-Of-Pictures (same as GF_GROUP)
2670   av1_gop_setup_structure(cpi);
2671 
2672   set_gop_bits_boost(cpi, i, is_intra_only, is_final_pass, use_alt_ref, 0,
2673                      start_pos, &gf_stats);
2674 
2675   frame_params->frame_type = cpi->third_pass_ctx->frame_info[0].frame_type;
2676   frame_params->show_frame = cpi->third_pass_ctx->frame_info[0].is_show_frame;
2677   return 0;
2678 }
2679 #endif  // CONFIG_THREE_PASS
2680 
2681 // #define FIXED_ARF_BITS
2682 #ifdef FIXED_ARF_BITS
2683 #define ARF_BITS_FRACTION 0.75
2684 #endif
2685 void av1_gop_bit_allocation(const AV1_COMP *cpi, RATE_CONTROL *const rc,
2686                             GF_GROUP *gf_group, int is_key_frame, int use_arf,
2687                             int64_t gf_group_bits) {
2688   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2689   // Calculate the extra bits to be used for boosted frame(s)
2690 #ifdef FIXED_ARF_BITS
2691   int gf_arf_bits = (int)(ARF_BITS_FRACTION * gf_group_bits);
2692 #else
2693   int gf_arf_bits = calculate_boost_bits(
2694       p_rc->baseline_gf_interval - (rc->frames_since_key == 0), p_rc->gfu_boost,
2695       gf_group_bits);
2696 #endif
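  // gf_arf_bits is the extra share of gf_group_bits reserved for the boosted
  // (ARF / key) frame, derived from gfu_boost in calculate_boost_bits() and
  // then capped for the target level below; allocate_gf_group_bits() spreads
  // the remaining group budget over the other frames.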
2697 
2698   gf_arf_bits = adjust_boost_bits_for_target_level(cpi, rc, gf_arf_bits,
2699                                                    gf_group_bits, 1);
2700 
2701   // Allocate bits to each of the frames in the GF group.
2702   allocate_gf_group_bits(gf_group, p_rc, rc, gf_group_bits, gf_arf_bits,
2703                          is_key_frame, use_arf);
2704 }
2705 
2706 // Minimum % intra coding observed in first pass (1.0 = 100%)
2707 #define MIN_INTRA_LEVEL 0.25
2708 // Minimum ratio between the % of intra coding and inter coding in the first
2709 // pass after discounting neutral blocks (discounting neutral blocks in this
2710 // way helps catch scene cuts in clips with very flat areas or letter box
2711 // format clips with image padding).
2712 #define INTRA_VS_INTER_THRESH 2.0
2713 // Hard threshold where the first pass chooses intra for almost all blocks.
2714 // In such a case even if the frame is not a scene cut coding a key frame
2715 // may be a good option.
2716 #define VERY_LOW_INTER_THRESH 0.05
2717 // Maximum threshold for the relative ratio of intra error score vs best
2718 // inter error score.
2719 #define KF_II_ERR_THRESHOLD 1.9
2720 // In real scene cuts there is almost always a sharp change in the intra
2721 // or inter error score.
2722 #define ERR_CHANGE_THRESHOLD 0.4
2723 // For real scene cuts we expect an improvement in the intra / inter error
2724 // ratio in the next frame.
2725 #define II_IMPROVEMENT_THRESHOLD 3.5
2726 #define KF_II_MAX 128.0
2727 // Intra / Inter threshold very low
2728 #define VERY_LOW_II 1.5
2729 // For clean slide transitions we expect a sharp single frame spike in error.
2730 #define ERROR_SPIKE 5.0
2731 
2732 // Slide show transition detection.
2733 // Tests for the case where there is very low error on either side of the current
2734 // frame but much higher error just for this frame. This can help detect key frames in
2735 // slide shows even where the slides are pictures of different sizes.
2736 // Also requires that intra and inter errors are very similar to help eliminate
2737 // harmful false positives.
2738 // It will not help if the transition is a fade or other multi-frame effect.
2739 static int slide_transition(const FIRSTPASS_STATS *this_frame,
2740                             const FIRSTPASS_STATS *last_frame,
2741                             const FIRSTPASS_STATS *next_frame) {
2742   return (this_frame->intra_error < (this_frame->coded_error * VERY_LOW_II)) &&
2743          (this_frame->coded_error > (last_frame->coded_error * ERROR_SPIKE)) &&
2744          (this_frame->coded_error > (next_frame->coded_error * ERROR_SPIKE));
2745 }
2746 
2747 // Threshold for use of the lagging second reference frame. High second ref
2748 // usage may point to a transient event like a flash or occlusion rather than
2749 // a real scene cut.
2750 // We adapt the threshold based on the number of frames in this key-frame group so
2751 // far.
2752 static double get_second_ref_usage_thresh(int frame_count_so_far) {
2753   const int adapt_upto = 32;
2754   const double min_second_ref_usage_thresh = 0.085;
2755   const double second_ref_usage_thresh_max_delta = 0.035;
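  // The threshold ramps linearly from 0.085 towards 0.12 over the first
  // 'adapt_upto' frames of the key-frame group; e.g. with
  // frame_count_so_far == 16 this gives roughly 0.085 + (16 / 31) * 0.035,
  // or about 0.103.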
2756   if (frame_count_so_far >= adapt_upto) {
2757     return min_second_ref_usage_thresh + second_ref_usage_thresh_max_delta;
2758   }
2759   return min_second_ref_usage_thresh +
2760          ((double)frame_count_so_far / (adapt_upto - 1)) *
2761              second_ref_usage_thresh_max_delta;
2762 }
2763 
2764 static int test_candidate_kf(const FIRSTPASS_INFO *firstpass_info,
2765                              int this_stats_index, int frame_count_so_far,
2766                              enum aom_rc_mode rc_mode, int scenecut_mode,
2767                              int num_mbs) {
2768   const FIRSTPASS_STATS *last_stats =
2769       av1_firstpass_info_peek(firstpass_info, this_stats_index - 1);
2770   const FIRSTPASS_STATS *this_stats =
2771       av1_firstpass_info_peek(firstpass_info, this_stats_index);
2772   const FIRSTPASS_STATS *next_stats =
2773       av1_firstpass_info_peek(firstpass_info, this_stats_index + 1);
2774   if (last_stats == NULL || this_stats == NULL || next_stats == NULL) {
2775     return 0;
2776   }
2777 
2778   int is_viable_kf = 0;
2779   double pcnt_intra = 1.0 - this_stats->pcnt_inter;
2780   double modified_pcnt_inter =
2781       this_stats->pcnt_inter - this_stats->pcnt_neutral;
2782   const double second_ref_usage_thresh =
2783       get_second_ref_usage_thresh(frame_count_so_far);
2784   int frames_to_test_after_candidate_key = SCENE_CUT_KEY_TEST_INTERVAL;
2785   int count_for_tolerable_prediction = 3;
2786 
2787   // We do "-1" because the candidate key is not counted.
2788   int stats_after_this_stats =
2789       av1_firstpass_info_future_count(firstpass_info, this_stats_index) - 1;
2790 
2791   if (scenecut_mode == ENABLE_SCENECUT_MODE_1) {
2792     if (stats_after_this_stats < 3) {
2793       return 0;
2794     } else {
2795       frames_to_test_after_candidate_key = 3;
2796       count_for_tolerable_prediction = 1;
2797     }
2798   }
2799   // Make sure we have enough stats after the candidate key.
2800   frames_to_test_after_candidate_key =
2801       AOMMIN(frames_to_test_after_candidate_key, stats_after_this_stats);
2802 
2803   // Does the frame satisfy the primary criteria of a key frame?
2804   // See above for an explanation of the test criteria.
2805   // If so, then examine how well it predicts subsequent frames.
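  // In outline: both this frame and the next must show low second-reference
  // usage, and at least one of the following must hold: inter usage is very
  // low, the frame looks like a slide-show transition, or the frame is
  // dominated by intra coding with a reasonable intra/inter error ratio and a
  // clear change in error relative to its neighbours.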
2806   if (IMPLIES(rc_mode == AOM_Q, frame_count_so_far >= 3) &&
2807       (this_stats->pcnt_second_ref < second_ref_usage_thresh) &&
2808       (next_stats->pcnt_second_ref < second_ref_usage_thresh) &&
2809       ((this_stats->pcnt_inter < VERY_LOW_INTER_THRESH) ||
2810        slide_transition(this_stats, last_stats, next_stats) ||
2811        ((pcnt_intra > MIN_INTRA_LEVEL) &&
2812         (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
2813         ((this_stats->intra_error /
2814           DOUBLE_DIVIDE_CHECK(this_stats->coded_error)) <
2815          KF_II_ERR_THRESHOLD) &&
2816         ((fabs(last_stats->coded_error - this_stats->coded_error) /
2817               DOUBLE_DIVIDE_CHECK(this_stats->coded_error) >
2818           ERR_CHANGE_THRESHOLD) ||
2819          (fabs(last_stats->intra_error - this_stats->intra_error) /
2820               DOUBLE_DIVIDE_CHECK(this_stats->intra_error) >
2821           ERR_CHANGE_THRESHOLD) ||
2822          ((next_stats->intra_error /
2823            DOUBLE_DIVIDE_CHECK(next_stats->coded_error)) >
2824           II_IMPROVEMENT_THRESHOLD))))) {
2825     int i;
2826     double boost_score = 0.0;
2827     double old_boost_score = 0.0;
2828     double decay_accumulator = 1.0;
2829 
2830     // Examine how well the key frame predicts subsequent frames.
2831     for (i = 1; i <= frames_to_test_after_candidate_key; ++i) {
2832       // Get the next frame details
2833       const FIRSTPASS_STATS *local_next_frame =
2834           av1_firstpass_info_peek(firstpass_info, this_stats_index + i);
2835       double next_iiratio =
2836           (BOOST_FACTOR * local_next_frame->intra_error /
2837            DOUBLE_DIVIDE_CHECK(local_next_frame->coded_error));
2838 
2839       if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;
2840 
2841       // Cumulative effect of decay in prediction quality.
2842       if (local_next_frame->pcnt_inter > 0.85)
2843         decay_accumulator *= local_next_frame->pcnt_inter;
2844       else
2845         decay_accumulator *= (0.85 + local_next_frame->pcnt_inter) / 2.0;
2846 
2847       // Keep a running total.
2848       boost_score += (decay_accumulator * next_iiratio);
2849 
2850       // Test various breakout clauses.
2851       // TODO(any): Test of intra error should be normalized to an MB.
2852       if ((local_next_frame->pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
2853           (((local_next_frame->pcnt_inter - local_next_frame->pcnt_neutral) <
2854             0.20) &&
2855            (next_iiratio < 3.0)) ||
2856           ((boost_score - old_boost_score) < 3.0) ||
2857           (local_next_frame->intra_error < (200.0 / (double)num_mbs))) {
2858         break;
2859       }
2860 
2861       old_boost_score = boost_score;
2862     }
2863 
2864     // If there is tolerable prediction for at least the next 3 frames then
2865     // accept this as a viable key frame, else discard it and move on.
2866     if (boost_score > 30.0 && (i > count_for_tolerable_prediction)) {
2867       is_viable_kf = 1;
2868     } else {
2869       is_viable_kf = 0;
2870     }
2871   }
2872   return is_viable_kf;
2873 }
2874 
2875 #define FRAMES_TO_CHECK_DECAY 8
2876 #define KF_MIN_FRAME_BOOST 80.0
2877 #define KF_MAX_FRAME_BOOST 128.0
2878 #define MIN_KF_BOOST 600  // Minimum boost for non-static KF interval
2879 #define MAX_KF_BOOST 3200
2880 #define MIN_STATIC_KF_BOOST 5400  // Minimum boost for static KF interval
2881 
2882 static int detect_app_forced_key(AV1_COMP *cpi) {
2883   int num_frames_to_app_forced_key = is_forced_keyframe_pending(
2884       cpi->ppi->lookahead, cpi->ppi->lookahead->max_sz, cpi->compressor_stage);
2885   return num_frames_to_app_forced_key;
2886 }
2887 
2888 static int get_projected_kf_boost(AV1_COMP *cpi) {
2889   /*
2890    * If num_stats_used_for_kf_boost >= frames_to_key, then
2891    * all stats needed for prior boost calculation are available.
2892    * Hence projecting the prior boost is not needed in this case.
2893    */
2894   if (cpi->ppi->p_rc.num_stats_used_for_kf_boost >= cpi->rc.frames_to_key)
2895     return cpi->ppi->p_rc.kf_boost;
2896 
2897   // Get the current tpl factor (number of frames = frames_to_key).
2898   double tpl_factor = av1_get_kf_boost_projection_factor(cpi->rc.frames_to_key);
2899   // Get the tpl factor when number of frames = num_stats_used_for_kf_boost.
2900   double tpl_factor_num_stats = av1_get_kf_boost_projection_factor(
2901       cpi->ppi->p_rc.num_stats_used_for_kf_boost);
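  // The boost computed from the available stats is simply scaled by the ratio
  // of the projection factor for the full key-frame interval to the factor for
  // the frames actually observed, i.e.
  //   projected_kf_boost = kf_boost * f(frames_to_key) / f(num_stats_used)
  // where f() is av1_get_kf_boost_projection_factor().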
2902   int projected_kf_boost =
2903       (int)rint((tpl_factor * cpi->ppi->p_rc.kf_boost) / tpl_factor_num_stats);
2904   return projected_kf_boost;
2905 }
2906 
2907 /*!\brief Determine the location of the next key frame
2908  *
2909  * \ingroup gf_group_algo
2910  * This function decides the placement of the next key frame when a
2911  * scenecut is detected or the maximum key frame distance is reached.
2912  *
2913  * \param[in]    cpi              Top-level encoder structure
2914  * \param[in]    firstpass_info   Struct with first pass stats info
2915  * \param[in]    num_frames_to_detect_scenecut Maximum lookahead frames.
2916  * \param[in]    search_start_idx   The start index for searching the key frame.
2917  *                                  Set it to one if we already know the
2918  *                                  current frame is a key frame. Otherwise,
2919  *                                  set it to zero.
2920  *
2921  * \return       Number of frames to the next key frame, including the current frame.
2922  */
2923 static int define_kf_interval(AV1_COMP *cpi,
2924                               const FIRSTPASS_INFO *firstpass_info,
2925                               int num_frames_to_detect_scenecut,
2926                               int search_start_idx) {
2927   const TWO_PASS *const twopass = &cpi->ppi->twopass;
2928   const RATE_CONTROL *const rc = &cpi->rc;
2929   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
2930   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2931   const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
2932   double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
2933   double decay_accumulator = 1.0;
2934   int i = 0, j;
2935   int frames_to_key = search_start_idx;
2936   int frames_since_key = rc->frames_since_key + 1;
2937   int scenecut_detected = 0;
2938 
2939   int num_frames_to_next_key = detect_app_forced_key(cpi);
2940 
2941   if (num_frames_to_detect_scenecut == 0) {
2942     if (num_frames_to_next_key != -1)
2943       return num_frames_to_next_key;
2944     else
2945       return rc->frames_to_key;
2946   }
2947 
2948   if (num_frames_to_next_key != -1)
2949     num_frames_to_detect_scenecut =
2950         AOMMIN(num_frames_to_detect_scenecut, num_frames_to_next_key);
2951 
2952   // Initialize the decay rates for the recent frames to check
2953   for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0;
2954 
2955   i = 0;
2956   const int num_mbs = (oxcf->resize_cfg.resize_mode != RESIZE_NONE)
2957                           ? cpi->initial_mbs
2958                           : cpi->common.mi_params.MBs;
2959   const int future_stats_count =
2960       av1_firstpass_info_future_count(firstpass_info, 0);
2961   while (frames_to_key < future_stats_count &&
2962          frames_to_key < num_frames_to_detect_scenecut) {
2963     // Provided that we are not at the end of the file...
2964     if ((cpi->ppi->p_rc.enable_scenecut_detection > 0) && kf_cfg->auto_key &&
2965         frames_to_key + 1 < future_stats_count) {
2966       double loop_decay_rate;
2967 
2968       // Check for a scene cut.
2969       if (frames_since_key >= kf_cfg->key_freq_min) {
2970         scenecut_detected = test_candidate_kf(
2971             &twopass->firstpass_info, frames_to_key, frames_since_key,
2972             oxcf->rc_cfg.mode, cpi->ppi->p_rc.enable_scenecut_detection,
2973             num_mbs);
2974         if (scenecut_detected) {
2975           break;
2976         }
2977       }
2978 
2979       // How fast is the prediction quality decaying?
2980       const FIRSTPASS_STATS *next_stats =
2981           av1_firstpass_info_peek(firstpass_info, frames_to_key + 1);
2982       loop_decay_rate = get_prediction_decay_rate(next_stats);
2983 
2984       // Here we want to know something about the recent past, rather than
2985       // (as elsewhere) the decay in prediction quality since the last GF
2986       // or KF.
2987       recent_loop_decay[i % FRAMES_TO_CHECK_DECAY] = loop_decay_rate;
2988       decay_accumulator = 1.0;
2989       for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j)
2990         decay_accumulator *= recent_loop_decay[j];
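      // decay_accumulator is now the product of the decay rates of the last
      // FRAMES_TO_CHECK_DECAY frames, i.e. a rough measure of how predictable
      // the recent past has been (close to 1.0 for static content).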
2991 
2992       // Special check for transition or high motion followed by a
2993       // static scene.
2994       if (frames_since_key >= kf_cfg->key_freq_min) {
2995         scenecut_detected = detect_transition_to_still(
2996             firstpass_info, frames_to_key + 1, rc->min_gf_interval, i,
2997             kf_cfg->key_freq_max - i, loop_decay_rate, decay_accumulator);
2998         if (scenecut_detected) {
2999           // In the case of a transition followed by a static scene, the key frame
3000           // could be a good predictor for the following frames, therefore we
3001           // do not use an arf.
3002           p_rc->use_arf_in_this_kf_group = 0;
3003           break;
3004         }
3005       }
3006 
3007       // Step on to the next frame.
3008       ++frames_to_key;
3009       ++frames_since_key;
3010 
3011       // If we don't have a real key frame within the next two
3012       // key_freq_max intervals then break out of the loop.
3013       if (frames_to_key >= 2 * kf_cfg->key_freq_max) {
3014         break;
3015       }
3016     } else {
3017       ++frames_to_key;
3018       ++frames_since_key;
3019     }
3020     ++i;
3021   }
3022   if (cpi->ppi->lap_enabled && !scenecut_detected)
3023     frames_to_key = num_frames_to_next_key;
3024 
3025   return frames_to_key;
3026 }
3027 
3028 static double get_kf_group_avg_error(TWO_PASS *twopass,
3029                                      TWO_PASS_FRAME *twopass_frame,
3030                                      const FIRSTPASS_STATS *first_frame,
3031                                      const FIRSTPASS_STATS *start_position,
3032                                      int frames_to_key) {
3033   FIRSTPASS_STATS cur_frame = *first_frame;
3034   int num_frames, i;
3035   double kf_group_avg_error = 0.0;
3036 
3037   reset_fpf_position(twopass_frame, start_position);
3038 
3039   for (i = 0; i < frames_to_key; ++i) {
3040     kf_group_avg_error += cur_frame.coded_error;
3041     if (EOF == input_stats(twopass, twopass_frame, &cur_frame)) break;
3042   }
3043   num_frames = i + 1;
3044   num_frames = AOMMIN(num_frames, frames_to_key);
3045   kf_group_avg_error = kf_group_avg_error / num_frames;
3046 
3047   return (kf_group_avg_error);
3048 }
3049 
3050 static int64_t get_kf_group_bits(AV1_COMP *cpi, double kf_group_err,
3051                                  double kf_group_avg_error) {
3052   RATE_CONTROL *const rc = &cpi->rc;
3053   TWO_PASS *const twopass = &cpi->ppi->twopass;
3054   int64_t kf_group_bits;
3055   if (cpi->ppi->lap_enabled) {
3056     kf_group_bits = (int64_t)rc->frames_to_key * rc->avg_frame_bandwidth;
3057     if (cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap) {
3058       double vbr_corpus_complexity_lap =
3059           cpi->oxcf.rc_cfg.vbr_corpus_complexity_lap / 10.0;
3060       /* Get the average corpus complexity of the frame */
3061       kf_group_bits = (int64_t)(kf_group_bits * (kf_group_avg_error /
3062                                                  vbr_corpus_complexity_lap));
3063     }
3064   } else {
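    // In the two-pass case the group simply receives the share of the
    // remaining bit budget proportional to its share of the remaining modified
    // error; for example, a group holding roughly 10% of modified_error_left
    // gets roughly 10% of bits_left.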
3065     kf_group_bits = (int64_t)(twopass->bits_left *
3066                               (kf_group_err / twopass->modified_error_left));
3067   }
3068 
3069   return kf_group_bits;
3070 }
3071 
3072 static int calc_avg_stats(AV1_COMP *cpi, FIRSTPASS_STATS *avg_frame_stat) {
3073   RATE_CONTROL *const rc = &cpi->rc;
3074   TWO_PASS *const twopass = &cpi->ppi->twopass;
3075   FIRSTPASS_STATS cur_frame;
3076   av1_zero(cur_frame);
3077   int num_frames = 0;
3078   // Accumulate total stat using available number of stats.
3079   for (num_frames = 0; num_frames < (rc->frames_to_key - 1); ++num_frames) {
3080     if (EOF == input_stats(twopass, &cpi->twopass_frame, &cur_frame)) break;
3081     av1_accumulate_stats(avg_frame_stat, &cur_frame);
3082   }
3083 
3084   if (num_frames < 2) {
3085     return num_frames;
3086   }
3087   // Average the total stat
3088   avg_frame_stat->weight = avg_frame_stat->weight / num_frames;
3089   avg_frame_stat->intra_error = avg_frame_stat->intra_error / num_frames;
3090   avg_frame_stat->frame_avg_wavelet_energy =
3091       avg_frame_stat->frame_avg_wavelet_energy / num_frames;
3092   avg_frame_stat->coded_error = avg_frame_stat->coded_error / num_frames;
3093   avg_frame_stat->sr_coded_error = avg_frame_stat->sr_coded_error / num_frames;
3094   avg_frame_stat->pcnt_inter = avg_frame_stat->pcnt_inter / num_frames;
3095   avg_frame_stat->pcnt_motion = avg_frame_stat->pcnt_motion / num_frames;
3096   avg_frame_stat->pcnt_second_ref =
3097       avg_frame_stat->pcnt_second_ref / num_frames;
3098   avg_frame_stat->pcnt_neutral = avg_frame_stat->pcnt_neutral / num_frames;
3099   avg_frame_stat->intra_skip_pct = avg_frame_stat->intra_skip_pct / num_frames;
3100   avg_frame_stat->inactive_zone_rows =
3101       avg_frame_stat->inactive_zone_rows / num_frames;
3102   avg_frame_stat->inactive_zone_cols =
3103       avg_frame_stat->inactive_zone_cols / num_frames;
3104   avg_frame_stat->MVr = avg_frame_stat->MVr / num_frames;
3105   avg_frame_stat->mvr_abs = avg_frame_stat->mvr_abs / num_frames;
3106   avg_frame_stat->MVc = avg_frame_stat->MVc / num_frames;
3107   avg_frame_stat->mvc_abs = avg_frame_stat->mvc_abs / num_frames;
3108   avg_frame_stat->MVrv = avg_frame_stat->MVrv / num_frames;
3109   avg_frame_stat->MVcv = avg_frame_stat->MVcv / num_frames;
3110   avg_frame_stat->mv_in_out_count =
3111       avg_frame_stat->mv_in_out_count / num_frames;
3112   avg_frame_stat->new_mv_count = avg_frame_stat->new_mv_count / num_frames;
3113   avg_frame_stat->count = avg_frame_stat->count / num_frames;
3114   avg_frame_stat->duration = avg_frame_stat->duration / num_frames;
3115 
3116   return num_frames;
3117 }
3118 
3119 static double get_kf_boost_score(AV1_COMP *cpi, double kf_raw_err,
3120                                  double *zero_motion_accumulator,
3121                                  double *sr_accumulator, int use_avg_stat) {
3122   RATE_CONTROL *const rc = &cpi->rc;
3123   TWO_PASS *const twopass = &cpi->ppi->twopass;
3124   FRAME_INFO *const frame_info = &cpi->frame_info;
3125   FIRSTPASS_STATS frame_stat;
3126   av1_zero(frame_stat);
3127   int i = 0, num_stat_used = 0;
3128   double boost_score = 0.0;
3129   const double kf_max_boost =
3130       cpi->oxcf.rc_cfg.mode == AOM_Q
3131           ? AOMMIN(AOMMAX(rc->frames_to_key * 2.0, KF_MIN_FRAME_BOOST),
3132                    KF_MAX_FRAME_BOOST)
3133           : KF_MAX_FRAME_BOOST;
3134 
3135   // Calculate the average using available number of stats.
3136   if (use_avg_stat) num_stat_used = calc_avg_stats(cpi, &frame_stat);
3137 
3138   for (i = num_stat_used; i < (rc->frames_to_key - 1); ++i) {
3139     if (!use_avg_stat &&
3140         EOF == input_stats(twopass, &cpi->twopass_frame, &frame_stat))
3141       break;
3142 
3143     // Monitor for static sections.
3144     // For the first frame in kf group, the second ref indicator is invalid.
3145     if (i > 0) {
3146       *zero_motion_accumulator =
3147           AOMMIN(*zero_motion_accumulator, get_zero_motion_factor(&frame_stat));
3148     } else {
3149       *zero_motion_accumulator = frame_stat.pcnt_inter - frame_stat.pcnt_motion;
3150     }
3151 
3152     // Not all frames in the group are necessarily used in calculating boost.
3153     if ((*sr_accumulator < (kf_raw_err * 1.50)) &&
3154         (i <= rc->max_gf_interval * 2)) {
3155       double frame_boost;
3156       double zm_factor;
3157 
3158       // Factor 0.75-1.25 based on how much of frame is static.
3159       zm_factor = (0.75 + (*zero_motion_accumulator / 2.0));
3160 
3161       if (i < 2) *sr_accumulator = 0.0;
3162       frame_boost =
3163           calc_kf_frame_boost(&cpi->ppi->p_rc, frame_info, &frame_stat,
3164                               sr_accumulator, kf_max_boost);
3165       boost_score += frame_boost * zm_factor;
3166     }
3167   }
3168   return boost_score;
3169 }
3170 
3171 /*!\brief Interval (in seconds) to clip key-frame distance to in LAP.
3172  */
3173 #define MAX_KF_BITS_INTERVAL_SINGLE_PASS 5
3174 
3175 /*!\brief Determine the next key frame group
3176  *
3177  * \ingroup gf_group_algo
3178  * This function decides the placement of the next key frame, and
3179  * calculates the bit allocation of the KF group and the keyframe itself.
3180  *
3181  * \param[in]    cpi              Top-level encoder structure
3182  * \param[in]    this_frame       Pointer to first pass stats
3183  */
3184 static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
3185   RATE_CONTROL *const rc = &cpi->rc;
3186   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
3187   TWO_PASS *const twopass = &cpi->ppi->twopass;
3188   GF_GROUP *const gf_group = &cpi->ppi->gf_group;
3189   FRAME_INFO *const frame_info = &cpi->frame_info;
3190   AV1_COMMON *const cm = &cpi->common;
3191   CurrentFrame *const current_frame = &cm->current_frame;
3192   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
3193   const KeyFrameCfg *const kf_cfg = &oxcf->kf_cfg;
3194   const FIRSTPASS_STATS first_frame = *this_frame;
3195   FIRSTPASS_STATS next_frame;
3196   const FIRSTPASS_INFO *firstpass_info = &twopass->firstpass_info;
3197   av1_zero(next_frame);
3198 
3199   rc->frames_since_key = 0;
3200   // Use arfs if possible.
3201   p_rc->use_arf_in_this_kf_group = is_altref_enabled(
3202       oxcf->gf_cfg.lag_in_frames, oxcf->gf_cfg.enable_auto_arf);
3203 
3204   // Reset the GF group data structures.
3205   av1_zero(*gf_group);
3206   cpi->gf_frame_index = 0;
3207 
3208   // KF is always a GF so clear frames till next gf counter.
3209   rc->frames_till_gf_update_due = 0;
3210 
3211   if (has_no_stats_stage(cpi)) {
3212     int num_frames_to_app_forced_key = detect_app_forced_key(cpi);
3213     p_rc->this_key_frame_forced =
3214         current_frame->frame_number != 0 && rc->frames_to_key == 0;
3215     if (num_frames_to_app_forced_key != -1)
3216       rc->frames_to_key = num_frames_to_app_forced_key;
3217     else
3218       rc->frames_to_key = AOMMAX(1, kf_cfg->key_freq_max);
3219     correct_frames_to_key(cpi);
3220     p_rc->kf_boost = DEFAULT_KF_BOOST;
3221     gf_group->update_type[0] = KF_UPDATE;
3222     return;
3223   }
3224   int i;
3225   const FIRSTPASS_STATS *const start_position = cpi->twopass_frame.stats_in;
3226   int kf_bits = 0;
3227   double zero_motion_accumulator = 1.0;
3228   double boost_score = 0.0;
3229   double kf_raw_err = 0.0;
3230   double kf_mod_err = 0.0;
3231   double sr_accumulator = 0.0;
3232   double kf_group_avg_error = 0.0;
3233   int frames_to_key, frames_to_key_clipped = INT_MAX;
3234   int64_t kf_group_bits_clipped = INT64_MAX;
3235 
3236   // Is this a forced key frame by interval.
3237   p_rc->this_key_frame_forced = p_rc->next_key_frame_forced;
3238 
3239   twopass->kf_group_bits = 0;        // Total bits available to kf group
3240   twopass->kf_group_error_left = 0;  // Group modified error score.
3241 
3242   kf_raw_err = this_frame->intra_error;
3243   kf_mod_err = calculate_modified_err(frame_info, twopass, oxcf, this_frame);
3244 
3245   // We assume the current frame is a key frame and we are looking for the next
3246   // key frame. Therefore search_start_idx = 1
3247   frames_to_key = define_kf_interval(cpi, firstpass_info, kf_cfg->key_freq_max,
3248                                      /*search_start_idx=*/1);
3249 
3250   if (frames_to_key != -1) {
3251     rc->frames_to_key = AOMMIN(kf_cfg->key_freq_max, frames_to_key);
3252   } else {
3253     rc->frames_to_key = kf_cfg->key_freq_max;
3254   }
3255 
3256   if (cpi->ppi->lap_enabled) correct_frames_to_key(cpi);
3257 
3258   // If there is a max kf interval set by the user we must obey it.
3259   // We already break out of the loop above at 2x max.
3260   // This code centers the extra kf if the actual natural interval
3261   // is between 1x and 2x.
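  // For example, with key_freq_max = 60 and a natural interval of 90 frames,
  // frames_to_key is halved to 45 so two even KF groups are coded rather than
  // groups of 60 and 30.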
3262   if (kf_cfg->auto_key && rc->frames_to_key > kf_cfg->key_freq_max) {
3263     FIRSTPASS_STATS tmp_frame = first_frame;
3264 
3265     rc->frames_to_key /= 2;
3266 
3267     // Reset to the start of the group.
3268     reset_fpf_position(&cpi->twopass_frame, start_position);
3269     // Rescan to get the correct error data for the forced kf group.
3270     for (i = 0; i < rc->frames_to_key; ++i) {
3271       if (EOF == input_stats(twopass, &cpi->twopass_frame, &tmp_frame)) break;
3272     }
3273     p_rc->next_key_frame_forced = 1;
3274   } else if ((cpi->twopass_frame.stats_in ==
3275                   twopass->stats_buf_ctx->stats_in_end &&
3276               is_stat_consumption_stage_twopass(cpi)) ||
3277              rc->frames_to_key >= kf_cfg->key_freq_max) {
3278     p_rc->next_key_frame_forced = 1;
3279   } else {
3280     p_rc->next_key_frame_forced = 0;
3281   }
3282 
3283   double kf_group_err = 0;
3284   for (i = 0; i < rc->frames_to_key; ++i) {
3285     const FIRSTPASS_STATS *this_stats =
3286         av1_firstpass_info_peek(&twopass->firstpass_info, i);
3287     if (this_stats != NULL) {
3288       // Accumulate kf group error.
3289       kf_group_err += calculate_modified_err_new(
3290           frame_info, &firstpass_info->total_stats, this_stats,
3291           oxcf->rc_cfg.vbrbias, twopass->modified_error_min,
3292           twopass->modified_error_max);
3293       ++p_rc->num_stats_used_for_kf_boost;
3294     }
3295   }
3296 
3297   // Calculate the number of bits that should be assigned to the kf group.
3298   if ((twopass->bits_left > 0 && twopass->modified_error_left > 0.0) ||
3299       (cpi->ppi->lap_enabled && oxcf->rc_cfg.mode != AOM_Q)) {
3300     // Maximum number of bits for a single normal frame (not key frame).
3301     const int max_bits = frame_max_bits(rc, oxcf);
3302 
3303     // Maximum number of bits allocated to the key frame group.
3304     int64_t max_grp_bits;
3305 
3306     if (oxcf->rc_cfg.vbr_corpus_complexity_lap) {
3307       kf_group_avg_error =
3308           get_kf_group_avg_error(twopass, &cpi->twopass_frame, &first_frame,
3309                                  start_position, rc->frames_to_key);
3310     }
3311 
3312     // Default allocation based on bits left and relative
3313     // complexity of the section.
3314     twopass->kf_group_bits =
3315         get_kf_group_bits(cpi, kf_group_err, kf_group_avg_error);
3316     // Clip based on maximum per frame rate defined by the user.
3317     max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
3318     if (twopass->kf_group_bits > max_grp_bits)
3319       twopass->kf_group_bits = max_grp_bits;
3320   } else {
3321     twopass->kf_group_bits = 0;
3322   }
3323   twopass->kf_group_bits = AOMMAX(0, twopass->kf_group_bits);
3324 
3325   if (cpi->ppi->lap_enabled) {
3326     // In the case of single pass based on LAP, frames_to_key may have an
3327     // inaccurate value, and hence should be clipped to an appropriate
3328     // interval.
3329     frames_to_key_clipped =
3330         (int)(MAX_KF_BITS_INTERVAL_SINGLE_PASS * cpi->framerate);
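    // For example, at 30 fps this clips the interval used for the bit
    // calculations below to 5 * 30 = 150 frames.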
3331 
3332     // This variable calculates the bits allocated to kf_group with a clipped
3333     // frames_to_key.
3334     if (rc->frames_to_key > frames_to_key_clipped) {
3335       kf_group_bits_clipped =
3336           (int64_t)((double)twopass->kf_group_bits * frames_to_key_clipped /
3337                     rc->frames_to_key);
3338     }
3339   }
3340 
3341   // Reset the first pass file position.
3342   reset_fpf_position(&cpi->twopass_frame, start_position);
3343 
3344   // Scan through the kf group collating various stats used to determine
3345   // how many bits to spend on it.
3346   boost_score = get_kf_boost_score(cpi, kf_raw_err, &zero_motion_accumulator,
3347                                    &sr_accumulator, 0);
3348   reset_fpf_position(&cpi->twopass_frame, start_position);
3349   // Store the zero motion percentage
3350   twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
3351 
3352   // Calculate a section intra ratio used in setting max loop filter.
3353   twopass->section_intra_rating = calculate_section_intra_ratio(
3354       start_position, twopass->stats_buf_ctx->stats_in_end, rc->frames_to_key);
3355 
3356   p_rc->kf_boost = (int)boost_score;
3357 
3358   if (cpi->ppi->lap_enabled) {
3359     if (oxcf->rc_cfg.mode == AOM_Q) {
3360       p_rc->kf_boost = get_projected_kf_boost(cpi);
3361     } else {
3362       // TODO(any): Explore using average frame stats for AOM_Q as well.
3363       boost_score = get_kf_boost_score(
3364           cpi, kf_raw_err, &zero_motion_accumulator, &sr_accumulator, 1);
3365       reset_fpf_position(&cpi->twopass_frame, start_position);
3366       p_rc->kf_boost += (int)boost_score;
3367     }
3368   }
3369 
3370   // Special case for static / slide show content but don't apply
3371   // if the kf group is very short.
3372   if ((zero_motion_accumulator > STATIC_KF_GROUP_FLOAT_THRESH) &&
3373       (rc->frames_to_key > 8)) {
3374     p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_STATIC_KF_BOOST);
3375   } else {
3376     // Apply various clamps for min and max boost
3377     p_rc->kf_boost = AOMMAX(p_rc->kf_boost, (rc->frames_to_key * 3));
3378     p_rc->kf_boost = AOMMAX(p_rc->kf_boost, MIN_KF_BOOST);
3379 #ifdef STRICT_RC
3380     p_rc->kf_boost = AOMMIN(p_rc->kf_boost, MAX_KF_BOOST);
3381 #endif
3382   }
3383 
3384   // Work out how many bits to allocate for the key frame itself.
3385   // In case of LAP enabled for VBR, if the frames_to_key value is
3386   // very high, we calculate the bits based on a clipped value of
3387   // frames_to_key.
3388   kf_bits = calculate_boost_bits(
3389       AOMMIN(rc->frames_to_key, frames_to_key_clipped) - 1, p_rc->kf_boost,
3390       AOMMIN(twopass->kf_group_bits, kf_group_bits_clipped));
3391   // printf("kf boost = %d kf_bits = %d kf_zeromotion_pct = %d\n",
3392   // p_rc->kf_boost,
3393   //        kf_bits, twopass->kf_zeromotion_pct);
3394   kf_bits = adjust_boost_bits_for_target_level(cpi, rc, kf_bits,
3395                                                twopass->kf_group_bits, 0);
3396 
3397   twopass->kf_group_bits -= kf_bits;
3398 
3399   // Save the bits to spend on the key frame.
3400   gf_group->bit_allocation[0] = kf_bits;
3401   gf_group->update_type[0] = KF_UPDATE;
3402 
3403   // Note the total error score of the kf group minus the key frame itself.
3404   if (cpi->ppi->lap_enabled)
3405     // As we don't have enough stats to know the actual error of the group,
3406     // we assume the complexity of each frame to be equal to 1, and set the
3407     // error as the number of frames in the group (minus the keyframe).
3408     twopass->kf_group_error_left = (double)(rc->frames_to_key - 1);
3409   else
3410     twopass->kf_group_error_left = kf_group_err - kf_mod_err;
3411 
3412   // Adjust the count of total modified error left.
3413   // The count of bits left is adjusted elsewhere based on real coded frame
3414   // sizes.
3415   twopass->modified_error_left -= kf_group_err;
3416 }
3417 
3418 #define ARF_STATS_OUTPUT 0
3419 #if ARF_STATS_OUTPUT
3420 unsigned int arf_count = 0;
3421 #endif
3422 
3423 static int get_section_target_bandwidth(AV1_COMP *cpi) {
3424   AV1_COMMON *const cm = &cpi->common;
3425   CurrentFrame *const current_frame = &cm->current_frame;
3426   RATE_CONTROL *const rc = &cpi->rc;
3427   TWO_PASS *const twopass = &cpi->ppi->twopass;
3428   int64_t section_target_bandwidth;
3429   const int frames_left = (int)(twopass->stats_buf_ctx->total_stats->count -
3430                                 current_frame->frame_number);
3431   if (cpi->ppi->lap_enabled)
3432     section_target_bandwidth = rc->avg_frame_bandwidth;
3433   else {
3434     section_target_bandwidth = twopass->bits_left / frames_left;
3435     section_target_bandwidth = AOMMIN(section_target_bandwidth, INT_MAX);
3436   }
3437   return (int)section_target_bandwidth;
3438 }
3439 
3440 static inline void set_twopass_params_based_on_fp_stats(
3441     AV1_COMP *cpi, const FIRSTPASS_STATS *this_frame_ptr) {
3442   if (this_frame_ptr == NULL) return;
3443 
3444   TWO_PASS_FRAME *twopass_frame = &cpi->twopass_frame;
3445   // The multiplication by 256 reverses a scaling factor of (>> 8)
3446   // applied when combining MB error values for the frame.
3447   twopass_frame->mb_av_energy = log1p(this_frame_ptr->intra_error);
3448 
3449   const FIRSTPASS_STATS *const total_stats =
3450       cpi->ppi->twopass.stats_buf_ctx->total_stats;
3451   if (is_fp_wavelet_energy_invalid(total_stats) == 0) {
3452     twopass_frame->frame_avg_haar_energy =
3453         log1p(this_frame_ptr->frame_avg_wavelet_energy);
3454   }
3455 
3456   // Set the frame content type flag.
3457   if (this_frame_ptr->intra_skip_pct >= FC_ANIMATION_THRESH)
3458     twopass_frame->fr_content_type = FC_GRAPHICS_ANIMATION;
3459   else
3460     twopass_frame->fr_content_type = FC_NORMAL;
3461 }
3462 
3463 static void process_first_pass_stats(AV1_COMP *cpi,
3464                                      FIRSTPASS_STATS *this_frame) {
3465   AV1_COMMON *const cm = &cpi->common;
3466   CurrentFrame *const current_frame = &cm->current_frame;
3467   RATE_CONTROL *const rc = &cpi->rc;
3468   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
3469   TWO_PASS *const twopass = &cpi->ppi->twopass;
3470   FIRSTPASS_STATS *total_stats = twopass->stats_buf_ctx->total_stats;
3471 
3472   if (cpi->oxcf.rc_cfg.mode != AOM_Q && current_frame->frame_number == 0 &&
3473       cpi->gf_frame_index == 0 && total_stats &&
3474       twopass->stats_buf_ctx->total_left_stats) {
3475     if (cpi->ppi->lap_enabled) {
3476       /*
3477        * Accumulate total_stats using available limited number of stats,
3478        * and assign it to total_left_stats.
3479        */
3480       *twopass->stats_buf_ctx->total_left_stats = *total_stats;
3481     }
3482     // Special case code for first frame.
3483     const int section_target_bandwidth = get_section_target_bandwidth(cpi);
3484     const double section_length =
3485         twopass->stats_buf_ctx->total_left_stats->count;
3486     const double section_error =
3487         twopass->stats_buf_ctx->total_left_stats->coded_error / section_length;
3488     const double section_intra_skip =
3489         twopass->stats_buf_ctx->total_left_stats->intra_skip_pct /
3490         section_length;
3491     const double section_inactive_zone =
3492         (twopass->stats_buf_ctx->total_left_stats->inactive_zone_rows * 2) /
3493         ((double)cm->mi_params.mb_rows * section_length);
3494     const int tmp_q = get_twopass_worst_quality(
3495         cpi, section_error, section_intra_skip + section_inactive_zone,
3496         section_target_bandwidth);
3497 
3498     rc->active_worst_quality = tmp_q;
3499     rc->ni_av_qi = tmp_q;
3500     p_rc->last_q[INTER_FRAME] = tmp_q;
3501     p_rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->seq_params->bit_depth);
3502     p_rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
3503     p_rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.rc_cfg.best_allowed_q) / 2;
3504     p_rc->avg_frame_qindex[KEY_FRAME] = p_rc->last_q[KEY_FRAME];
3505   }
3506 
3507   if (cpi->twopass_frame.stats_in < twopass->stats_buf_ctx->stats_in_end) {
3508     *this_frame = *cpi->twopass_frame.stats_in;
3509     ++cpi->twopass_frame.stats_in;
3510   }
3511   set_twopass_params_based_on_fp_stats(cpi, this_frame);
3512 }
3513 
3514 static void setup_target_rate(AV1_COMP *cpi) {
3515   RATE_CONTROL *const rc = &cpi->rc;
3516   GF_GROUP *const gf_group = &cpi->ppi->gf_group;
3517 
3518   int target_rate = gf_group->bit_allocation[cpi->gf_frame_index];
3519 
3520   if (has_no_stats_stage(cpi)) {
3521     av1_rc_set_frame_target(cpi, target_rate, cpi->common.width,
3522                             cpi->common.height);
3523   }
3524 
3525   rc->base_frame_target = target_rate;
3526 }
3527 
3528 void av1_mark_flashes(FIRSTPASS_STATS *first_stats,
3529                       FIRSTPASS_STATS *last_stats) {
3530   FIRSTPASS_STATS *this_stats = first_stats, *next_stats;
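  // A frame is marked as a flash when the following frame is predicted better
  // from its second (older) reference than from this frame (high
  // pcnt_second_ref), which suggests this frame is a transient event such as
  // a flash.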
3531   while (this_stats < last_stats - 1) {
3532     next_stats = this_stats + 1;
3533     if (next_stats->pcnt_second_ref > next_stats->pcnt_inter &&
3534         next_stats->pcnt_second_ref >= 0.5) {
3535       this_stats->is_flash = 1;
3536     } else {
3537       this_stats->is_flash = 0;
3538     }
3539     this_stats = next_stats;
3540   }
3541   // We always treat the last one as a non-flash frame.
3542   if (last_stats - 1 >= first_stats) {
3543     (last_stats - 1)->is_flash = 0;
3544   }
3545 }
3546 
3547 // Smooth out the noise variance so it is more stable.
3548 // Returns 0 on success, -1 on memory allocation failure.
3549 // TODO(bohanli): Use a better low-pass filter than averaging
3550 static int smooth_filter_noise(FIRSTPASS_STATS *first_stats,
3551                                FIRSTPASS_STATS *last_stats) {
3552   int len = (int)(last_stats - first_stats);
3553   double *smooth_noise = aom_malloc(len * sizeof(*smooth_noise));
3554   if (!smooth_noise) return -1;
3555 
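  // Each frame's noise_var is replaced by the average over a window of
  // (2 * HALF_FILT_LEN + 1) neighbouring frames, clamped at the sequence ends
  // and skipping frames marked as flashes; if no valid neighbour is found the
  // original value is kept.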
3556   for (int i = 0; i < len; i++) {
3557     double total_noise = 0;
3558     double total_wt = 0;
3559     for (int j = -HALF_FILT_LEN; j <= HALF_FILT_LEN; j++) {
3560       int idx = AOMMIN(AOMMAX(i + j, 0), len - 1);
3561       if (first_stats[idx].is_flash) continue;
3562 
3563       total_noise += first_stats[idx].noise_var;
3564       total_wt += 1.0;
3565     }
3566     if (total_wt > 0.01) {
3567       total_noise /= total_wt;
3568     } else {
3569       total_noise = first_stats[i].noise_var;
3570     }
3571     smooth_noise[i] = total_noise;
3572   }
3573 
3574   for (int i = 0; i < len; i++) {
3575     first_stats[i].noise_var = smooth_noise[i];
3576   }
3577 
3578   aom_free(smooth_noise);
3579   return 0;
3580 }
3581 
3582 // Estimate the noise variance of each frame from the first pass stats
3583 void av1_estimate_noise(FIRSTPASS_STATS *first_stats,
3584                         FIRSTPASS_STATS *last_stats,
3585                         struct aom_internal_error_info *error_info) {
3586   FIRSTPASS_STATS *this_stats, *next_stats;
3587   double C1, C2, C3, noise;
3588   for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
3589     this_stats->noise_var = 0.0;
3590     // Flashes tend to have a high correlation of innovations, so ignore them.
3591     if (this_stats->is_flash || (this_stats - 1)->is_flash ||
3592         (this_stats - 2)->is_flash)
3593       continue;
3594 
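    // Heuristically, C1 * C2 / C3 below approximates the part of the previous
    // frame's intra error that is explained by temporal correlation (built
    // from the first- and second-order prediction errors), so whatever is left
    // over is attributed to source noise.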
3595     C1 = (this_stats - 1)->intra_error *
3596          (this_stats->intra_error - this_stats->coded_error);
3597     C2 = (this_stats - 2)->intra_error *
3598          ((this_stats - 1)->intra_error - (this_stats - 1)->coded_error);
3599     C3 = (this_stats - 2)->intra_error *
3600          (this_stats->intra_error - this_stats->sr_coded_error);
3601     if (C1 <= 0 || C2 <= 0 || C3 <= 0) continue;
3602     C1 = sqrt(C1);
3603     C2 = sqrt(C2);
3604     C3 = sqrt(C3);
3605 
3606     noise = (this_stats - 1)->intra_error - C1 * C2 / C3;
3607     noise = AOMMAX(noise, 0.01);
3608     this_stats->noise_var = noise;
3609   }
3610 
3611   // Copy noise from the neighbor if the noise value is not trustworthy
3612   for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
3613     if (this_stats->is_flash || (this_stats - 1)->is_flash ||
3614         (this_stats - 2)->is_flash)
3615       continue;
3616     if (this_stats->noise_var < 1.0) {
3617       int found = 0;
3618       // TODO(bohanli): consider expanding to two directions at the same time
3619       for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
3620         if (next_stats->is_flash || (next_stats - 1)->is_flash ||
3621             (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
3622           continue;
3623         found = 1;
3624         this_stats->noise_var = next_stats->noise_var;
3625         break;
3626       }
3627       if (found) continue;
3628       for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
3629            next_stats--) {
3630         if (next_stats->is_flash || (next_stats - 1)->is_flash ||
3631             (next_stats - 2)->is_flash || next_stats->noise_var < 1.0)
3632           continue;
3633         this_stats->noise_var = next_stats->noise_var;
3634         break;
3635       }
3636     }
3637   }
3638 
3639   // Copy the noise from a neighboring frame if this frame is a flash.
3640   for (this_stats = first_stats + 2; this_stats < last_stats; this_stats++) {
3641     if (this_stats->is_flash || (this_stats - 1)->is_flash ||
3642         (this_stats - 2)->is_flash) {
3643       int found = 0;
3644       for (next_stats = this_stats + 1; next_stats < last_stats; next_stats++) {
3645         if (next_stats->is_flash || (next_stats - 1)->is_flash ||
3646             (next_stats - 2)->is_flash)
3647           continue;
3648         found = 1;
3649         this_stats->noise_var = next_stats->noise_var;
3650         break;
3651       }
3652       if (found) continue;
3653       for (next_stats = this_stats - 1; next_stats >= first_stats + 2;
3654            next_stats--) {
3655         if (next_stats->is_flash || (next_stats - 1)->is_flash ||
3656             (next_stats - 2)->is_flash)
3657           continue;
3658         this_stats->noise_var = next_stats->noise_var;
3659         break;
3660       }
3661     }
3662   }
3663 
3664   // For the first 2 frames, copy the noise from the third frame.
3665   for (this_stats = first_stats;
3666        this_stats < first_stats + 2 && (first_stats + 2) < last_stats;
3667        this_stats++) {
3668     this_stats->noise_var = (first_stats + 2)->noise_var;
3669   }
3670 
3671   if (smooth_filter_noise(first_stats, last_stats) == -1) {
3672     aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
3673                        "Error allocating buffers in smooth_filter_noise()");
3674   }
3675 }
3676 
3677 // Estimate correlation coefficient of each frame with its previous frame.
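// In effect (ignoring the 0.001 floors and the final clipping), the value
// computed below simplifies to
//   cor_coeff(t) = sqrt(I(t-1) * (I(t) - C(t)))
//                  / sqrt((I(t-1) - noise(t)) * (I(t) - noise(t)))
// with I = intra_error, C = coded_error and noise = noise_var, i.e. a
// normalized correlation between frame t and frame t-1 after removing the
// estimated noise floor from both energies.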
3678 void av1_estimate_coeff(FIRSTPASS_STATS *first_stats,
3679                         FIRSTPASS_STATS *last_stats) {
3680   FIRSTPASS_STATS *this_stats;
3681   for (this_stats = first_stats + 1; this_stats < last_stats; this_stats++) {
3682     const double C =
3683         sqrt(AOMMAX((this_stats - 1)->intra_error *
3684                         (this_stats->intra_error - this_stats->coded_error),
3685                     0.001));
3686     const double cor_coeff =
3687         C /
3688         AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var, 0.001);
3689 
3690     this_stats->cor_coeff =
3691         cor_coeff *
3692         sqrt(AOMMAX((this_stats - 1)->intra_error - this_stats->noise_var,
3693                     0.001) /
3694              AOMMAX(this_stats->intra_error - this_stats->noise_var, 0.001));
3695     // clip correlation coefficient.
3696     this_stats->cor_coeff = AOMMIN(AOMMAX(this_stats->cor_coeff, 0), 1);
3697   }
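  // The first frame has no predecessor to correlate with; give it a neutral
  // correlation coefficient of 1.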
3698   first_stats->cor_coeff = 1.0;
3699 }
3700 
3701 void av1_get_second_pass_params(AV1_COMP *cpi,
3702                                 EncodeFrameParams *const frame_params,
3703                                 unsigned int frame_flags) {
3704   RATE_CONTROL *const rc = &cpi->rc;
3705   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
3706   TWO_PASS *const twopass = &cpi->ppi->twopass;
3707   GF_GROUP *const gf_group = &cpi->ppi->gf_group;
3708   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
3709 
3710   if (cpi->use_ducky_encode &&
3711       cpi->ducky_encode_info.frame_info.gop_mode == DUCKY_ENCODE_GOP_MODE_RCL) {
3712     frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
3713     frame_params->show_frame =
3714         !(gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
3715           gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE);
3716     if (cpi->gf_frame_index == 0) {
3717       av1_tf_info_reset(&cpi->ppi->tf_info);
3718       av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
3719     }
3720     return;
3721   }
3722 
3723   const FIRSTPASS_STATS *const start_pos = cpi->twopass_frame.stats_in;
3724   int update_total_stats = 0;
3725 
3726   if (is_stat_consumption_stage(cpi) && !cpi->twopass_frame.stats_in) return;
3727 
3728   // Check forced key frames.
3729   const int frames_to_next_forced_key = detect_app_forced_key(cpi);
3730   if (frames_to_next_forced_key == 0) {
3731     rc->frames_to_key = 0;
3732     frame_flags &= FRAMEFLAGS_KEY;
3733   } else if (frames_to_next_forced_key > 0 &&
3734              frames_to_next_forced_key < rc->frames_to_key) {
3735     rc->frames_to_key = frames_to_next_forced_key;
3736   }
3737 
3738   assert(cpi->twopass_frame.stats_in != NULL);
3739   const int update_type = gf_group->update_type[cpi->gf_frame_index];
3740   frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
3741 
3742   if (cpi->gf_frame_index < gf_group->size && !(frame_flags & FRAMEFLAGS_KEY)) {
3743     assert(cpi->gf_frame_index < gf_group->size);
3744 
3745     setup_target_rate(cpi);
3746 
3747     // If this is an ARF frame then we don't want to read the stats file or
3748     // advance the input pointer, as we already have what we need.
3749     if (update_type == ARF_UPDATE || update_type == INTNL_ARF_UPDATE) {
3750       const FIRSTPASS_STATS *const this_frame_ptr =
3751           read_frame_stats(twopass, &cpi->twopass_frame,
3752                            gf_group->arf_src_offset[cpi->gf_frame_index]);
3753       set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
3754       return;
3755     }
3756   }
3757 
3758   if (oxcf->rc_cfg.mode == AOM_Q)
3759     rc->active_worst_quality = oxcf->rc_cfg.cq_level;
3760 
3761   if (cpi->gf_frame_index == gf_group->size) {
3762     if (cpi->ppi->lap_enabled && cpi->ppi->p_rc.enable_scenecut_detection) {
3763       const int num_frames_to_detect_scenecut = MAX_GF_LENGTH_LAP + 1;
3764       const int frames_to_key = define_kf_interval(
3765           cpi, &twopass->firstpass_info, num_frames_to_detect_scenecut,
3766           /*search_start_idx=*/0);
3767       if (frames_to_key != -1)
3768         rc->frames_to_key = AOMMIN(rc->frames_to_key, frames_to_key);
3769     }
3770   }
3771 
3772   FIRSTPASS_STATS this_frame;
3773   av1_zero(this_frame);
3774   // Process the first pass stats for this frame if we are consuming stats.
3775   if (is_stat_consumption_stage(cpi)) {
3776     if (cpi->gf_frame_index < gf_group->size || rc->frames_to_key == 0) {
3777       process_first_pass_stats(cpi, &this_frame);
3778       update_total_stats = 1;
3779     }
3780   } else {
3781     rc->active_worst_quality = oxcf->rc_cfg.cq_level;
3782   }
3783 
3784   // Keyframe and section processing.
3785   FIRSTPASS_STATS this_frame_copy;
3786   this_frame_copy = this_frame;
3787   if (rc->frames_to_key <= 0) {
3788     assert(rc->frames_to_key == 0);
3789     // Define next KF group and assign bits to it.
3790     frame_params->frame_type = KEY_FRAME;
3791     find_next_key_frame(cpi, &this_frame);
3792     this_frame = this_frame_copy;
3793   }
3794 
3795   if (rc->frames_to_fwd_kf <= 0)
3796     rc->frames_to_fwd_kf = oxcf->kf_cfg.fwd_kf_dist;
3797 
3798   // Define a new GF/ARF group. (Should always enter here for key frames).
3799   if (cpi->gf_frame_index == gf_group->size) {
3800     av1_tf_info_reset(&cpi->ppi->tf_info);
3801 #if CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
3802     vbr_rc_reset_gop_data(&cpi->vbr_rc_info);
3803 #endif  // CONFIG_BITRATE_ACCURACY && !CONFIG_THREE_PASS
3804     int max_gop_length =
3805         (oxcf->gf_cfg.lag_in_frames >= 32)
3806             ? AOMMIN(MAX_GF_INTERVAL, oxcf->gf_cfg.lag_in_frames -
3807                                           oxcf->algo_cfg.arnr_max_frames / 2)
3808             : MAX_GF_LENGTH_LAP;
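    // With a large look-ahead buffer the GOP is capped at MAX_GF_INTERVAL, but
    // also kept short enough (lag - arnr_max_frames / 2) that, presumably, the
    // ARF's temporal-filter window still fits inside the available lag;
    // otherwise the lower look-ahead (LAP) limit is used.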
3809 
3810     // Handle forward key frame when enabled.
3811     if (oxcf->kf_cfg.fwd_kf_dist > 0)
3812       max_gop_length = AOMMIN(rc->frames_to_fwd_kf + 1, max_gop_length);
3813 
3814     // Use the provided gop size in low delay setting
3815     if (oxcf->gf_cfg.lag_in_frames == 0) max_gop_length = rc->max_gf_interval;
3816 
3817     // Limit the max gop length for the last gop in 1 pass setting.
3818     max_gop_length = AOMMIN(max_gop_length, rc->frames_to_key);
3819 
3820     // Identify regions if needed.
3821     // TODO(bohanli): identify regions for all stats available.
3822     if (rc->frames_since_key == 0 || rc->frames_since_key == 1 ||
3823         (p_rc->frames_till_regions_update - rc->frames_since_key <
3824              rc->frames_to_key &&
3825          p_rc->frames_till_regions_update - rc->frames_since_key <
3826              max_gop_length + 1)) {
3827       // How many frames we can analyze, starting from this frame.
3828       int rest_frames =
3829           AOMMIN(rc->frames_to_key, MAX_FIRSTPASS_ANALYSIS_FRAMES);
3830       rest_frames =
3831           AOMMIN(rest_frames, (int)(twopass->stats_buf_ctx->stats_in_end -
3832                                     cpi->twopass_frame.stats_in +
3833                                     (rc->frames_since_key == 0)));
3834       p_rc->frames_till_regions_update = rest_frames;
3835 
3836       int ret;
3837       if (cpi->ppi->lap_enabled) {
3838         av1_mark_flashes(twopass->stats_buf_ctx->stats_in_start,
3839                          twopass->stats_buf_ctx->stats_in_end);
3840         av1_estimate_noise(twopass->stats_buf_ctx->stats_in_start,
3841                            twopass->stats_buf_ctx->stats_in_end,
3842                            cpi->common.error);
3843         av1_estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
3844                            twopass->stats_buf_ctx->stats_in_end);
3845         ret = identify_regions(cpi->twopass_frame.stats_in, rest_frames,
3846                                (rc->frames_since_key == 0), p_rc->regions,
3847                                &p_rc->num_regions);
3848       } else {
3849         ret = identify_regions(
3850             cpi->twopass_frame.stats_in - (rc->frames_since_key == 0),
3851             rest_frames, 0, p_rc->regions, &p_rc->num_regions);
3852       }
3853       if (ret == -1) {
3854         aom_internal_error(cpi->common.error, AOM_CODEC_MEM_ERROR,
3855                            "Error allocating buffers in identify_regions");
3856       }
3857     }
3858 
3859     int cur_region_idx =
3860         find_regions_index(p_rc->regions, p_rc->num_regions,
3861                            rc->frames_since_key - p_rc->regions_offset);
3862     if ((cur_region_idx >= 0 &&
3863          p_rc->regions[cur_region_idx].type == SCENECUT_REGION) ||
3864         rc->frames_since_key == 0) {
3865       // If we start from a scenecut, then the last GOP's arf boost is not
3866       // needed for this GOP.
3867       cpi->ppi->gf_state.arf_gf_boost_lst = 0;
3868     }
3869 
3870     int need_gf_len = 1;
3871 #if CONFIG_THREE_PASS
3872     if (cpi->third_pass_ctx && oxcf->pass == AOM_RC_THIRD_PASS) {
3873       // Set up the bitstream file to read from.
3874       if (!cpi->third_pass_ctx->input_file_name && oxcf->two_pass_output) {
3875         cpi->third_pass_ctx->input_file_name = oxcf->two_pass_output;
3876       }
3877       av1_open_second_pass_log(cpi, 1);
3878       THIRD_PASS_GOP_INFO *gop_info = &cpi->third_pass_ctx->gop_info;
3879       // Read in GOP information from the second pass file.
3880       av1_read_second_pass_gop_info(cpi->second_pass_log_stream, gop_info,
3881                                     cpi->common.error);
3882 #if CONFIG_BITRATE_ACCURACY
3883       TPL_INFO *tpl_info;
3884       AOM_CHECK_MEM_ERROR(cpi->common.error, tpl_info,
3885                           aom_malloc(sizeof(*tpl_info)));
3886       av1_read_tpl_info(tpl_info, cpi->second_pass_log_stream,
3887                         cpi->common.error);
3888       aom_free(tpl_info);
3889 #if CONFIG_THREE_PASS
3890       // TODO(angiebird): Put this part into a func
3891       cpi->vbr_rc_info.cur_gop_idx++;
3892 #endif  // CONFIG_THREE_PASS
3893 #endif  // CONFIG_BITRATE_ACCURACY
3894       // Read in third_pass_info from the bitstream.
3895       av1_set_gop_third_pass(cpi->third_pass_ctx);
3896       // Read in per-frame info from second-pass encoding
3897       av1_read_second_pass_per_frame_info(
3898           cpi->second_pass_log_stream, cpi->third_pass_ctx->frame_info,
3899           gop_info->num_frames, cpi->common.error);
3900 
3901       p_rc->cur_gf_index = 0;
3902       p_rc->gf_intervals[0] = cpi->third_pass_ctx->gop_info.gf_length;
3903       need_gf_len = 0;
3904     }
3905 #endif  // CONFIG_THREE_PASS
3906 
3907     if (need_gf_len) {
3908       // If we cannot obtain the GF group length from the second pass file.
3909       // TODO(jingning): Resolve the redundant calls here.
3910       if (rc->intervals_till_gf_calculate_due == 0 || 1) {
3911         calculate_gf_length(cpi, max_gop_length, MAX_NUM_GF_INTERVALS);
3912       }
3913 
3914       if (max_gop_length > 16 && oxcf->algo_cfg.enable_tpl_model &&
3915           oxcf->gf_cfg.lag_in_frames >= 32 &&
3916           cpi->sf.tpl_sf.gop_length_decision_method != 3) {
3917         int this_idx = rc->frames_since_key +
3918                        p_rc->gf_intervals[p_rc->cur_gf_index] -
3919                        p_rc->regions_offset - 1;
3920         int this_region =
3921             find_regions_index(p_rc->regions, p_rc->num_regions, this_idx);
3922         int next_region =
3923             find_regions_index(p_rc->regions, p_rc->num_regions, this_idx + 1);
3924         // TODO(angiebird): Figure out why this_region and next_region are -1 in
3925         // unit tests like AltRefFramePresenceTestLarge (aomedia:3134).
3926         int is_last_scenecut =
3927             p_rc->gf_intervals[p_rc->cur_gf_index] >= rc->frames_to_key ||
3928             (this_region != -1 &&
3929              p_rc->regions[this_region].type == SCENECUT_REGION) ||
3930             (next_region != -1 &&
3931              p_rc->regions[next_region].type == SCENECUT_REGION);
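        // In other words, this GF interval is treated as the last one before a
        // cut if it reaches the next key frame, or if the frame it ends on (or
        // the one after it) falls in a detected scenecut region.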
3932 
3933         int ori_gf_int = p_rc->gf_intervals[p_rc->cur_gf_index];
3934 
3935         if (p_rc->gf_intervals[p_rc->cur_gf_index] > 16 &&
3936             rc->min_gf_interval <= 16) {
3937           // calculate_gf_length() was previously called with
3938           // max_gop_length = 32 and look-ahead gf intervals.
3939           define_gf_group(cpi, frame_params, 0);
3940           av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
3941           this_frame = this_frame_copy;
3942 
3943           if (is_shorter_gf_interval_better(cpi, frame_params)) {
3944             // A shorter gf interval is better.
3945             // TODO(jingning): Remove redundant computations here.
3946             max_gop_length = 16;
3947             calculate_gf_length(cpi, max_gop_length, 1);
3948             if (is_last_scenecut &&
3949                 (ori_gf_int - p_rc->gf_intervals[p_rc->cur_gf_index] < 4)) {
3950               p_rc->gf_intervals[p_rc->cur_gf_index] = ori_gf_int;
3951             }
3952           }
3953         }
3954       }
3955     }
3956 
3957     define_gf_group(cpi, frame_params, 0);
3958 
3959     if (gf_group->update_type[cpi->gf_frame_index] != ARF_UPDATE &&
3960         rc->frames_since_key > 0)
3961       process_first_pass_stats(cpi, &this_frame);
3962 
3963     define_gf_group(cpi, frame_params, 1);
3964 
3965 #if CONFIG_THREE_PASS
3966     // Write GOP info if needed for the third pass. Per-frame info is written
3967     // after each frame is encoded.
3968     av1_write_second_pass_gop_info(cpi);
3969 #endif  // CONFIG_THREE_PASS
3970 
3971     av1_tf_info_filtering(&cpi->ppi->tf_info, cpi, gf_group);
3972 
3973     rc->frames_till_gf_update_due = p_rc->baseline_gf_interval;
3974     assert(cpi->gf_frame_index == 0);
3975 #if ARF_STATS_OUTPUT
3976     {
3977       FILE *fpfile;
3978       fpfile = fopen("arf.stt", "a");
3979       ++arf_count;
3980       fprintf(fpfile, "%10d %10d %10d %10d %10d\n",
3981               cpi->common.current_frame.frame_number,
3982               rc->frames_till_gf_update_due, cpi->ppi->p_rc.kf_boost, arf_count,
3983               p_rc->gfu_boost);
3984 
3985       fclose(fpfile);
3986     }
3987 #endif
3988   }
3989   assert(cpi->gf_frame_index < gf_group->size);
3990 
3991   if (gf_group->update_type[cpi->gf_frame_index] == ARF_UPDATE ||
3992       gf_group->update_type[cpi->gf_frame_index] == INTNL_ARF_UPDATE) {
3993     reset_fpf_position(&cpi->twopass_frame, start_pos);
3994 
3995     const FIRSTPASS_STATS *const this_frame_ptr =
3996         read_frame_stats(twopass, &cpi->twopass_frame,
3997                          gf_group->arf_src_offset[cpi->gf_frame_index]);
3998     set_twopass_params_based_on_fp_stats(cpi, this_frame_ptr);
3999   } else {
4000     // Back up this frame's stats for updating total stats during post encode.
4001     cpi->twopass_frame.this_frame = update_total_stats ? start_pos : NULL;
4002   }
4003 
4004   frame_params->frame_type = gf_group->frame_type[cpi->gf_frame_index];
4005   setup_target_rate(cpi);
4006 }
4007 
4008 void av1_init_second_pass(AV1_COMP *cpi) {
4009   const AV1EncoderConfig *const oxcf = &cpi->oxcf;
4010   TWO_PASS *const twopass = &cpi->ppi->twopass;
4011   FRAME_INFO *const frame_info = &cpi->frame_info;
4012   double frame_rate;
4013   FIRSTPASS_STATS *stats;
4014 
4015   if (!twopass->stats_buf_ctx->stats_in_end) return;
4016 
4017   av1_mark_flashes(twopass->stats_buf_ctx->stats_in_start,
4018                    twopass->stats_buf_ctx->stats_in_end);
4019   av1_estimate_noise(twopass->stats_buf_ctx->stats_in_start,
4020                      twopass->stats_buf_ctx->stats_in_end, cpi->common.error);
4021   av1_estimate_coeff(twopass->stats_buf_ctx->stats_in_start,
4022                      twopass->stats_buf_ctx->stats_in_end);
4023 
4024   stats = twopass->stats_buf_ctx->total_stats;
4025 
4026   *stats = *twopass->stats_buf_ctx->stats_in_end;
4027   *twopass->stats_buf_ctx->total_left_stats = *stats;
4028 
4029   frame_rate = 10000000.0 * stats->count / stats->duration;
4030   // Each frame can have a different duration, as the frame rate in the source
4031   // isn't guaranteed to be constant. The frame rate prior to the first frame
4032   // encoded in the second pass is a guess. However, the sum duration is not.
4033   // It is calculated based on the actual durations of all frames from the
4034   // first pass.
4035   av1_new_framerate(cpi, frame_rate);
4036   twopass->bits_left =
4037       (int64_t)(stats->duration * oxcf->rc_cfg.target_bandwidth / 10000000.0);
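  // A quick sanity check on the arithmetic: frame_rate = 1e7 * count /
  // duration implies duration / 1e7 is the clip length in seconds, so
  // bits_left is simply seconds * target bits-per-second. For example, 300
  // frames at 30 fps give duration = 1e8, and a 2,000,000 bps target yields
  // bits_left of roughly 2e7 bits.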
4038 
4039 #if CONFIG_BITRATE_ACCURACY
4040   av1_vbr_rc_init(&cpi->vbr_rc_info, twopass->bits_left,
4041                   (int)round(stats->count));
4042 #endif
4043 
4044 #if CONFIG_RATECTRL_LOG
4045   rc_log_init(&cpi->rc_log);
4046 #endif
4047 
4048   // This variable monitors how far behind the second ref update is lagging.
4049   twopass->sr_update_lag = 1;
4050 
4051   // Scan the first pass file and calculate a modified total error based upon
4052   // the bias/power function used to allocate bits.
4053   {
4054     const double avg_error =
4055         stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
4056     const FIRSTPASS_STATS *s = cpi->twopass_frame.stats_in;
4057     double modified_error_total = 0.0;
4058     twopass->modified_error_min =
4059         (avg_error * oxcf->rc_cfg.vbrmin_section) / 100;
4060     twopass->modified_error_max =
4061         (avg_error * oxcf->rc_cfg.vbrmax_section) / 100;
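    // vbrmin_section / vbrmax_section are percentages of the average per-frame
    // coded error, so the per-frame modified error computed below is, in
    // effect, confined to that band around the average.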
4062     while (s < twopass->stats_buf_ctx->stats_in_end) {
4063       modified_error_total +=
4064           calculate_modified_err(frame_info, twopass, oxcf, s);
4065       ++s;
4066     }
4067     twopass->modified_error_left = modified_error_total;
4068   }
4069 
4070   // Reset the vbr bits off target counters
4071   cpi->ppi->p_rc.vbr_bits_off_target = 0;
4072   cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;
4073 
4074   cpi->ppi->p_rc.rate_error_estimate = 0;
4075 
4076   // Static sequence monitor variables.
4077   twopass->kf_zeromotion_pct = 100;
4078   twopass->last_kfgroup_zeromotion_pct = 100;
4079 
4080   // Initialize bits per macro_block estimate correction factor.
4081   twopass->bpm_factor = 1.0;
4082   // Initialize actual and target bits counters for ARF groups so that
4083   // at the start we have a neutral bpm adjustment.
4084   twopass->rolling_arf_group_target_bits = 1;
4085   twopass->rolling_arf_group_actual_bits = 1;
4086 }
4087 
4088 void av1_init_single_pass_lap(AV1_COMP *cpi) {
4089   TWO_PASS *const twopass = &cpi->ppi->twopass;
4090 
4091   if (!twopass->stats_buf_ctx->stats_in_end) return;
4092 
4093   // This variable monitors how far behind the second ref update is lagging.
4094   twopass->sr_update_lag = 1;
4095 
4096   twopass->bits_left = 0;
4097   twopass->modified_error_min = 0.0;
4098   twopass->modified_error_max = 0.0;
4099   twopass->modified_error_left = 0.0;
4100 
4101   // Reset the vbr bits off target counters
4102   cpi->ppi->p_rc.vbr_bits_off_target = 0;
4103   cpi->ppi->p_rc.vbr_bits_off_target_fast = 0;
4104 
4105   cpi->ppi->p_rc.rate_error_estimate = 0;
4106 
4107   // Static sequence monitor variables.
4108   twopass->kf_zeromotion_pct = 100;
4109   twopass->last_kfgroup_zeromotion_pct = 100;
4110 
4111   // Initialize bits per macro_block estimate correction factor.
4112   twopass->bpm_factor = 1.0;
4113   // Initialize actual and target bits counters for ARF groups so that
4114   // at the start we have a neutral bpm adjustment.
4115   twopass->rolling_arf_group_target_bits = 1;
4116   twopass->rolling_arf_group_actual_bits = 1;
4117 }
4118 
4119 #define MINQ_ADJ_LIMIT 48
4120 #define MINQ_ADJ_LIMIT_CQ 20
4121 #define HIGH_UNDERSHOOT_RATIO 2
4122 void av1_twopass_postencode_update(AV1_COMP *cpi) {
4123   TWO_PASS *const twopass = &cpi->ppi->twopass;
4124   RATE_CONTROL *const rc = &cpi->rc;
4125   PRIMARY_RATE_CONTROL *const p_rc = &cpi->ppi->p_rc;
4126   const RateControlCfg *const rc_cfg = &cpi->oxcf.rc_cfg;
4127 
4128   // Increment the stats_in pointer.
4129   if (is_stat_consumption_stage(cpi) &&
4130       !(cpi->use_ducky_encode && cpi->ducky_encode_info.frame_info.gop_mode ==
4131                                      DUCKY_ENCODE_GOP_MODE_RCL) &&
4132       (cpi->gf_frame_index < cpi->ppi->gf_group.size ||
4133        rc->frames_to_key == 0)) {
4134     const int update_type = cpi->ppi->gf_group.update_type[cpi->gf_frame_index];
4135     if (update_type != ARF_UPDATE && update_type != INTNL_ARF_UPDATE) {
4136       FIRSTPASS_STATS this_frame;
4137       assert(cpi->twopass_frame.stats_in >
4138              twopass->stats_buf_ctx->stats_in_start);
4139       --cpi->twopass_frame.stats_in;
4140       if (cpi->ppi->lap_enabled) {
4141         input_stats_lap(twopass, &cpi->twopass_frame, &this_frame);
4142       } else {
4143         input_stats(twopass, &cpi->twopass_frame, &this_frame);
4144       }
4145     } else if (cpi->ppi->lap_enabled) {
4146       cpi->twopass_frame.stats_in = twopass->stats_buf_ctx->stats_in_start;
4147     }
4148   }
4149 
4150   // VBR correction is done through rc->vbr_bits_off_target. Based on the
4151   // sign of this value, a limited % adjustment is made to the target rate
4152   // of subsequent frames, to try and push it back towards 0. This method
4153   // is designed to prevent extreme behaviour at the end of a clip
4154   // or group of frames.
4155   p_rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size;
4156   twopass->bits_left = AOMMAX(twopass->bits_left - rc->base_frame_target, 0);
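  // As an illustration: a frame targeted at 40000 bits that actually costs
  // 55000 bits subtracts 15000 from vbr_bits_off_target, and later frame
  // targets are then trimmed slightly until the running debt is paid back.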
4157 
4158   if (cpi->do_update_vbr_bits_off_target_fast) {
4159     // Subtract current frame's fast_extra_bits.
4160     p_rc->vbr_bits_off_target_fast -= rc->frame_level_fast_extra_bits;
4161     rc->frame_level_fast_extra_bits = 0;
4162   }
4163 
4164   // Target vs actual bits for this arf group.
4165   if (twopass->rolling_arf_group_target_bits >
4166       INT_MAX - rc->base_frame_target) {
4167     twopass->rolling_arf_group_target_bits = INT_MAX;
4168   } else {
4169     twopass->rolling_arf_group_target_bits += rc->base_frame_target;
4170   }
4171   twopass->rolling_arf_group_actual_bits += rc->projected_frame_size;
4172 
4173   // Calculate the pct rc error.
4174   if (p_rc->total_actual_bits) {
4175     p_rc->rate_error_estimate =
4176         (int)((p_rc->vbr_bits_off_target * 100) / p_rc->total_actual_bits);
4177     p_rc->rate_error_estimate = clamp(p_rc->rate_error_estimate, -100, 100);
4178   } else {
4179     p_rc->rate_error_estimate = 0;
4180   }
4181 
4182 #if CONFIG_FPMT_TEST
4183   /* The variables temp_vbr_bits_off_target, temp_bits_left,
4184    * temp_rolling_arf_group_target_bits, temp_rolling_arf_group_actual_bits
4185    * and temp_rate_error_estimate are introduced for quality simulation
4186    * purposes; they retain the values from before the parallel encode frames.
4187    * The variables are updated based on the update flag.
4188    *
4189    * If show_existing_frames occur between parallel frames, the temp state is
4190    * not updated so that it is retained. */
4191   const int simulate_parallel_frame =
4192       cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE;
4193   int show_existing_between_parallel_frames =
4194       (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] ==
4195            INTNL_OVERLAY_UPDATE &&
4196        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2);
4197 
4198   if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
4199       simulate_parallel_frame) {
4200     cpi->ppi->p_rc.temp_vbr_bits_off_target = p_rc->vbr_bits_off_target;
4201     cpi->ppi->p_rc.temp_bits_left = twopass->bits_left;
4202     cpi->ppi->p_rc.temp_rolling_arf_group_target_bits =
4203         twopass->rolling_arf_group_target_bits;
4204     cpi->ppi->p_rc.temp_rolling_arf_group_actual_bits =
4205         twopass->rolling_arf_group_actual_bits;
4206     cpi->ppi->p_rc.temp_rate_error_estimate = p_rc->rate_error_estimate;
4207   }
4208 #endif
4209   // Update the active best quality pyramid.
4210   if (!rc->is_src_frame_alt_ref) {
4211     const int pyramid_level =
4212         cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index];
4213     int i;
4214     for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i) {
4215       p_rc->active_best_quality[i] = cpi->common.quant_params.base_qindex;
4216 #if CONFIG_TUNE_VMAF
4217       if (cpi->vmaf_info.original_qindex != -1 &&
4218           (cpi->oxcf.tune_cfg.tuning >= AOM_TUNE_VMAF_WITH_PREPROCESSING &&
4219            cpi->oxcf.tune_cfg.tuning <= AOM_TUNE_VMAF_NEG_MAX_GAIN)) {
4220         p_rc->active_best_quality[i] = cpi->vmaf_info.original_qindex;
4221       }
4222 #endif
4223     }
4224   }
4225 
4226 #if 0
4227   {
4228     AV1_COMMON *cm = &cpi->common;
4229     FILE *fpfile;
4230     fpfile = fopen("details.stt", "a");
4231     fprintf(fpfile,
4232             "%10d %10d %10d %10" PRId64 " %10" PRId64
4233             " %10d %10d %10d %10.4lf %10.4lf %10.4lf %10.4lf\n",
4234             cm->current_frame.frame_number, rc->base_frame_target,
4235             rc->projected_frame_size, rc->total_actual_bits,
4236             rc->vbr_bits_off_target, p_rc->rate_error_estimate,
4237             twopass->rolling_arf_group_target_bits,
4238             twopass->rolling_arf_group_actual_bits,
4239             (double)twopass->rolling_arf_group_actual_bits /
4240                 (double)twopass->rolling_arf_group_target_bits,
4241             twopass->bpm_factor,
4242             av1_convert_qindex_to_q(cpi->common.quant_params.base_qindex,
4243                                     cm->seq_params->bit_depth),
4244             av1_convert_qindex_to_q(rc->active_worst_quality,
4245                                     cm->seq_params->bit_depth));
4246     fclose(fpfile);
4247   }
4248 #endif
4249 
4250   if (cpi->common.current_frame.frame_type != KEY_FRAME) {
4251     twopass->kf_group_bits -= rc->base_frame_target;
4252     twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
4253   }
4254   twopass->kf_group_bits = AOMMAX(twopass->kf_group_bits, 0);
4255 
4256   // If the rate control is drifting consider adjustment to min or maxq.
4257   if ((rc_cfg->mode != AOM_Q) && !cpi->rc.is_src_frame_alt_ref &&
4258       (p_rc->rolling_target_bits > 0)) {
4259     int minq_adj_limit;
4260     int maxq_adj_limit;
4261     minq_adj_limit =
4262         (rc_cfg->mode == AOM_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
4263     maxq_adj_limit = (rc->worst_quality - rc->active_worst_quality);
4264 
4265     // Undershoot
4266     if ((rc_cfg->under_shoot_pct < 100) &&
4267         (p_rc->rolling_actual_bits < p_rc->rolling_target_bits)) {
4268       int pct_error =
4269           ((p_rc->rolling_target_bits - p_rc->rolling_actual_bits) * 100) /
4270           p_rc->rolling_target_bits;
4271 
4272       if ((pct_error >= rc_cfg->under_shoot_pct) &&
4273           (p_rc->rate_error_estimate > 0)) {
4274         twopass->extend_minq += 1;
4275         twopass->extend_maxq -= 1;
4276       }
4277 
4278       // Overshoot
4279     } else if ((rc_cfg->over_shoot_pct < 100) &&
4280                (p_rc->rolling_actual_bits > p_rc->rolling_target_bits)) {
4281       int pct_error =
4282           ((p_rc->rolling_actual_bits - p_rc->rolling_target_bits) * 100) /
4283           p_rc->rolling_target_bits;
4284 
4285       pct_error = clamp(pct_error, 0, 100);
4286       if ((pct_error >= rc_cfg->over_shoot_pct) &&
4287           (p_rc->rate_error_estimate < 0)) {
4288         twopass->extend_maxq += 1;
4289         twopass->extend_minq -= 1;
4290       }
4291     }
4292     twopass->extend_minq =
4293         clamp(twopass->extend_minq, -minq_adj_limit, minq_adj_limit);
4294     twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit);
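    // The net effect is a slow walk: each frame of sustained, confirmed
    // undershoot nudges extend_minq up (and extend_maxq down) by one step, the
    // intent being that later frames can spend the spare bits; sustained
    // overshoot does the opposite. Both walks are bounded by the clamps above.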
4295 
4296     // If there is a big and unexpected undershoot then feed the extra
4297     // bits back in quickly. One situation where this may happen is if a
4298     // frame is unexpectedly almost perfectly predicted by the ARF or GF
4299     // but not very well predicted by the previous frame.
4300     if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) {
4301       int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
4302       if (rc->projected_frame_size < fast_extra_thresh) {
4303         p_rc->vbr_bits_off_target_fast +=
4304             fast_extra_thresh - rc->projected_frame_size;
4305         p_rc->vbr_bits_off_target_fast =
4306             AOMMIN(p_rc->vbr_bits_off_target_fast,
4307                    (4 * (int64_t)rc->avg_frame_bandwidth));
4308       }
4309     }
4310 
4311 #if CONFIG_FPMT_TEST
4312     if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
4313         simulate_parallel_frame) {
4314       cpi->ppi->p_rc.temp_vbr_bits_off_target_fast =
4315           p_rc->vbr_bits_off_target_fast;
4316       cpi->ppi->p_rc.temp_extend_minq = twopass->extend_minq;
4317       cpi->ppi->p_rc.temp_extend_maxq = twopass->extend_maxq;
4318     }
4319 #endif
4320   }
4321 
4322   // Update the frame probabilities obtained from parallel encode frames
4323   FrameProbInfo *const frame_probs = &cpi->ppi->frame_probs;
4324 #if CONFIG_FPMT_TEST
4325   /* The variable temp_active_best_quality is introduced only for quality
4326    * simulation purposes; it retains the value from before the parallel
4327    * encode frames. The variable is updated based on the update flag.
4328    *
4329    * If show_existing_frames occur between parallel frames, the temp state is
4330    * not updated so that it is retained. */
4331   if (cpi->do_frame_data_update && !show_existing_between_parallel_frames &&
4332       simulate_parallel_frame) {
4333     int i;
4334     const int pyramid_level =
4335         cpi->ppi->gf_group.layer_depth[cpi->gf_frame_index];
4336     if (!rc->is_src_frame_alt_ref) {
4337       for (i = pyramid_level; i <= MAX_ARF_LAYERS; ++i)
4338         cpi->ppi->p_rc.temp_active_best_quality[i] =
4339             p_rc->active_best_quality[i];
4340     }
4341   }
4342 
4343   // Update the frame probabilities obtained from parallel encode frames
4344   FrameProbInfo *const temp_frame_probs_simulation =
4345       simulate_parallel_frame ? &cpi->ppi->temp_frame_probs_simulation
4346                               : frame_probs;
4347   FrameProbInfo *const temp_frame_probs =
4348       simulate_parallel_frame ? &cpi->ppi->temp_frame_probs : NULL;
4349 #endif
4350   int i, j, loop;
4351   // Sequentially average into temp_frame_probs_simulation, which holds the
4352   // probabilities from the last frame before the parallel encode.
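  // Each probability row is refreshed as a simple running average: the new
  // value is (old + measured) >> 1, and whatever remains of the fixed row
  // total (1024 for tx types, 1536 for switchable filters) is folded into the
  // j == 0 entry so the row keeps summing to that total.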
4353   for (loop = 0; loop <= cpi->num_frame_recode; loop++) {
4354     // Sequentially update tx_type_probs
4355     if (cpi->do_update_frame_probs_txtype[loop] &&
4356         (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)) {
4357       const FRAME_UPDATE_TYPE update_type =
4358           get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
4359       for (i = 0; i < TX_SIZES_ALL; i++) {
4360         int left = 1024;
4361 
4362         for (j = TX_TYPES - 1; j >= 0; j--) {
4363           const int new_prob =
4364               cpi->frame_new_probs[loop].tx_type_probs[update_type][i][j];
4365 #if CONFIG_FPMT_TEST
4366           int prob =
4367               (temp_frame_probs_simulation->tx_type_probs[update_type][i][j] +
4368                new_prob) >>
4369               1;
4370           left -= prob;
4371           if (j == 0) prob += left;
4372           temp_frame_probs_simulation->tx_type_probs[update_type][i][j] = prob;
4373 #else
4374           int prob =
4375               (frame_probs->tx_type_probs[update_type][i][j] + new_prob) >> 1;
4376           left -= prob;
4377           if (j == 0) prob += left;
4378           frame_probs->tx_type_probs[update_type][i][j] = prob;
4379 #endif
4380         }
4381       }
4382     }
4383 
4384     // Sequentially update obmc_probs
4385     if (cpi->do_update_frame_probs_obmc[loop] &&
4386         cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
4387       const FRAME_UPDATE_TYPE update_type =
4388           get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
4389 
4390       for (i = 0; i < BLOCK_SIZES_ALL; i++) {
4391         const int new_prob =
4392             cpi->frame_new_probs[loop].obmc_probs[update_type][i];
4393 #if CONFIG_FPMT_TEST
4394         temp_frame_probs_simulation->obmc_probs[update_type][i] =
4395             (temp_frame_probs_simulation->obmc_probs[update_type][i] +
4396              new_prob) >>
4397             1;
4398 #else
4399         frame_probs->obmc_probs[update_type][i] =
4400             (frame_probs->obmc_probs[update_type][i] + new_prob) >> 1;
4401 #endif
4402       }
4403     }
4404 
4405     // Sequentially update warped_probs
4406     if (cpi->do_update_frame_probs_warp[loop] &&
4407         cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
4408       const FRAME_UPDATE_TYPE update_type =
4409           get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
4410       const int new_prob = cpi->frame_new_probs[loop].warped_probs[update_type];
4411 #if CONFIG_FPMT_TEST
4412       temp_frame_probs_simulation->warped_probs[update_type] =
4413           (temp_frame_probs_simulation->warped_probs[update_type] + new_prob) >>
4414           1;
4415 #else
4416       frame_probs->warped_probs[update_type] =
4417           (frame_probs->warped_probs[update_type] + new_prob) >> 1;
4418 #endif
4419     }
4420 
4421     // Sequentially update switchable_interp_probs
4422     if (cpi->do_update_frame_probs_interpfilter[loop] &&
4423         cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
4424       const FRAME_UPDATE_TYPE update_type =
4425           get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
4426 
4427       for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4428         int left = 1536;
4429 
4430         for (j = SWITCHABLE_FILTERS - 1; j >= 0; j--) {
4431           const int new_prob = cpi->frame_new_probs[loop]
4432                                    .switchable_interp_probs[update_type][i][j];
4433 #if CONFIG_FPMT_TEST
4434           int prob = (temp_frame_probs_simulation
4435                           ->switchable_interp_probs[update_type][i][j] +
4436                       new_prob) >>
4437                      1;
4438           left -= prob;
4439           if (j == 0) prob += left;
4440 
4441           temp_frame_probs_simulation
4442               ->switchable_interp_probs[update_type][i][j] = prob;
4443 #else
4444           int prob = (frame_probs->switchable_interp_probs[update_type][i][j] +
4445                       new_prob) >>
4446                      1;
4447           left -= prob;
4448           if (j == 0) prob += left;
4449           frame_probs->switchable_interp_probs[update_type][i][j] = prob;
4450 #endif
4451         }
4452       }
4453     }
4454   }
4455 
4456 #if CONFIG_FPMT_TEST
4457   // Copy temp_frame_probs_simulation to temp_frame_probs based on
4458   // the update flag.
4459   if (cpi->do_frame_data_update &&
4460       cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0 &&
4461       simulate_parallel_frame) {
4462     for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
4463          update_type_idx++) {
4464       for (i = 0; i < BLOCK_SIZES_ALL; i++) {
4465         temp_frame_probs->obmc_probs[update_type_idx][i] =
4466             temp_frame_probs_simulation->obmc_probs[update_type_idx][i];
4467       }
4468       temp_frame_probs->warped_probs[update_type_idx] =
4469           temp_frame_probs_simulation->warped_probs[update_type_idx];
4470       for (i = 0; i < TX_SIZES_ALL; i++) {
4471         for (j = 0; j < TX_TYPES; j++) {
4472           temp_frame_probs->tx_type_probs[update_type_idx][i][j] =
4473               temp_frame_probs_simulation->tx_type_probs[update_type_idx][i][j];
4474         }
4475       }
4476       for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
4477         for (j = 0; j < SWITCHABLE_FILTERS; j++) {
4478           temp_frame_probs->switchable_interp_probs[update_type_idx][i][j] =
4479               temp_frame_probs_simulation
4480                   ->switchable_interp_probs[update_type_idx][i][j];
4481         }
4482       }
4483     }
4484   }
4485 #endif
4486   // Update framerate obtained from parallel encode frames
4487   if (cpi->common.show_frame &&
4488       cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0)
4489     cpi->framerate = cpi->new_framerate;
4490 #if CONFIG_FPMT_TEST
4491   // SIMULATION PURPOSE
4492   int show_existing_between_parallel_frames_cndn =
4493       (cpi->ppi->gf_group.update_type[cpi->gf_frame_index] ==
4494            INTNL_OVERLAY_UPDATE &&
4495        cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index + 1] == 2);
4496   if (cpi->common.show_frame && !show_existing_between_parallel_frames_cndn &&
4497       cpi->do_frame_data_update && simulate_parallel_frame)
4498     cpi->temp_framerate = cpi->framerate;
4499 #endif
4500 }
4501