/* * Copyright (c) 2010 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
// Whether to use high precision mv for altref computation. #define ALTREF_HIGH_PRECISION_MV 1
// Q threshold for high precision mv. Choose a very high value for now so that // HIGH_PRECISION is always chosen. #define HIGH_PRECISION_MV_QTHRESH 200
#define FRAME_SIZE_FACTOR 128 // empirical params for context model threshold #define FRAME_RATE_FACTOR 8
// This equation makes the threshold adaptive to frame size. // Coding gain obtained by recoding comes from alternate frames of large // content change. We skip recoding if the difference of previous and current // frame context probability model is less than a certain threshold. // The first component is the most critical part to guarantee adaptivity. // Other parameters are estimated based on normal setting of hd resolution // parameters. e.g. frame_size = 1920x1080, bitrate = 8000, qindex_factor < 50 constint thresh =
((FRAME_SIZE_FACTOR * frame_size - FRAME_RATE_FACTOR * bitrate) *
qindex_factor) >>
9;
return thresh;
}
// compute the total cost difference between current // and previous frame context prob model. staticint compute_context_model_diff(const VP9_COMMON *const cm) { const FRAME_CONTEXT *const pre_fc =
&cm->frame_contexts[cm->frame_context_idx]; const FRAME_CONTEXT *const cur_fc = cm->fc; const FRAME_COUNTS *counts = &cm->counts;
vpx_prob pre_last_prob, cur_last_prob; int diff = 0; int i, j, k, l, m, n;
// Test for whether to calculate metrics for the frame. staticint is_psnr_calc_enabled(const VP9_COMP *cpi) { const VP9_COMMON *const cm = &cpi->common; const VP9EncoderConfig *const oxcf = &cpi->oxcf;
// One human-readable diagnostic string per target-level failure reason
// (array is sized by TARGET_LEVEL_FAIL_IDS).
static const char *level_fail_messages[TARGET_LEVEL_FAIL_IDS] = {
  "The average bit-rate is too high.",
  "The picture size is too large.",
  "The picture width/height is too large.",
  "The luma sample rate is too large.",
  "The CPB size is too large.",
  "The compression ratio is too small",
  "Too many column tiles are used.",
  "The alt-ref distance is too small.",
  "Too many reference buffers are used."
};
// Mark all inactive blocks as active. Other segmentation features may be set // so memset cannot be used, instead only inactive blocks should be reset. staticvoid suppress_active_map(VP9_COMP *cpi) { unsignedchar *const seg_map = cpi->segmentation_map;
if (cpi->active_map.enabled || cpi->active_map.update) { constint rows = cpi->common.mi_rows; constint cols = cpi->common.mi_cols; int i;
for (i = 0; i < rows * cols; ++i) if (seg_map[i] == AM_SEGMENT_ID_INACTIVE)
seg_map[i] = AM_SEGMENT_ID_ACTIVE;
}
}
if (frame_is_intra_only(&cpi->common)) {
cpi->active_map.enabled = 0;
cpi->active_map.update = 1;
}
if (cpi->active_map.update) { if (cpi->active_map.enabled) { for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i) if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
vp9_enable_segmentation(seg);
vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF); // Setting the data to -MAX_LOOP_FILTER will result in the computed loop // filter level being zero regardless of the value of seg->abs_delta.
vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
-MAX_LOOP_FILTER);
} else {
vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF); if (seg->enabled) {
seg->update_data = 1;
seg->update_map = 1;
}
}
cpi->active_map.update = 0;
}
}
// Translates the user-supplied ROI (region of interest) map into encoder
// segmentation features: per-segment delta-Q, delta loop-filter, skip and
// reference-frame hints. Only active in realtime mode at speed >= 5.
staticvoid apply_roi_map(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common; struct segmentation *const seg = &cm->seg;
// NOTE(review): ref_frame[] is read below (ref_frame[i] >= 0) but no
// initialization from roi->ref_frame is visible in this chunk — confirm a
// copy from roi->ref_frame (and the segmentation enable/clear calls) were
// not dropped between the declarations and the loop.
vpx_roi_map_t *roi = &cpi->roi; constint *delta_q = roi->delta_q; constint *delta_lf = roi->delta_lf; constint *skip = roi->skip; int ref_frame[8]; int internal_delta_q[MAX_SEGMENTS]; int i;
// TODO(jianj): Investigate why ROI not working in speed < 5 or in non // realtime mode. if (cpi->oxcf.mode != REALTIME || cpi->oxcf.speed < 5) return; if (!roi->enabled) return;
for (i = 0; i < MAX_SEGMENTS; ++i) { // Translate the external delta q values to internal values.
// External delta-Q is in quantizer units; convert magnitude to a qindex
// delta and restore the sign afterwards.
internal_delta_q[i] = vp9_quantizer_to_qindex(abs(delta_q[i])); if (delta_q[i] < 0) internal_delta_q[i] = -internal_delta_q[i];
// Reset both alterable features, then re-enable only those with
// non-zero deltas for this segment.
vp9_disable_segfeature(seg, i, SEG_LVL_ALT_Q);
vp9_disable_segfeature(seg, i, SEG_LVL_ALT_LF); if (internal_delta_q[i] != 0) {
vp9_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
vp9_set_segdata(seg, i, SEG_LVL_ALT_Q, internal_delta_q[i]);
} if (delta_lf[i] != 0) {
vp9_enable_segfeature(seg, i, SEG_LVL_ALT_LF);
vp9_set_segdata(seg, i, SEG_LVL_ALT_LF, delta_lf[i]);
} if (skip[i] != 0) {
vp9_enable_segfeature(seg, i, SEG_LVL_SKIP);
vp9_set_segdata(seg, i, SEG_LVL_SKIP, 0);
} if (ref_frame[i] >= 0) { int valid_ref = 1; // ALTREF is not used as reference for nonrd_pickmode with 0 lag. if (ref_frame[i] == ALTREF_FRAME && cpi->sf.use_nonrd_pick_mode)
valid_ref = 0; // If GOLDEN is selected, make sure it's set as reference. if (ref_frame[i] == GOLDEN_FRAME &&
!(cpi->ref_frame_flags & ref_frame_to_flag(ref_frame[i]))) {
valid_ref = 0;
} // GOLDEN was updated in previous encoded frame, so GOLDEN and LAST are // same reference. if (ref_frame[i] == GOLDEN_FRAME && cpi->rc.frames_since_golden == 0)
ref_frame[i] = LAST_FRAME; if (valid_ref) {
vp9_enable_segfeature(seg, i, SEG_LVL_REF_FRAME);
vp9_set_segdata(seg, i, SEG_LVL_REF_FRAME, ref_frame[i]);
}
}
}
// Keep the ROI path active for subsequent frames.
roi->enabled = 1;
}
// Returns 1 if every entry of seg_data lies within [-range, range],
// 0 otherwise.
static int check_seg_range(int seg_data[8], int range) {
  // Note abs() alone can't be used as the behavior of abs(INT_MIN) is
  // undefined, so compare against both bounds explicitly.
  int idx = 0;
  while (idx < 8) {
    const int value = seg_data[idx];
    if (value > range || value < -range) return 0;
    ++idx;
  }
  return 1;
}
staticvoid setup_frame(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common; // Set up entropy context depending on frame type. The decoder mandates // the use of the default context, index 0, for keyframes and inter // frames where the error_resilient_mode or intra_only flag is set. For // other inter-frames the encoder currently uses only two contexts; // context 1 for ALTREF frames and context 0 for the others. if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
vp9_setup_past_independence(cm);
} else { if (!cpi->use_svc) cm->frame_context_idx = cpi->refresh_alt_ref_frame;
}
// TODO(jingning): Overwrite the frame_context_idx index in multi-layer ARF // case. Need some further investigation on if we could apply this to single // layer ARF case as well. if (cpi->multi_layer_arf && !cpi->use_svc) {
GF_GROUP *const gf_group = &cpi->twopass.gf_group; constint gf_group_index = gf_group->index; constint boost_frame =
!cpi->rc.is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame);
// frame_context_idx Frame Type // 0 Intra only frame, base layer ARF // 1 ARFs with layer depth = 2,3 // 2 ARFs with layer depth > 3 // 3 Non-boosted frames if (frame_is_intra_only(cm)) {
cm->frame_context_idx = 0;
} elseif (boost_frame) { if (gf_group->rf_level[gf_group_index] == GF_ARF_STD)
cm->frame_context_idx = 0; elseif (gf_group->layer_depth[gf_group_index] <= 3)
cm->frame_context_idx = 1; else
cm->frame_context_idx = 2;
} else {
cm->frame_context_idx = 3;
}
}
staticvoid vp9_swap_mi_and_prev_mi(VP9_COMMON *cm) { // Current mip will be the prev_mip for the next frame.
MODE_INFO **temp_base = cm->prev_mi_grid_base;
MODE_INFO *temp = cm->prev_mip;
// Skip update prev_mi frame in show_existing_frame mode. if (cm->show_existing_frame) return;
// Stores a snapshot of key state variables which can subsequently be // restored with a call to vp9_restore_coding_context. These functions are // intended for use in a re-code loop in vp9_compress_frame where the // quantizer value is adjusted between loop iterations.
vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
// Restore key state variables to the snapshot state stored in the // previous call to vp9_save_coding_context.
vp9_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
int high_q = (int)(rc->avg_q > 48.0); int qi_delta;
// Disable and clear down for KF if (cm->frame_type == KEY_FRAME) { // Clear down the global segmentation map
memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
cpi->static_mb_pct = 0;
// Clear down the segment features.
vp9_clearall_segfeatures(seg);
} elseif (cpi->refresh_alt_ref_frame) { // If this is an alt ref frame // Clear down the global segmentation map
memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
seg->update_map = 0;
seg->update_data = 0;
cpi->static_mb_pct = 0;
// Disable segmentation and individual segment features by default
vp9_disable_segmentation(seg);
vp9_clearall_segfeatures(seg);
// Scan frames from current to arf frame. // This function re-enables segmentation if appropriate.
vp9_update_mbgraph_stats(cpi);
// If segmentation was enabled set those features needed for the // arf itself. if (seg->enabled) {
seg->update_map = 1;
seg->update_data = 1;
// Where relevant assume segment data is delta data
seg->abs_delta = SEGMENT_DELTADATA;
}
} elseif (seg->enabled) { // All other frames if segmentation has been enabled
// First normal frame in a valid gf or alt ref group if (rc->frames_since_golden == 0) { // Set up segment features for normal frames in an arf group if (rc->source_alt_ref_active) {
seg->update_map = 0;
seg->update_data = 1;
seg->abs_delta = SEGMENT_DELTADATA;
// Segment coding disabled for compred testing if (high_q || (cpi->static_mb_pct == 100)) {
vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
}
} else { // Disable segmentation and clear down features if alt ref // is not active for this group
vp9_clearall_segfeatures(seg);
}
} elseif (rc->is_src_frame_alt_ref) { // Special case where we are coding over the top of a previous // alt ref frame. // Segment coding disabled for compred testing
// Enable ref frame features for segment 0 as well
vp9_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
vp9_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
// All mbs should use ALTREF_FRAME
vp9_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
vp9_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
vp9_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
vp9_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
// Skip all MBs if high Q (0,0 mv and skip coeffs) if (high_q) {
vp9_enable_segfeature(seg, 0, SEG_LVL_SKIP);
vp9_enable_segfeature(seg, 1, SEG_LVL_SKIP);
} // Enable data update
seg->update_data = 1;
} else { // All other frames.
// No updates.. leave things as they are.
seg->update_map = 0;
seg->update_data = 0;
}
}
} #endif// !CONFIG_REALTIME_ONLY
staticvoid update_reference_segmentation_map(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
uint8_t *cache_ptr = cm->last_frame_seg_map; int row, col;
// For 1 pass cbr: allocate scaled_frame that may be used as an intermediate // buffer for a 2 stage down-sampling: two stages of 1:2 down-sampling for a // target of 1/4x1/4. number_spatial_layers must be greater than 2. if (is_one_pass_svc(cpi) && !cpi->svc.scaled_temp_is_alloc &&
cpi->svc.number_spatial_layers > 2) {
cpi->svc.scaled_temp_is_alloc = 1; if (vpx_realloc_frame_buffer(
&cpi->svc.scaled_temp, cm->width >> 1, cm->height >> 1,
cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth, #endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL))
vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate scaled_frame for svc ");
}
if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth, #endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate scaled last source buffer"); #ifdef ENABLE_KF_DENOISE if (vpx_realloc_frame_buffer(&cpi->raw_unscaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth, #endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate unscaled raw source frame buffer");
if (vpx_realloc_frame_buffer(&cpi->raw_scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth, #endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate scaled raw source frame buffer"); #endif
}
// Under a configuration change, where maximum_buffer_size may change, // keep buffer level clipped to the maximum allowed buffer size.
rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
}
// (Re)allocates every per-frame segmentation-related map, sized to the
// current mi_rows x mi_cols grid, zero-initialized.
static void realloc_segmentation_maps(VP9_COMP *cpi) {
  VP9_COMMON *const common = &cpi->common;
  const int num_mis = common->mi_rows * common->mi_cols;

  // Encoder segmentation map, all entries reset to 0.
  vpx_free(cpi->segmentation_map);
  CHECK_MEM_ERROR(&common->error, cpi->segmentation_map,
                  vpx_calloc(num_mis, 1));

  // Map used for cyclic background refresh.
  if (cpi->cyclic_refresh) vp9_cyclic_refresh_free(cpi->cyclic_refresh);
  CHECK_MEM_ERROR(&common->error, cpi->cyclic_refresh,
                  vp9_cyclic_refresh_alloc(common->mi_rows, common->mi_cols));

  // Map used to mark inactive areas.
  vpx_free(cpi->active_map.map);
  CHECK_MEM_ERROR(&common->error, cpi->active_map.map, vpx_calloc(num_mis, 1));

  // Placeholder copy of the segmentation map kept in the coding context,
  // for use if we want to save and restore it.
  vpx_free(cpi->coding_context.last_frame_seg_map_copy);
  CHECK_MEM_ERROR(&common->error, cpi->coding_context.last_frame_seg_map_copy,
                  vpx_calloc(num_mis, 1));
}
/*********************************************************************** * Read before modifying 'cal_nmvjointsadcost' or 'cal_nmvsadcosts' * *********************************************************************** * The following 2 functions ('cal_nmvjointsadcost' and * * 'cal_nmvsadcosts') are used to calculate cost lookup tables * * used by 'vp9_diamond_search_sad'. The C implementation of the * * function is generic, but the NEON intrinsics optimised version * * relies on the following properties of the computed tables: * * For cal_nmvjointsadcost: * * - mvjointsadcost[1] == mvjointsadcost[2] == mvjointsadcost[3] * * For cal_nmvsadcosts: * * - For all i: mvsadcost[0][i] == mvsadcost[1][i] * * (Equal costs for both components) * * - For all i: mvsadcost[0][i] == mvsadcost[0][-i] * * (Cost function is even) * * If these do not hold, then the NEON optimised version of the * * 'vp9_diamond_search_sad' function cannot be used as it is, in which * * case you can revert to using the C function instead. *
***********************************************************************/
static void cal_nmvjointsadcost(int *mvjointsadcost) {
  /*********************************************************************
   * Warning: Read the comments above before modifying this function   *
   *********************************************************************/
  // Joint class 0 (zero MV) costs 600; the three non-zero joint classes
  // must all share the same cost (300) so the NEON diamond-search
  // implementation's invariant [1] == [2] == [3] holds.
  int j;
  mvjointsadcost[0] = 600;
  for (j = 1; j <= 3; ++j) mvjointsadcost[j] = 300;
}
staticvoid cal_nmvsadcosts(int *mvsadcost[2]) { /********************************************************************* * Warning: Read the comments above before modifying this function *
*********************************************************************/ int i = 1;
// TODO(angiebird): Check whether we can move this function to vpx_image.c staticINLINEvoid vpx_img_chroma_subsampling(vpx_img_fmt_t fmt, unsignedint *subsampling_x, unsignedint *subsampling_y) { switch (fmt) { case VPX_IMG_FMT_I420: case VPX_IMG_FMT_YV12: case VPX_IMG_FMT_NV12: case VPX_IMG_FMT_I422: case VPX_IMG_FMT_I42016: case VPX_IMG_FMT_I42216: *subsampling_x = 1; break; default: *subsampling_x = 0; break;
}
switch (fmt) { case VPX_IMG_FMT_I420: case VPX_IMG_FMT_I440: case VPX_IMG_FMT_YV12: case VPX_IMG_FMT_NV12: case VPX_IMG_FMT_I42016: case VPX_IMG_FMT_I44016: *subsampling_y = 1; break; default: *subsampling_y = 0; break;
}
}
// TODO(angiebird): Check whether we can move this function to vpx_image.c staticINLINEint vpx_img_use_highbitdepth(vpx_img_fmt_t fmt) { return fmt & VPX_IMG_FMT_HIGHBITDEPTH;
}
/********************************************************************* * Warning: Read the comments around 'cal_nmvjointsadcost' and * * 'cal_nmvsadcosts' before modifying how these tables are computed. *
*********************************************************************/
cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost);
cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX];
cal_nmvsadcosts(cpi->td.mb.nmvsadcost);
lc->rc_twopass_stats_in.sz = packets_in_layer * packet_sz;
CHECK_MEM_ERROR(&cm->error, lc->rc_twopass_stats_in.buf,
vpx_malloc(lc->rc_twopass_stats_in.sz));
lc->twopass.stats_in_start = lc->rc_twopass_stats_in.buf;
lc->twopass.stats_in = lc->twopass.stats_in_start;
lc->twopass.stats_in_end =
lc->twopass.stats_in_start + packets_in_layer - 1; // Note the last packet is cumulative first pass stats. // So the number of frames is packet number minus one
num_frames = packets_in_layer - 1;
fps_init_first_pass_info(&lc->twopass.first_pass_info,
lc->rc_twopass_stats_in.buf, num_frames);
stats_copy[layer_id] = lc->rc_twopass_stats_in.buf;
}
}
vp9_init_second_pass_spatial_svc(cpi);
} else { int num_frames;
cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
cpi->twopass.stats_in = cpi->twopass.stats_in_start;
cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1]; // Note the last packet is cumulative first pass stats. // So the number of frames is packet number minus one
num_frames = packets - 1;
fps_init_first_pass_info(&cpi->twopass.first_pass_info,
oxcf->two_pass_stats_in.buf, num_frames);
/* vp9_init_quantizer() is first called here. Add check in * vp9_frame_init_quantizer() so that vp9_init_quantizer is only * called later when needed. This will avoid unnecessary calls of * vp9_init_quantizer() for every frame.
*/
vp9_init_quantizer(cpi);
vp9_loop_filter_init(cm);
// Set up the unit scaling factor used during motion search. #if CONFIG_VP9_HIGHBITDEPTH
vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
cm->width, cm->height,
cm->use_highbitdepth); #else
vp9_setup_scale_factors_for_frame(&cpi->me_sf, cm->width, cm->height,
cm->width, cm->height); #endif// CONFIG_VP9_HIGHBITDEPTH
cpi->td.mb.me_sf = &cpi->me_sf;
for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
++i) {
vpx_free(cpi->mbgraph_stats[i].mb_stats);
}
vp9_extrc_delete(&cpi->ext_ratectrl);
// Help detect use after free of the error detail string.
memset(cm->error.detail, 'A', sizeof(cm->error.detail) - 1);
cm->error.detail[sizeof(cm->error.detail) - 1] = '\0';
// The issue b/311394513 reveals a corner case bug. // For bd = 8, vpx_scaled_2d() requires both x_step_q4 and y_step_q4 are less // than or equal to 64. For bd >= 10, vpx_highbd_convolve8() requires both // x_step_q4 and y_step_q4 are less than or equal to 32. If this condition // isn't met, it needs to call vp9_scale_and_extend_frame_nonnormative() that // supports arbitrary scaling. constint x_step_q4 = 16 * src_w / dst_w; constint y_step_q4 = 16 * src_h / dst_h; constint is_arbitrary_scaling =
(bd == 8 && (x_step_q4 > 64 || y_step_q4 > 64)) ||
(bd >= 10 && (x_step_q4 > 32 || y_step_q4 > 32)); if (is_arbitrary_scaling) {
vp9_scale_and_extend_frame_nonnormative(src, dst, bd); return;
}
// test in two pass for the first staticint two_pass_first_group_inter(VP9_COMP *cpi) { if (cpi->oxcf.pass == 2) {
TWO_PASS *const twopass = &cpi->twopass;
GF_GROUP *const gf_group = &twopass->gf_group; constint gfg_index = gf_group->index;
// Function to test for conditions that indicate we should loop // back and recode a frame. staticint recode_loop_test(VP9_COMP *cpi, int high_limit, int low_limit, int q, int maxq, int minq) { const RATE_CONTROL *const rc = &cpi->rc; const VP9EncoderConfig *const oxcf = &cpi->oxcf; constint frame_is_kfgfarf = frame_is_kf_gf_arf(cpi); int force_recode = 0;
if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
big_rate_miss(cpi) || (cpi->sf.recode_loop == ALLOW_RECODE) ||
(two_pass_first_group_inter(cpi) &&
(cpi->sf.recode_loop == ALLOW_RECODE_FIRST)) ||
(frame_is_kfgfarf && (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF))) { if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) &&
scale_down(cpi, q)) { // Code this group at a lower resolution.
cpi->resize_pending = 1; return 1;
}
// Force recode for extreme overshoot. if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
(cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF &&
rc->projected_frame_size >= big_rate_miss_high_threshold(cpi))) { return 1;
}
// TODO(agrange) high_limit could be greater than the scale-down threshold. if ((rc->projected_frame_size > high_limit && q < maxq) ||
(rc->projected_frame_size < low_limit && q > minq)) {
force_recode = 1;
} elseif (cpi->oxcf.rc_mode == VPX_CQ) { // Deal with frame undershoot and whether or not we are // below the automatically set cq level. if (q > oxcf->cq_level &&
rc->projected_frame_size < ((rc->this_frame_target * 7) >> 3)) {
force_recode = 1;
}
}
} return force_recode;
} #endif// !CONFIG_REALTIME_ONLY
staticvoid update_ref_frames(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
GF_GROUP *const gf_group = &cpi->twopass.gf_group;
// Overlay frame should ideally look at the colocated ref frame from rc lib. // Here temporarily just don't update the indices. if (next_gf_index < gf_group->gf_group_size) {
cpi->lst_fb_idx = gf_group->ext_rc_ref[next_gf_index].last_index;
cpi->gld_fb_idx = gf_group->ext_rc_ref[next_gf_index].golden_index;
cpi->alt_fb_idx = gf_group->ext_rc_ref[next_gf_index].altref_index;
}
return;
}
if (cpi->rc.show_arf_as_gld) { int tmp = cpi->alt_fb_idx;
cpi->alt_fb_idx = cpi->gld_fb_idx;
cpi->gld_fb_idx = tmp;
} elseif (cm->show_existing_frame) { // Pop ARF.
cpi->lst_fb_idx = cpi->alt_fb_idx;
cpi->alt_fb_idx =
stack_pop(gf_group->arf_index_stack, gf_group->stack_size);
--gf_group->stack_size;
}
// At this point the new frame has been encoded. // If any buffer copy / swapping is signaled it should be done here. if (cm->frame_type == KEY_FRAME) {
ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
cm->new_fb_idx);
ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
cm->new_fb_idx);
} elseif (vp9_preserve_existing_gf(cpi)) { // We have decided to preserve the previously existing golden frame as our // new ARF frame. However, in the short term in function // vp9_get_refresh_mask() we left it in the GF slot and, if // we're updating the GF with the current decoded frame, we save it to the // ARF slot instead. // We now have to update the ARF with the current frame and swap gld_fb_idx // and alt_fb_idx so that, overall, we've stored the old GF in the new ARF // slot and, if we're updating the GF, the current frame becomes the new GF. int tmp;
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { // Need to convert from VP9_REFFRAME to index into ref_mask (subtract 1). if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
BufferPool *const pool = cm->buffer_pool; const YV12_BUFFER_CONFIG *const ref =
get_ref_frame_buffer(cpi, ref_frame);
// Collapses per-token coefficient counts into the reduced model-token
// counts, one context at a time.
static void full_to_model_counts(vp9_coeff_count_model *model_count,
                                 vp9_coeff_count *full_count) {
  int plane, ref, band, ctx;
  for (plane = 0; plane < PLANE_TYPES; ++plane) {
    for (ref = 0; ref < REF_TYPES; ++ref) {
      for (band = 0; band < COEF_BANDS; ++band) {
        for (ctx = 0; ctx < BAND_COEFF_CONTEXTS(band); ++ctx) {
          full_to_model_count(model_count[plane][ref][band][ctx],
                              full_count[plane][ref][band][ctx]);
        }
      }
    }
  }
}
#if 0 && CONFIG_INTERNAL_STATS staticvoid output_frame_level_debug_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
int64_t recon_err;
// Default based on max resolution.
cpi->mv_step_param = vp9_init_search_range(max_mv_def);
if (cpi->sf.mv.auto_mv_step_size) { if (frame_is_intra_only(cm)) { // Initialize max_mv_magnitude for use in the first INTER frame // after a key/intra-only frame.
cpi->max_mv_magnitude = max_mv_def;
} else { if (cm->show_frame) { // Allow mv_steps to correspond to twice the max mv magnitude found // in the previous frame, capped by the default max_mv_magnitude based // on resolution.
cpi->mv_step_param = vp9_init_search_range(
VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
}
cpi->max_mv_magnitude = 0;
}
}
}
if (cpi->use_svc) {
cpi->svc.base_qindex[cpi->svc.spatial_layer_id] = *q;
}
if (!frame_is_intra_only(cm)) {
vp9_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
}
#if !CONFIG_REALTIME_ONLY // Configure experimental use of segmentation for enhanced coding of // static regions if indicated. // Only allowed in the second pass of a two pass encode, as it requires // lagged coding, and if the relevant speed feature flag is set. if (cpi->oxcf.pass == 2 && cpi->sf.static_segmentation)
configure_static_seg_features(cpi); #endif// !CONFIG_REALTIME_ONLY
#if CONFIG_VP9_POSTPROC && !(CONFIG_VP9_TEMPORAL_DENOISING) if (cpi->oxcf.noise_sensitivity > 0) { int l = 0; switch (cpi->oxcf.noise_sensitivity) { case 1: l = 20; break; case 2: l = 40; break; case 3: l = 60; break; case 4: case 5: l = 100; break; case 6: l = 150; break;
} if (!cpi->common.postproc_state.limits) {
CHECK_MEM_ERROR(&cm->error, cpi->common.postproc_state.limits,
vpx_calloc(cpi->un_scaled_source->y_width, sizeof(*cpi->common.postproc_state.limits)));
}
vp9_denoise(&cpi->common, cpi->Source, cpi->Source, l,
cpi->common.postproc_state.limits);
} #endif// CONFIG_VP9_POSTPROC
}
staticvoid init_motion_estimation(VP9_COMP *cpi) { int y_stride = cpi->scaled_source.y_stride;
// There has been a change in frame size.
vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
oxcf->scaled_frame_height);
} #endif// !CONFIG_REALTIME_ONLY
if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending != 0) { // For SVC scaled width/height will have been set (svc->resize_set=1) // in get_svc_params based on the layer width/height. if (!cpi->use_svc || !cpi->svc.resize_set) {
oxcf->scaled_frame_width =
(oxcf->width * cpi->resize_scale_num) / cpi->resize_scale_den;
oxcf->scaled_frame_height =
(oxcf->height * cpi->resize_scale_num) / cpi->resize_scale_den; // There has been a change in frame size.
vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
oxcf->scaled_frame_height);
}
// TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
set_mv_search_params(cpi);
vp9_noise_estimate_init(&cpi->noise_estimate, cm->width, cm->height); #if CONFIG_VP9_TEMPORAL_DENOISING // Reset the denoiser on the resized frame. if (cpi->oxcf.noise_sensitivity > 0) {
vp9_denoiser_free(&(cpi->denoiser));
setup_denoiser_buffer(cpi); // Dynamic resize is only triggered for non-SVC, so we can force // golden frame update here as temporary fix to denoiser.
cpi->refresh_golden_frame = 1;
} #endif
}
if ((oxcf->pass == 2) && !cpi->use_svc) {
vp9_set_target_rate(cpi);
}
alloc_frame_mvs(cm, cm->new_fb_idx);
// Reset the frame pointers to the current frame size. if (vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth, #endif
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate frame buffer");
if (buf_idx != INVALID_IDX) {
YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
ref_buf->buf = buf; #if CONFIG_VP9_HIGHBITDEPTH
vp9_setup_scale_factors_for_frame(
&ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0); #else
vp9_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
buf->y_crop_height, cm->width,
cm->height); #endif// CONFIG_VP9_HIGHBITDEPTH
has_valid_ref_frame |= vp9_is_valid_scale(&ref_buf->sf); if (vp9_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
} else {
ref_buf->buf = NULL;
}
} if (!frame_is_intra_only(cm) && !has_valid_ref_frame) {
vpx_internal_error(
&cm->error, VPX_CODEC_ERROR, "Can't find at least one reference frame with valid size");
}
set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
}
staticvoid save_encode_params(VP9_COMP *cpi) { int tile_idx; int i, j;
TileDataEnc *tile_data;
RD_OPT *rd_opt = &cpi->rd; for (i = 0; i < MAX_REF_FRAMES; i++) { for (j = 0; j < REFERENCE_MODES; j++)
rd_opt->prediction_type_threshes_prev[i][j] =
rd_opt->prediction_type_threshes[i][j];
// Flag to check if its valid to compute the source sad (used for // scene detection and for superblock content state in CBR mode). // The flag may get reset below based on SVC or resizing state.
cpi->compute_source_sad_onepass = cpi->oxcf.mode == REALTIME;
vpx_clear_system_state();
set_frame_size(cpi);
if (is_one_pass_svc(cpi) &&
cpi->un_scaled_source->y_width == cm->width << 2 &&
cpi->un_scaled_source->y_height == cm->height << 2 &&
svc->scaled_temp.y_width == cm->width << 1 &&
svc->scaled_temp.y_height == cm->height << 1) { // For svc, if it is a 1/4x1/4 downscaling, do a two-stage scaling to take // advantage of the 1:2 optimized scaler. In the process, the 1/2x1/2 // result will be saved in scaled_temp and might be used later. const INTERP_FILTER filter_scaler2 = svc->downsample_filter_type[1]; constint phase_scaler2 = svc->downsample_filter_phase[1];
cpi->Source = svc_twostage_scale(
cm, cpi->un_scaled_source, &cpi->scaled_source, &svc->scaled_temp,
filter_scaler, phase_scaler, filter_scaler2, phase_scaler2);
svc->scaled_one_half = 1;
} elseif (is_one_pass_svc(cpi) &&
cpi->un_scaled_source->y_width == cm->width << 1 &&
cpi->un_scaled_source->y_height == cm->height << 1 &&
svc->scaled_one_half) { // If the spatial layer is 1/2x1/2 and the scaling is already done in the // two-stage scaling, use the result directly.
cpi->Source = &svc->scaled_temp;
svc->scaled_one_half = 0;
} else {
cpi->Source = vp9_scale_if_required(
cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0),
filter_scaler, phase_scaler);
} #ifdef OUTPUT_YUV_SVC_SRC // Write out at most 3 spatial layers. if (is_one_pass_svc(cpi) && svc->spatial_layer_id < 3) {
vpx_write_yuv_frame(yuv_svc_src[svc->spatial_layer_id], cpi->Source);
} #endif // Unfiltered raw source used in metrics calculation if the source // has been filtered. if (is_psnr_calc_enabled(cpi)) { #ifdef ENABLE_KF_DENOISE if (is_spatial_denoise_enabled(cpi)) {
cpi->raw_source_frame = vp9_scale_if_required(
cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
(cpi->oxcf.pass == 0), EIGHTTAP, phase_scaler);
} else {
cpi->raw_source_frame = cpi->Source;
} #else
cpi->raw_source_frame = cpi->Source; #endif
}
#if CONFIG_VP9_TEMPORAL_DENOISING if (cpi->oxcf.noise_sensitivity > 0 && cpi->use_svc)
vp9_denoiser_reset_on_first_frame(cpi); #endif
// Scene detection is always used for VBR mode or screen-content case. // For other cases (e.g., CBR mode) use it for 5 <= speed.
cpi->rc.high_source_sad = 0;
cpi->rc.hybrid_intra_scene_change = 0;
cpi->rc.re_encode_maxq_scene_change = 0; if (cm->show_frame && cpi->oxcf.mode == REALTIME &&
!cpi->disable_scene_detection_rtc_ratectrl &&
(cpi->oxcf.rc_mode == VPX_VBR ||
cpi->oxcf.content == VP9E_CONTENT_SCREEN || cpi->oxcf.speed >= 5))
vp9_scene_detection_onepass(cpi);
if (svc->spatial_layer_id == svc->first_spatial_layer_to_encode) {
svc->high_source_sad_superframe = cpi->rc.high_source_sad;
svc->high_num_blocks_with_motion = cpi->rc.high_num_blocks_with_motion; // On scene change reset temporal layer pattern to TL0. // Note that if the base/lower spatial layers are skipped: instead of // inserting base layer here, we force max-q for the next superframe // with lower spatial layers: this is done in vp9_encodedframe_overshoot() // when max-q is decided for the current layer. // Only do this reset for bypass/flexible mode. if (svc->high_source_sad_superframe && svc->temporal_layer_id > 0 &&
svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_BYPASS) { // rc->high_source_sad will get reset so copy it to restore it. int tmp_high_source_sad = cpi->rc.high_source_sad;
vp9_svc_reset_temporal_layers(cpi, cm->frame_type == KEY_FRAME);
cpi->rc.high_source_sad = tmp_high_source_sad;
}
}
vp9_update_noise_estimate(cpi);
// For 1 pass CBR, check if we are dropping this frame. // Never drop on key frame, if base layer is key for svc, // on scene change, or if superframe has layer sync. if ((cpi->rc.high_source_sad || svc->high_source_sad_superframe) &&
!(cpi->rc.use_post_encode_drop && svc->last_layer_dropped[0]))
no_drop_scene_change = 1; if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
!frame_is_intra_only(cm) && !no_drop_scene_change &&
!svc->superframe_has_layer_sync &&
(!cpi->use_svc ||
!svc->layer_context[svc->temporal_layer_id].is_key_frame)) { if (vp9_rc_drop_frame(cpi)) return 0;
}
// For 1 pass SVC, only ZEROMV is allowed for spatial reference frame // when svc->force_zero_mode_spatial_ref = 1. Under those conditions we can // avoid this frame-level upsampling (for non intra_only frames). // For SVC single_layer mode, dynamic resize is allowed and we need to // scale references for this case. if (frame_is_intra_only(cm) == 0 &&
((svc->single_layer_svc && cpi->oxcf.resize_mode == RESIZE_DYNAMIC) ||
!(is_one_pass_svc(cpi) && svc->force_zero_mode_spatial_ref))) {
vp9_scale_references(cpi);
}
// TODO(jianj): Look into issue of skin detection with high bitdepth. if (cm->bit_depth == 8 && cpi->oxcf.speed >= 5 && cpi->oxcf.pass == 0 &&
cpi->oxcf.rc_mode == VPX_CBR &&
cpi->oxcf.content != VP9E_CONTENT_SCREEN &&
cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
cpi->use_skin_detection = 1;
}
// Enable post encode frame dropping for CBR on non key frame, when // ext_use_post_encode_drop is specified by user.
cpi->rc.use_post_encode_drop = cpi->rc.ext_use_post_encode_drop &&
cpi->oxcf.rc_mode == VPX_CBR &&
cm->frame_type != KEY_FRAME;
vp9_set_quantizer(cpi, q, 0);
vp9_set_variance_partition_thresholds(cpi, q, 0);
setup_frame(cpi);
suppress_active_map(cpi);
if (cpi->use_svc) { // On non-zero spatial layer, check for disabling inter-layer // prediction. if (svc->spatial_layer_id > 0) vp9_svc_constrain_inter_layer_pred(cpi);
vp9_svc_assert_constraints_pattern(cpi);
}
if (cpi->rc.last_post_encode_dropped_scene_change) {
cpi->rc.high_source_sad = 1;
svc->high_source_sad_superframe = 1; // For now disable use_source_sad since Last_Source will not be the previous // encoded but the dropped one.
cpi->sf.use_source_sad = 0;
cpi->rc.last_post_encode_dropped_scene_change = 0;
} // Check if this high_source_sad (scene/slide change) frame should be // encoded at high/max QP, and if so, set the q and adjust some rate // control parameters. if (cpi->sf.overshoot_detection_cbr_rt == FAST_DETECTION_MAXQ &&
(cpi->rc.high_source_sad ||
(cpi->use_svc && svc->high_source_sad_superframe))) { if (vp9_encodedframe_overshoot(cpi, -1, &q)) {
vp9_set_quantizer(cpi, q, 0);
vp9_set_variance_partition_thresholds(cpi, q, 0);
}
}
#if !CONFIG_REALTIME_ONLY // Variance adaptive and in frame q adjustment experiments are mutually // exclusive. if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
vp9_vaq_frame_setup(cpi);
} elseif (cpi->oxcf.aq_mode == EQUATOR360_AQ) {
vp9_360aq_frame_setup(cpi);
} elseif (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
vp9_setup_in_frame_q_adj(cpi);
} elseif (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) { // it may be pretty bad for rate-control, // and I should handle it somehow
vp9_alt_ref_aq_setup_map(cpi->alt_ref_aq, cpi);
} else { #endif // If ROI is enabled and skip feature is used for segmentation, apply cyclic // refresh but not apply ROI for skip for the first 20 frames (defined by // FRAMES_NO_SKIPPING_AFTER_KEY) after key frame to improve quality. if (cpi->roi.enabled && !frame_is_intra_only(cm)) { if (cpi->roi.skip[BACKGROUND_SEG_SKIP_ID]) { if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
vp9_cyclic_refresh_setup(cpi); if (cpi->rc.frames_since_key > FRAMES_NO_SKIPPING_AFTER_KEY)
apply_roi_map(cpi);
} } else {
* in the file PATENTSHORS file in the root of the source */
java.lang.StringIndexOutOfBoundsException: Index 7 out of bounds for length 7 intvpx_calloc * java.lang.StringIndexOutOfBoundsException: Index 58 out of bounds for length 58
efresh_setup)
}
CONFIG_REALTIME_ONLY
java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3 #TileDataEnc this_tilecpi->tile_datatile_col
// Check if we should re-encode this frame at high Q because of high
/
for( = 0;tile_col <multi_thread_ctxt-allocated_tile_cols
(++ java.lang.StringIndexOutOfBoundsException: Index 20 out of bounds for length 20
(>use_svc &svc->high_source_sad_superframe) {
frame_sizejava.lang.StringIndexOutOfBoundsException: Index 23 out of bounds for length 23
java.lang.StringIndexOutOfBoundsException: Range [0, 49) out of bounds for length 0
=
=java.lang.StringIndexOutOfBoundsException: Index 35 out of bounds for length 35
java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3
// Update the skip mb flag probabilities based on the distribution // seen in the last encoder iteration. // update_base_skip_probs(cpi);
vpx_clear_system_state(); return 1;
}
// Record the outcome of one recode iteration and tighten the q-index
// bisection interval accordingly.
static void update_rq_history(RATE_QINDEX_HISTORY *rq_history, int target_bits,
                              int actual_bits, int q_index) {
  const int iter = rq_history->recode_count;

  // Log this iteration's q-index and the bits it actually produced.
  rq_history->q_index_history[iter] = q_index;
  rq_history->rate_history[iter] = actual_bits;

  // Narrow the search bounds: undershooting makes q_index a valid upper
  // bound, overshooting makes it a valid lower bound. An exact hit
  // collapses both bounds onto q_index.
  if (actual_bits <= target_bits) rq_history->q_index_high = q_index;
  if (actual_bits >= target_bits) rq_history->q_index_low = q_index;

  ++rq_history->recode_count;
}
staticint guess_q_index_from_model(const RATE_QSTEP_MODEL *rq_model, int target_bits) { // The model predicts bits as follows. // target_bits = bias - ratio * log2(q_step) // Given the target_bits, we compute the q_step as follows. double q_step;
assert(rq_model->ratio > 0);
q_step = pow(2.0, (rq_model->bias - target_bits) / rq_model->ratio); // TODO(angiebird): Make this function support highbitdepth. return vp9_convert_q_to_qindex(q_step, VPX_BITS_8);
}
// Take one linear step of size 'gap' from prev_q_index toward the target
// rate: lower q (spend more bits) when the previous attempt undershot,
// raise q otherwise. The result is clamped to the valid q-index range
// [0, 255].
static int guess_q_index_linear(int prev_q_index, int target_bits,
                                int actual_bits, int gap) {
  if (actual_bits < target_bits) {
    // Undershot: we can afford a lower (higher quality) q-index.
    const int lowered = prev_q_index - gap;
    return lowered > 0 ? lowered : 0;
  } else {
    // Overshot (or exact): move toward a higher q-index.
    const int raised = prev_q_index + gap;
    return raised < 255 ? raised : 255;
  }
}
#ifdef AGGRESSIVE_VBR if (two_pass_first_group_inter(cpi)) { // Adjustment limits for min and max q
qrange_adj = VPXMAX(1, (top_index - bottom_index) / 2);
// Unfiltered raw source used in metrics calculation if the source // has been filtered. if (is_psnr_calc_enabled(cpi)) { #ifdef ENABLE_KF_DENOISE if (is_spatial_denoise_enabled(cpi)) {
cpi->raw_source_frame = vp9_scale_if_required(
cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source,
(oxcf->pass == 0), EIGHTTAP, 0);
} else {
cpi->raw_source_frame = cpi->Source;
} #else
cpi->raw_source_frame = cpi->Source; #endif
}
// Update the skip mb flag probabilities based on the distribution // seen in the last encoder iteration. // update_base_skip_probs(cpi);
vpx_clear_system_state();
// Dummy pack of the bitstream using up to date stats to get an // accurate estimate of output frame size to determine if we need // to recode. if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
save_coding_context(cpi); if (!cpi->sf.use_nonrd_pick_mode)
vp9_pack_bitstream(cpi, dest, dest_size, size);
rc->projected_frame_size = (int)(*size) << 3;
if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
}
if (cpi->ext_ratectrl.ready &&
(cpi->ext_ratectrl.funcs.rc_type & VPX_RC_QP) != 0) { break;
} #if CONFIG_RATE_CTRL if (cpi->oxcf.use_simple_encode_api) { // This part needs to be after save_coding_context() because // restore_coding_context will be called in the end of this function. // TODO(angiebird): This is a hack for making sure the encoder use the // external_quantize_index exactly. Avoid this kind of hack later. if (cpi->encode_command.use_external_quantize_index) { break;
}
// Check if we hit the target bitrate. if (percent_diff <=
cpi->encode_command.target_frame_bits_error_percent ||
rq_history->recode_count >= RATE_CTRL_MAX_RECODE_NUM ||
rq_history->q_index_low >= rq_history->q_index_high) { break;
}
// Prevent possible divide by zero error below for perfect KF
kf_err += !kf_err;
// The key frame is not good enough or we can afford // to make it better without undue risk of popping. if ((kf_err > high_err_target &&
rc->projected_frame_size <= frame_over_shoot_limit) ||
(kf_err > low_err_target &&
rc->projected_frame_size <= frame_under_shoot_limit)) { // Lower q_high
q_high = q > q_low ? q - 1 : q_low;
// Clamp Q to upper and lower limits:
q = clamp(q, q_low, q_high);
loop = q != last_q;
} elseif (recode_loop_test(cpi, frame_over_shoot_limit,
frame_under_shoot_limit, q,
VPXMAX(q_high, top_index), bottom_index)) { // Is the projected frame size out of range and are we allowed // to attempt to recode. int last_q = q; int retries = 0; int qstep;
if (cpi->resize_pending == 1) { // Change in frame size so go back around the recode loop.
cpi->rc.frame_size_selector =
SCALE_STEP1 - cpi->rc.frame_size_selector;
cpi->rc.next_frame_size_selector = cpi->rc.frame_size_selector;
// Frame size out of permitted range: // Update correction factor & compute new Q to try...
// Frame is too large if (rc->projected_frame_size > rc->this_frame_target) { // Special case if the projected size is > the max allowed. if ((q == q_high) &&
((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
(!rc->is_src_frame_alt_ref &&
(rc->projected_frame_size >=
big_rate_miss_high_threshold(cpi))))) { int max_rate = VPXMAX(1, VPXMIN(rc->max_frame_bandwidth,
big_rate_miss_high_threshold(cpi))); double q_val_high;
q_val_high = vp9_convert_qindex_to_q(q_high, cm->bit_depth);
q_val_high =
q_val_high * ((double)rc->projected_frame_size / max_rate);
q_high = vp9_convert_q_to_qindex(q_val_high, cm->bit_depth);
q_high = clamp(q_high, rc->best_quality, rc->worst_quality);
}
// Raise Qlow as to at least the current value
qstep =
get_qstep_adj(rc->projected_frame_size, rc->this_frame_target);
q_low = VPXMIN(q + qstep, q_high);
overshoot_seen = 1;
} else { // Frame is too small
qstep =
get_qstep_adj(rc->this_frame_target, rc->projected_frame_size);
q_high = VPXMAX(q - qstep, q_low);
if (overshoot_seen || loop_at_this_size > 1) {
vp9_rc_update_rate_correction_factors(cpi);
q = (q_high + q_low) / 2;
} else {
vp9_rc_update_rate_correction_factors(cpi);
q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
VPXMIN(q_low, bottom_index), top_index); // Special case reset for qlow for constrained quality. // This should only trigger where there is very substantial // undershoot on a frame and the auto cq level is above // the user passed in value. if (oxcf->rc_mode == VPX_CQ && q < q_low) {
q_low = q;
}
if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) if (loop) restore_coding_context(cpi); #if CONFIG_COLLECT_COMPONENT_TIMING if (loop) printf("\n Recoding:"); #endif
} while (loop);
#ifdef AGGRESSIVE_VBR if (two_pass_first_group_inter(cpi)) {
cpi->twopass.active_worst_quality =
VPXMIN(q + qrange_adj, oxcf->worst_allowed_q);
} elseif (!frame_is_kf_gf_arf(cpi)) { #else if (!frame_is_kf_gf_arf(cpi)) { #endif // Have we been forced to adapt Q outside the expected range by an extreme // rate miss. If so adjust the active maxQ for the subsequent frames. if (!rc->is_src_frame_alt_ref && (q > cpi->twopass.active_worst_quality)) {
cpi->twopass.active_worst_quality = q;
} elseif (oxcf->vbr_corpus_complexity && q == q_low &&
rc->projected_frame_size < rc->this_frame_target) {
cpi->twopass.active_worst_quality =
VPXMAX(q, cpi->twopass.active_worst_quality - 1);
}
}
if (enable_acl) { // Skip recoding, if model diff is below threshold constint thresh = compute_context_model_thresh(cpi); constint diff = compute_context_model_diff(cm); if (diff >= thresh) {
vp9_encode_frame(cpi);
}
} if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
vpx_clear_system_state();
restore_coding_context(cpi);
}
} #endif// !CONFIG_REALTIME_ONLY
// Fold in any reference/entropy overrides supplied externally through
// vp9_update_reference() and vp9_update_entropy(). The overrides apply
// only to the next frame passed to encode_frame_to_data_rate().
static void set_ext_overrides(VP9_COMP *cpi) {
  // Copy externally requested reference-refresh flags, if any were set.
  // NOTE(review): ext_refresh_frame_flags_pending is not cleared here;
  // presumably it is consumed elsewhere in the frame pipeline — confirm.
  if (cpi->ext_refresh_frame_flags_pending) {
    cpi->refresh_last_frame = cpi->ext_refresh_last_frame;
    cpi->refresh_golden_frame = cpi->ext_refresh_golden_frame;
    cpi->refresh_alt_ref_frame = cpi->ext_refresh_alt_ref_frame;
  }
  // Consume a pending frame-context refresh override (one-shot).
  if (cpi->ext_refresh_frame_context_pending) {
    cpi->common.refresh_frame_context = cpi->ext_refresh_frame_context;
    cpi->ext_refresh_frame_context_pending = 0;
  }
}
staticINLINEvoid add_denoise_point(int centre_val, int data_val, int thresh,
uint8_t point_weight, int *sum_val, int *sum_weight) { if (abs(centre_val - data_val) <= thresh) {
*sum_weight += point_weight;
*sum_val += (int)data_val * (int)point_weight;
}
}
// Apply a thresholded, weighted low-pass kernel to a single sample of an
// 8-bit plane: neighbours within 'thresh' of the centre value contribute
// to a weighted average (see add_denoise_point()); outliers are rejected.
// NOTE(review): in the code visible here 'kernel_ptr' is read before any
// assignment — the kernel-size selection / kernel-table setup appears to
// be missing from this excerpt. Confirm against the full source.
staticvoid spatial_denoise_point(uint8_t *src_ptr, constint stride, constint strength) { int sum_weight = 0; int sum_val = 0; int thresh = strength; int kernel_size = 5; int half_k_size = 2; int i, j; int max_diff = 0;
uint8_t *tmp_ptr;
// Walks the kernel weight table, advanced once per tap below.
uint8_t *kernel_ptr;
// Find the maximum deviation from the source point in the locale.
// Scans a (kernel_size+2) x (kernel_size+2) neighbourhood around src_ptr.
tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1); for (i = 0; i < kernel_size + 2; ++i) { for (j = 0; j < kernel_size + 2; ++j) {
max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j]));
}
tmp_ptr += stride;
}
// Apply the kernel
// The centre tap compares equal to itself, so with a nonzero centre
// weight sum_weight stays > 0 and the division below is safe — verify
// the kernel tables guarantee this.
tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size; for (i = 0; i < kernel_size; ++i) { for (j = 0; j < kernel_size; ++j) {
add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernel_ptr,
&sum_val, &sum_weight);
++kernel_ptr;
}
tmp_ptr += stride;
}
// Update the source value with the new filtered value
// (rounded to nearest by adding half the total weight before dividing).
*src_ptr = (uint16_t)((sum_val + (sum_weight >> 1)) / sum_weight);
} #endif// CONFIG_VP9_HIGHBITDEPTH
// Apply thresholded spatial noise suppression to a given buffer. staticvoid spatial_denoise_buffer(VP9_COMP *cpi, uint8_t *buffer, constint stride, constint width, constint height, constint strength) {
VP9_COMMON *const cm = &cpi->common;
uint8_t *src_ptr = buffer; int row; int col;
// Base the filter strength on the current active max Q. constint q = (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality,
cm->bit_depth)); int strength =
VPXMAX(oxcf->arnr_strength >> 2, VPXMIN(oxcf->arnr_strength, (q >> 4)));
// Denoise each of Y,U and V buffers.
spatial_denoise_buffer(cpi, src->y_buffer, src->y_stride, src->y_width,
src->y_height, strength);
// In order to make SSIM_VAR_SCALE in a same scale for both 8 bit // and high bit videos, the variance needs to be divided by 2.0 or // 64.0 separately. // TODO(sdeng): need to tune for 12bit videos. #if CONFIG_VP9_HIGHBITDEPTH if (cpi->Source->flags & YV12_FLAG_HIGHBITDEPTH)
var += vp9_high_get_sby_variance(cpi, &buf, BLOCK_8X8, xd->bd); else #endif
var += vp9_get_sby_variance(cpi, &buf, BLOCK_8X8);
num_of_var += 1.0;
}
}
var = var / num_of_var / 64.0;
// Curve fitting with an exponential model on all 16x16 blocks from the // Midres dataset.
var = 67.035434 * (1 - exp(-0.0021489 * var)) + 17.492222;
cpi->mi_ssim_rdmult_scaling_factors[index] = var;
log_sum += log(var);
}
}
log_sum = exp(log_sum / (double)(num_rows * num_cols));
for (row = 0; row < num_rows; ++row) { for (col = 0; col < num_cols; ++col) { constint index = row * num_cols + col;
cpi->mi_ssim_rdmult_scaling_factors[index] /= log_sum;
}
}
(void)xd;
}
// Process the wiener variance in 16x16 block basis.
// qsort() comparator for plain ints: returns 1, -1 or 0 as *elem1 is
// greater than, less than or equal to *elem2. Uses a branch-free
// three-way compare rather than subtraction, so it cannot overflow.
static int qsort_comp(const void *elem1, const void *elem2) {
  const int a = *(const int *)elem1;
  const int b = *(const int *)elem2;
  return (a > b) - (a < b);
}
if (vp9_svc_check_skip_enhancement_layer(cpi)) return;
set_ext_overrides(cpi);
vpx_clear_system_state();
#ifdef ENABLE_KF_DENOISE // Spatial denoise of key frame. if (is_spatial_denoise_enabled(cpi)) spatial_denoise_frame(cpi); #endif
if (cm->show_existing_frame == 0) { // Update frame index
set_frame_index(cpi, cm);
// Set the arf sign bias for this frame.
set_ref_sign_bias(cpi);
}
// On the very first frame set the deadline_mode_previous_frame to // the current mode. if (cpi->common.current_video_frame == 0)
cpi->deadline_mode_previous_frame = cpi->oxcf.mode;
// Set default state for segment based loop filter update flags.
cm->lf.mode_ref_delta_update = 0;
if (cpi->oxcf.pass == 2 && cpi->sf.adaptive_interp_filter_search)
cpi->sf.interp_filter_search_mask = setup_interp_filter_search_mask(cpi);
// Set various flags etc to special state if it is a key frame. if (frame_is_intra_only(cm)) { // Reset the loop filter deltas and segmentation map.
vp9_reset_segment_features(&cm->seg);
// If segmentation is enabled force a map update for key frames. if (seg->enabled) {
seg->update_map = 1;
seg->update_data = 1;
}
// The alternate reference frame cannot be active for a key frame.
cpi->rc.source_alt_ref_active = 0;
// TODO(jingning): When using show existing frame mode, we assume that the // current ARF will be directly used as the final reconstructed frame. This is // an encoder control scheme. One could in principle explore other // possibilities to arrange the reference frame buffer and their coding order. if (cm->show_existing_frame) {
ref_cnt_fb(cm->buffer_pool->frame_bufs, &cm->new_fb_idx,
cm->ref_frame_map[cpi->alt_fb_idx]);
}
#if !CONFIG_REALTIME_ONLY // Disable segmentation if it decrease rate/distortion ratio if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ)
vp9_try_disable_lookahead_aq(cpi, size, dest, dest_size); #endif
// Special case code to reduce pulsing when key frames are forced at a // fixed interval. Note the reconstruction error if it is the frame before // the force key frame if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) { #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) {
cpi->ambient_err =
vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} #else
cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); #endif// CONFIG_VP9_HIGHBITDEPTH
}
// If the encoder forced a KEY_FRAME decision if (cm->frame_type == KEY_FRAME) cpi->refresh_last_frame = 1;
if (cpi->ext_ratectrl.ready &&
cpi->ext_ratectrl.funcs.update_encodeframe_result != NULL) {
vpx_codec_err_t codec_status = vp9_extrc_update_encodeframe_result(
&cpi->ext_ratectrl, (*size) << 3, cm->base_qindex); if (codec_status != VPX_CODEC_OK) {
vpx_internal_error(&cm->error, codec_status, "vp9_extrc_update_encodeframe_result() failed");
}
} #if CONFIG_REALTIME_ONLY
(void)encode_frame_result;
assert(encode_frame_result == NULL); #else// CONFIG_REALTIME_ONLY if (encode_frame_result != NULL) { const RefCntBuffer *coded_frame_buf =
get_ref_cnt_buffer(cm, cm->new_fb_idx);
RefCntBuffer *ref_frame_bufs[MAX_INTER_REF_FRAMES];
FRAME_UPDATE_TYPE update_type =
cpi->twopass.gf_group.update_type[cpi->twopass.gf_group.index]; int quantize_index = vp9_get_quantizer(cpi);
get_ref_frame_bufs(cpi, ref_frame_bufs); // update_encode_frame_result() depends on twopass.gf_group.index and // cm->new_fb_idx, cpi->Source, cpi->lst_fb_idx, cpi->gld_fb_idx and // cpi->alt_fb_idx are updated for current frame and have // not been updated for the next frame yet. // The update locations are as follows. // 1) twopass.gf_group.index is initialized at define_gf_group by vp9_zero() // for the first frame in the gf_group and is updated for the next frame at // vp9_twopass_postencode_update(). // 2) cpi->Source is updated at the beginning of vp9_get_compressed_data() // 3) cm->new_fb_idx is updated at the beginning of // vp9_get_compressed_data() by get_free_fb(cm). // 4) cpi->lst_fb_idx/gld_fb_idx/alt_fb_idx will be updated for the next // frame at vp9_update_reference_frames(). // This function needs to be called before vp9_update_reference_frames(). // TODO(angiebird): Improve the codebase to make the update of frame // dependent variables more robust.
// Keep track of the frame buffer index updated/refreshed for the // current encoded TL0 superframe. if (cpi->svc.temporal_layer_id == 0) { if (cpi->refresh_last_frame)
cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->lst_fb_idx; elseif (cpi->refresh_golden_frame)
cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->gld_fb_idx; elseif (cpi->refresh_alt_ref_frame)
cpi->svc.fb_idx_upd_tl0[cpi->svc.spatial_layer_id] = cpi->alt_fb_idx;
}
if (cm->seg.update_map) update_reference_segmentation_map(cpi);
if (frame_is_intra_only(cm) == 0) {
release_scaled_references(cpi);
}
vp9_update_reference_frames(cpi);
if (!cm->show_existing_frame) { for (t = TX_4X4; t <= TX_32X32; ++t) {
full_to_model_counts(cpi->td.counts->coef[t],
cpi->td.rd_counts.coef_counts[t]);
}
if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) { if (!frame_is_intra_only(cm)) {
vp9_adapt_mode_probs(cm);
vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
}
vp9_adapt_coef_probs(cm);
}
}
if (cm->frame_type == KEY_FRAME) { // Tell the caller that the frame was coded as a key frame
*frame_flags = cpi->frame_flags | FRAMEFLAGS_KEY;
} else {
*frame_flags = cpi->frame_flags & ~FRAMEFLAGS_KEY;
}
// Clear the one shot update flags for segmentation map and mode/ref loop // filter deltas.
cm->seg.update_map = 0;
cm->seg.update_data = 0;
cm->lf.mode_ref_delta_update = 0;
// keep track of the last coded dimensions
cm->last_width = cm->width;
cm->last_height = cm->height;
// reset to normal state now that we are done. if (!cm->show_existing_frame) {
cm->last_show_frame = cm->show_frame;
cm->prev_frame = cm->cur_frame;
}
if (cm->show_frame) {
vp9_swap_mi_and_prev_mi(cm); if (cpi->use_svc) vp9_inc_frame_in_layer(cpi);
}
update_frame_indexes(cm, cm->show_frame);
if (cpi->use_svc) {
cpi->svc
.layer_context[cpi->svc.spatial_layer_id *
cpi->svc.number_temporal_layers +
cpi->svc.temporal_layer_id]
.last_frame_type = cm->frame_type; // Reset layer_sync back to 0 for next frame.
cpi->svc.spatial_layer_sync[cpi->svc.spatial_layer_id] = 0;
}
cpi->force_update_segmentation = 0;
#if !CONFIG_REALTIME_ONLY if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ)
vp9_alt_ref_aq_unset_all(cpi->alt_ref_aq, cpi); #endif
// do a step update if the duration changes by 10% if (last_duration)
step = (int)((this_duration - last_duration) * 10 / last_duration);
}
if (this_duration) { if (step) {
vp9_new_framerate(cpi, 10000000.0 / this_duration);
} else { // Average this frame's rate into the last second's average // frame rate. If we haven't seen 1 second yet, then average // over the whole interval seen. constdouble interval = VPXMIN(
(double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0); double avg_duration = 10000000.0 / cpi->framerate;
avg_duration *= (interval - avg_duration + this_duration);
avg_duration /= interval;
// Returns 0 if this is not an alt ref else the offset of the source frame // used as the arf midpoint. staticint get_arf_src_index(VP9_COMP *cpi) {
RATE_CONTROL *const rc = &cpi->rc; int arf_src_index = 0; if (is_altref_enabled(cpi)) { if (cpi->oxcf.pass == 2) { const GF_GROUP *const gf_group = &cpi->twopass.gf_group; if (gf_group->update_type[gf_group->index] == ARF_UPDATE) {
arf_src_index = gf_group->arf_src_offset[gf_group->index];
}
} elseif (rc->source_alt_ref_pending) {
arf_src_index = rc->frames_till_gf_update_due;
}
} return arf_src_index;
}
if (rc->is_src_frame_alt_ref) { // Current frame is an ARF overlay frame.
cpi->alt_ref_source = NULL;
// Don't refresh the last buffer for an ARF overlay frame. It will // become the GF so preserve last as an alternative prediction option.
cpi->refresh_last_frame = 0;
}
}
if (cm->frame_type == KEY_FRAME) {
level_stats->ref_refresh_map = 0;
} else { int count = 0;
level_stats->ref_refresh_map |= vp9_get_refresh_mask(cpi); // Also need to consider the case where the encoder refers to a buffer // that has been implicitly refreshed after encoding a keyframe. if (!cm->intra_only) {
level_stats->ref_refresh_map |= (1 << cpi->lst_fb_idx);
level_stats->ref_refresh_map |= (1 << cpi->gld_fb_idx);
level_stats->ref_refresh_map |= (1 << cpi->alt_fb_idx);
} for (i = 0; i < REF_FRAMES; ++i) {
count += (level_stats->ref_refresh_map >> i) & 1;
} if (count > level_spec->max_ref_frame_buffers) {
level_spec->max_ref_frame_buffers = count;
}
}
if (level_index >= 0 && level_constraint->fail_flag == 0) { if (level_spec->max_luma_picture_size >
vp9_level_defs[level_index].max_luma_picture_size) {
level_constraint->fail_flag |= (1 << LUMA_PIC_SIZE_TOO_LARGE);
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Failed to encode to the target level %d. %s",
vp9_level_defs[level_index].level,
level_fail_messages[LUMA_PIC_SIZE_TOO_LARGE]);
}
if (level_spec->max_luma_picture_breadth >
vp9_level_defs[level_index].max_luma_picture_breadth) {
level_constraint->fail_flag |= (1 << LUMA_PIC_BREADTH_TOO_LARGE);
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Failed to encode to the target level %d. %s",
vp9_level_defs[level_index].level,
level_fail_messages[LUMA_PIC_BREADTH_TOO_LARGE]);
}
if ((double)level_spec->max_luma_sample_rate >
(double)vp9_level_defs[level_index].max_luma_sample_rate *
(1 + SAMPLE_RATE_GRACE_P)) {
level_constraint->fail_flag |= (1 << LUMA_SAMPLE_RATE_TOO_LARGE);
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Failed to encode to the target level %d. %s",
vp9_level_defs[level_index].level,
level_fail_messages[LUMA_SAMPLE_RATE_TOO_LARGE]);
}
if (level_spec->max_col_tiles > vp9_level_defs[level_index].max_col_tiles) {
level_constraint->fail_flag |= (1 << TOO_MANY_COLUMN_TILE);
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Failed to encode to the target level %d. %s",
vp9_level_defs[level_index].level,
level_fail_messages[TOO_MANY_COLUMN_TILE]);
}
if (level_spec->min_altref_distance <
vp9_level_defs[level_index].min_altref_distance) {
level_constraint->fail_flag |= (1 << ALTREF_DIST_TOO_SMALL);
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Failed to encode to the target level %d. %s",
vp9_level_defs[level_index].level,
level_fail_messages[ALTREF_DIST_TOO_SMALL]);
}
if (level_spec->max_ref_frame_buffers >
vp9_level_defs[level_index].max_ref_frame_buffers) {
level_constraint->fail_flag |= (1 << TOO_MANY_REF_BUFFER);
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Failed to encode to the target level %d. %s",
vp9_level_defs[level_index].level,
level_fail_messages[TOO_MANY_REF_BUFFER]);
}
if (level_spec->max_cpb_size > vp9_level_defs[level_index].max_cpb_size) {
level_constraint->fail_flag |= (1 << CPB_TOO_LARGE);
vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Failed to encode to the target level %d. %s",
vp9_level_defs[level_index].level,
level_fail_messages[CPB_TOO_LARGE]);
}
// Set an upper bound for the next frame size. It will be used in // level_rc_framerate() before encoding the next frame.
cpb_data_size = 0; for (i = 0; i < CPB_WINDOW_SIZE - 1; ++i) { if (i >= level_stats->frame_window_buffer.len) break;
idx = (level_stats->frame_window_buffer.start +
level_stats->frame_window_buffer.len - 1 - i) %
FRAME_WINDOW_SIZE;
cpb_data_size += level_stats->frame_window_buffer.buf[idx].size;
}
cpb_data_size = cpb_data_size / 125.0;
level_constraint->max_frame_size =
(int)((vp9_level_defs[level_index].max_cpb_size - cpb_data_size) *
1000.0); if (level_stats->frame_window_buffer.len < CPB_WINDOW_SIZE - 1)
level_constraint->max_frame_size >>= 1;
}
}
// Fill ref_frame_coding_indexes[] and ref_frame_valid_list[] (each of
// length MAX_INTER_REF_FRAMES, LAST/GOLDEN/ALT order) for the frame being
// coded. Key frames get no references: indexes are -1 and validity 0.
void vp9_get_ref_frame_info(FRAME_UPDATE_TYPE update_type, int ref_frame_flags,
                            RefCntBuffer *ref_frame_bufs[MAX_INTER_REF_FRAMES],
                            int *ref_frame_coding_indexes,
                            int *ref_frame_valid_list) {
  int i;
  if (update_type == KF_UPDATE) {
    // No reference frame is available when this is a key frame.
    for (i = 0; i < MAX_INTER_REF_FRAMES; ++i) {
      ref_frame_coding_indexes[i] = -1;
      ref_frame_valid_list[i] = 0;
    }
    return;
  }
  {
    // Map each reference slot to its coding index, and mark it valid when
    // the corresponding flag bit is set for this frame.
    const VP9_REFFRAME inter_ref_flags[MAX_INTER_REF_FRAMES] = {
      VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG
    };
    for (i = 0; i < MAX_INTER_REF_FRAMES; ++i) {
      assert(ref_frame_bufs[i] != NULL);
      ref_frame_coding_indexes[i] = ref_frame_bufs[i]->frame_coding_index;
      ref_frame_valid_list[i] = (ref_frame_flags & inter_ref_flags[i]) != 0;
    }
  }
}
// Returns if TPL stats need to be calculated. staticINLINEint should_run_tpl(VP9_COMP *cpi, int gf_group_index) {
RATE_CONTROL *const rc = &cpi->rc; if (!cpi->sf.enable_tpl_model) return 0; // If there is an ARF for this GOP, TPL stats is always calculated. if (gf_group_index == 1 &&
cpi->twopass.gf_group.update_type[gf_group_index] == ARF_UPDATE) return 1; // If this GOP doesn't have an ARF, TPL stats is still calculated, only when // external rate control is used. if (cpi->ext_ratectrl.ready &&
cpi->ext_ratectrl.funcs.send_tpl_gop_stats != NULL &&
rc->frames_till_gf_update_due == rc->baseline_gf_interval &&
cpi->twopass.gf_group.update_type[1] != ARF_UPDATE) { return 1;
} return 0;
}
// Is multi-arf enabled. // Note that at the moment multi_arf is only configured for 2 pass VBR and // will not work properly with svc. // Enable the Jingning's new "multi_layer_arf" code if "enable_auto_arf" // is greater than or equal to 2. if ((oxcf->pass == 2) && !cpi->use_svc && (cpi->oxcf.enable_auto_arf >= 2))
cpi->multi_layer_arf = 1; else
cpi->multi_layer_arf = 0;
// Should we encode an arf frame.
arf_src_index = get_arf_src_index(cpi);
if (arf_src_index) { for (i = 0; i <= arf_src_index; ++i) { struct lookahead_entry *e = vp9_lookahead_peek(cpi->lookahead, i); // Avoid creating an alt-ref if there's a forced keyframe pending. if (e == NULL) { break;
} elseif (e->flags == VPX_EFLAG_FORCE_KF) {
arf_src_index = 0;
flush = 1; break;
}
}
}
// Clear arf index stack before group of pictures processing starts. if (gf_group_index == 1) {
stack_init(cpi->twopass.gf_group.arf_index_stack, MAX_LAG_BUFFERS * 2);
cpi->twopass.gf_group.stack_size = 0;
}
if (arf_src_index) { if (!(cpi->ext_ratectrl.ready &&
(cpi->ext_ratectrl.funcs.rc_type & VPX_RC_GOP) != 0 &&
cpi->ext_ratectrl.funcs.get_gop_decision != NULL)) { // This assert only makes sense when not using external RC.
assert(arf_src_index <= rc->frames_to_key);
} if ((source = vp9_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
cpi->alt_ref_source = source;
#if !CONFIG_REALTIME_ONLY if ((oxcf->mode != REALTIME) && (oxcf->arnr_max_frames > 0) &&
(oxcf->arnr_strength > 0)) { int bitrate = cpi->rc.avg_frame_bandwidth / 40; int not_low_bitrate = bitrate > ALT_REF_AQ_LOW_BITRATE_BOUNDARY;
#if CONFIG_COLLECT_COMPONENT_TIMING
start_timing(cpi, vp9_temporal_filter_time); #endif // Produce the filtered ARF frame.
vp9_temporal_filter(cpi, arf_src_index);
vpx_extend_frame_borders(&cpi->tf_buffer); #if CONFIG_COLLECT_COMPONENT_TIMING
end_timing(cpi, vp9_temporal_filter_time); #endif
// for small bitrates segmentation overhead usually // eats all bitrate gain from enabling delta quantizers if (cpi->oxcf.alt_ref_aq != 0 && not_low_bitrate && not_last_frame)
vp9_alt_ref_aq_setup_mode(cpi->alt_ref_aq, cpi);
if (!source) { // Get last frame source. if (cm->current_video_frame > 0) { if ((last_source = vp9_lookahead_peek(cpi->lookahead, -1)) == NULL) return -1;
}
// Read in the source frame. if (cpi->use_svc || cpi->svc.set_intra_only_frame)
source = vp9_svc_lookahead_pop(cpi, cpi->lookahead, flush); else
source = vp9_lookahead_pop(cpi->lookahead, flush);
if (source != NULL) {
cm->show_frame = 1;
cm->intra_only = 0; // If the flags indicate intra frame, but if the current picture is for // spatial layer above first_spatial_layer_to_encode, it should not be an // intra picture. if ((source->flags & VPX_EFLAG_FORCE_KF) && cpi->use_svc &&
cpi->svc.spatial_layer_id > cpi->svc.first_spatial_layer_to_encode) {
source->flags &= ~(unsignedint)(VPX_EFLAG_FORCE_KF);
}
// Check to see if the frame should be encoded as an arf overlay.
check_src_altref(cpi, source);
}
}
#ifdef ENABLE_KF_DENOISE // Copy of raw source for metrics calculation. if (is_psnr_calc_enabled(cpi))
vp9_copy_and_extend_frame(cpi->Source, &cpi->raw_unscaled_source); #endif
// Clear down mmx registers
vpx_clear_system_state();
// adjust frame rates based on timestamps given if (cm->show_frame) { if (cpi->use_svc && cpi->svc.use_set_ref_frame_config &&
cpi->svc.duration[cpi->svc.spatial_layer_id] > 0)
vp9_svc_adjust_frame_rate(cpi); else
adjust_frame_rate(cpi, source);
}
if (is_one_pass_svc(cpi)) {
vp9_update_temporal_layer_framerate(cpi);
vp9_restore_layer_context(cpi);
}
// Find a free buffer for the new frame, releasing the reference previously // held. if (cm->new_fb_idx != INVALID_IDX) {
--pool->frame_bufs[cm->new_fb_idx].ref_count;
}
cm->new_fb_idx = get_free_fb(cm);
if (cm->new_fb_idx == INVALID_IDX) return -1;
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx]; // If the frame buffer for current frame is the same as previous frame, MV in // the base layer shouldn't be used as it'll cause data race. if (cpi->svc.spatial_layer_id > 0 && cm->cur_frame == cm->prev_frame) {
cpi->svc.use_base_mv = 0;
} // Start with a 0 size frame.
*size = 0;
{ double y, u, v, frame_all;
frame_all = vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u,
&v, bit_depth, in_bit_depth);
adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
}
{ double y, u, v, frame_all;
frame_all = vpx_psnrhvs(cpi->Source, cm->frame_to_show, &y, &u, &v,
bit_depth, in_bit_depth);
adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs);
}
}
}
#endif
#if CONFIG_COLLECT_COMPONENT_TIMING if (oxcf->pass == 2) end_timing(cpi, vp9_get_compressed_data_time);
// Print out timing information. // Note: Use "cpi->frame_component_time[0] > 100 us" to avoid showing of // show_existing_frame and lag-in-frames. // if (cpi->frame_component_time[0] > 100) if (oxcf->pass == 2) {
uint64_t frame_total = 0, total = 0; int i;
fprintf(stderr, "\n Frame number: %d, Frame type: %s, Show Frame: %d, Q: %d\n",
cm->current_video_frame, get_frame_type_enum(cm->frame_type),
cm->show_frame, cm->base_qindex); for (i = 0; i < kTimingComponents; i++) {
cpi->component_time[i] += cpi->frame_component_time[i]; // Use vp9_get_compressed_data_time (i = 0) as the total time. if (i == 0) {
frame_total = cpi->frame_component_time[0];
total = cpi->component_time[0];
}
fprintf(stderr, " %50s: %15" PRId64 " us [%6.2f%%] (total: %15" PRId64 " us [%6.2f%%])\n",
get_component_name(i), cpi->frame_component_time[i],
(float)((float)cpi->frame_component_time[i] * 100.0 /
(float)frame_total),
cpi->component_time[i],
(float)((float)cpi->component_time[i] * 100.0 / (float)total));
cpi->frame_component_time[i] = 0;
}
} #endif
if (is_one_pass_svc(cpi)) { if (cm->show_frame) {
++cpi->svc.spatial_layer_to_encode; if (cpi->svc.spatial_layer_to_encode >= cpi->svc.number_spatial_layers)
cpi->svc.spatial_layer_to_encode = 0;
}
}
// In realtime mode, enable row based multi-threading for all the speed levels // where non-rd path is used. if (cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5 && cpi->oxcf.row_mt) {
cpi->row_mt = 1;
}
if (cpi->row_mt)
cpi->row_mt_bit_exact = 1; else
cpi->row_mt_bit_exact = 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.