/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD
*/
/** * DOC: Overview * * DC is the OS-agnostic component of the amdgpu DC driver. * * DC maintains and validates a set of structs representing the state of the * driver and writes that state to AMD hardware * * Main DC HW structs: * * struct dc - The central struct. One per driver. Created on driver load, * destroyed on driver unload. * * struct dc_context - One per driver. * Used as a backpointer by most other structs in dc. * * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP * plugpoints). Created on driver load, destroyed on driver unload. * * struct dc_sink - One per display. Created on boot or hotplug. * Destroyed on shutdown or hotunplug. A dc_link can have a local sink * (the display directly attached). It may also have one or more remote * sinks (in the Multi-Stream Transport case) * * struct resource_pool - One per driver. Represents the hw blocks not in the * main pipeline. Not directly accessible by dm. * * Main dc state structs: * * These structs can be created and destroyed as needed. There is a full set of * these structs in dc->current_state representing the currently programmed state. * * struct dc_state - The global DC state to track global state information, * such as bandwidth values. * * struct dc_stream_state - Represents the hw configuration for the pipeline from * a framebuffer to a display. Maps one-to-one with dc_sink. * * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, * and may have more in the Multi-Plane Overlay case. * * struct resource_context - Represents the programmable state of everything in * the resource_pool. Not directly accessible by dm. * * struct pipe_ctx - A member of struct resource_context. Represents the * internal hardware pipeline components. Each dc_plane_state has either * one or two (in the pipe-split case).
*/
/* Private functions */
/*
 * Raise *original to @new when @new is a more invasive update type.
 * surface_update_type values are ordered by severity, so keeping the
 * maximum retains the most invasive update type seen so far.
 */
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}
/*
 * Destroy every link created for this dc instance.
 * Called on driver unload; each non-NULL entry in dc->links is released
 * through the link service.
 */
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i] != NULL)
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}
/* When getting the number of connectors, the VBIOS reports the number of valid indices, * but it doesn't say which indices are valid, and not every index has an actual connector. * So, if we don't find a connector on an index, that is not an error. * * - There is no guarantee that the first N indices will be valid * - VBIOS may report a higher amount of valid indices than there are actual connectors * - Some VBIOS have valid configurations for more connectors than there actually are * on the card. This may be because the manufacturer used the same VBIOS for different * variants of the same card.
*/ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i); struct link_init_data link_init_params = {0}; struct dc_link *link;
if (connector_id.id == CONNECTOR_ID_UNKNOWN) continue;
DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
/* Create a link for each usb4 dpia port */
dc->lowest_dpia_link_index = MAX_LINKS; for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link;
link = dc->link_srv->create_link(&link_init_params); if (link) { if (dc->lowest_dpia_link_index > dc->link_count)
dc->lowest_dpia_link_index = dc->link_count;
/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 *
 * Returns true on success (including the no-op case); false if any minimal
 * link encoder creation failed. Encoders created earlier are kept even on
 * partial failure.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	unsigned int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			/* Only fill slots left empty by link construction. */
			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}
/* Destroy any additional DIG link encoder objects created by * create_link_encoders(). * NB: Must only be called after destroy_links().
*/ staticvoid destroy_link_encoders(struct dc *dc)
{ unsignedint num_usb4_dpia; unsignedint num_dig_link_enc; int i;
/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG * link encoders and physical display endpoints and does not require * additional link encoder objects.
*/ if (num_usb4_dpia == 0) return;
for (i = 0; i < num_dig_link_enc; i++) { struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
/*
 * Program a long VTOTAL for @stream through the hardware sequencer's
 * set_long_vtotal hook, using the min/max values from @adjust.
 *
 * Returns true once a pipe driving @stream with a timing generator is found
 * (the hook, when implemented, has been invoked for it); false on invalid
 * arguments, missing current state, or if no matching pipe exists.
 */
static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{
	int i;

	if (!dc || !stream || !adjust)
		return false;

	if (!dc->current_state)
		return false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			if (dc->hwss.set_long_vtotal)
				dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);
			return true;
		}
	}

	return false;
}
/** * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR * @dc: dc reference * @stream: Initial dc stream state * @adjust: Updated parameters for vertical_total_min and vertical_total_max * * Looks up the pipe context of dc_stream_state and updates the * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh * Rate, which is a power-saving feature that targets reducing panel * refresh rate while the screen is static * * Return: %true if the pipe context is found and adjusted; * %false if the pipe context is not found.
*/ bool dc_stream_adjust_vmin_vmax(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{ int i;
/* * Don't adjust DRR while there's bandwidth optimizations pending to * avoid conflicting with firmware updates.
*/ if (dc->ctx->dce_version > DCE_VERSION_MAX) { if ((dc->optimized_required || dc->wm_optimized_required) &&
(stream->adjust.v_total_max != adjust->v_total_max ||
stream->adjust.v_total_min != adjust->v_total_min)) {
stream->adjust.timing_adjust_pending = true; returnfalse;
}
}
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by the DRR logic
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;
	int i;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
				status = true;
				break;
			}
		}
	}

	return status;
}
#ifdefined(CONFIG_DRM_AMD_SECURE_DISPLAY) staticinlinevoid
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{ union dmub_rb_cmd cmd = {0};
/** * dc_stream_configure_crc() - Configure CRC capture for the given stream. * @dc: DC Object * @stream: The stream to configure CRC on. * @crc_window: CRC window (x/y start/end) information * @enable: Enable CRC if true, disable otherwise. * @continuous: Capture CRC on every frame if true. Otherwise, only capture * once. * @idx: Capture CRC on which CRC engine instance * @reset: Reset CRC engine before the configuration * * By default, the entire frame is used to calculate the CRC. * * Return: %false if the stream is not found or CRC capture is not supported; * %true if the stream has been configured.
*/ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, struct crc_params *crc_window, bool enable, bool continuous,
uint8_t idx, bool reset)
{ struct pipe_ctx *pipe; struct crc_params param; struct timing_generator *tg;
/* Default to the union of both windows */
param.selection = UNION_WINDOW_A_B;
param.continuous_mode = continuous;
param.enable = enable;
param.crc_eng_inst = idx;
param.reset = reset;
tg = pipe->stream_res.tg;
/* Only call if supported */ if (tg->funcs->configure_crc) return tg->funcs->configure_crc(tg, ¶m);
DC_LOG_WARNING("CRC capture not supported."); returnfalse;
}
/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @idx: index of crc engine to get CRC from
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);

	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
/*
 * Update the OPP FMT dynamic expansion setting for every pipe driving
 * @stream, then reprogram the OPP with the stream's color depth and signal.
 */
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates*/
	int i;
	struct pipe_ctx *pipe_ctx;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}
for (i = 0; i < MAX_PIPES; i++) { if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
stream) {
pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; break;
}
}
if (!pipes) return; if (option > DITHER_OPTION_MAX) return;
bool dc_stream_set_gamut_remap(struct dc *dc, conststruct dc_stream_state *stream)
{ int i; bool ret = false; struct pipe_ctx *pipes;
dc_exit_ips_for_hw_access(dc);
for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
dc->hwss.program_gamut_remap(pipes);
ret = true;
}
}
return ret;
}
bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{ int i; bool ret = false; struct pipe_ctx *pipes;
dc_exit_ips_for_hw_access(dc);
for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
void dc_stream_set_static_screen_params(struct dc *dc, struct dc_stream_state **streams, int num_streams, conststruct dc_static_screen_params *params)
{ int i, j; struct pipe_ctx *pipes_affected[MAX_PIPES]; int num_pipes_affected = 0;
dc_exit_ips_for_hw_access(dc);
for (i = 0; i < num_streams; i++) { struct dc_stream_state *stream = streams[i];
for (j = 0; j < MAX_PIPES; j++) { if (dc->current_state->res_ctx.pipe_ctx[j].stream
== stream) {
pipes_affected[num_pipes_affected++] =
&dc->current_state->res_ctx.pipe_ctx[j];
}
}
}
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__); goto fail;
}
dc->bw_dceip = dc_dceip;
dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); if (!dc_vbios) {
dm_error("%s: failed to create vbios\n", __func__); goto fail;
}
dc->bw_vbios = dc_vbios;
dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__); goto fail;
}
dc->dcn_soc = dcn_soc;
dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); if (!dcn_ip) {
dm_error("%s: failed to create dcn_ip\n", __func__); goto fail;
}
dc->dcn_ip = dcn_ip;
if (init_params->bb_from_dmub)
dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub; else
dc->dml2_options.bb_from_dmub = NULL;
if (!dc_construct_ctx(dc, init_params)) {
dm_error("%s: failed to create ctx\n", __func__); goto fail;
}
dc_ctx = dc->ctx;
/* Resource should construct all asic specific resources. * This should be the only place where we need to parse the asic id
*/ if (init_params->vbios_override)
dc_ctx->dc_bios = init_params->vbios_override; else { /* Create BIOS parser */ struct bp_init_data bp_init_data;
if (!dc_ctx->gpio_service) {
ASSERT_CRITICAL(false); goto fail;
}
dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); if (!dc->res_pool) goto fail;
/* set i2c speed if not done by the respective dcnxxx__resource.c */ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; if (dc->caps.max_optimizable_video_width == 0)
dc->caps.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); if (!dc->clk_mgr) goto fail; #ifdef CONFIG_DRM_AMD_DC_FP
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
if (dc->res_pool->funcs->update_bw_bounding_box) {
DC_FP_START();
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
DC_FP_END();
} #endif
if (!create_links(dc, init_params->num_virtual_links)) goto fail;
/* Create additional DIG link encoder objects if fewer than the platform * supports were created during link construction.
*/ if (!create_link_encoders(dc)) goto fail;
/* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because * on creation it copies the contents of dc->dml
*/
dc->current_state = dc_state_create(dc, NULL);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__); goto fail;
}
returntrue;
fail: returnfalse;
}
staticvoid disable_all_writeback_pipes_for_stream( conststruct dc *dc, struct dc_stream_state *stream, struct dc_state *context)
{ int i;
for (i = 0; i < stream->num_wb_info; i++)
stream->writeback_info[i].wb_enabled = false;
}
staticvoid apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
{ int i;
/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ if (dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, lock); else { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
// Copied conditions that were previously in dce110_apply_ctx_for_surface if (stream == pipe_ctx->stream) { if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
}
}
}
}
switch (dc->debug.visual_confirm) { case VISUAL_CONFIRM_DISABLE: return; case VISUAL_CONFIRM_PSR: case VISUAL_CONFIRM_FAMS:
pipe_ctx = dc_stream_get_pipe_ctx(stream_state); if (!pipe_ctx) return;
dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color)); return;
default: /* find plane with highest layer_index */ for (i = 0; i < stream_status->plane_count; i++) { if (stream_status->plane_states[i]->visible)
plane_state = stream_status->plane_states[i];
} if (!plane_state) return; /* find pipe that contains plane with highest layer index */ for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (old_pipe->plane_state && !new_pipe->plane_state)
should_disable = true;
}
if (should_disable && old_stream) { bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg; /* When disabling plane for a phantom pipe, we must turn on the * phantom OTG so the disable programming gets the double buffer * update. Otherwise the pipe will be left in a partially disabled * state that can result in underflow or hang when enabling it * again for different use.
*/ if (is_phantom) { if (tg->funcs->enable_crtc) { if (dc->hwseq->funcs.blank_pixel_data)
dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
tg->funcs->enable_crtc(tg);
}
}
if (dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context); if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
dc->hwss.program_front_end_for_ctx(dc, dangling_context);
dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
dc->hwss.post_unlock_program_front_end(dc, dangling_context);
} /* We need to put the phantom OTG back into it's default (disabled) state or we * can get corruption when transition from one SubVP config to a different one. * The OTG is set to disable on falling edge of VUPDATE so the plane disable * will still get it's double buffer update.
*/ if (is_phantom) { if (tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
}
}
}
staticvoid enable_timing_multisync( struct dc *dc, struct dc_state *ctx)
{ int i, multisync_count = 0; int pipe_count = dc->res_pool->pipe_count; struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
for (i = 0; i < pipe_count; i++) { if (!ctx->res_ctx.pipe_ctx[i].stream ||
!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) continue; if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) continue;
multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
multisync_count++;
}
staticvoid program_timing_sync( struct dc *dc, struct dc_state *ctx)
{ int i, j, k; int group_index = 0; int num_group = 0; int pipe_count = dc->res_pool->pipe_count; struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
for (i = 0; i < pipe_count; i++) { if (!ctx->res_ctx.pipe_ctx[i].stream
|| ctx->res_ctx.pipe_ctx[i].top_pipe
|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe) continue;
unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
}
for (i = 0; i < pipe_count; i++) { int group_size = 1; enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; struct pipe_ctx *pipe_set[MAX_PIPES];
/* Add tg to the set, search rest of the tg's for ones with * same timing, add all tgs with same timing to the group
*/ for (j = i + 1; j < pipe_count; j++) { if (!unsynced_pipes[j]) continue; if (sync_type != TIMING_SYNCHRONIZABLE &&
dc->hwss.enable_vblanks_synchronization &&
unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
resource_are_vblanks_synchronizable(
unsynced_pipes[j]->stream,
pipe_set[0]->stream)) {
sync_type = VBLANK_SYNCHRONIZABLE;
pipe_set[group_size] = unsynced_pipes[j];
unsynced_pipes[j] = NULL;
group_size++;
} else if (sync_type != VBLANK_SYNCHRONIZABLE &&
resource_are_streams_timing_synchronizable(
unsynced_pipes[j]->stream,
pipe_set[0]->stream)) {
sync_type = TIMING_SYNCHRONIZABLE;
pipe_set[group_size] = unsynced_pipes[j];
unsynced_pipes[j] = NULL;
group_size++;
}
}
/* set first unblanked pipe as master */ for (j = 0; j < group_size; j++) { bool is_blanked;
if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
is_blanked =
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); else
is_blanked =
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); if (!is_blanked) { if (j == 0) break;
swap(pipe_set[0], pipe_set[j]); break;
}
}
for (k = 0; k < group_size; k++) { struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
/* remove any other unblanked pipes as they have already been synced */ if (dc->config.use_pipe_ctx_sync_logic) { /* check pipe's syncd to decide which pipe to be removed */ for (j = 1; j < group_size; j++) { if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
} else /* link slave pipe's syncd with master pipe */
pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
}
} else { /* remove any other pipes by checking valid plane */ for (j = j + 1; j < group_size; j++) { bool is_blanked;
if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
is_blanked =
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); else
is_blanked =
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
}
}
}
if (stream_count != dc->current_state->stream_count) returntrue;
for (i = 0; i < dc->current_state->stream_count; i++) { if (dc->current_state->streams[i] != streams[i]) returntrue; if (!streams[i]->link->link_state_valid) returntrue;
}
/* Support seamless boot on EDP displays only */ if (sink->sink_signal != SIGNAL_TYPE_EDP) { returnfalse;
}
if (dc->debug.force_odm_combine) {
DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n"); returnfalse;
}
/* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n"); returnfalse;
}
// tg_inst not found if (i == dc->res_pool->stream_enc_count) {
DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n"); returnfalse;
}
if (tg_inst >= dc->res_pool->timing_generator_count) {
DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n"); returnfalse;
}
if (tg_inst != link->link_enc->preferred_engine) {
DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n"); returnfalse;
}
tg = dc->res_pool->timing_generators[tg_inst];
if (!tg->funcs->get_hw_timing) {
DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n"); returnfalse;
}
if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) {
DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n"); returnfalse;
}
if (crtc_timing->h_total != hw_crtc_timing.h_total) {
DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n"); returnfalse;
}
if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) {
DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n"); returnfalse;
}
if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) {
DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n"); returnfalse;
}
if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) {
DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n"); returnfalse;
}
if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) {
DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n"); returnfalse;
}
if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) {
DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n"); returnfalse;
}
if (crtc_timing->v_total != hw_crtc_timing.v_total) {
DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n"); returnfalse;
}
if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) {
DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n"); returnfalse;
}
if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) {
DC_LOG_DEBUG("boot timing validation failed due to v_addressable mismatch\n"); returnfalse;
}
if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) {
DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n"); returnfalse;
}
if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) {
DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n"); returnfalse;
}
if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) {
DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n"); returnfalse;
}
/* block DSC for now, as VBIOS does not currently support DSC timings */ if (crtc_timing->flags.DSC) {
DC_LOG_DEBUG("boot timing validation failed due to DSC\n"); returnfalse;
}
if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) {
DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n"); returnfalse;
}
pix_clk_100hz *= pixels_per_cycle;
}
// Note: In rare cases, HW pixclk may differ from crtc's pixclk // slightly due to rounding issues in 10 kHz units. if (crtc_timing->pix_clk_100hz != pix_clk_100hz) {
DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n"); returnfalse;
}
if (!se || !se->funcs->dp_get_pixel_format) {
DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n"); returnfalse;
}
if (!se->funcs->dp_get_pixel_format(
se,
&hw_crtc_timing.pixel_encoding,
&hw_crtc_timing.display_color_depth)) {
DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n"); returnfalse;
}
if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) {
DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n"); returnfalse;
}
if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) {
DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n"); returnfalse;
}
}
if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n"); returnfalse;
}
if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n"); returnfalse;
}
if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); returnfalse;
}
/*
 * Build a bitmask of pipe indices that currently drive a stream in @context.
 * NOTE(review): the return type is uint8_t, so only the first 8 pipes can be
 * represented — assumes pipe_count <= 8 for consumers of this mask; confirm.
 */
static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}
void dc_z10_restore(conststruct dc *dc)
{ if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
}
/* Save initial state needed for Z10 entry, when the HW sequencer implements
 * the hook (no-op otherwise).
 */
void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}
/* Set a pipe unlock order based on the change in DET allocation and stores it in dc scratch memory * Prevents over allocation of DET during unlock process * e.g. 2 pipe config with different streams with a max of 20 DET segments * Before: After: * - Pipe0: 10 DET segments - Pipe0: 12 DET segments * - Pipe1: 10 DET segments - Pipe1: 8 DET segments * If Pipe0 gets updated first, 22 DET segments will be allocated
*/ staticvoid determine_pipe_unlock_order(struct dc *dc, struct dc_state *context)
{ unsignedint i = 0; struct pipe_ctx *pipe = NULL; struct timing_generator *tg = NULL;
if (!dc->config.set_pipe_unlock_order) return;
memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
/** * dc_commit_state_no_check - Apply context to the hardware * * @dc: DC object with the current status to be updated * @context: New state that will become the current status at the end of this function * * Applies given context to the hardware and copy it into current context. * It's up to the user to release the src context afterwards. * * Return: an enum dc_status result code for the operation
*/ staticenum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{ struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status result = DC_ERROR_UNEXPECTED; struct pipe_ctx *pipe; int i, k, l; struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; struct dc_state *old_state; bool subvp_prev_use = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* Check old context for SubVP */
subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); if (subvp_prev_use) break;
}
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
if (!dcb->funcs->is_accelerated_mode(dcb)) {
disable_vbios_mode_if_required(dc, context);
dc->hwss.enable_accelerated_mode(dc, context);
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i]; //Only delay otg master for a given config if (resource_is_pipe_type(pipe, OTG_MASTER)) { //dc_commit_state_no_check is always a full update
dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false); break;
}
}
}
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
/* When SubVP is active, all HW programming must be done while * SubVP lock is acquired
*/ if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); if (dc->hwss.fams2_global_control_lock)
dc->hwss.fams2_global_control_lock(dc, context, true);
if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
disable_dangling_plane(dc, context); /* re-program planes for existing stream, in case we need to * free up plane resource for later use
*/ if (dc->hwss.apply_ctx_for_surface) { for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->mode_changed) continue;
apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
}
/* Program hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) { /* Application of dc_state to hardware stopped. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; return result;
}
dc_trigger_sync(dc, context);
/* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ for (i = 0; i < context->stream_count; i++) {
uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
determine_pipe_unlock_order(dc, context); /* Program all planes within new context*/ if (dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, context); if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
}
}
/* * enable stereo * TODO rework dc_enable_stereo call to work with validation sets?
*/ for (k = 0; k < MAX_PIPES; k++) {
pipe = &context->res_ctx.pipe_ctx[k];
for (l = 0 ; pipe && l < context->stream_count; l++) { if (context->streams[l] &&
context->streams[l] == pipe->stream &&
dc->hwss.setup_stereo)
dc->hwss.setup_stereo(pipe, dc);
}
}
/*
 * NOTE(review): The following German text is unrelated extraction residue
 * (website boilerplate) that does not belong in this C source file; it is
 * preserved here as a comment so the file remains syntactically valid.
 * Translation: "The information on this website was compiled carefully and
 * to the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 *
 * Original: Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */