/**
 * DOC: SLPC - Dynamic Frequency management
 *
 * Single Loop Power Control (SLPC) is a GuC algorithm that manages
 * GT frequency based on busyness and how KMD initializes it. SLPC is
 * almost completely in control after initialization except for a few
 * scenarios mentioned below.
 *
 * KMD uses the concept of waitboost to ramp frequency to RP0 when there
 * are pending submissions for a context. It achieves this by sending GuC a
 * request to update the min frequency to RP0. Waitboost is disabled
 * when the request retires.
 *
 * Another form of frequency control happens through per-context hints.
 * A context can be marked as low latency during creation. That will ensure
 * that SLPC uses an aggressive frequency ramp when that context is active.
 *
 * Power profiles add another level of control to these mechanisms.
 * When power saving profile is chosen, SLPC will use conservative
 * thresholds to ramp frequency, thus saving power. KMD will disable
 * waitboosts as well, which achieves further power savings. Base profile
 * is default and ensures balanced performance for any workload.
 *
 * Lastly, users have some level of control through sysfs, where min/max
 * frequency values can be altered and the use of efficient freq
 * can be toggled.
 */
/*
 * slpc_mem_set_param - write one SLPC override parameter into shared memory.
 * @data: SLPC shared data blob consumed by the GuC firmware
 * @id: override parameter id (index into the override tables)
 * @value: value to apply for @id
 *
 * Stores @value and sets the matching valid bit; GuC reads and applies any
 * parameter whose bit is set. Fix: the original had the merged token
 * "staticvoid", which does not compile.
 */
static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);

	/*
	 * When the flag bit is set, corresponding value will be read
	 * and applied by SLPC.
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}
/*
 * slpc_mem_set_enabled - enable an SLPC task via its paired override params.
 * @data: SLPC shared data blob consumed by the GuC firmware
 * @enable_id: parameter id that turns the task on
 * @disable_id: parameter id that forces the task off
 *
 * Enabling a param involves setting the enable_id to 1 and disable_id to 0.
 * Fix: the original had the merged token "staticvoid", which does not
 * compile.
 */
static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}
/*
 * slpc_mem_set_disabled - disable an SLPC task via its paired override params.
 * @data: SLPC shared data blob consumed by the GuC firmware
 * @enable_id: parameter id that turns the task on
 * @disable_id: parameter id that forces the task off
 *
 * Disabling a param involves setting the enable_id to 0 and disable_id to 1.
 * Fix: the original had the merged token "staticvoid", which does not
 * compile.
 */
static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}
/*
 * NOTE(review): this span looks garbled — two function bodies below
 * (apparently the tail of slpc_force_min_freq() and a waitboost worker
 * that reads slpc->num_waiters) are missing their signatures in this
 * chunk. TODO: restore the missing heads from the original file before
 * building.
 *
 * This function is a little different as compared to
 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
 * here since this is used to temporarily change min freq,
 * for example, during a waitboost. Caller is responsible for
 * checking bounds.
 */
with_intel_runtime_pm(&i915->runtime_pm, wakeref) { /* Non-blocking request will avoid stalls */
ret = slpc_set_param_nb(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq); if (ret)
guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
freq, ERR_PTR(ret));
}
/*
 * Raise min freq to boost. It's possible that
 * this is greater than current max. But it will
 * certainly be limited by RP0. An error setting
 * the min param is not fatal.
 */
mutex_lock(&slpc->lock); if (atomic_read(&slpc->num_waiters)) {
err = slpc_force_min_freq(slpc, slpc->boost_freq); if (!err)
slpc->num_boosts++;
}
mutex_unlock(&slpc->lock);
}
/*
 * NOTE(review): this chunk appears garbled — the body of
 * intel_guc_slpc_init() below is missing the shared-data allocation and
 * whatever sets 'ret', 'data' and 'i915', and the balancer-disable
 * fragment at the end is missing its enclosing scope. TODO: restore from
 * the original file before building.
 */
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{ struct intel_guc *guc = slpc_to_guc(slpc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); int err;
if (!ret) { if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
guc_probe_error(guc, "SLPC not enabled! State = %s\n",
slpc_get_state_string(slpc)); return -EIO;
}
}
/*
 * Don't allow balancer related algorithms on platforms before
 * Xe_LPG, where GuC started to restrict it to TDP limited scenarios.
 */
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)) {
slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
SLPC_PARAM_TASK_DISABLE_BALANCER);
/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	/* Reject anything outside [min softlimit .. RP0] or below HW min */
	if (val > slpc->rp0_freq ||
	    val < slpc->min_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	/* Remember the new softlimit only once GuC has accepted it */
	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}
/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int err = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Have GuC refresh its task-state snapshot before decoding */
		err = slpc_query_task_state(slpc);
		if (err == 0)
			*val = slpc_decode_max_freq(slpc);
	}

	return err;
}
/*
 * NOTE(review): the body of intel_guc_slpc_set_ignore_eff_freq() is
 * missing from this chunk — only the local declarations survive.
 * TODO: restore from the original file before building.
 */
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
{ struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref; int ret;
/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ret = slpc_set_param(slpc,
			     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
			     val);

	/* Return standardized err code for sysfs calls */
	if (ret)
		ret = -EIO;

	/* Record the new softlimit on success, mirroring set_max_freq() */
	if (!ret)
		slpc->min_freq_softlimit = val;

	/*
	 * Bug fix: the original returned without releasing the runtime-pm
	 * wakeref or slpc->lock, leaking a wakeref and deadlocking the next
	 * caller of any lock-taking SLPC path (e.g. waitboost).
	 */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&slpc->lock);

	return ret;
}
/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int err = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Have GuC refresh its task-state snapshot before decoding */
		err = slpc_query_task_state(slpc);
		if (err == 0)
			*val = slpc_decode_min_freq(slpc);
	}

	return err;
}
/*
 * intel_guc_slpc_set_strategy - select an SLPC frequency-ramp strategy.
 * @slpc: pointer to intel_guc_slpc.
 * @val: strategy bitmask forwarded to SLPC_PARAM_STRATEGIES.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int err = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = slpc_set_param(slpc, SLPC_PARAM_STRATEGIES, val);

	return err;
}
/*
 * intel_guc_slpc_set_media_ratio_mode - set the media/GT frequency ratio mode.
 * @slpc: pointer to intel_guc_slpc.
 * @val: ratio mode forwarded to SLPC_PARAM_MEDIA_FF_RATIO_MODE.
 *
 * Return: 0 on success, -ENODEV if the platform lacks media ratio mode,
 * other non-zero error code on failure.
 */
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int err = 0;

	if (!HAS_MEDIA_RATIO_MODE(i915))
		return -ENODEV;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = slpc_set_param(slpc, SLPC_PARAM_MEDIA_FF_RATIO_MODE, val);

	return err;
}
/*
 * NOTE(review): this chunk appears truncated — the tail of
 * intel_guc_slpc_set_power_profile() (return path, and presumably
 * runtime-pm/lock handling) and the head of the function that owns the
 * pm_intrmsk_mbz fragment at the end are missing. TODO: restore from the
 * original file before building.
 */
int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val)
{ struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref; int ret = 0;
if (val > SLPC_POWER_PROFILES_POWER_SAVING) return -EINVAL;
ret = slpc_set_param(slpc,
SLPC_PARAM_POWER_PROFILE,
val); if (ret)
guc_err(slpc_to_guc(slpc), "Failed to set power profile to %d: %pe\n",
val, ERR_PTR(ret)); else
slpc->power_profile = val;
/*
 * Allow GuC to receive ARAT timer expiry event.
 * This interrupt register is setup by RPS code
 * when host based Turbo is enabled.
 */
pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
/*
 * slpc_set_softlimits - apply (or initialize) the min/max softlimits.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Softlimits are initially equivalent to platform limits
 * unless they have deviated from defaults, in which case,
 * we retain the values and set min/max accordingly.
 * Fix: the original had merged tokens "staticint" and "elseif", which do
 * not compile.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	if (!slpc->max_freq_softlimit) {
		slpc->max_freq_softlimit = slpc->rp0_freq;
		slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
	} else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);
	}

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit) {
		/* Min softlimit is initialized to RPn */
		slpc->min_freq_softlimit = slpc->min_freq;
		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
	} else {
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);
	}

	return 0;
}
staticbool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{ int slpc_min_freq; int ret;
ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq); if (ret) {
guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret)); returnfalse;
}
if (slpc_min_freq == SLPC_MAX_FREQ_MHZ) returntrue; else returnfalse;
}
/*
 * update_server_min_softlimit - clamp server-part min frequency to RP0.
 * @slpc: pointer to intel_guc_slpc.
 *
 * For server parts, SLPC min will be at RPMax.
 * Use min softlimit to clamp it to RP0 instead.
 * Only applies when no user softlimit has been set yet.
 * Fix: the original had the merged token "staticvoid", which does not
 * compile.
 */
static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
{
	if (!slpc->min_freq_softlimit &&
	    is_slpc_min_freq_rpmax(slpc)) {
		slpc->min_is_rpmax = true;
		slpc->min_freq_softlimit = slpc->rp0_freq;
		(slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
	}
}
/*
 * slpc_use_fused_rp0 - force SLPC's max frequency to the fused platform RP0.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Fix: the original had the merged token "staticint", which does not
 * compile (also corrects the "to used" comment typo).
 *
 * Return: 0 on success, non-zero error code on failure.
 */
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}
/*
 * NOTE(review): tail of a function whose head is missing from this chunk
 * (presumably the routine that reads RP values) — it defaults the boost
 * frequency to RP0 when unset. TODO: restore the missing head from the
 * original file before building.
 */
if (!slpc->boost_freq)
slpc->boost_freq = slpc->rp0_freq;
}
/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending reset event to GuC SLPC. Initial data is setup in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		guc_probe_error(guc, "SLPC Reset event returned: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));

	slpc_get_rp_values(slpc);

	/* Handle the case where min=max=RPmax */
	update_server_min_softlimit(slpc);

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	/* Set cached value of ignore efficient freq */
	intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	/* Set cached media freq ratio mode */
	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);

	/* Enable SLPC Optimized Strategy for compute */
	intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);

	/* Set cached value of power_profile */
	ret = intel_guc_slpc_set_power_profile(slpc, slpc->power_profile);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC power profile: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	return 0;
}
/*
 * intel_guc_slpc_set_boost_freq - change the waitboost target frequency.
 * @slpc: pointer to intel_guc_slpc.
 * @val: boost frequency (MHz); must lie within [min_freq, rp0_freq].
 *
 * If waiters are currently boosting, the new value is pushed to GuC
 * immediately; otherwise it only takes effect on the next boost.
 *
 * Return: 0 on success, -EINVAL for out-of-range @val, -EIO if GuC
 * rejects the immediate update.
 */
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters))
			ret = slpc_force_min_freq(slpc, val) ? -EIO : 0;

		/* Cache the new boost target unless the update failed */
		if (!ret)
			slpc->boost_freq = val;
	}

	mutex_unlock(&slpc->lock);
	return ret;
}
/*
 * intel_guc_slpc_dec_waiters - drop one waitboost waiter reference.
 * @slpc: pointer to intel_guc_slpc.
 *
 * When the last waiter retires, min frequency is returned to the
 * softlimit. This is called during request retire, so a set_param
 * failure is deliberately not propagated.
 */
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}
/*
 * NOTE(review): intel_guc_slpc_print_info() is truncated in this chunk —
 * the body ends mid-function (the decode/print of slpc_tasks, the use of
 * @p and @data, and the return path are missing). TODO: restore from the
 * original file before building.
 */
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{ struct drm_i915_private *i915 = slpc_to_i915(slpc); struct slpc_shared_data *data = slpc->vaddr; struct slpc_task_state_data *slpc_tasks;
intel_wakeref_t wakeref; int ret = 0;
GEM_BUG_ON(!slpc->vma);
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_query_task_state(slpc);
/*
 * NOTE(review): stray non-code text (a German website disclaimer) was
 * appended here, apparently by the extraction tool. English translation,
 * kept for reference: "The information on this web page was compiled
 * carefully and to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax rendering and the measurement are
 * still experimental." This text is unrelated to the driver and should
 * be removed when the file is restored.
 */