/* Fragment (enclosing function signature is outside this chunk):
 * translates the IEEE 802.1Qaz ETS transmission-selection algorithm of
 * each traffic class into the driver's internal TM scheduling mode and
 * DWRR weight table.
 */
for (i = 0; i < HNAE3_MAX_TC; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT:
hdev->tm_info.tc_info[i].tc_sch_mode =
HCLGE_SCH_MODE_SP;
/* strict-priority TCs carry no DWRR weight */
hdev->tm_info.pg_info[0].tc_dwrr[i] = 0; break; case IEEE_8021QAZ_TSA_ETS:
hdev->tm_info.tc_info[i].tc_sch_mode =
HCLGE_SCH_MODE_DWRR;
/* ETS TCs take their bandwidth share from dcbnl */
hdev->tm_info.pg_info[0].tc_dwrr[i] =
ets->tc_tx_bw[i]; break; default: /* Hardware only supports SP (strict priority) * or ETS (enhanced transmission selection) * algorithms, if we receive some other value * from dcbnl, then throw an error.
*/ return -EINVAL;
}
}
/* Fragment: reports the current priority->TC map and per-TC ETS
 * bandwidth back to dcbnl; TCs beyond num_tc report zero bandwidth.
 * NOTE(review): the loop's closing brace is not visible in this chunk —
 * the source appears truncated here.
 */
for (i = 0; i < HNAE3_MAX_TC; i++) {
ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; if (i < hdev->tm_info.num_tc)
ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; else
ets->tc_tx_bw[i] = 0;
/* Fragment (enclosing function not fully visible): validates the
 * requested per-TC scheduling mode against the current configuration
 * and sets *changed when the hardware would need reprogramming.
 */
for (i = 0; i < HNAE3_MAX_TC; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_SP)
*changed = true; break; case IEEE_8021QAZ_TSA_ETS: if (i >= tc_num) {
dev_err(&hdev->pdev->dev, "tc%u is disabled, cannot set ets bw\n",
i); return -EINVAL;
}
/* The hardware will switch to sp mode if bandwidth is * 0, so limit ets bandwidth must be greater than 0.
*/ if (!ets->tc_tx_bw[i]) {
dev_err(&hdev->pdev->dev, "tc%u ets bw cannot be 0\n", i); return -EINVAL;
}
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;
/* Fragment of the ETS apply path: quiesce the client and enable the TM
 * flush before touching buffers, then best-effort restore — failures
 * after the flush are recorded in last_bad_ret instead of returned
 * immediately, so the device is always flushed off and brought back up.
 */
ret = hclge_pause_setup_hw(hdev, false); if (ret) return ret;
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); if (ret) return ret;
ret = hclge_tm_flush_cfg(hdev, true); if (ret) return ret;
/* No matter whether the following operations are performed * successfully or not, disabling the tm flush and notify * the network status to up are necessary. * Do not return immediately.
*/
ret = hclge_buffer_alloc(hdev); if (ret)
last_bad_ret = ret;
ret = hclge_tm_flush_cfg(hdev, false); if (ret)
last_bad_ret = ret;
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); if (ret)
last_bad_ret = ret;
/* Fragment of the DSCP app-entry add path: records the new
 * DSCP->priority mapping, programs it into hardware, and unwinds the
 * dcbnl registration on failure. A pre-existing entry for the same
 * protocol (old_app) is replaced rather than counted twice.
 */
h->kinfo.dscp_prio[app->protocol] = app->priority;
ret = hclge_dscp_to_tc_map(hdev); if (ret) {
dev_err(&hdev->pdev->dev, "failed to set dscp to tc map, ret = %d\n", ret);
/* hardware update failed: restore the old mapping and drop the
 * dcbnl entry that was just added
 */
h->kinfo.dscp_prio[app->protocol] = old_app.priority;
(void)dcb_ieee_delapp(netdev, app); return ret;
}
vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP; if (old_app.priority == HNAE3_PRIO_ID_INVALID)
h->kinfo.dscp_app_cnt++; else
ret = dcb_ieee_delapp(netdev, &old_app);
/* Fragment of the DSCP app-entry delete path: removes the dcbnl entry,
 * invalidates the driver's DSCP->priority slot, reprograms hardware,
 * and rolls both back if the hardware update fails. When the last DSCP
 * entry is gone, the TC mapping falls back to priority mode.
 */
ret = dcb_ieee_delapp(netdev, app); if (ret) return ret;
h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
ret = hclge_dscp_to_tc_map(hdev); if (ret) {
dev_err(&hdev->pdev->dev, "failed to del dscp to tc map, ret = %d\n", ret);
/* restore the mapping and re-register the dcbnl entry */
h->kinfo.dscp_prio[app->protocol] = app->priority;
(void)dcb_ieee_setapp(netdev, app); return ret;
}
if (h->kinfo.dscp_app_cnt)
h->kinfo.dscp_app_cnt--;
/* no DSCP entries left: revert to priority-based TC mapping */
if (!h->kinfo.dscp_app_cnt) {
vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
ret = hclge_up_to_tc_map(hdev);
}
/* Fragment: tail of a setdcbx-style handler (signature outside this
 * chunk). Returns 1 (dcbnl's "rejected" convention) for unsupported
 * DCBX modes, otherwise stores the new capability mask and returns 0.
 */
/* No support for LLD_MANAGED modes or CEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
(mode & DCB_CAP_DCBX_VER_CEE) ||
!(mode & DCB_CAP_DCBX_HOST)) return 1;
hdev->dcbx_cap = mode;
return 0;
}
/* hclge_mqprio_qopt_check - validate mqprio queue options from tc/dcbnl
 * @hdev: device private data
 * @mqprio_qopt: requested mqprio configuration
 *
 * Checks that each TC's queue count is a power of two and within the PF
 * RSS limit, that queue offsets start at 0 and are contiguous, that no
 * per-TC tx rate limiting is requested, and that the total queue count
 * does not exceed the TQPs allocated to the PF.
 *
 * Return: 0 on success, -EINVAL or -EOPNOTSUPP on invalid options.
 */
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u16 queue_sum = 0;
	int ret;
	int i;

	/* num_tc == 0 means "destroy mqprio"; treat it as a single TC */
	if (!mqprio_qopt->qopt.num_tc) {
		mqprio_qopt->qopt.num_tc = 1;
		return 0;
	}

	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
					mqprio_qopt->qopt.prio_tc_map);
	if (ret)
		return ret;

	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count must be power of 2\n");
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count should be no more than %u\n",
				hdev->pf_rss_size_max);
			return -EINVAL;
		}

		/* each TC's queues must start right after the previous TC's */
		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
			dev_err(&hdev->pdev->dev,
				"qopt queue offset must start from 0, and being continuous\n");
			return -EINVAL;
		}

		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
			dev_err(&hdev->pdev->dev,
				"qopt tx_rate is not supported\n");
			return -EOPNOTSUPP;
		}

		queue_sum = mqprio_qopt->qopt.offset[i];
		queue_sum += mqprio_qopt->qopt.count[i];
	}

	if (hdev->vport[0].alloc_tqps < queue_sum) {
		dev_err(&hdev->pdev->dev,
			"qopt queue count sum should be less than %u\n",
			hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}
/* hclge_config_tc - apply a new TC configuration to the device
 * @hdev: device private data
 * @tc_info: requested TC count and user-priority -> TC mapping
 *
 * Refreshes the TM scheduling info for the new TC count, copies the
 * priority -> TC map into the device's tm_info, then reprograms the
 * queue-to-TC mapping in hardware.
 *
 * Return: 0 on success, a negative errno from hclge_map_update() otherwise.
 */
static int hclge_config_tc(struct hclge_dev *hdev,
			   struct hnae3_tc_info *tc_info)
{
	int i;

	hclge_tm_schd_info_update(hdev, tc_info->num_tc);

	/* install the new user-priority map before re-mapping queues */
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

	return hclge_map_update(hdev);
}
/* Set up TC for hardware offloaded mqprio in channel mode.
 * @h: nic handle from the stack
 * @mqprio_qopt: requested mqprio configuration (num_tc == 0 destroys it)
 *
 * Validates the request, quiesces the client, applies the new TC
 * configuration, and rolls back to the previous configuration on
 * failure. Return: 0 on success, negative errno otherwise.
 */
static int hclge_setup_tc(struct hnae3_handle *h,
			  struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info old_tc_info;
	u8 tc = mqprio_qopt->qopt.num_tc;
	int ret;

	/* if client unregistered, it's not allowed to change
	 * mqprio configuration, which may cause uninit ring
	 * fail.
	 */
	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return -EBUSY;

	/* mqprio and DCB ETS are mutually exclusive */
	kinfo = &vport->nic.kinfo;
	if (kinfo->tc_info.dcb_ets_active)
		return -EINVAL;

	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check mqprio qopt params, ret = %d\n", ret);
		return ret;
	}

	kinfo->tc_info.mqprio_destroy = !tc;

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	/* snapshot the current TC info so the error path can roll back;
	 * without this the rollback memcpy below reads uninitialized memory.
	 */
	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));

	/* NOTE(review): the step that folds mqprio_qopt into kinfo->tc_info
	 * (upstream calls a hclge_sync_mqprio_qopt() helper here) is not
	 * visible in this chunk — confirm it has not been dropped.
	 */
	ret = hclge_config_tc(hdev, &kinfo->tc_info);
	if (ret)
		goto err_out;

	return hclge_notify_init_up(hdev);

err_out:
	if (!tc) {
		dev_warn(&hdev->pdev->dev,
			 "failed to destroy mqprio, will active after reset, ret = %d\n",
			 ret);
	} else {
		/* roll-back to the saved configuration */
		memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
		if (hclge_config_tc(hdev, &kinfo->tc_info))
			dev_err(&hdev->pdev->dev,
				"failed to roll back tc configuration\n");
	}

	/* bring the client back up even if the rollback failed */
	hclge_notify_init_up(hdev);

	return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.