/* If queue count is 0, then the traffic * belonging to this class will not use * ETHOFLD queues. So, no need to validate * further.
*/ if (!mqprio->qopt.count[i]) break;
if (!mqprio->qopt.count[j]) continue;
if (max_t(u32, start_a, start_b) <=
min_t(u32, end_a, end_b)) {
netdev_err(dev, "Queues can't overlap across tc\n"); return -EINVAL;
}
}
/* Convert byte per second to bits per second */
min_rate += (mqprio->min_rate[i] * 8);
max_rate += (mqprio->max_rate[i] * 8);
}
if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids) return -ENOMEM;
/* Return if no ETHOFLD structures have been allocated yet */ if (!refcount_read(&adap->tc_mqprio->refcnt)) return;
/* Return if no hardware queues have been allocated */ if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc) return;
for (i = 0; i < pi->nqsets; i++) {
eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
eotxq = &adap->sge.eohw_txq[pi->first_qset + i];
/* Device removal path will already disable NAPI * before unregistering netdevice. So, only disable * NAPI if we're not in device removal path
*/ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
cxgb4_quiesce_rx(&eorxq->rspq);
/* Free up ETHOFLD structures if there are no users */ if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
kfree(adap->sge.eohw_txq);
kfree(adap->sge.eohw_rxq);
}
}
tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
p.u.params.channel = pi->tx_chan; for (i = 0; i < mqprio->qopt.num_tc; i++) { /* Convert from bytes per second to Kbps */
p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
/* Request larger burst buffer for smaller MTU, so * that hardware can work on more data per burst * cycle.
*/ if (dev->mtu <= ETH_DATA_LEN)
p.u.params.burstsize = 8 * dev->mtu;
e = cxgb4_sched_class_alloc(dev, &p); if (!e) {
ret = -ENOMEM; goto out_err;
}
tc_port_mqprio->tc_hwtc_map[i] = e->idx;
}
return 0;
out_err: while (i--)
cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}
/* If we're shutting down, interrupts are disabled and no completions * come back. So, skip waiting for completions in this scenario.
*/ if (!(adap->flags & CXGB4_SHUTTING_DOWN))
init_completion(&eosw_txq->completion);
/* Inform the stack about the configured tc params. * * Set the correct queue map. If no queue count has been * specified, then send the traffic through default NIC * queues; instead of ETHOFLD queues.
*/
ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc); if (ret) goto out_free_eotids;
tot_qcount = pi->nqsets; for (i = 0; i < mqprio->qopt.num_tc; i++) {
qcount = mqprio->qopt.count[i]; if (qcount) {
qoffset = mqprio->qopt.offset[i] + pi->nqsets;
} else {
qcount = pi->nqsets;
qoffset = 0;
}
ret = netdev_set_tc_queue(dev, i, qcount, qoffset); if (ret) goto out_reset_tc;
tot_qcount += mqprio->qopt.count[i];
}
ret = netif_set_real_num_tx_queues(dev, tot_qcount); if (ret) goto out_reset_tc;
/* cxgb4_setup_tc_mqprio - configure/clear mqprio traffic class offload.
 * @dev: net device being configured
 * @mqprio: tc mqprio offload request from the stack
 *
 * Validates the request, quiesces the interface if it is running,
 * tears down any existing offload state, and — unless this is a
 * clear request (num_tc == 0) — allocates hardware traffic classes
 * and re-enables the ETHOFLD offload.
 *
 * Return: 0 on success, negative errno on failure.
 */
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	struct adapter *adap = netdev2adap(dev);
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	mutex_lock(&adap->tc_mqprio->mqprio_mutex);

	/* To configure tc params, the current allocated EOTIDs must
	 * be freed up. However, they can't be freed up if there's
	 * traffic running on the interface. So, ensure interface is
	 * down before configuring tc params.
	 */
	if (netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If requested for clear, then just return since resources are
	 * already freed up by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret) {
		/* Enable failed: release the classes we just allocated
		 * so the hardware is left in the cleared state.
		 */
		cxgb4_mqprio_free_tc(dev);
		goto out;
	}

out:
	/* Restart traffic only if we were the ones who stopped it. */
	if (needs_bring_up) {
		netif_tx_start_all_queues(dev);
		netif_carrier_on(dev);
	}

	/* BUG FIX: the original text acquired mqprio_mutex above but
	 * fell off the end of the function without releasing it or
	 * returning a value — restore the unlock + return epilogue.
	 */
	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
	return ret;
}
if (adap->tc_mqprio) {
mutex_lock(&adap->tc_mqprio->mqprio_mutex); if (adap->tc_mqprio->port_mqprio) { for (i = 0; i < adap->params.nports; i++) { struct net_device *dev = adap->port[i];
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.