	/* Limit qopt->hw to maximum supported offload value. Drivers have
	 * the option of overriding this later if they don't support a given
	 * offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;
	/* If hardware offload is requested, we will leave 3 options to the
	 * device driver:
	 * - populate the queue counts itself (and ignore what was requested)
	 * - validate the provided queue counts by itself (and apply them)
	 * - request queue count validation here (and apply them)
	 */
	err = mqprio_validate_qopt(dev, qopt,
				   !qopt->hw || caps->validate_queue_counts,
				   false, extack);
	if (err)
		return err;
	/* If ndo_setup_tc is not present then hardware doesn't support offload
	 * and we should return an error.
	 */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack, "Device does not support hardware offload");
		return -EINVAL;
	}
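
/* A minimal sketch (not part of this file) of the driver side of the
 * contract described above: a hypothetical ndo_setup_tc() implementation
 * that accepts the offload and overrides the requested queue counts, the
 * first of the three options listed. The foo_ name and the one-queue-per-TC
 * layout are invented; TC_SETUP_QDISC_MQPRIO and struct
 * tc_mqprio_qopt_offload are the real offload interface.
 */
static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	netdev_set_num_tc(dev, mqprio->qopt.num_tc);

	/* Ignore the requested counts and populate them from what the
	 * hardware provides: here, hypothetically, one queue per TC.
	 */
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		mqprio->qopt.count[i] = 1;
		mqprio->qopt.offset[i] = i;
		netdev_set_tc_queue(dev, i, mqprio->qopt.count[i],
				    mqprio->qopt.offset[i]);
	}

	return 0;
}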
	nla_for_each_attr_type(n, TCA_MQPRIO_TC_ENTRY, nlattr_opt,
			       nlattr_opt_len, rem) {
		err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
		if (err)
			goto out;
	}
	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		priv->fp[tc] = fp[tc];
		if (fp[tc] == TC_FP_PREEMPTIBLE)
			have_preemption = true;
	}
	if (have_preemption && !ethtool_dev_mm_supported(dev)) {
		NL_SET_ERR_MSG(extack, "Device does not support preemption");
		return -EOPNOTSUPP;
	}
out:
	return err;
}
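
/* For context, a plausible shape for the per-entry parser invoked above,
 * sketched against the TCA_MQPRIO_TC_ENTRY_{INDEX,FP} UAPI attributes.
 * This is illustrative only and is not the file's actual helper.
 */
static int example_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
				  struct nlattr *opt,
				  unsigned long *seen_tcs,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
	u32 tc;
	int err;

	err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt, NULL, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_MQPRIO_TC_ENTRY_INDEX]) {
		NL_SET_ERR_MSG(extack, "TC entry index missing");
		return -EINVAL;
	}

	tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
	if (tc >= TC_QOPT_MAX_QUEUE || (*seen_tcs & BIT(tc)))
		return -EINVAL;
	*seen_tcs |= BIT(tc);

	/* fp[tc] becomes TC_FP_EXPRESS or TC_FP_PREEMPTIBLE */
	if (tb[TCA_MQPRIO_TC_ENTRY_FP])
		fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);

	return 0;
}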
/* Parse the other netlink attributes that represent the payload of
 * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
 */
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *nlattr_opt = nla_data(opt) + NLA_ALIGN(sizeof(*qopt));
	int nlattr_opt_len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1] = {};
	struct nlattr *attr;
	int i, rem, err;
	if (nlattr_opt_len >= nla_attr_size(0)) {
		err = nla_parse_deprecated(tb, TCA_MQPRIO_MAX, nlattr_opt,
					   nlattr_opt_len, mqprio_policy,
					   NULL);
		if (err < 0)
			return err;
	}
	if (!qopt->hw) {
		NL_SET_ERR_MSG(extack,
			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
		return -EINVAL;
	}
	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = nla_get_u16(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = nla_get_u16(tb[TCA_MQPRIO_SHAPER]);
	}
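
	/* The UAPI defines two shaper values: TC_MQPRIO_SHAPER_DCB (the
	 * default) and TC_MQPRIO_SHAPER_BW_RATE; only the latter makes the
	 * min_rate/max_rate attributes parsed below valid.
	 */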
	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
					    "min_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}

		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;

			priv->min_rate[i] = nla_get_u64(attr);
			i++;
		}

		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}
	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
					    "max_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}

		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;

			priv->max_rate[i] = nla_get_u64(attr);
			i++;
		}

		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}
	if (tb[TCA_MQPRIO_TC_ENTRY]) {
		err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
					      extack);
		if (err)
			return err;
	}
	/* If the mqprio options indicate that hardware should own the queue
	 * mapping, then run ndo_setup_tc; otherwise use the supplied and
	 * verified mapping.
	 */
	if (qopt->hw) {
		err = mqprio_enable_offload(sch, qopt, extack);
		if (err)
			return err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}
	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
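
	/* For reference, a userspace invocation exercising both the mapping
	 * above and the bw_rlimit shaper attributes might look like this
	 * (interface name and rates are hypothetical):
	 *
	 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
	 *           map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
	 *           hw 1 mode channel shaper bw_rlimit \
	 *           min_rate 100Mbit 200Mbit max_rate 500Mbit 1Gbit
	 *
	 * "map" fills prio_tc_map[], and "queues" fills count[]/offset[].
	 */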
	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	return 0;
	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at the end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
		spin_lock_bh(qdisc_lock(qdisc));
	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc() - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}
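
/* A concrete reading of the two regions above, assuming a hypothetical
 * device with 8 TX queues and 3 traffic classes: minor numbers 1 through 8
 * resolve to the per-queue Qdiscs, minors TC_H_MIN_PRIORITY through
 * TC_H_MIN_PRIORITY + 2 resolve to the virtual per-TC classes, and any
 * other minor yields 0 (not found).
 */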
		gnet_stats_basic_sync_init(&bstats);

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);
		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
			return;
	}
	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}
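
	/* Each traffic class appears to userspace as a virtual class whose
	 * minor number is TC_H_MIN_PRIORITY + tc, which is why the walk above
	 * offsets ntx by TC_H_MIN_PRIORITY; tc_qdisc_stats_dump() returns
	 * false when the walk should stop early (e.g. the dump callback
	 * reported an error).
	 */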