// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <net/sch_generic.h>
/* Return the number of HTB leaf nodes this netdev may use: the smaller of
 * the driver's compile-time cap and the limit reported by the device.
 */
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
	int dev_max = mlx5_qos_max_leaf_nodes(mdev);

	return dev_max < MLX5E_QOS_MAX_LEAF_NODES ? dev_max : MLX5E_QOS_MAX_LEAF_NODES;
}
/* TX datapath API */
/* NOTE(review): this region appears to be a corrupted extraction. It opens
 * with the signature of mlx5e_qid_from_qos(), but the body below is truncated
 * and fused with the interiors of several other functions (a QoS SQ stats
 * allocator, txq disable/enable code with memory barriers, and an SQ close
 * path) whose enclosing signatures are missing. The code is kept byte-for-byte
 * identical; verify against the original source tree before editing.
 */
u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{ /* These channel params are safe to access from the datapath, because: * 1. This function is called only after checking selq->htb_maj_id != 0, * and the number of queues can't change while HTB offload is active. * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for * mlx5e_select_queue to finish while holding priv->state_lock, * preventing other code from changing the number of queues.
*/ bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);
/* NOTE(review): from here on the text belongs to a different function (lazy,
 * per-node allocation of QoS SQ statistics) — its signature is not visible.
 */
WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb)); if (!priv->htb_qos_sq_stats) { struct mlx5e_sq_stats **stats_list;
stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev), sizeof(*stats_list), GFP_KERNEL); if (!stats_list) return -ENOMEM;
WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
}
if (!priv->htb_qos_sq_stats[node_qid]) { struct mlx5e_sq_stats *stats;
stats = kzalloc(sizeof(*stats), GFP_KERNEL); if (!stats) return -ENOMEM;
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats); /* Order htb_max_qos_sqs increment after writing the array pointer. * Pairs with smp_load_acquire in en_stats.c.
*/
smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
}
ix = node_qid % params->num_channels;
qid = node_qid / params->num_channels;
c = chs->c[ix];
/* If it's a new queue, it will be marked as started at this point. * Stop it before updating txq2sq.
*/
mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
/* Make the change to txq2sq visible before the queue is started. * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, * which pairs with this barrier.
*/
smp_wmb();
/* Make the change to txq2sq visible before the queue is started again. * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, * which pairs with this barrier.
*/
smp_wmb();
}
/* NOTE(review): the statements below look like a fragment of an SQ close path
 * (rcu_replace_pointer over c->qos_sqs) with no visible enclosing signature.
 */
ix = qid % params->num_channels;
qid /= params->num_channels;
c = priv->channels.c[ix];
qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock)); if (!sq) /* Handle the case when the SQ failed to open. */ return;
/* Dispatch a TC HTB offload request (ndo_setup_tc TC_SETUP_QDISC_HTB) to the
 * matching mlx5e HTB helper. Returns 0 or a negative errno; for queue
 * allocating/querying commands the resulting queue id is reported back
 * through htb_qopt->qid.
 */
int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
	struct mlx5e_htb *htb = priv->htb;
	int err;

	/* Every command except CREATE needs an already-initialized HTB state. */
	if (!htb && htb_qopt->command != TC_HTB_CREATE)
		return -EINVAL;

	/* The device cannot offload per-class prio/quantum parameters. */
	if (htb_qopt->prio || htb_qopt->quantum) {
		NL_SET_ERR_MSG_MOD(htb_qopt->extack, "prio and quantum parameters are not supported by device with HTB offload enabled.");
		return -EOPNOTSUPP;
	}

	switch (htb_qopt->command) {
	case TC_HTB_CREATE:
		if (!mlx5_qos_is_supported(priv->mdev)) {
			NL_SET_ERR_MSG_MOD(htb_qopt->extack, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
			return -EOPNOTSUPP;
		}
		priv->htb = mlx5e_htb_alloc();
		htb = priv->htb;
		if (!htb)
			return -ENOMEM;
		err = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
		if (err) {
			/* Roll back so a later CREATE starts from a clean slate. */
			mlx5e_htb_free(htb);
			priv->htb = NULL;
		}
		return err;
	case TC_HTB_DESTROY:
		mlx5e_htb_cleanup(htb);
		mlx5e_htb_free(htb);
		priv->htb = NULL;
		return 0;
	case TC_HTB_LEAF_ALLOC_QUEUE:
		err = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
						 htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
		if (err < 0)
			return err;
		/* A non-negative result is the queue id chosen for the new leaf. */
		htb_qopt->qid = err;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
					       htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
	case TC_HTB_LEAF_DEL:
		return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
					       htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					       htb_qopt->extack);
	case TC_HTB_NODE_MODIFY:
		return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
					     htb_qopt->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		err = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
		if (err < 0)
			return err;
		htb_qopt->qid = err;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
/* NOTE(review): this function is truncated in the visible source — the body
 * stops after the num_tc bound check and no closing brace is present (the
 * remainder was cut off by the extraction). Code kept byte-identical; consult
 * the original source for the rest of the body before making changes.
 */
int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
u64 max_rate[])
{ int err; int tc;
if (!mlx5_qos_is_supported(mdev)) {
qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device."); return -EOPNOTSUPP;
} if (num_tc > mlx5e_qos_max_leaf_nodes(mdev)) return -EINVAL;
/*
 * NOTE(review): the following disclaimer text is not C code — it appears to be
 * website boilerplate appended during extraction. It is kept (commented out so
 * the file remains parseable) pending removal after confirmation.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */