if (rxfh->key)
memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
if (rxfh->indir) { for (lut = 0; lut < rss_data->rss_lut_size; lut++)
rss_data->rss_lut[lut] = rxfh->indir[lut];
}
err = idpf_config_rss(vport);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
return err;
}
/**
 * idpf_get_channels - get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Report maximum of TX and RX. Report one extra channel to match our MailBox
 * Queue.
 *
 * NOTE(review): this region is garbled by extraction. The declarations below
 * belong to idpf_get_channels, but the statements that follow (request-vs-max
 * queue validation) look like the middle of idpf_set_channels. Several
 * identifiers (dev, vport, vport_config, num_req_tx_q, num_req_rx_q, err,
 * unlock_mutex) are used without visible definitions — restore both
 * functions from the upstream driver source before building.
 */
staticvoid idpf_get_channels(struct net_device *netdev, struct ethtool_channels *ch)
{ struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config *vport_config;
u16 num_txq, num_rxq;
u16 combined;
dev = &vport->adapter->pdev->dev;
/* It's possible to specify number of queues that exceeds max.
 * Stack checks max combined_count and max [tx|rx]_count but not the
 * max combined_count + [tx|rx]_count. These checks should catch that.
 */
if (num_req_tx_q > vport_config->max_q.max_txq) {
dev_info(dev, "Maximum TX queues is %d\n",
vport_config->max_q.max_txq);
err = -EINVAL; goto unlock_mutex;
} if (num_req_rx_q > vport_config->max_q.max_rxq) {
dev_info(dev, "Maximum RX queues is %d\n",
vport_config->max_q.max_rxq);
err = -EINVAL; goto unlock_mutex;
}
/* Nothing to do if the requested counts already match the active ones. */
if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq) goto unlock_mutex;
/**
 * idpf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 * @kring: unused
 * @ext_ack: unused
 *
 * Sets ring parameters. TX and RX rings are controlled separately, but the
 * number of rings is not specified, so all rings get the same settings.
 *
 * NOTE(review): the body below is truncated by extraction — vport, idx and
 * config_data are declared but never assigned, the "unlock_mutex" label the
 * error paths jump to is missing, and the function ends abruptly after the
 * buffer-queue descriptor adjustment. Restore from the upstream source.
 */
staticint idpf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kring, struct netlink_ext_ack *ext_ack)
{ struct idpf_vport_user_config_data *config_data;
u32 new_rx_count, new_tx_count; struct idpf_vport *vport; int i, err = 0;
u16 idx;
/* Reject descriptor counts below the hardware minimum. */
if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
ring->tx_pending,
IDPF_MIN_TXQ_DESC);
err = -EINVAL; goto unlock_mutex;
}
if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
ring->rx_pending,
IDPF_MIN_RXQ_DESC);
err = -EINVAL; goto unlock_mutex;
}
/* Round the requested counts up to the required multiples, telling the
 * user when the value they asked for was adjusted.
 */
new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE); if (new_rx_count != ring->rx_pending)
netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
new_rx_count);
new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE); if (new_tx_count != ring->tx_pending)
netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
new_tx_count);
/* Since we adjusted the RX completion queue count, the RX buffer queue
 * descriptor count needs to be adjusted as well
 */
for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
vport->bufq_desc_count[i] =
IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
vport->num_bufqs_per_qgrp);
/**
 * struct idpf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as offset from a common base pointer. Stats should
 * be defined in constant arrays using the IDPF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the idpf_add_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by variadic
 * arguments to the idpf_add_stat_string() helper function.
 */
struct idpf_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset;
};
/* Helper macro to define an idpf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IDPF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}
/* Helper macros for defining some statistics related to queues */
#define IDPF_RX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
#define IDPF_TX_QUEUE_STAT(_name, _stat) \
	IDPF_STAT(struct idpf_tx_queue, _name, _stat)
/* Stats associated with a Tx queue */ staticconststruct idpf_stats idpf_gstrings_tx_queue_stats[] = {
IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};
/* Stats associated with an Rx queue */ staticconststruct idpf_stats idpf_gstrings_rx_queue_stats[] = {
IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};
/**
 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 * @type: stat type
 * @idx: stat index
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 */
static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
				     const unsigned int size, const char *type,
				     unsigned int idx)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_sprintf(p, "%s_q-%u_%s",
				type, idx, stats[i].stat_string);
}
/**
 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @type: stat type
 * @idx: stat idx
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
 */
#define idpf_add_qstat_strings(p, stats, type, idx) \
	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
/**
 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 * @p: ethtool buffer
 * @stats: struct to copy from
 * @size: size of stats array to copy from
 */
static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
				  const unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ethtool_puts(p, stats[i].stat_string);
}
/**
 * idpf_get_stat_strings - Get stat strings
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the statistics string table
 */
static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_config *vport_config;
	unsigned int i;

	vport_config = np->adapter->vport_config[np->vport_idx];

	/* It's critical that we always report a constant number of strings and
	 * that the strings are reported in the same order regardless of how
	 * many queues are actually in use.
	 */
	for (i = 0; i < vport_config->max_q.max_txq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
				       "tx", i);

	for (i = 0; i < vport_config->max_q.max_rxq; i++)
		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
				       "rx", i);
}
/**
 * idpf_get_strings - Get string set
 * @netdev: network interface device structure
 * @sset: id of string set
 * @data: buffer for string data
 *
 * Builds string tables for various string sets
 */
static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		idpf_get_stat_strings(netdev, data);
		break;
	default:
		break;
	}
}
/** * idpf_get_sset_count - Get length of string set * @netdev: network interface device structure * @sset: id of string set * * Reports size of various string tables.
*/ staticint idpf_get_sset_count(struct net_device *netdev, int sset)
{ struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config *vport_config;
u16 max_txq, max_rxq;
if (sset != ETH_SS_STATS) return -EINVAL;
vport_config = np->adapter->vport_config[np->vport_idx]; /* This size reported back here *must* be constant throughout the * lifecycle of the netdevice, i.e. we must report the maximum length * even for queues that don't technically exist. This is due to the * fact that this userspace API uses three separate ioctl calls to get * stats data but has no way to communicate back to userspace when that * size has changed, which can typically happen as a result of changing * number of queues. If the number/order of stats change in the middle * of this call chain it will lead to userspace crashing/accessing bad * data through buffer under/overflow.
*/
max_txq = vport_config->max_q.max_txq;
max_rxq = vport_config->max_q.max_rxq;
/**
 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pstat: old stat pointer to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is null, data will be zero'd.
 */
static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
				      const struct idpf_stats *stat)
{
	char *p;

	if (!pstat) {
		/* Ensure that the ethtool data buffer is zero'd for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	/* NOTE(review): copy logic reconstructed — the original extract was
	 * cut after the null check; `p` was declared but never used. Widen
	 * the stat to u64 based on its declared size, as promised by the
	 * struct idpf_stats kernel-doc.
	 */
	p = (char *)pstat + stat->stat_offset;
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}
/**
 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @q: the queue to copy
 * @type: type of the queue
 *
 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
 * zero out the queue stat values and update the data pointer. Otherwise
 * safely copy the stats from the queue into the supplied buffer and update
 * the data pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 */
static void idpf_add_queue_stats(u64 **data, const void *q,
				 enum virtchnl2_queue_type type)
{
	const struct u64_stats_sync *stats_sync;
	const struct idpf_stats *stats;
	unsigned int start;
	unsigned int size;
	unsigned int i;

	/* NOTE(review): selection of the stats table reconstructed — the
	 * original extract left stats/size/stats_sync uninitialized.
	 * Assumes both queue structs embed their sync object as stats_sync —
	 * TODO confirm against idpf_txrx.h.
	 */
	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
		const struct idpf_rx_queue *rxq = q;

		size = ARRAY_SIZE(idpf_gstrings_rx_queue_stats);
		stats = idpf_gstrings_rx_queue_stats;
		stats_sync = &rxq->stats_sync;
	} else {
		const struct idpf_tx_queue *txq = q;

		size = ARRAY_SIZE(idpf_gstrings_tx_queue_stats);
		stats = idpf_gstrings_tx_queue_stats;
		stats_sync = &txq->stats_sync;
	}

	/* To avoid invalid statistics values, ensure that we keep retrying
	 * the copy until we get a consistent value according to
	 * u64_stats_fetch_retry.
	 */
	do {
		start = u64_stats_fetch_begin(stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
	} while (u64_stats_fetch_retry(stats_sync, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}
/**
 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 * @data: pointer to data buffer
 * @qtype: type of data queue
 *
 * We must report a constant length of stats back to userspace regardless of
 * how many queues are actually in use because stats collection happens over
 * three separate ioctls and there's no way to notify userspace the size
 * changed between those calls. This adds empty to data to the stats since we
 * don't have a real queue to refer to for this stats slot.
 */
static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
{
	unsigned int i;
	int stats_len;

	/* NOTE(review): stats_len assignment reconstructed — the original
	 * extract read it uninitialized (undefined behavior). Slot width must
	 * match what idpf_add_queue_stats() emits for the same queue type.
	 */
	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
		stats_len = ARRAY_SIZE(idpf_gstrings_rx_queue_stats);
	else
		stats_len = ARRAY_SIZE(idpf_gstrings_tx_queue_stats);

	for (i = 0; i < stats_len; i++)
		(*data)[i] = 0;

	*data += stats_len;
}
/**
 * idpf_add_port_stats - Copy port stats into ethtool buffer
 * @vport: virtual port struct
 * @data: ethtool buffer to copy into
 */
static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
{
	unsigned int size = IDPF_PORT_STATS_LEN;
	unsigned int start;
	unsigned int i;

	/* Retry until a consistent snapshot is read, per u64_stats rules. */
	do {
		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
		for (i = 0; i < size; i++)
			idpf_add_one_ethtool_stat(&(*data)[i], vport,
						  &idpf_gstrings_port_stats[i]);
	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));

	*data += size;
}
/**
 * idpf_collect_queue_stats - accumulate various per queue stats
 * into port level stats
 * @vport: pointer to vport struct
 *
 * NOTE(review): this region is garbled by extraction. The zeroing prologue
 * and Rx-group loop header belong to idpf_collect_queue_stats, but the
 * trailing if (!txq) block uses identifiers (txq, qtype, data) with no
 * visible definitions and appears spliced in from idpf_get_ethtool_stats.
 * Restore both functions from the upstream driver source.
 */
staticvoid idpf_collect_queue_stats(struct idpf_vport *vport)
{ struct idpf_port_stats *pstats = &vport->port_stats; int i, j;
/* zero out port stats since they're actually tracked in per
 * queue stats; this is only for reporting
 */
u64_stats_update_begin(&pstats->stats_sync);
u64_stats_set(&pstats->rx_hw_csum_err, 0);
u64_stats_set(&pstats->rx_hsplit, 0);
u64_stats_set(&pstats->rx_hsplit_hbo, 0);
u64_stats_set(&pstats->rx_bad_descs, 0);
u64_stats_set(&pstats->tx_linearize, 0);
u64_stats_set(&pstats->tx_busy, 0);
u64_stats_set(&pstats->tx_drops, 0);
u64_stats_set(&pstats->tx_dma_map_errs, 0);
u64_stats_update_end(&pstats->stats_sync);
/* Walk every Rx queue group; the per-group queue count depends on the
 * queue model (split vs. single).
 */
for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
u16 num_rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rxq_grp->splitq.num_rxq_sets; else
num_rxq = rxq_grp->singleq.num_rxq;
/* NOTE(review): txq/qtype/data below are undefined here — spliced-in
 * fragment, see function kernel-doc.
 */
if (!txq)
idpf_add_empty_queue_stats(&data, qtype); else
idpf_add_queue_stats(&data, txq, qtype);
}
}
/* NOTE(review): orphaned fragment — the enclosing function header is missing
 * from this extract. By its shape (padding stats to max_q counts, rcu unlock,
 * vport control unlock) this is the tail of idpf_get_ethtool_stats();
 * total/data/is_splitq/rxq_grp/j/qtype/netdev are defined in the missing
 * part. Restore the full function from the upstream driver source.
 */
vport_config = vport->adapter->vport_config[vport->idx];
/* It is critical we provide a constant number of stats back to
 * userspace regardless of how many queues are actually in use because
 * there is no way to inform userspace the size has changed between
 * ioctl calls. This will fill in any missing stats with zero.
 */
for (; total < vport_config->max_q.max_txq; total++)
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
total = 0;
if (is_splitq)
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; else
rxq = rxq_grp->singleq.rxqs[j]; if (!rxq)
idpf_add_empty_queue_stats(&data, qtype); else
idpf_add_queue_stats(&data, rxq, qtype);
}
}
/* Pad the Rx side out to the maximum as well. */
for (; total < vport_config->max_q.max_rxq; total++)
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
rcu_read_unlock();
idpf_vport_ctrl_unlock(netdev);
}
/** * idpf_find_rxq_vec - find rxq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * * returns pointer to rx vector
*/ staticstruct idpf_q_vector *idpf_find_rxq_vec(conststruct idpf_vport *vport, int q_num)
{ int q_grp, q_idx;
if (!idpf_is_queue_model_split(vport->rxq_model)) return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
/** * idpf_find_txq_vec - find txq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * * returns pointer to tx vector
*/ staticstruct idpf_q_vector *idpf_find_txq_vec(conststruct idpf_vport *vport, int q_num)
{ int q_grp;
if (!idpf_is_queue_model_split(vport->txq_model)) return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
return vport->txq_grps[q_grp].complq->q_vector;
}
/** * __idpf_get_q_coalesce - get ITR values for specific queue * @ec: ethtool structure to fill with driver's coalesce settings * @q_vector: queue vector corresponding to this queue * @type: queue type
*/ staticvoid __idpf_get_q_coalesce(struct ethtool_coalesce *ec, conststruct idpf_q_vector *q_vector, enum virtchnl2_queue_type type)
{ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
ec->use_adaptive_rx_coalesce =
IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
ec->rx_coalesce_usecs = q_vector->rx_itr_value;
} else {
ec->use_adaptive_tx_coalesce =
IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
ec->tx_coalesce_usecs = q_vector->tx_itr_value;
}
}
/**
 * idpf_get_q_coalesce - get ITR values for specific queue
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_q_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec,
			       u32 q_num)
{
	const struct idpf_netdev_priv *np = netdev_priv(netdev);
	const struct idpf_vport *vport;
	int err = 0;

	/* NOTE(review): prologue reconstructed — the original extract
	 * dereferenced `vport` without ever assigning it and took the unlock
	 * path without the matching lock. Lock/unlock pairing follows the
	 * idpf_vport_ctrl_{lock,unlock} pattern used elsewhere in this file;
	 * confirm vport acquisition (idpf_netdev_to_vport) against upstream.
	 */
	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);

	/* Coalesce values are only meaningful while the vport is up. */
	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
		err = -EINVAL;
		goto unlock_mutex;
	}

	if (q_num < vport->num_rxq)
		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_RX);

	if (q_num < vport->num_txq)
		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
				      VIRTCHNL2_QUEUE_TYPE_TX);

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
/**
 * idpf_get_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to be filled
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	/* Return coalesce based on queue number zero */
	return idpf_get_q_coalesce(netdev, ec, 0);
}
/**
 * idpf_get_per_q_coalesce - get ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the itr values has to be retrieved
 * @ec: coalesce settings to be filled
 *
 * Return 0 on success, and negative on failure
 *
 * NOTE(review): the function body this kernel-doc describes is missing from
 * this extract; restore it from the upstream driver source.
 */
/**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
 * @q_coal: per queue coalesce settings
 * @qv: queue vector for which itr values has to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 *
 * NOTE(review): body truncated by extraction — every local below is declared
 * but unused and the ITR validation/programming logic between the
 * declarations and `return 0` is missing. Restore from upstream source.
 */
staticint __idpf_set_q_coalesce(conststruct ethtool_coalesce *ec, struct idpf_q_coalesce *q_coal, struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs; bool is_dim_ena = false;
u16 itr_val;
/* Update of static/dynamic itr will be taken care when interrupt is
 * fired
 */
return 0;
}
/**
 * idpf_set_q_coalesce - set ITR values for specific queue
 * @vport: vport associated to the queue that need updating
 * @q_coal: per queue coalesce settings
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 * @is_rxq: is queue type rx
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
			       struct idpf_q_coalesce *q_coal,
			       const struct ethtool_coalesce *ec,
			       int q_num, bool is_rxq)
{
	struct idpf_q_vector *qv;

	/* NOTE(review): qv lookup reconstructed — the original extract tested
	 * `qv` without ever assigning it (undefined behavior). Resolve the
	 * vector via the Rx/Tx finders defined above.
	 */
	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
		      idpf_find_txq_vec(vport, q_num);

	if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
		return -EINVAL;

	return 0;
}
/**
 * idpf_set_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @ec: coalesce settings to program the device with
 * @kec: unused
 * @extack: unused
 *
 * Return 0 on success, and negative on failure
 */
static int idpf_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kec,
			     struct netlink_ext_ack *extack)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_coalesce *q_coal;
	struct idpf_vport *vport;
	int i, err = 0;

	/* NOTE(review): prologue reconstructed — the original extract used
	 * vport and user_config without ever assigning them and took the
	 * unlock path without the matching lock. user_config path assumed
	 * from the vport_config[...] access pattern used elsewhere in this
	 * file — confirm against upstream.
	 */
	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;

	if (np->state != __IDPF_VPORT_UP)
		goto unlock_mutex;

	/* Apply the same settings to every Tx, then every Rx, queue. */
	for (i = 0; i < vport->num_txq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
		if (err)
			goto unlock_mutex;
	}

	for (i = 0; i < vport->num_rxq; i++) {
		q_coal = &user_config->q_coalesce[i];
		err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
		if (err)
			goto unlock_mutex;
	}

unlock_mutex:
	idpf_vport_ctrl_unlock(netdev);

	return err;
}
/**
 * idpf_set_per_q_coalesce - set ITR values as requested by user
 * @netdev: pointer to the netdev associated with this query
 * @q_num: queue for which the itr values has to be set
 * @ec: coalesce settings to program the device with
 *
 * Return 0 on success, and negative on failure
 *
 * NOTE(review): the function body ends after the declarations — the rest of
 * the file was lost to extraction residue. Restore from upstream source.
 */
staticint idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num, struct ethtool_coalesce *ec)
{ struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_user_config_data *user_config; struct idpf_q_coalesce *q_coal; struct idpf_vport *vport; int err;
/*
 * NOTE(review): extraction residue — the extract ended with an unrelated
 * German website disclaimer (in English: "The information on this website
 * has been carefully compiled to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax display and the measurement are still
 * experimental."). It is not part of the driver source and should be removed.
 */