/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h .
 */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};
/* ethtool .get_strings handler: write the stat / priv-flag name strings
 * for @stringset into @data, in the same order the stat values are
 * reported elsewhere in the driver.
 */
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u8 *out = (char *)data;	/* write cursor advanced by the ethtool_* helpers */
	int num_tx_queues = gve_num_tx_queues(priv);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		/* global stats first, then per-RX-queue, per-TX-queue, adminq */
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
			ethtool_puts(&out, gve_gstrings_main_stats[i]);

		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			for (j = 0; j < NUM_GVE_RX_CNTS; j++)
				ethtool_sprintf(&out, gve_gstrings_rx_stats[j],
						i);

		for (i = 0; i < num_tx_queues; i++)
			for (j = 0; j < NUM_GVE_TX_CNTS; j++)
				ethtool_sprintf(&out, gve_gstrings_tx_stats[j],
						i);

		for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
			ethtool_cpy(&out, gve_gstrings_adminq_stats[i]);
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
			ethtool_puts(&out, gve_gstrings_priv_flags[i]);
		break;

	default:
		break;
	}
}
/*
 * NOTE(review): this region appears to be several different functions spliced
 * together by a bad merge or extraction:
 *   - the signature below says gve_get_sset_count(), but the body that follows
 *     is the NIC stats-report preprocessing and per-TX-ring stat fill that
 *     belongs in an ethtool get_ethtool_stats handler;
 *   - from the "Changing combined is not allowed" comment onward the code is
 *     channel-count validation and ring-size application logic (set_channels /
 *     set_ringparam style).
 * Many identifiers used here (base_stats_idx, max_stats_idx, num_stopped_rxqs,
 * num_stopped_txqs, skip_nic_stats, stats_idx, report_stats,
 * rx_qid_to_stats_idx, tx_qid_to_stats_idx, data, i, j, ring, start,
 * tmp_tx_bytes, cmd, old_settings, new_rx, new_tx, new_tx_cfg, tx_alloc_cfg,
 * rx_alloc_cfg, new_tx_desc_cnt, new_rx_desc_cnt, err) are never declared in
 * the visible text, and the RX-ring walk opened below has no body — TODO:
 * reconstruct this region against the original driver sources before building.
 */
staticint gve_get_sset_count(struct net_device *netdev, int sset)
{ struct gve_priv *priv = netdev_priv(netdev); int num_tx_queues;
/* For rx cross-reporting stats, start from nic rx stats in report */
/* NOTE(review): the stats report lays out driver TX stats, then driver RX
 * stats, then NIC-written stats; this computes where the NIC RX region starts.
 */
base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; /* The boundary between driver stats and NIC stats shifts if there are * stopped queues.
*/
base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
max_stats_idx = NIC_RX_STATS_REPORT_NUM *
(priv->rx_cfg.num_queues - num_stopped_rxqs) +
base_stats_idx; /* Preprocess the stats report for rx, map queue id to start index */
/* Build rx_qid_to_stats_idx: for each NIC RX stat group, record where that
 * queue's group begins; bail out entirely if the NIC has written nothing yet.
 */
skip_nic_stats = false; for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
stats_idx += NIC_RX_STATS_REPORT_NUM) {
u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
if (stat_name == 0) { /* no stats written by NIC yet */
skip_nic_stats = true; break;
/* NOTE(review): queue_id is u32, so the "queue_id < 0" test below is always
 * false; only the upper-bound check does anything.
 */
} if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
net_err_ratelimited("Invalid rxq id in NIC stats\n"); continue;
}
rx_qid_to_stats_idx[queue_id] = stats_idx;
/* NOTE(review): the RX-ring walk opened on the next line has no body in the
 * visible text — the code jumps straight to the TX preprocessing. Missing
 * content; TODO confirm against the original.
 */
} /* walk RX rings */ if (priv->rx) { for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { struct gve_rx_ring *rx = &priv->rx[ring];
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
max_stats_idx = NIC_TX_STATS_REPORT_NUM *
(num_tx_queues - num_stopped_txqs) +
max_stats_idx; /* Preprocess the stats report for tx, map queue id to start index */
/* Same mapping pass as above, for the NIC TX stat region. */
skip_nic_stats = false; for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
stats_idx += NIC_TX_STATS_REPORT_NUM) {
u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
if (stat_name == 0) { /* no stats written by NIC yet */
skip_nic_stats = true; break;
/* NOTE(review): as above, queue_id is u32 so "queue_id < 0" is always false. */
} if (queue_id < 0 || queue_id >= num_tx_queues) {
net_err_ratelimited("Invalid txq id in NIC stats\n"); continue;
}
tx_qid_to_stats_idx[queue_id] = stats_idx;
/* Per-TX-ring stat fill: driver counters, then NIC-reported counters, then
 * XDP counters, appended into data[] at running index i.
 */
} /* walk TX rings */ if (priv->tx) { for (ring = 0; ring < num_tx_queues; ring++) { struct gve_tx_ring *tx = &priv->tx[ring];
if (gve_is_gqi(priv)) {
data[i++] = tx->req;
data[i++] = tx->done;
data[i++] = tx->req - tx->done;
} else { /* DQO doesn't currently support * posted/completed descriptor counts;
*/
data[i++] = 0;
data[i++] = 0;
data[i++] =
(tx->dqo_tx.tail - tx->dqo_tx.head) &
tx->mask;
/* u64_stats retry loop: re-read bytes_done until a consistent snapshot. */
} do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error; /* stats from NIC */
/* Skip (but still account for) the NIC slots when the report is unusable. */
stats_idx = tx_qid_to_stats_idx[ring]; if (skip_nic_stats || stats_idx < 0) { /* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
} else { for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
data[i++] = value;
}
} /* XDP counters */ do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
data[i + 1] = tx->xdp_xmit;
data[i + 2] = tx->xdp_xmit_errors;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
i += 3; /* XDP tx counters */
}
} else {
i += num_tx_queues * NUM_GVE_TX_CNTS;
}
/* NOTE(review): from here on the code is channel/ring-size configuration
 * logic — a different ethtool handler entirely. Splice point; TODO
 * reconstruct.
 */
/* Changing combined is not allowed */ if (cmd->combined_count != old_settings.combined_count) return -EINVAL;
if (!new_rx || !new_tx) return -EINVAL;
if (priv->xdp_prog) { if (new_tx != new_rx ||
(2 * new_tx > priv->tx_cfg.max_queues)) {
dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed"); return -EINVAL;
}
/* One XDP TX queue per RX queue. */
new_tx_cfg.num_xdp_queues = new_rx;
} else {
new_tx_cfg.num_xdp_queues = 0;
}
/* get current queue configuration */
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* copy over the new ring_size from ethtool */
tx_alloc_cfg.ring_size = new_tx_desc_cnt;
rx_alloc_cfg.ring_size = new_rx_desc_cnt;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); if (err) return err;
}
/* Set new ring_size for the next up */
priv->tx_desc_cnt = new_tx_desc_cnt;
priv->rx_desc_cnt = new_rx_desc_cnt;
return 0;
}
/*
 * gve_validate_req_ring_size() - sanity-check requested descriptor counts.
 * @priv: driver private state holding the device's min/max supported counts.
 * @new_tx_desc_cnt: requested TX descriptor ring size.
 * @new_rx_desc_cnt: requested RX descriptor ring size.
 *
 * Both counts must lie within the device-advertised [min, max] range and be
 * powers of two.
 *
 * Return: 0 on success, -EINVAL (after a dev_err) otherwise.
 */
static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
				      u16 new_rx_desc_cnt)
{
	/* check for valid range */
	if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
	    new_tx_desc_cnt > priv->max_tx_desc_cnt ||
	    new_rx_desc_cnt < priv->min_rx_desc_cnt ||
	    new_rx_desc_cnt > priv->max_rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
		return -EINVAL;
	}

	/* hardware requires power-of-two ring sizes */
	if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
		dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
		return -EINVAL;
	}
	return 0;
}
/* NOTE(review): fragment — the enclosing function's header and the
 * declarations of priv and ret_flags are not in the visible text. The shape
 * (read a flag bit from priv->ethtool_flags, mirror it into a returned
 * bitmask) suggests an ethtool get_priv_flags handler; TODO confirm and
 * restore the missing opening.
 */
/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */ if (priv->ethtool_flags & BIT(0))
ret_flags |= BIT(0); return ret_flags;
}
/* NOTE(review): fragment — the head of a switch over cmd->cmd handling
 * ETHTOOL_GRXRINGS (report RX queue count) and the start of
 * ETHTOOL_GRXCLSRLCNT (query flow-rule stats via the admin queue). The
 * enclosing function, the rest of this case, and the switch's closing brace
 * are missing from the visible text; TODO restore before building.
 */
switch (cmd->cmd) { case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_cfg.num_queues; break; case ETHTOOL_GRXCLSRLCNT: if (!priv->max_flow_rules) return -EOPNOTSUPP;
err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0); if (err) return err;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
(vorverarbeitet)
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.