// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
if (!c->max_wq_ring)
c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT; if (!c->max_rq_ring)
c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT; if (!c->max_cq_ring)
c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
min_t(u32, c->max_wq_ring,
max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
min_t(u32, c->max_rq_ring,
max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
for (i = 0; i < enic->wq_count; i++)
vnic_wq_free(&enic->wq[i].vwq); for (i = 0; i < enic->rq_count; i++)
vnic_rq_free(&enic->rq[i].vrq); for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++)
vnic_intr_free(&enic->intr[i]);
}
/* Init INTR resources * * mask_on_assertion is not used for INTx due to the level- * triggered nature of INTx
*/
switch (intr_mode) { case VNIC_DEV_INTR_MODE_MSI: case VNIC_DEV_INTR_MODE_MSIX:
mask_on_assertion = 1; break; default:
mask_on_assertion = 0; break;
}
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
enic->config.intr_timer_usec,
enic->config.intr_timer_type,
mask_on_assertion);
}
}
int enic_alloc_vnic_resources(struct enic *enic)
{ enum vnic_dev_intr_mode intr_mode; int rq_cq_desc_size; unsignedint i; int err;
switch (enic->ext_cq) { case ENIC_RQ_CQ_ENTRY_SIZE_16:
rq_cq_desc_size = 16; break; case ENIC_RQ_CQ_ENTRY_SIZE_32:
rq_cq_desc_size = 32; break; case ENIC_RQ_CQ_ENTRY_SIZE_64:
rq_cq_desc_size = 64; break; default:
dev_err(enic_get_dev(enic), "Unable to determine rq cq desc size: %d",
enic->ext_cq);
err = -ENODEV; goto err_out;
}
/* Allocate queue resources
*/
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
enic->config.wq_desc_count, sizeof(struct wq_enet_desc)); if (err) goto err_out_cleanup;
}
for (i = 0; i < enic->rq_count; i++) {
err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
enic->config.rq_desc_count, sizeof(struct rq_enet_desc)); if (err) goto err_out_cleanup;
}
for (i = 0; i < enic->cq_count; i++) { if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.rq_desc_count,
rq_cq_desc_size); else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count, sizeof(struct cq_enet_wq_desc)); if (err) goto err_out_cleanup;
}
for (i = 0; i < enic->intr_count; i++) {
err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i); if (err) goto err_out_cleanup;
}
/* * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support * that command
*/ void enic_ext_cq(struct enic *enic)
{
u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0; int wait = 1000; int ret;
spin_lock_bh(&enic->devcmd_lock);
ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait); if (ret || a0) {
dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET not supported.");
enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16; goto out;
}
a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
enic->ext_cq = fls(a1) - 1;
a0 = VNIC_RQ_ALL;
a1 = enic->ext_cq;
ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait); if (ret) {
dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
}
out:
spin_unlock_bh(&enic->devcmd_lock);
dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
16 << enic->ext_cq);
}