/* * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management * * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Written by: Atul Gupta (atul.gupta@chelsio.com) * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
*/
if (adap->uld[q->uld].lro_flush)
adap->uld[q->uld].lro_flush(&q->lro_mgr);
}
/** * uldrx_handler - response queue handler for ULD queues * @q: the response queue that received the packet * @rsp: the response queue descriptor holding the offload message * @gl: the gather list of packet fragments * * Deliver an ingress offload packet to a ULD. All processing is done by * the ULD, we just maintain statistics.
*/ staticint uldrx_handler(struct sge_rspq *q, const __be64 *rsp, conststruct pkt_gl *gl)
{ struct adapter *adap = q->adap; struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); int ret;
/* FW can send CPLs encapsulated in a CPL_FW4_MSG */ if (((conststruct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
((conststruct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
/* Step past the encapsulating CPL_FW4_MSG header (2 x __be64) so the
 * ULD sees the inner CPL directly.
 */
rsp += 2;
/* Queues configured with a flush_handler are LRO-capable: hand the
 * message to the ULD's lro_rx_handler together with the LRO manager
 * and NAPI context; otherwise use the plain rx_handler.
 */
if (q->flush_handler)
ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
rsp, gl, &q->lro_mgr,
&q->napi); else
ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
rsp, gl);
ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; if (ciq_size > SGE_MAX_IQ_SIZE) {
dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
ciq_size = SGE_MAX_IQ_SIZE;
}
for (i = rxq_info->nrxq; i < nrxq; i++) { struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
/* This function should be called with uld_mutex taken. */ staticvoid cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{ if (adap->uld[type].handle) {
/* Clear handle/add first so the slot reads as unregistered before the
 * queue resources are torn down.
 */
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
release_sge_txq_uld(adap, type);
/* Only quiesce RX if the adapter was fully brought up. */
if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
/* MSI-X vectors were only requested in MSI-X mode. */
if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings. * @adap: adapter info * @enable: 1 to enable / 0 to disable ktls settings.
*/ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{ int ret = 0;
u32 params =
FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
FW_PARAMS_PARAM_Y_V(enable) |
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
if (enable) { if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) { /* At this moment if ULD connection are up means, other * ULD is/are already active, return failure.
*/ if (cxgb4_uld_in_use(adap)) {
dev_dbg(adap->pdev_dev, "ULD connections (tid/stid) active. Can't enable kTLS\n"); return -EINVAL;
}
ret = t4_set_params(adap, adap->mbox, adap->pf,
0, 1, ¶ms, ¶ms); if (ret) return ret;
refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
} else { /* ktls settings already up, just increment refcount. */
refcount_inc(&adap->chcr_ktls.ktls_refcount);
}
} else { /* return failure if refcount is already 0. */ if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) return -EINVAL; /* decrement refcount and test, if 0, disable ktls feature, * else return command success.
*/ if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
ret = t4_set_params(adap, adap->mbox, adap->pf,
0, 1, ¶ms, ¶ms); if (ret) return ret;
pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
}
}
return ret;
} #endif
staticvoid cxgb4_uld_alloc_resources(struct adapter *adap, enum cxgb4_uld type, conststruct cxgb4_uld_info *p)
{ int ret = 0;
if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
(type != CXGB4_ULD_CRYPTO && !is_offload(adap))) return; if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) return;
ret = cfg_queues_uld(adap, type, p); if (ret) goto out;
ret = setup_sge_queues_uld(adap, type, p->lro); if (ret) goto free_queues; if (adap->flags & CXGB4_USING_MSIX) {
ret = request_msix_queue_irqs_uld(adap, type); if (ret) goto free_rxq;
} if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type); if (adap->uld[type].add) goto free_irq;
ret = setup_sge_txq_uld(adap, type, p); if (ret) goto free_irq;
adap->uld[type] = *p;
ret = uld_attach(adap, type); if (ret) goto free_txq; return;
free_txq:
release_sge_txq_uld(adap, type);
free_irq: if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type); if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
free_sge_queues_uld(adap, type);
free_queues:
free_queues_uld(adap, type);
out:
dev_warn(adap->pdev_dev, "ULD registration failed for uld type %d\n", type);
}
/* cxgb4_register_uld - register an upper-layer driver * @type: the ULD type * @p: the ULD methods * * Registers an upper-layer driver with this driver and notifies the ULD * about any presently available devices that support its type.
*/ void cxgb4_register_uld(enum cxgb4_uld type, conststruct cxgb4_uld_info *p)
{ struct cxgb4_uld_list *uld_entry; struct adapter *adap;
/* Silently reject out-of-range ULD types. */
if (type >= CXGB4_ULD_MAX) return;
/* Registration is best-effort: an allocation failure simply skips it. */
uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL); if (!uld_entry) return;