/* Resolve the PF number owning a given (CGX, LMAC) port.
 *
 * Returns the PF id, or -ENODEV when no PF is mapped to this port.
 * Assumes only one PF is mapped to a CGX LMAC port, so the first set
 * bit in the map identifies it.
 */
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pf_map;

	pf_map = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
	if (!pf_map)
		return -ENODEV;

	return find_first_bit(&pf_map,
			      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}
/* Return first enabled CGX instance if none are enabled then return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	void *pdata;
	int idx;

	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
		pdata = rvu_cgx_pdata(idx, rvu);
		if (pdata)
			return pdata;
	}

	/* No CGX instance is enabled */
	return NULL;
}
/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x_sel = cgx_lmac_get_p2x(cgx_id, lmac_id);

	/* Firmware sets P2X_SELECT as either NIX0 or NIX1; default to
	 * NIX0 unless NIX1 is both supported and selected.
	 */
	if (is_rvu_supports_nix1(rvu) && p2x_sel == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
	else
		pfvf->nix_blkaddr = BLKADDR_NIX0;
}
/* Allocate and initialize the PF <-> CGX/LMAC mapping table.
 * NOTE(review): this block appears truncated by extraction -- the body
 * below has no closing brace in the visible source and runs into the
 * next definition. Code left byte-identical.
 */
staticint rvu_map_cgx_lmac_pf(struct rvu *rvu)
{ struct npc_pkind *pkind = &rvu->hw->pkind; int cgx_cnt_max = rvu->cgx_cnt_max; int pf = PF_CGXMAP_BASE; unsignedlong lmac_bmap; int size, free_pkind; int cgx, lmac, iter; int numvfs, hwvfs;
/* No CGX devices present -- nothing to map */
if (!cgx_cnt_max) return 0;
/* Ids above 0xF are rejected -- presumably cgx and lmac ids are packed
 * 4 bits each into the u8 map entries; verify against the (missing)
 * mapping code.
 */
if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF) return -EINVAL;
/* Alloc map table * An additional entry is required since PF id starts from 1 and * hence entry at offset 0 is invalid.
*/
size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL); if (!rvu->pf2cgxlmac_map) return -ENOMEM;
/* Initialize all entries with an invalid cgx and lmac id */
memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Read the current link status of a CGX LMAC, queue it on the event
 * list and kick the event worker so it is forwarded to the mapped PF.
 *
 * Returns 0 on success or -ENOMEM if the event entry cannot be
 * allocated. A failed link-status read only skips queueing; the worker
 * is kicked either way and 0 is returned.
 */
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *entry;
	unsigned long flags;
	int ret;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	ret = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&entry->link_event.link_uinfo);
	entry->link_event.cgx_id = cgx_id;
	entry->link_event.lmac_id = lmac_id;
	if (ret)
		kfree(entry);
	else
		list_add_tail(&entry->evq_node, &rvu->cgx_evq_head);
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}
/* This is called from interrupt context and is expected to be atomic */ staticint cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{ struct cgx_evq_entry *qentry; struct rvu *rvu = data;
/* post event to the event queue */
qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); if (!qentry) return -ENOMEM;
qentry->link_event = *event;
spin_lock(&rvu->cgx_evq_lock);
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
spin_unlock(&rvu->cgx_evq_lock);
/* start worker to process the events */
queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
/* NOTE(review): the postevent function's "return 0; }" is missing and
 * the loop below appears to belong to a separate worker function whose
 * header (and the declaration of `flags`) was lost to extraction.
 * Code left byte-identical.
 */
do { /* Dequeue an event */
spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
qentry = list_first_entry_or_null(&rvu->cgx_evq_head, struct cgx_evq_entry,
evq_node); if (qentry)
list_del(&qentry->evq_node);
spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); if (!qentry) break; /* nothing more to process */
event = &qentry->link_event;
/* process event */
cgx_notify_pfs(event, rvu);
kfree(qentry);
} while (1);
}
/* Probe CGX devices, enable receive on every LMAC and start link-up.
 * NOTE(review): this function looks truncated -- `lmac_bmap` and `lmac`
 * are used below but not declared in the visible locals, and the
 * function ends without a return statement despite returning int.
 */
int rvu_cgx_init(struct rvu *rvu)
{ struct mac_ops *mac_ops; int cgx, err; void *cgxd;
/* CGX port id starts from 0 and are not necessarily contiguous * Hence we allocate resources based on the maximum port id value.
*/
rvu->cgx_cnt_max = cgx_get_cgxcnt_max(); if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n"); return 0;
}
/* NOTE(review): `<=` bound looks like an off-by-one if cgx ids run
 * 0..cgx_cnt_max-1 -- confirm against the cgx_idmap allocation.
 */
/* Enable receive on all LMACS */ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) continue;
mac_ops = get_mac_ops(cgxd);
lmac_bmap = cgx_get_lmac_bmap(cgxd);
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
mac_ops->mac_enadis_rx(cgxd, lmac, true);
}
/* Do link up for all CGX ports */ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) continue;
err = cgx_lmac_linkup_start(cgxd); if (err)
dev_err(rvu->dev, "Link up process failed to start on cgx %d\n",
cgx);
}
}
/* Tear down CGX event handling on driver exit.
 * NOTE(review): the locals below are unused in the visible code -- a
 * per-LMAC teardown loop was presumably lost to extraction. Code left
 * byte-identical.
 */
int rvu_cgx_exit(struct rvu *rvu)
{ unsignedlong lmac_bmap; int cgx, lmac; void *cgxd;
/* Ensure event handler unregister is completed */
mb();
rvu_cgx_wq_destroy(rvu); return 0;
}
/* Most of the CGX configuration is restricted to the mapped PF only, * VF's of mapped PF and other PFs are not allowed. This fn() checks * whether a PFFUNC is permitted to do the config or not.
*/ inlinebool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) returnfalse; returntrue;
}
/* NOTE(review): orphan fragment of a stats-reset mbox handler -- the
 * enclosing signature and the `pf`/`parent_pf` declarations are not
 * visible here. Code left byte-identical.
 */
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return LMAC_AF_ERR_PERM_DENIED;
parent_pf = &rvu->pf[pf]; /* To ensure reset cgx stats won't affect VF stats, * check if it used by only PF interface. * If not, return
*/ if (parent_pf->cgx_users > 1) {
dev_info(rvu->dev, "CGX busy, could not reset statistics\n"); return 0;
}
/* Report the number of DMAC filter entries available to the requesting
 * PF.
 * NOTE(review): truncated -- there is no closing brace and the
 * non-exact-match tail of the handler is missing; `pf`, `cgx_id` and
 * `lmac_id` are unused in the visible part. Code left byte-identical.
 */
int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, struct msg_req *req, struct cgx_max_dmac_entries_get_rsp
*rsp)
{ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* If msg is received from PFs(which are not mapped to CGX LMACs) * or VF then no entries are allocated for DMAC filters at CGX level. * So returning zero.
*/ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
rsp->max_dmac_filters = 0; return 0;
}
/* Exact-match table takes precedence over per-CGX DMAC filters */
if (rvu_npc_exact_has_match_table(rvu)) {
rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu); return 0;
}
/* NOTE(review): orphan fragment of a PTP rx-timestamp config helper --
 * `pf`, `cgxd`, `lmac_id`, `pcifunc`, `pfvf` and `enable` come from an
 * enclosing function that is not visible here. Code left byte-identical.
 */
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0;
/* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs, * if received from other PF/VF simply ACK, nothing to do.
*/ if (!is_pf_cgxmapped(rvu, pf)) return -EPERM;
mac_ops = get_mac_ops(cgxd);
mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable); /* If PTP is enabled then inform NPC that packets to be * parsed by this PF will have their data shifted by 8 bytes * and if PTP is disabled then no shift is required
*/ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) return -EINVAL; /* This flag is required to clean up CGX conf if app gets killed */
pfvf->hw_rx_tstamp_en = enable;
/* NOTE(review): orphan fragment of a link-event enable/disable helper
 * -- `en`, `pf`, `cgx_id` and `lmac_id` are declared in the (missing)
 * enclosing function. Code left byte-identical.
 */
if (en) {
/* Mark the PF for link notifications */
set_bit(pf, &rvu->pf_notify_bmap); /* Send the current link status to PF */
rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
} else {
/* Stop notifying this PF about link events */
clear_bit(pf, &rvu->pf_notify_bmap);
}
/* NOTE(review): orphan fragment of an 802.3X flow-control handler --
 * `pf`, `mac_ops`, `cgxd`, `lmac_id`, `tx_pfc` and `rx_pfc` are
 * declared in code not visible here. Code left byte-identical.
 */
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC)) return 0;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do.
*/ if (!is_pf_cgxmapped(rvu, pf)) return LMAC_AF_ERR_PF_NOT_MAPPED;
/* 802.3X pause and PFC are mutually exclusive per LMAC */
mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc); if (tx_pfc || rx_pfc) {
dev_warn(rvu->dev, "Can not configure 802.3X flow control as PFC frames are enabled"); return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
}
/* Mbox handler: configure 802.3X pause frames for the requesting PF.
 * NOTE(review): truncated -- the body ends after the mapping check with
 * no actual configuration and no closing brace. Code left
 * byte-identical.
 */
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, struct cgx_pause_frm_cfg *req, struct cgx_pause_frm_cfg *rsp)
{ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops;
u8 cgx_id, lmac_id; int err = 0; void *cgxd;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do.
*/ if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV;
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those * from its VFs as well. ie. NIX rx/tx counters at the CGX port level
 * NOTE(review): truncated -- `pf` is read below without ever being
 * assigned in the visible code; the lookup that derived it from
 * cgxd/lmac_id appears lost to extraction. Code left byte-identical.
 */ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, int rxtxflag, u64 *stat)
{ struct rvu_block *block; int blkaddr;
u16 pcifunc; int pf, lf;
/* Assumes LF of a PF and all of its VF belongs to the same * NIX block
*/
pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return 0;
block = &rvu->hw->block[blkaddr];
/* Accumulate the requested stat over every LF owned by this PF or
 * one of its VFs (same pcifunc with the FUNC bits masked off).
 */
for (lf = 0; lf < block->lf.max; lf++) { /* Check if a lf is attached to this PF or one of its VFs */ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
~RVU_PFVF_FUNC_MASK))) continue; if (rxtxflag == NIX_STATS_RX)
*stat += rvu_read64(rvu, blkaddr,
NIX_AF_LFX_RX_STATX(lf, index)); else
*stat += rvu_read64(rvu, blkaddr,
NIX_AF_LFX_TX_STATX(lf, index));
}
return 0;
}
/* Start or stop CGX rx/tx on behalf of a PF/VF NIXLF.
 * NOTE(review): truncated -- `pfvf`, `parent_pf` and `cgx_users` are
 * read below without visible initialization, and the mutex_lock that
 * pairs with the mutex_unlock at `exit` is missing; those lines appear
 * lost to extraction. Code left byte-identical.
 */
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{ struct rvu_pfvf *parent_pf, *pfvf; int cgx_users, err = 0;
if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return 0;
if (start && pfvf->cgx_in_use) gotoexit; /* CGX is already started hence nothing to do */ if (!start && !pfvf->cgx_in_use) gotoexit; /* CGX is already stopped hence nothing to do */
/* Start CGX when first of all NIXLFs is started. * Stop CGX when last of all NIXLFs is stopped.
*/ if (!cgx_users) {
err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
start); if (err) {
dev_err(rvu->dev, "Unable to %s CGX\n",
start ? "start" : "stop"); /* Revert the usage count in case of error */
parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
: parent_pf->cgx_users + 1; gotoexit;
}
}
pfvf->cgx_in_use = start; exit:
mutex_unlock(&rvu->cgx_cfg_lock); return err;
}
/* Mbox handler: set the FEC mode for the requesting PF's LMAC.
 * NOTE(review): truncated -- the body ends after the mapping check with
 * no FEC configuration and no closing brace. Code left byte-identical.
 */
int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, struct fec_mode *req, struct fec_mode *rsp)
{ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do.
*/ if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV;
/* Mbox handler: configure priority (PFC) flow control for the
 * requesting PF's LMAC.
 * NOTE(review): truncated -- the body ends after the mapping check with
 * no PFC configuration and no closing brace. Code left byte-identical.
 */
int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, struct cgx_pfc_cfg *req, struct cgx_pfc_rsp *rsp)
{ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops;
u8 cgx_id, lmac_id; void *cgxd; int err;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do.
*/ if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.