/* Return false if at least one of the input flows is not extracted */ return !unsupported;
}
/* Return the printable name for a key field identifier.
 *
 * @hdr: key field number (index into npc_flow_names[]).
 *
 * Out-of-range identifiers map to the NPC_UNKNOWN placeholder name so
 * callers may pass unvalidated field numbers safely.
 */
const char *npc_get_field_name(u8 hdr)
{
	if (hdr >= ARRAY_SIZE(npc_flow_names))
		return npc_flow_names[NPC_UNKNOWN];

	return npc_flow_names[hdr];
}
/* Compute keyword masks and figure out the number of keywords a field * spans in the key.
*/ staticvoid npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
u8 nr_bits, int start_kwi, int offset, u8 intf)
{ struct npc_key_field *field = &mcam->rx_key_fields[type];
u8 bits_in_kw; int max_kwi;
staticbool npc_check_overlap_fields(struct npc_key_field *input1, struct npc_key_field *input2)
{ int kwi;
/* Fields with same layer id and different ltypes are mutually * exclusive hence they can be overlapped
*/ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
input1->layer_mdata.ltype != input2->layer_mdata.ltype) returnfalse;
for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) { if (input1->kw_mask[kwi] & input2->kw_mask[kwi]) returntrue;
}
returnfalse;
}
/* Helper function to check whether given field overlaps with any other fields * in the key. Due to limitations on key size and the key extraction profile in * use higher layers can overwrite lower layer's header fields. Hence overlap * needs to be checked.
*/ staticbool npc_check_overlap(struct rvu *rvu, int blkaddr, enum key_fields type, u8 start_lid, u8 intf)
{ struct npc_mcam *mcam = &rvu->hw->mcam; struct npc_key_field *dummy, *input; int start_kwi, offset;
u8 nr_bits, lid, lt, ld;
u64 cfg;
staticvoid npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
u8 key_nibble, u8 intf)
{
u8 offset = (key_nibble * 4) % 64; /* offset within key word */
u8 kwi = (key_nibble * 4) / 64; /* which word in key */
u8 nr_bits = 4; /* bits in a nibble */
u8 type;
switch (bit_number) { case 0 ... 2:
type = NPC_CHAN; break; case 3:
type = NPC_ERRLEV; break; case 4 ... 5:
type = NPC_ERRCODE; break; case 6:
type = NPC_LXMB; break; /* check for LTYPE only as of now */ case 9:
type = NPC_LA; break; case 12:
type = NPC_LB; break; case 15:
type = NPC_LC; break; case 18:
type = NPC_LD; break; case 21:
type = NPC_LE; break; case 24:
type = NPC_LF; break; case 27:
type = NPC_LG; break; case 30:
type = NPC_LH; break; default: return;
}
staticvoid npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
{ struct npc_mcam *mcam = &rvu->hw->mcam; struct npc_key_field *key_fields; /* Ether type can come from three layers * (ethernet, single tagged, double tagged)
*/ struct npc_key_field *etype_ether; struct npc_key_field *etype_tag1; struct npc_key_field *etype_tag2; /* Outer VLAN TCI can come from two layers * (single tagged, double tagged)
*/ struct npc_key_field *vlan_tag1; struct npc_key_field *vlan_tag2; /* Inner VLAN TCI for double tagged frames */ struct npc_key_field *vlan_tag3;
u64 *features;
u8 start_lid; int i;
key_fields = mcam->rx_key_fields;
features = &mcam->rx_features;
if (is_npc_intf_tx(intf)) {
key_fields = mcam->tx_key_fields;
features = &mcam->tx_features;
}
/* Handle header fields which can come from multiple layers like * etype, outer vlan tci. These fields should have same position in * the key otherwise to install a mcam rule more than one entry is * needed which complicates mcam space management.
*/
etype_ether = &key_fields[NPC_ETYPE_ETHER];
etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
vlan_tag3 = &key_fields[NPC_VLAN_TAG3];
/* if key profile programmed does not extract Ethertype at all */ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n"); goto vlan_tci;
}
/* if key profile programmed extracts Ethertype from one layer */ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
key_fields[NPC_ETYPE] = *etype_ether; if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
key_fields[NPC_ETYPE] = *etype_tag1; if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
key_fields[NPC_ETYPE] = *etype_tag2;
/* if key profile programmed extracts Ethertype from multiple layers */ if (etype_ether->nr_kws && etype_tag1->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n"); goto vlan_tci;
}
}
key_fields[NPC_ETYPE] = *etype_tag1;
} if (etype_ether->nr_kws && etype_tag2->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n"); goto vlan_tci;
}
}
key_fields[NPC_ETYPE] = *etype_tag2;
} if (etype_tag1->nr_kws && etype_tag2->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n"); goto vlan_tci;
}
}
key_fields[NPC_ETYPE] = *etype_tag2;
}
/* check none of higher layers overwrite Ethertype */
start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1; if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n"); goto vlan_tci;
}
*features |= BIT_ULL(NPC_ETYPE);
vlan_tci: /* if key profile does not extract outer vlan tci at all */ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n"); goto done;
}
/* if key profile extracts outer vlan tci from one layer */ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
key_fields[NPC_OUTER_VID] = *vlan_tag1; if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
key_fields[NPC_OUTER_VID] = *vlan_tag2;
/* if key profile extracts outer vlan tci from multiple layers */ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n"); goto done;
}
}
key_fields[NPC_OUTER_VID] = *vlan_tag2;
} /* check none of higher layers overwrite outer vlan tci */
start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1; if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n"); goto done;
}
*features |= BIT_ULL(NPC_OUTER_VID);
/* for tcp/udp/sctp corresponding layer type should be in the key */ if (*features & proto_flags) { if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
*features &= ~proto_flags; else
*features |= BIT_ULL(NPC_IPPROTO_TCP) |
BIT_ULL(NPC_IPPROTO_UDP) |
BIT_ULL(NPC_IPPROTO_SCTP) |
BIT_ULL(NPC_IPPROTO_ICMP);
}
/* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
*features |= BIT_ULL(NPC_IPPROTO_AH);
*features |= BIT_ULL(NPC_IPPROTO_ICMP);
*features |= BIT_ULL(NPC_IPPROTO_ICMP6);
}
/* for ESP, check if corresponding layer type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
*features |= BIT_ULL(NPC_IPPROTO_ESP);
/* for vlan corresponding layer type should be in the key */ if (*features & BIT_ULL(NPC_OUTER_VID)) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
/* Allow extracting SPI field from AH and ESP headers at same offset */ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
*features |= BIT_ULL(NPC_IPSEC_SPI);
/* for vlan ethertypes corresponding layer type should be in the key */ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
BIT_ULL(NPC_VLAN_ETYPE_STAG);
/* for L2M/L2B/L3M/L3B, check if the type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
*features |= BIT_ULL(NPC_LXMB);
for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) { if (npc_check_field(rvu, blkaddr, hdr, intf))
*features |= BIT_ULL(hdr);
}
}
/* Scan key extraction profile and record how fields of our interest
 * fill the key structure. Also verify Channel and DMAC exists in
 * key and not overwritten by other header fields.
 *
 * @blkaddr: NPC block address.
 * @intf:    interface (RX or TX) whose KEX configuration is scanned.
 *
 * Return: 0 (the scan records results in mcam state via helpers).
 */
static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 lid, lt, ld, bitnr;
	u64 cfg, masked_cfg;
	u8 key_nibble = 0;

	/* Scan and note how parse result is going to be in key.
	 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
	 * parse result in the key. The enabled nibbles from parse result
	 * will be concatenated in key.
	 */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
	masked_cfg = cfg & NPC_PARSE_NIBBLE;
	for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
		npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Ignore exact match bits for mcam entries except the first rule
	 * which is drop on hit. This first rule is configured explicitly by
	 * exact match code.
	 */
	masked_cfg = cfg & NPC_EXACT_NIBBLE;
	bitnr = NPC_EXACT_NIBBLE_START;
	for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
			      NPC_EXACT_NIBBLE_END + 1) {
		npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Scan and note how layer data is going to be in key */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				/* Skip extractors with ldata disabled */
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
					       intf);
			}
		}
	}

	return 0;
}
staticint npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
{ int err;
err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX); if (err) return err;
err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX); if (err) return err;
/* Channel is mandatory */ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
dev_err(rvu->dev, "Channel not present in Key\n"); return -EINVAL;
} /* check that none of the fields overwrite channel */ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
dev_err(rvu->dev, "Channel cannot be overwritten\n"); return -EINVAL;
}
/* npc_update_entry - Based on the masks generated during * the key scanning, updates the given entry with value and * masks for the field of interest. Maximum 16 bytes of a packet * header can be extracted by HW hence lo and hi are sufficient. * When field bytes are less than or equal to 8 then hi should be * 0 for value and mask. * * If exact match of value is required then mask should be all 1's. * If any bits in mask are 0 then corresponding bits in value are * dont care.
*/ void npc_update_entry(struct rvu *rvu, enum key_fields type, struct mcam_entry *entry, u64 val_lo,
u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{ struct npc_mcam *mcam = &rvu->hw->mcam; struct mcam_entry dummy = { {0} }; struct npc_key_field *field;
u64 kw1, kw2, kw3;
u8 shift; int i;
field = &mcam->rx_key_fields[type]; if (is_npc_intf_tx(intf))
field = &mcam->tx_key_fields[type];
if (!field->nr_kws) return;
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (!field->kw_mask[i]) continue; /* place key value in kw[x] */
shift = __ffs64(field->kw_mask[i]); /* update entry value */
kw1 = (val_lo << shift) & field->kw_mask[i];
dummy.kw[i] = kw1; /* update entry mask */
kw1 = (mask_lo << shift) & field->kw_mask[i];
dummy.kw_mask[i] = kw1;
if (field->nr_kws == 1) break; /* place remaining bits of key value in kw[x + 1] */ if (field->nr_kws == 2) { /* update entry value */
kw2 = shift ? val_lo >> (64 - shift) : 0;
kw2 |= (val_hi << shift);
kw2 &= field->kw_mask[i + 1];
dummy.kw[i + 1] = kw2; /* update entry mask */
kw2 = shift ? mask_lo >> (64 - shift) : 0;
kw2 |= (mask_hi << shift);
kw2 &= field->kw_mask[i + 1];
dummy.kw_mask[i + 1] = kw2; break;
} /* place remaining bits of key value in kw[x + 1], kw[x + 2] */ if (field->nr_kws == 3) { /* update entry value */
kw2 = shift ? val_lo >> (64 - shift) : 0;
kw2 |= (val_hi << shift);
kw2 &= field->kw_mask[i + 1];
kw3 = shift ? val_hi >> (64 - shift) : 0;
kw3 &= field->kw_mask[i + 2];
dummy.kw[i + 1] = kw2;
dummy.kw[i + 2] = kw3; /* update entry mask */
kw2 = shift ? mask_lo >> (64 - shift) : 0;
kw2 |= (mask_hi << shift);
kw2 &= field->kw_mask[i + 1];
kw3 = shift ? mask_hi >> (64 - shift) : 0;
kw3 &= field->kw_mask[i + 2];
dummy.kw_mask[i + 1] = kw2;
dummy.kw_mask[i + 2] = kw3; break;
}
} /* dummy is ready with values and masks for given key * field now clear and update input entry with those
*/ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (!field->kw_mask[i]) continue;
entry->kw[i] &= ~field->kw_mask[i];
entry->kw_mask[i] &= ~field->kw_mask[i];
/* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet * values to be programmed in MCAM should as below: * val_high: 0xfe80000000000000 * val_low: 0x2c6863fffe5e2d0a
*/ if (features & BIT_ULL(NPC_SIP_IPV6)) {
be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
/* For tcp/udp/sctp LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_TCP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_UDP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_SCTP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_ICMP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
0, ~0ULL, 0, intf);
/* For AH, LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_AH))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
0, ~0ULL, 0, intf); /* For ESP, LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_ESP))
npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
0, ~0ULL, 0, intf);
/* If a PF/VF is installing a multicast rule then it is expected * that the PF/VF should have created a group for the multicast/mirror * list. Otherwise reject the configuration. * During this scenario, req->index is set as multicast/mirror * group index.
*/ if (req->hdr.pcifunc &&
(op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) {
mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index); if (mce_index < 0) return mce_index;
if (req->op == NIX_RX_ACTION_DEFAULT) { if (pfvf->def_ucast_rule) {
action = pfvf->def_ucast_rule->rx_action;
} else { /* For profiles which do not extract DMAC, the default * unicast entry is unused. Hence modify action for the * requests which use same action as default unicast * entry
*/
*(u64 *)&action = 0;
action.pf_func = target;
action.op = NIX_RX_ACTIONOP_UCAST;
} if (req->match_id)
action.match_id = req->match_id;
}
find_rule:
rule = rvu_mcam_find_rule(mcam, entry_index); if (!rule) {
rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (!rule) return -ENOMEM; new = true;
}
/* allocate new counter if rule has no counter */ if (!req->default_rule && req->set_cntr && !rule->has_cntr)
rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
/* if user wants to delete an existing counter for a rule then * free the counter
*/ if (!req->set_cntr && rule->has_cntr)
rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
write_req.hdr.pcifunc = owner;
/* AF owns the default rules so change the owner just to relax * the checks in rvu_mbox_handler_npc_mcam_write_entry
*/ if (req->default_rule)
write_req.hdr.pcifunc = 0;
write_req.entry = entry_index;
write_req.intf = req->intf;
write_req.enable_entry = (u8)enable; /* if counter is available then clear and use it */ if (req->set_cntr && rule->has_cntr) {
rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
write_req.set_cntr = 1;
write_req.cntr = rule->cntr;
}
if (new)
rvu_mcam_add_rule(mcam, rule); if (req->default_rule)
pfvf->def_ucast_rule = rule;
/* write to mcam entry registers */
err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
&write_rsp); if (err) {
rvu_mcam_remove_counter_from_rule(rvu, owner, rule); if (new) {
list_del(&rule->list);
kfree(rule);
} return err;
}
/* VF's MAC address is being changed via PF */ if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
set_bit(PF_SET_VF_MAC, &pfvf->flags);
}
if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
rule->vfvlan_cfg = true;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) {
dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return NPC_MCAM_INVALID_REQ;
}
if (!is_npc_interface_valid(rvu, req->intf)) return NPC_FLOW_INTF_INVALID;
/* If DMAC is not extracted in MKEX, rules installed by AF * can rely on L2MB bit set by hardware protocol checker for * broadcast and multicast addresses.
*/ if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf)) goto process_flow;
if (is_pffunc_af(req->hdr.pcifunc) &&
req->features & BIT_ULL(NPC_DMAC)) { if (is_unicast_ether_addr(req->packet.dmac)) {
dev_warn(rvu->dev, "%s: mkex profile does not support ucast flow\n",
__func__); return NPC_FLOW_NOT_SUPPORTED;
}
if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
dev_warn(rvu->dev, "%s: mkex profile does not support bcast/mcast flow",
__func__); return NPC_FLOW_NOT_SUPPORTED;
}
/* Modify feature to use LXMB instead of DMAC */
req->features &= ~BIT_ULL(NPC_DMAC);
req->features |= BIT_ULL(NPC_LXMB);
}
process_flow: if (from_vf && req->default_rule) return NPC_FLOW_VF_PERM_DENIED;
/* Each PF/VF info is maintained in struct rvu_pfvf. * rvu_pfvf for the target PF/VF needs to be retrieved * hence modify pcifunc accordingly.
*/
if (!req->hdr.pcifunc) { /* AF installing for a PF/VF */
target = req->vf;
} elseif (!from_vf && req->vf && !from_rep_dev) { /* PF installing for its VF */
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
pf_set_vfs_mac = req->default_rule &&
(req->features & BIT_ULL(NPC_DMAC));
} elseif (from_rep_dev && req->vf) { /* Representor device installing for a representee */
target = req->vf;
} else { /* msg received from PF/VF */
target = req->hdr.pcifunc;
}
/* ignore chan_mask in case pf func is not AF, revisit later */ if (!is_pffunc_af(req->hdr.pcifunc))
req->chan_mask = 0xFFF;
err = npc_check_unsupported_flows(rvu, req->features, req->intf); if (err) return NPC_FLOW_NOT_SUPPORTED;
pfvf = rvu_get_pfvf(rvu, target);
if (from_rep_dev)
req->channel = pfvf->rx_chan_base; /* PF installing for its VF */ if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev)
set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* Proceed if NIXLF is attached or not for TX rules */
err = nix_get_nixlf(rvu, target, &nixlf, NULL); if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) return NPC_FLOW_NO_NIXLF;
/* don't enable rule when nixlf not attached or initialized */ if (!(is_nixlf_attached(rvu, target) &&
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
enable = false;
/* Packets reaching NPC in Tx path implies that a * NIXLF is properly setup and transmitting. * Hence rules can be enabled for Tx.
*/ if (is_npc_intf_tx(req->intf))
enable = true;
/* Do not allow requests from uninitialized VFs */ if (from_vf && !enable) return NPC_FLOW_VF_NOT_INIT;
/* PF sets VF mac & VF NIXLF is not attached, update the mac addr */ if (pf_set_vfs_mac && !enable) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
set_bit(PF_SET_VF_MAC, &pfvf->flags); return 0;
}
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return;
mutex_lock(&mcam->lock); /* Disable MCAM entries installed by PF with target as VF pcifunc */ for (index = 0; index < mcam->bmap_entries; index++) { if (mcam->entry2target_pffunc[index] == target)
npc_enable_mcam_entry(rvu, mcam, blkaddr,
index, false);
}
mutex_unlock(&mcam->lock);
}
/* single drop on non hit rule starting from 0th index. This an extension
 * to RPM mac filter to support more rules.
 *
 * @mcam_idx:              MCAM index at which the drop rule is installed.
 * @counter_idx:           out: index of the match counter allocated for
 *                         this rule.
 * @chan_val/@chan_mask:   channel match value and mask.
 * @exact_val/@exact_mask: exact-match result value and mask.
 * @bcast_mcast_val/@bcast_mcast_mask: LXMB (bcast/mcast) value and mask.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
			       u64 chan_val, u64 chan_mask, u64 exact_val,
			       u64 exact_mask, u64 bcast_mcast_val,
			       u64 bcast_mcast_mask)
{
	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
	struct npc_mcam_write_entry_req req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	struct msg_rsp rsp;
	bool enabled;
	int blkaddr;
	int err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	/* Bail out if no exact match support */
	if (!rvu_npc_exact_has_match_table(rvu)) {
		dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
		return -EINVAL;
	}

	/* If the requested entry is already used, return err */
	enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
	if (enabled) {
		dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
			__func__, mcam_idx);
		return -EINVAL;
	}

	/* Add this entry to mcam rules list */
	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	/* Disable rule by default. Enable rule when first dmac filter is
	 * installed
	 */
	rule->enable = false;
	rule->chan = chan_val;
	rule->chan_mask = chan_mask;
	rule->entry = mcam_idx;
	rvu_mcam_add_rule(mcam, rule);

	/* Allocate counter for this single drop on non hit rule */
	cntr_req.hdr.pcifunc = 0; /* AF request */
	cntr_req.contig = true;
	cntr_req.count = 1;
	err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
	if (err) {
		dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n",
			__func__, err);
		/* Undo list addition so the rule is not leaked */
		list_del(&rule->list);
		kfree(rule);
		return -EFAULT;
	}
	*counter_idx = cntr_rsp.cntr;

	/* Fill in fields for this mcam entry */
	npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
			 exact_mask, 0, NIX_INTF_RX);
	npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
			 chan_mask, 0, NIX_INTF_RX);
	npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
			 bcast_mcast_mask, 0, NIX_INTF_RX);

	/* Target the requested RX entry and bind the allocated counter;
	 * without these the write request carries a zeroed header and
	 * the success log below would report a bogus counter.
	 */
	req.intf = NIX_INTF_RX;
	req.entry = mcam_idx;
	req.set_cntr = 1;
	req.cntr = cntr_rsp.cntr;

	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
	if (err) {
		dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
			__func__, mcam_idx);
		/* Undo list addition so the rule is not leaked.
		 * NOTE(review): the allocated counter is still held by AF
		 * here — confirm whether it must be freed on this path.
		 */
		list_del(&rule->list);
		kfree(rule);
		return err;
	}

	/* Success path: informational, not an error */
	dev_info(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
		 __func__, mcam_idx, req.cntr);

	/* disable entry at Bank 0, index 0 */
	npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);

	return 0;
}
int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu, struct npc_get_field_status_req *req, struct npc_get_field_status_rsp *rsp)
{ int blkaddr;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.