/**
 * ice_vc_fdir_param_check - validate VF/PF state before an FDIR operation
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for the valid VSI ID, PF's state and VF's state.
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;
	bool ok;

	/* All preconditions must hold; short-circuit in the same order the
	 * individual guard checks were performed.
	 */
	ok = test_bit(ICE_FLAG_FD_ENA, pf->flags) &&
	     test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	     (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) &&
	     ice_vc_isvalid_vsi_id(vf, vsi_id) &&
	     ice_get_vf_vsi(vf);

	return ok ? 0 : -EINVAL;
}
/**
 * ice_vf_start_ctrl_vsi - allocate and open the VF's FDIR control VSI
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF.
 * Returns -EEXIST when the control VSI index is already set.
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): the tail of this function (the success path and the
 * err_vsi_open cleanup label targeted by the goto below) is not visible
 * in this chunk; the code is left byte-identical.
 */ staticint ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{ struct ice_pf *pf = vf->pf; struct ice_vsi *ctrl_vsi; struct device *dev; int err;
/* -EEXIST tells the caller the control VSI was already set up */
dev = ice_pf_to_dev(pf); if (vf->ctrl_vsi_idx != ICE_NO_VSI) return -EEXIST;
ctrl_vsi = ice_vf_ctrl_vsi_setup(vf); if (!ctrl_vsi) {
dev_dbg(dev, "Could not setup control VSI for VF %d\n",
vf->vf_id); return -ENOMEM;
}
err = ice_vsi_open_ctrl(ctrl_vsi); if (err) {
dev_dbg(dev, "Could not open control VSI for VF %d\n",
vf->vf_id); goto err_vsi_open;
}
/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Lazily allocates the per-VF profile pointer table on first use, then
 * the per-flow-type profile entry if it does not exist yet.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir *fdir = &vf->fdir;

	/* Table of profile pointers, indexed by flow type. */
	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(dev, ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	/* Entry for this specific flow type. */
	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(dev,
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}
/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * NOTE(review): only the opening of this function is visible in this
 * chunk -- the body that actually frees the profile is missing; the
 * code is left byte-identical.
 */ staticvoid
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{ struct ice_vf_fdir *fdir = &vf->fdir;
/**
 * ice_vc_fdir_parse_flow_fld - parse one virtchnl protocol header
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store them into field type
 * array.
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this function is truncated in this view -- the loop body
 * that records matched fields into @fld/@fld_cnt and the final return are
 * missing; the code is left byte-identical. The local copy of the header
 * is scanned against fdir_inset_map below.
 */ staticint
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr, struct virtchnl_fdir_fltr_conf *conf, enum ice_flow_field *fld, int *fld_cnt)
{ struct virtchnl_proto_hdr hdr;
u32 i;
memcpy(&hdr, proto_hdr, sizeof(hdr));
/* iterate while the copied header still has unconsumed field bits */
for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++) if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) { if (fdir_inset_map[i].mask &&
((fdir_inset_map[i].mask & conf->inset_flag) !=
fdir_inset_map[i].flag)) continue;
/**
 * ice_vc_fdir_set_flow_fld - program the request's input set into a segment
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store them
 * into flow's packet segment field.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	int fld_cnt = 0;
	int idx;

	/* Collect the matched input-set fields from every protocol header. */
	for (idx = 0; idx < proto->count; idx++) {
		int err = ice_vc_fdir_parse_flow_fld(&proto->proto_hdr[idx],
						     conf, fld, &fld_cnt);

		if (err)
			return err;
	}

	/* A rule that matches nothing at all is rejected. */
	if (!fld_cnt) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	/* Program each collected field into the packet segment. */
	for (idx = 0; idx < fld_cnt; idx++)
		ice_flow_set_fld(seg, fld[idx],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}
/** * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header * @vf: pointer to the VF structure * @conf: FDIR configuration for each filter * @seg: array of one or more packet segments that describe the flow * * Return: 0 on success, and other on error.
*/ staticint
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, struct ice_flow_seg_info *seg)
{ enum ice_fltr_ptype flow = conf->input.flow_type; enum ice_fdir_tunnel_type ttype = conf->ttype; struct device *dev = ice_pf_to_dev(vf->pf);
switch (flow) { case ICE_FLTR_PTYPE_NON_IP_L2:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP); break; case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_AH:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP: case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER: if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
} elseif (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
ICE_FLOW_SEG_HDR_GTPU_IP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
} else {
dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
flow, vf->vf_id); return -EINVAL;
} break; case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_AH:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER); break; default:
dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
flow, vf->vf_id); return -EINVAL;
}
return 0;
}
/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * NOTE(review): only the opening of this function is visible in this
 * chunk -- the body that tears down the flow profile is missing; the
 * code is left byte-identical.
 */ staticvoid
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{ struct ice_vf_fdir *fdir = &vf->fdir; struct ice_fd_hw_prof *vf_prof; struct ice_pf *pf = vf->pf; struct ice_vsi *vf_vsi; struct device *dev; struct ice_hw *hw;
u64 prof_id; int i;
dev = ice_pf_to_dev(pf);
/* nothing to remove if no profile was ever allocated for this flow */
hw = &pf->hw; if (!fdir->fdir_prof || !fdir->fdir_prof[flow]) return;
/**
 * ice_vc_fdir_has_prof_conflict - check for conflicting flow profiles
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has conflicting profile with existing profiles.
 *
 * Return: true on conflict, false otherwise.
 *
 * NOTE(review): this body is garbled in this view -- the list-iteration
 * header over the VF's existing filter rules and the declarations of
 * existing_conf, a, b, flow_type_a and flow_type_b are missing (desc is
 * presumably the iteration cursor); the code is left byte-identical.
 */ staticbool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{ struct ice_fdir_fltr *desc;
existing_conf = to_fltr_conf_from_desc(desc);
a = &existing_conf->input;
b = &conf->input;
flow_type_a = a->flow_type;
flow_type_b = b->flow_type;
/* No need to compare two rules with different tunnel types or
 * with the same protocol type.
 */ if (existing_conf->ttype != conf->ttype ||
flow_type_a == flow_type_b) continue;
/* An UDP/TCP/SCTP rule conflicts with an OTHER rule of the same IP
 * version, and vice versa.
 */
switch (flow_type_a) { case ICE_FLTR_PTYPE_NONF_IPV4_UDP: case ICE_FLTR_PTYPE_NONF_IPV4_TCP: case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) returntrue; break; case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) returntrue; break; case ICE_FLTR_PTYPE_NONF_IPV6_UDP: case ICE_FLTR_PTYPE_NONF_IPV6_TCP: case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) returntrue; break; case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) returntrue; break; default: break;
}
}
returnfalse;
}
/**
 * ice_vc_fdir_write_flow_prof - write flow profile into hardware
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware.
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this function is truncated in this view -- the tail that
 * commits @seg into vf_prof, the success return, and the err_exit /
 * err_prof / err_entry_1 cleanup labels targeted by the gotos below are
 * missing; the code is left byte-identical.
 */ staticint
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg, int tun)
{ struct ice_vf_fdir *fdir = &vf->fdir; struct ice_vsi *vf_vsi, *ctrl_vsi; struct ice_flow_seg_info *old_seg; struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *vf_prof; struct device *dev; struct ice_pf *pf; struct ice_hw *hw;
u64 entry1_h = 0;
u64 entry2_h = 0; int ret;
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) return -EINVAL;
ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; if (!ctrl_vsi) return -EINVAL;
vf_prof = fdir->fdir_prof[flow];
/* an identical segment already programmed is rejected as a duplicate */
old_seg = vf_prof->fdir_seg[tun]; if (old_seg) { if (!memcmp(old_seg, seg, sizeof(*seg))) {
dev_dbg(dev, "Duplicated profile for VF %d!\n",
vf->vf_id); return -EEXIST;
}
/* a different input set cannot replace one with live filters */
if (fdir->fdir_fltr_cnt[flow][tun]) {
ret = -EINVAL;
dev_dbg(dev, "Input set conflicts for VF %d\n",
vf->vf_id); goto err_exit;
}
ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
tun + 1, false, &prof); if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id); goto err_exit;
}
/* one flow entry for the VF's data VSI ... */
ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h); if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id); goto err_prof;
}
/* ... and one for its FDIR control VSI */
ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h); if (ret) {
dev_dbg(dev, "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
flow, vf->vf_id); goto err_entry_1;
}
/** * ice_vc_fdir_config_input_set * @vf: pointer to the VF structure * @fltr: virtual channel add cmd buffer * @conf: FDIR configuration for each filter * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter * * Config the input set type and value for virtual channel add msg buffer * * Return: 0 on success, and other on error.
*/ staticint
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, struct virtchnl_fdir_fltr_conf *conf, int tun)
{ struct ice_fdir_fltr *input = &conf->input; struct device *dev = ice_pf_to_dev(vf->pf); struct ice_flow_seg_info *seg; enum ice_fltr_ptype flow; int ret;
ret = ice_vc_fdir_has_prof_conflict(vf, conf); if (ret) {
dev_dbg(dev, "Found flow profile conflict for VF %d\n",
vf->vf_id); return ret;
}
flow = input->flow_type;
ret = ice_vc_fdir_alloc_prof(vf, flow); if (ret) {
dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id); return ret;
}
seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); if (!seg) return -ENOMEM;
ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg); if (ret) {
dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id); goto err_exit;
}
ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg); if (ret) {
dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id); goto err_exit;
}
ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun); if (ret == -EEXIST) {
devm_kfree(dev, seg);
} elseif (ret) {
dev_dbg(dev, "Write flow profile for VF %d failed\n",
vf->vf_id); goto err_exit;
}
return 0;
err_exit:
devm_kfree(dev, seg); return ret;
}
/** * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary) * @proto: virtchnl protocol headers * * Check if the FDIR rule is raw flow (protocol agnostic flow) or not. Note * that common FDIR rule must have non-zero proto->count. Thus, we choose the * tunnel_level and count of proto as the indicators. If both tunnel_level and * count of proto are zero, this FDIR rule will be regarded as raw flow. * * Returns: true if headers describe raw flow, false otherwise.
*/ staticbool
ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
{ return (proto->tunnel_level == 0 && proto->count == 0);
}
/**
 * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
 * @vf: pointer to the VF info
 * @proto: virtchnl protocol headers
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's raw flow and store it in @conf.
 *
 * Return: 0 on success or negative errno on failure.
 *
 * NOTE(review): only the opening of this function is visible in this
 * chunk -- the packet-buffer allocation, parser invocation and result
 * handling are missing; the code is left byte-identical.
 */ staticint
ice_vc_fdir_parse_raw(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto, struct virtchnl_fdir_fltr_conf *conf)
{
u8 *pkt_buf, *msk_buf __free(kfree) = NULL; struct ice_parser_result rslt; struct ice_pf *pf = vf->pf;
u16 pkt_len, udp_port = 0; struct ice_parser *psr; int status = -ENOMEM; struct ice_hw *hw;
/* reject zero-length or oversized raw packets up front */
pkt_len = proto->raw.pkt_len;
if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET) return -EINVAL;
/** * ice_vc_fdir_comp_rules - compare if two filter rules have the same value * @conf_a: FDIR configuration for filter a * @conf_b: FDIR configuration for filter b * * Return: 0 on success, and other on error.
*/ staticbool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a, struct virtchnl_fdir_fltr_conf *conf_b)
{ struct ice_fdir_fltr *a = &conf_a->input; struct ice_fdir_fltr *b = &conf_b->input;
if (conf_a->ttype != conf_b->ttype) returnfalse; if (a->flow_type != b->flow_type) returnfalse; if (memcmp(&a->ip, &b->ip, sizeof(a->ip))) returnfalse; if (memcmp(&a->mask, &b->mask, sizeof(a->mask))) returnfalse; if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data))) returnfalse; if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask))) returnfalse; if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data))) returnfalse; if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask))) returnfalse; if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data))) returnfalse; if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask))) returnfalse;
returntrue;
}
/**
 * ice_vc_fdir_is_dup_fltr - check for a duplicate filter rule
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is duplicated rule with same conf value.
 *
 * Return: true if a duplicate exists, false otherwise.
 *
 * NOTE(review): this body is garbled in this view -- the list-iteration
 * header over the VF's existing rules and the declaration of `node`
 * (presumably the per-entry conf derived from `desc`) are missing; the
 * code is left byte-identical.
 */ staticbool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{ struct ice_fdir_fltr *desc; bool ret;
ret = ice_vc_fdir_comp_rules(node, conf); if (ret) returntrue;
}
returnfalse;
}
/**
 * ice_vc_fdir_insert_entry - insert an FDIR conf entry and allocate its ID
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter.
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this function is truncated in this view -- the tail that
 * links @conf's input into the VF's rule list and returns 0 is missing
 * (`input` is declared but unused here); the code is left byte-identical.
 */ staticint
ice_vc_fdir_insert_entry(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{ struct ice_fdir_fltr *input = &conf->input; int i;
/* allocate an IDR-backed ID for this conf, bounded by the filter max */
i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
ICE_FDIR_MAX_FLTRS, GFP_KERNEL); if (i < 0) return -EINVAL;
*id = i;
/** * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value * @vf: pointer to the VF info * @id: filter rule's ID * * Return: NULL on error, and other on success.
*/ staticstruct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{ return idr_find(&vf->fdir.fdir_rule_idr, id);
}
/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
 * @vf: pointer to the VF info
 *
 * NOTE(review): only the opening of this function is visible in this
 * chunk -- the iteration that removes the entries is missing; the code
 * is left byte-identical.
 */ staticvoid ice_vc_fdir_flush_entry(struct ice_vf *vf)
{ struct virtchnl_fdir_fltr_conf *conf; struct ice_fdir_fltr *desc, *temp;
/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rules
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this body is a truncated mashup in this view -- after the
 * VSI check the descriptor/packet programming is missing, and the two
 * dev_dbg() lines below (referencing fd_size_g/fd_size_b/fd_cnt_g/fd_cnt_b,
 * none of which are declared here) appear to belong to a different dump
 * helper; the code is left byte-identical.
 */ staticint ice_vc_fdir_write_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, bool add, bool is_tun)
{ struct ice_fdir_fltr *input = &conf->input; struct ice_vsi *vsi, *ctrl_vsi; struct ice_fltr_desc desc; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; int ret;
u8 *pkt;
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vsi = ice_get_vf_vsi(vf); if (!vsi) {
dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id); return -EINVAL;
}
dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
vf->vf_id, fd_size_g, fd_size_b);
dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
vf->vf_id, fd_cnt_g, fd_cnt_b);
}
/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this function is truncated in this view -- the success
 * return and the err_exit label targeted by the gotos below are missing;
 * the code is left byte-identical.
 */ staticint
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, enum virtchnl_fdir_prgm_status *status)
{ struct device *dev = ice_pf_to_dev(vf->pf);
u32 stat_err, error, prog_id; int ret;
/* the Descriptor Done bit must be set before the writeback is valid */
stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0); if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
ICE_FXD_FLTR_WB_QW1_DD_YES) {
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
ret = -EINVAL; goto err_exit;
}
/* the programmed opcode must match the pending virtchnl request */
prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err); if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
dev_err(dev, "VF %d: Desc show add, but ctx not",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
ret = -EINVAL; goto err_exit;
}
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
dev_err(dev, "VF %d: Desc show del, but ctx not",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
ret = -EINVAL; goto err_exit;
}
/* map a hardware failure bit onto the matching virtchnl status */
error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err); if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) { if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
} else {
dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
}
ret = -EINVAL; goto err_exit;
}
/**
 * ice_vc_add_fdir_fltr_post - post process for FDIR add command
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director add command. If success, then do post
 * process and send back success msg by virtchnl. Otherwise, do context
 * reversion and send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this function is truncated in this view -- the response
 * population, the virtchnl send, and the err_exit label targeted by the
 * goto below are missing; the code is left byte-identical.
 */ staticint
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, enum virtchnl_fdir_prgm_status status, bool success)
{ struct virtchnl_fdir_fltr_conf *conf = ctx->conf; struct device *dev = ice_pf_to_dev(vf->pf); enum virtchnl_status_code v_ret; struct virtchnl_fdir_add *resp; int ret, len, is_tun;
v_ret = VIRTCHNL_STATUS_SUCCESS;
len = sizeof(*resp);
resp = kzalloc(len, GFP_KERNEL); if (!resp) {
len = 0;
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id); goto err_exit;
}
/**
 * ice_vc_del_fdir_fltr_post - post process for FDIR del command
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director del command. If success, then do post
 * process and send back success msg by virtchnl. Otherwise, do context
 * reversion and send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this function is truncated in this view -- the response
 * population, the virtchnl send, and the err_exit label targeted by the
 * goto below are missing; the code is left byte-identical.
 */ staticint
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, enum virtchnl_fdir_prgm_status status, bool success)
{ struct virtchnl_fdir_fltr_conf *conf = ctx->conf; struct device *dev = ice_pf_to_dev(vf->pf); enum virtchnl_status_code v_ret; struct virtchnl_fdir_del *resp; int ret, len, is_tun;
v_ret = VIRTCHNL_STATUS_SUCCESS;
len = sizeof(*resp);
resp = kzalloc(len, GFP_KERNEL); if (!resp) {
len = 0;
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id); goto err_exit;
}
/**
 * ice_flush_fdir_ctx - flush pending FDIR context events
 * @pf: pointer to the PF structure
 *
 * Flush all the pending event on ctx_done list and process them.
 *
 * NOTE(review): only the opening of this function is visible in this
 * chunk -- the per-VF iteration is missing; the code is left
 * byte-identical.
 */ void ice_flush_fdir_ctx(struct ice_pf *pf)
{ struct ice_vf *vf; unsignedint bkt;
if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state)) return;
/** * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv context * @fv_a: struct of parsed FDIR profile field vector * @fv_b: struct of parsed FDIR profile field vector * * Check if the two parsed FDIR profile field vector context are different, * including proto_id, offset and mask. * * Return: true on different, false on otherwise.
*/ staticbool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a, struct ice_parser_fv *fv_b)
{ return (fv_a->proto_id != fv_b->proto_id ||
fv_a->offset != fv_b->offset ||
fv_a->msk != fv_b->msk);
}
/**
 * ice_vc_parser_fv_save - save parsed FDIR profile fv context
 * @fv: struct of parsed FDIR profile field vector
 * @fv_src: parsed FDIR profile field vector context to save
 *
 * Save the parsed FDIR profile field vector context, including proto_id,
 * offset and mask.
 *
 * Return: Void.
 */
static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
				  struct ice_parser_fv *fv_src)
{
	/* spec is always cleared; only the match context is carried over */
	fv->spec = 0;
	fv->proto_id = fv_src->proto_id;
	fv->offset = fv_src->offset;
	fv->msk = fv_src->msk;
}
/**
 * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 *
 * NOTE(review): this body is truncated/garbled in this view -- `dev` and
 * `hw` are declared but never assigned here, the error check after
 * ice_flow_set_parser_prof() is missing, and the err_rem_entry /
 * err_clr_irq labels targeted by the gotos below (plus the final return)
 * are not visible; the code is left byte-identical.
 */ staticint
ice_vc_add_fdir_raw(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, enum virtchnl_status_code *v_ret, struct virtchnl_fdir_add *stat, int len)
{ struct ice_vsi *vf_vsi, *ctrl_vsi; struct ice_fdir_prof_info *pi; struct ice_pf *pf = vf->pf; int ret, ptg, id, i; struct device *dev; struct ice_hw *hw; bool fv_found;
/* map the profile's first ptype to its packet-type group */
id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) {
dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id); return -ENODEV;
}
ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; if (!ctrl_vsi) {
dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
vf->vf_id); return -ENODEV;
}
fv_found = false;
/* Check if profile info already exists, then update the counter */
pi = &vf->fdir_prof_info[ptg]; if (pi->fdir_active_cnt != 0) { for (i = 0; i < ICE_MAX_FV_WORDS; i++) if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
&conf->prof->fv[i])) break; if (i == ICE_MAX_FV_WORDS) {
fv_found = true;
pi->fdir_active_cnt++;
}
}
/* HW profile setting is only required for the first time */ if (!fv_found) {
ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
ctrl_vsi->idx, conf->prof,
ICE_BLK_FD);
ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id); if (ret) {
*v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "VF %d: insert FDIR list failed\n",
vf->vf_id); return ret;
}
ret = ice_vc_fdir_set_irq_ctx(vf, conf,
VIRTCHNL_OP_ADD_FDIR_FILTER); if (ret) {
dev_dbg(dev, "VF %d: set FDIR context failed\n",
vf->vf_id); goto err_rem_entry;
}
ret = ice_vc_fdir_write_fltr(vf, conf, true, false); if (ret) {
dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
vf->vf_id, ret); goto err_clr_irq;
}
/* Save parsed profile fv info of the FDIR rule for the first time */ if (!fv_found) { for (i = 0; i < conf->prof->fv_num; i++)
ice_vc_parser_fv_save(&pi->prof.fv[i],
&conf->prof->fv[i]);
pi->prof.fv_num = conf->prof->fv_num;
pi->fdir_active_cnt = 1;
}
/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 *
 * NOTE(review): this body is truncated in this view -- the allocation and
 * parsing that initialize `conf` before it is dereferenced below, the
 * err_exit / err_free_conf / exit labels, and the rest of the add path
 * are missing; `gotoexit` is a fused token from extraction. The code is
 * left byte-identical.
 */ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{ struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg; struct virtchnl_fdir_add *stat = NULL; struct virtchnl_fdir_fltr_conf *conf; enum virtchnl_status_code v_ret; struct ice_vsi *vf_vsi; struct device *dev; struct ice_pf *pf; int is_tun = 0; int len = 0; int ret;
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) {
dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err_exit;
}
/* cap both the HW table space and the per-VF filter budget */
#define ICE_VF_MAX_FDIR_FILTERS 128 if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
vf->vf_id); goto err_exit;
}
ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); if (ret) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id); goto err_exit;
}
/* -EEXIST just means the control VSI is already up */
ret = ice_vf_start_ctrl_vsi(vf); if (ret && (ret != -EEXIST)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
vf->vf_id, ret); goto err_exit;
}
stat = kzalloc(sizeof(*stat), GFP_KERNEL); if (!stat) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id); goto err_exit;
}
/* For raw FDIR filters created by the parser */ if (conf->parser_ena) {
ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len); if (ret) goto err_free_conf; gotoexit;
}
/**
 * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @v_ret: the final VIRTCHNL code
 * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER
 * @len: length of the stat
 *
 * Return: 0 on success or negative errno on failure.
 *
 * NOTE(review): this span is a garbled mashup in this view -- `dev` and
 * `hw` are declared but never assigned, the function's closing brace is
 * missing, and the lines from the ctrl_vsi_idx check onward (with their
 * err_exit / err_del_tmr / exit labels and the local `v_ret`/`stat` they
 * reference) appear to belong to a different caller, likely
 * ice_vc_del_fdir_fltr(); the code is left byte-identical.
 */ staticint
ice_vc_del_fdir_raw(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf, enum virtchnl_status_code *v_ret, struct virtchnl_fdir_del *stat, int len)
{ struct ice_vsi *vf_vsi, *ctrl_vsi; enum ice_block blk = ICE_BLK_FD; struct ice_fdir_prof_info *pi; struct ice_pf *pf = vf->pf; struct device *dev; struct ice_hw *hw; unsignedlong id;
u16 vsi_num; int ptg; int ret;
/* map the profile's first ptype to its packet-type group */
id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
ret = ice_vc_fdir_write_fltr(vf, conf, false, false); if (ret) {
dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
vf->vf_id, ret); return ret;
}
vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) {
dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); return -ENODEV;
}
ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; if (!ctrl_vsi) {
dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
vf->vf_id); return -ENODEV;
}
/* drop the profile's flow once its last active rule is gone */
pi = &vf->fdir_prof_info[ptg]; if (pi->fdir_active_cnt != 0) {
pi->fdir_active_cnt--; /* Remove the profile id flow if no active FDIR rule left */ if (!pi->fdir_active_cnt) {
vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
ice_rem_prof_id_flow(hw, blk, vsi_num, id);
/* Just return failure when ctrl_vsi idx is invalid */ if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id); goto err_exit;
}
ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER); if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id); goto err_exit;
}
/* For raw FDIR filters created by the parser */ if (conf->parser_ena) {
ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len); if (ret) goto err_del_tmr; gotoexit;
}
/*
 * NOTE(review): the trailing text below is web-extraction residue (a German
 * website disclaimer), not part of the driver source. It is preserved here,
 * translated, inside a comment so the file remains valid C:
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */