	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
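	/* Illustrative arithmetic (not in the original source), assuming
	 * NFP_FL_LW_SIZ is 2, i.e. one long word is four bytes: a 40-byte
	 * key is sent as 40 >> 2 == 10 long words here, and the shift-left
	 * below restores 10 << 2 == 40 bytes once the message is built.
	 */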
	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported"); return -EOPNOTSUPP;
}
	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported"); return -EOPNOTSUPP;
}
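	/* To illustrate the check above: when any tunnel (enc_*) dissector
	 * key is present, the rule must also contain one of the two required
	 * subsets in full (going by the macro names, an IPv4 and an IPv6
	 * variant). A rule that uses a tunnel key but satisfies neither
	 * "(used_keys & R) == R" test is rejected here.
	 */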
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
vlan.key->vlan_priority) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload"); return -EOPNOTSUPP;
} if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
key_layer |= NFP_FLOWER_LAYER_EXT_META;
key_size += sizeof(struct nfp_flower_ext_meta);
key_size += sizeof(struct nfp_flower_vlan);
key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
}
}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan cvlan;

if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload"); return -EOPNOTSUPP;
}
		if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags,
						    extack))
			return -EOPNOTSUPP;
if (enc_ctl.mask->addr_type != 0xffff) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported"); return -EOPNOTSUPP;
}
		ipv6_tun = enc_ctl.key->addr_type ==
			   FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (ipv6_tun &&
!(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels"); return -EOPNOTSUPP;
}
if (!ipv6_tun &&
enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6"); return -EOPNOTSUPP;
}
if (ipv6_tun) {
			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
				       sizeof(ipv6_addrs.mask->dst))) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported"); return -EOPNOTSUPP;
}
} else {
			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported"); return -EOPNOTSUPP;
}
}
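		/* Example of the exact-match requirement above: a filter with
		 * enc_dst_ip 10.0.0.1 (an implicit /32) passes, while
		 * enc_dst_ip 10.0.0.0/24 leaves mask->dst != 0xffffffff and
		 * is rejected; the IPv6 branch applies the same rule via
		 * memchr_inv() over the 128-bit mask.
		 */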
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* Check if GRE, which has no enc_ports */
			if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels"); return -EOPNOTSUPP;
}
if (enc_op.key) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels"); return -EOPNOTSUPP;
}
} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported"); return -EOPNOTSUPP;
}
			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type"); return -EOPNOTSUPP;
}
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
flow_rule_match_basic(rule, &basic);
	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;
case cpu_to_be16(ETH_P_IPV6):
key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;
		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported"); return -EOPNOTSUPP;
		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
key_layer |= NFP_FLOWER_LAYER_MAC;
key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;
		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;
default:
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported"); return -EOPNOTSUPP;
}
	} else if (nfp_flower_check_higher_than_mac(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
}
	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
}
}
if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
nfp_flower_check_higher_than_l3(rule)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type"); return -EOPNOTSUPP;
}
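	/* Hypothetical tc usage for the constraint above (a sketch, not part
	 * of the source): an L4 port match is only offloadable when ip_proto
	 * is pinned, e.g.
	 *
	 *   tc filter add dev $DEV ingress protocol ip flower \
	 *           ip_proto tcp dst_port 80 action drop
	 *
	 * A rule matching on ports without a specific IP protocol is
	 * rejected above with -EOPNOTSUPP.
	 */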
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags"); return -EOPNOTSUPP;
}
		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
!(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST"); return -EOPNOTSUPP;
}
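		/* A worked example of the two flag checks above, assuming the
		 * usual TCPHDR_* bits: SYN|PSH passes (PSH is accompanied by
		 * SYN), PSH alone fails the second check, and a flag outside
		 * NFP_FLOWER_SUPPORTED_TCPFLAGS fails the first.
		 */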
		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol"); return -EOPNOTSUPP;
}
static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
u8 last_act_id = 0;
	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;
	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;
err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;
	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;
	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
*/
err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;
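	/* A worked example of the reject test above: if sub_flow1 covers,
	 * say, the ip_proto and tcp_flags bits and sub_flow2 matches on
	 * ip_proto plus a TOS field, then "sub_flow2 ANDNOT sub_flow1"
	 * leaves the TOS bits set, bitmap_andnot() reports a non-empty
	 * result, and the merge is refused with -EINVAL.
	 */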
/* The last action of sub_flow1 must be output - do not merge this. */
sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
sub2_act_len = sub_flow2->meta.act_len;
	if (!sub2_act_len)
		return -EINVAL;
	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;
	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
/* Copy any pre-actions to the start of merge flow action list. */
pre_off1 = nfp_flower_copy_pre_actions(merge_act,
sub_flow1->action_data,
sub1_act_len, &tunnel_act);
merge_act += pre_off1;
sub1_act_len -= pre_off1;
pre_off2 = nfp_flower_copy_pre_actions(merge_act,
sub_flow2->action_data,
sub2_act_len, NULL);
merge_act += pre_off2;
sub2_act_len -= pre_off2;
	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
	 * valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
						      &post_tun_push_vlan);
		if (err)
			return err;
	}

/* Copy remaining actions from sub_flows 1 and 2. */
memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
	if (post_tun_push_vlan) {
		/* Update tunnel action in merge to include VLAN push. */
err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
						 post_tun_push_vlan);
		if (err)
			return err;
	}
/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
list_del(&link->merge_flow.list);
list_del(&link->sub_flow.list);
kfree(link);
}
/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 in success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	struct nfp_merge_info *merge_info;
	u64 parent_ctx = 0;
	int err;
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;
/* Check if the two flows are already merged */
parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
	if (rhashtable_lookup_fast(&priv->merge_table,
&parent_ctx, merge_table_params)) {
		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
		return 0;
}
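	/* A minimal sketch of the lookup key built above: the two 32-bit
	 * host context IDs are packed into one u64, so an ordered pair of
	 * subflows maps to exactly one merge_table entry, i.e.
	 *
	 *	u64 key = (u64)ctx_of_flow1 << 32 | ctx_of_flow2;
	 *
	 * (ctx_of_flow1/2 are illustrative names for the CPU-order
	 * host_ctx_id values.)
	 */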
	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;
merge_key_ls.key_size = sub_flow1->meta.key_len;
	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;
if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); return -EOPNOTSUPP;
} elseif (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields"); return -EOPNOTSUPP;
}
if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); return -EOPNOTSUPP;
}
if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); return -EOPNOTSUPP;
}
/* Ensure destination MAC address is fully matched. */
	mac = (struct nfp_flower_mac_mpls *)mask;
	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); return -EOPNOTSUPP;
}
	/* Ensure source MAC address is fully matched. This is only needed
	 * for firmware with the DECAP_V2 feature enabled. Don't do this
	 * for firmware without this feature to keep old behaviour.
	 */
	if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
		mac = (struct nfp_flower_mac_mpls *)mask;
		if (!is_broadcast_ether_addr(&mac->mac_src[0])) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: source MAC field must not be masked"); return -EOPNOTSUPP;
}
}
if (mac->mpls_lse) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); return -EOPNOTSUPP;
}
/* Ensure destination MAC address matches pre_tun_dev. */
	mac = (struct nfp_flower_mac_mpls *)ext;
	if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); return -EOPNOTSUPP;
}
/* Save mac addresses in pre_tun_rule entry for later use */
memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN);
memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN);
mask += sizeof(struct nfp_flower_mac_mpls);
	ext += sizeof(struct nfp_flower_mac_mpls);
	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
	    key_layer & NFP_FLOWER_LAYER_IPV6) {
		/* Flags and proto fields have same offset in IPv4 and IPv6. */
		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
		int size;
		int i;
		size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
			sizeof(struct nfp_flower_ipv4) :
			sizeof(struct nfp_flower_ipv6);

		/* Ensure proto and flags are the only IP layer fields. */
		for (i = 0; i < size; i++)
			if (mask[i] && i != ip_flags && i != ip_proto) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
				return -EOPNOTSUPP;
}
ext += size;
mask += size;
}
	if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
			struct nfp_flower_vlan *vlan_tags;
u16 vlan_tpid;
u16 vlan_tci;
/* Action must be a single egress or pop_vlan and egress. */
act_offset = 0;
	act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	if (vlan) {
		if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); return -EOPNOTSUPP;
}
if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); return -EOPNOTSUPP;
}
act_offset += act->len_lw << NFP_FL_LW_SIZ;
	/* Ensure there are no more actions after egress. */
	if (act_offset != flow->meta.act_len) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); return -EOPNOTSUPP;
}
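	/* Illustration of the act_offset walk above: len_lw holds an
	 * action's length in long words, so an action of, say, four long
	 * words advances act_offset by 4 << NFP_FL_LW_SIZ == 16 bytes
	 * (assuming NFP_FL_LW_SIZ is 2). Only when the walk lands exactly
	 * on flow->meta.act_len was the egress action the final one.
	 */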
if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		/* Allow special case where CT match is all 0 */
		if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
			return false;
}
	if (flow->common.chain_index)
		return false;

	return true;
}
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;
	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
	if (is_pre_ct_flow(flow))
		return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack, NULL);
	if (is_post_ct_flow(flow))
		return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
	if (!offload_pre_check(flow))
		return -EOPNOTSUPP;
	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;
/* Remove any merge flow formed from the deleted sub_flow. */
list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
sub_flow.list)
nfp_flower_remove_merge_flow(app, sub_flow,
link->merge_flow.flow);
}
/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;
	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
bytes = priv->stats[ctx_id].bytes;
used = priv->stats[ctx_id].used;
/* Reset stats for the merge flow. */
priv->stats[ctx_id].pkts = 0;
priv->stats[ctx_id].bytes = 0;
	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
sub_flow = link->sub_flow.flow;
ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
priv->stats[ctx_id].pkts += pkts;
priv->stats[ctx_id].bytes += bytes;
priv->stats[ctx_id].used = max_t(u64, used,
priv->stats[ctx_id].used);
}
}
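/* A worked example of the distribution above: if the merge flow has
 * accumulated pkts == 10 and bytes == 1500, each linked subflow's
 * counters grow by 10 packets and 1500 bytes, its "used" timestamp is
 * advanced to the merge flow's if newer, and the merge flow's own
 * counters were already reset so the same stats are never handed out
 * twice.
 */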
/* Get merge flows that the subflow forms to distribute their stats. */
list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}
/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
u32 ctx_id;
/* Check ct_map table first */
ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent)
		return nfp_fl_ct_stats(flow, ct_map_ent);
extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist"); return -EINVAL;
}
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
nfp_flower_update_merge_stats(app, nfp_flow);
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	mutex_lock(&priv->nfp_fl_lock);
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		ret = nfp_flower_add_offload(app, netdev, flower);
		break;
	case FLOW_CLS_DESTROY:
		ret = nfp_flower_del_offload(app, netdev, flower);
		break;
	case FLOW_CLS_STATS:
		ret = nfp_flower_get_stats(app, netdev, flower);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
}
mutex_unlock(&priv->nfp_fl_lock);