/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
						 IP_TUNNEL_INFO_IPV6)
#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)
/* BOS is optional in the TC action but required for offload. */ if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
} else {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push"); return -EOPNOTSUPP;
}
/* Leave MPLS TC as a default value of 0 if not explicitly set. */ if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;
/* Proto, label and TTL are enforced and verified for MPLS push. */
mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
push_mpls->ethtype = act->mpls_push.proto;
push_mpls->lse = cpu_to_be32(mpls_lse);
out_dev = act->dev; if (!out_dev || !netif_is_lag_master(out_dev)) return 0;
if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action"); return -EOPNOTSUPP;
}
/* Pre_lag action must be first on action list. * If other actions already exist they need to be pushed forward.
*/ if (act_len)
memmove(nfp_flow->action_data + act_size,
nfp_flow->action_data, act_len);
out_dev = act->dev; if (!out_dev) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action"); return -EOPNOTSUPP;
}
tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
if (tun_type) { /* Verify the egress netdev matches the tunnel type. */ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type"); return -EOPNOTSUPP;
}
if (*tun_out_cnt) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter"); return -EOPNOTSUPP;
}
(*tun_out_cnt)++;
if (nfp_netdev_is_nfp_repr(in_dev)) { /* Confirm ingress and egress are on same device. */ if (!netdev_port_same_parent_id(in_dev, out_dev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices"); return -EOPNOTSUPP;
}
}
if (!nfp_netdev_is_nfp_repr(out_dev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port"); return -EOPNOTSUPP;
}
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev)); if (!output->port) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface"); return -EOPNOTSUPP;
}
}
nfp_flow->meta.shortcut = output->port;
return 0;
}
staticbool
nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
{ struct flow_action_entry *act = rule->action.entries; int num_act = rule->action.num_entries; int act_idx;
/* Preparse action list for next mirred or redirect action */ for (act_idx = start_idx + 1; act_idx < num_act; act_idx++) if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
act[act_idx].id == FLOW_ACTION_MIRRED) return netif_is_gretap(act[act_idx].dev) ||
netif_is_ip6gretap(act[act_idx].dev);
/* Determine the tunnel type based on the egress netdev * in the mirred action for tunnels without l4.
*/ if (nfp_flower_tun_is_gre(rule, act_idx)) return NFP_FL_TUNNEL_GRE;
switch (tun->key.tp_dst) { case htons(IANA_VXLAN_UDP_PORT): return NFP_FL_TUNNEL_VXLAN; case htons(GENEVE_UDP_PORT): if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE) return NFP_FL_TUNNEL_GENEVE;
fallthrough; default: return NFP_FL_TUNNEL_NONE;
}
}
/* Pre_tunnel action must be first on action list. * If other actions already exist they need to be pushed forward.
*/ if (act_len)
memmove(act_data + act_size, act_data, act_len);
/* We need to populate the options in reverse order for HW. * Therefore we go through the options, calculating the * number of options and the total size, then we populate * them in reverse order in the action list.
*/
opt_cnt = 0;
tot_push_len = 0;
opt_len = ip_tun->options_len; while (opt_len > 0) { struct geneve_opt *opt = (struct geneve_opt *)src;
opt_cnt++; if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded"); return -EOPNOTSUPP;
}
tot_push_len += sizeof(struct nfp_fl_push_geneve) +
opt->length * 4; if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options"); return -EOPNOTSUPP;
}
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload"); return -EOPNOTSUPP;
}
tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags); if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) ||
(tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload"); return -EOPNOTSUPP;
}
/* Set tunnel type and pre-tunnel index. */
tmp_set_ip_tun_type_index |=
FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); if (tun_flags & NFP_FL_TUNNEL_KEY)
set_tun->tun_id = ip_tun->key.tun_id;
/* Do a route lookup to determine ttl - if fails then use * default. Note that CONFIG_INET is a requirement of * CONFIG_NET_SWITCHDEV so must be defined here.
*/
flow.daddr = ip_tun->key.u.ipv4.dst;
flow.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(net, &flow);
err = PTR_ERR_OR_ZERO(rt); if (!err) {
set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
ip_rt_put(rt);
} else {
set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
}
}
/* Map a flow's IP protocol to the TC checksum-update flag(s) the FW must
 * recalculate.  ip_proto == 0 means the filter does not force a protocol
 * match, so both TCP and UDP checksums are flagged for update.  Protocols
 * the FW does not touch map to 0.
 */
static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match,
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}
if (set_act->set_ip6_dst.head.len_lw &&
set_act->set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions.
*/
nfp_action += act_size;
act_size = sizeof(set_act->set_ip6_src);
memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
staticint
nfp_flower_output_action(struct nfp_app *app, conststruct flow_action_entry *act, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, int *out_cnt, u32 *csum_updated, bool pkt_host, struct netlink_ext_ack *extack)
{ struct nfp_flower_priv *priv = app->priv; struct nfp_fl_output *output; int err, prelag_size;
/* If csum_updated has not been reset by now, it means HW will * incorrectly update csums when they are not requested.
*/ if (*csum_updated) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported"); return -EOPNOTSUPP;
}
if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum"); return -EOPNOTSUPP;
}
if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) { /* nfp_fl_pre_lag returns -err or size of prelag action added. * This will be 0 if it is not egressing to a lag dev.
*/
prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack); if (prelag_size < 0) { return prelag_size;
} elseif (prelag_size > 0 && (!last || *out_cnt)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list"); return -EOPNOTSUPP;
}
*tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx); if (*tun_type == NFP_FL_TUNNEL_NONE) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list"); return -EOPNOTSUPP;
}
if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list"); return -EOPNOTSUPP;
}
/* Pre-tunnel action is required for tunnel encap. * This checks for next hop entries on NFP. * If none, the packet falls back before applying other actions.
*/ if (*a_len + sizeof(struct nfp_fl_pre_tunnel) + sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap"); return -EOPNOTSUPP;
}
err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack); if (err) return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
netdev, extack); if (err) return err;
*a_len += sizeof(struct nfp_fl_set_tun);
} break; case FLOW_ACTION_TUNNEL_DECAP: /* Tunnel decap is handled by default so accept action. */ return 0; case FLOW_ACTION_MANGLE: if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
a_len, csum_updated, set_act, extack)) return -EOPNOTSUPP; break; case FLOW_ACTION_CSUM: /* csum action requests recalc of something we have not fixed */ if (act->csum_flags & ~*csum_updated) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list"); return -EOPNOTSUPP;
} /* If we will correctly fix the csum we can remove it from the * csum update list. Which will later be used to check support.
*/
*csum_updated &= ~act->csum_flags; break; case FLOW_ACTION_MPLS_PUSH: if (*a_len + sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS"); return -EOPNOTSUPP;
}
nfp_fl_set_mpls(set_m, act);
*a_len += sizeof(struct nfp_fl_set_mpls); break; case FLOW_ACTION_PTYPE: /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */ if (act->ptype != PACKET_HOST) return -EOPNOTSUPP;
*pkt_host = true; break; case FLOW_ACTION_POLICE: if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported police action in action list"); return -EOPNOTSUPP;
}
err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
extack); if (err) return err; break; default: /* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list"); return -EOPNOTSUPP;
}
/* We optimise when the action list is small, this can unfortunately * not happen once we have more than one action in the action list.
*/ if (act_cnt > 1)
nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
nfp_flow->meta.act_len = act_len;
return 0;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.4 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.