/* TCP over IPv4 flows, Not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* Non IP flow, no vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
0,
MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK), /* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
MVPP22_CLS_HEK_TAGGED,
0, 0),
};
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
{
mvpp2_write(priv, MVPP2_CTRS_IDX, index);
/* Initialize the parser entry for the given flow */ staticvoid mvpp2_cls_flow_prs_init(struct mvpp2 *priv, conststruct mvpp2_cls_flow *flow)
{
mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
flow->prs_ri.ri_mask);
}
/* Initialize the Lookup Id table entry for the given flow */ staticvoid mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, conststruct mvpp2_cls_flow *flow)
{ struct mvpp2_cls_lookup_entry le;
le.way = 0;
le.lkpid = flow->flow_id;
/* The default RxQ for this port is set in the C2 lookup */
le.data = 0;
/* We point on the first lookup in the sequence for the flow, that is * the C2 lookup.
*/
le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV); if (c2->valid)
val &= ~MVPP22_CLS_C2_TCAM_INV_BIT; else
val |= MVPP22_CLS_C2_TCAM_INV_BIT;
mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
/* Initialize the flow table entries for the given flow */ staticvoid mvpp2_cls_flow_init(struct mvpp2 *priv, conststruct mvpp2_cls_flow *flow)
{ struct mvpp2_cls_flow_entry fe; int i, pri = 0;
/* Assign default values to all entries in the flow */ for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
memset(&fe, 0, sizeof(fe));
fe.index = i;
mvpp2_cls_flow_pri_set(&fe, pri++);
if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
mvpp2_cls_flow_last_set(&fe, 1);
/* Add all ports */ for (i = 0; i < MVPP2_MAX_PORTS; i++)
mvpp2_cls_flow_port_add(&fe, BIT(i));
mvpp2_cls_flow_write(priv, &fe);
/* C3Hx lookups */ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
mvpp2_cls_flow_read(priv,
MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
&fe);
/* Set a default engine. Will be overwritten when setting the * real HEK parameters
*/
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
mvpp2_cls_flow_port_id_sel(&fe, true);
mvpp2_cls_flow_port_add(&fe, BIT(i));
mvpp2_cls_flow_write(priv, &fe);
}
}
/* Adds a field to the Header Extracted Key generation parameters*/ staticint mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
u32 field_id)
{ int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
if (nb_fields == MVPP2_FLOW_N_FIELDS) return -EINVAL;
/* Clear old fields */
mvpp2_cls_flow_hek_num_set(fe, 0);
fe->data[2] = 0;
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { switch (BIT(i)) { case MVPP22_CLS_HEK_OPT_MAC_DA:
field_id = MVPP22_CLS_FIELD_MAC_DA; break; case MVPP22_CLS_HEK_OPT_VLAN:
field_id = MVPP22_CLS_FIELD_VLAN; break; case MVPP22_CLS_HEK_OPT_VLAN_PRI:
field_id = MVPP22_CLS_FIELD_VLAN_PRI; break; case MVPP22_CLS_HEK_OPT_IP4SA:
field_id = MVPP22_CLS_FIELD_IP4SA; break; case MVPP22_CLS_HEK_OPT_IP4DA:
field_id = MVPP22_CLS_FIELD_IP4DA; break; case MVPP22_CLS_HEK_OPT_IP6SA:
field_id = MVPP22_CLS_FIELD_IP6SA; break; case MVPP22_CLS_HEK_OPT_IP6DA:
field_id = MVPP22_CLS_FIELD_IP6DA; break; case MVPP22_CLS_HEK_OPT_L4SIP:
field_id = MVPP22_CLS_FIELD_L4SIP; break; case MVPP22_CLS_HEK_OPT_L4DIP:
field_id = MVPP22_CLS_FIELD_L4DIP; break; default: return -EINVAL;
} if (mvpp2_flow_add_hek_field(fe, field_id)) return -EINVAL;
}
return 0;
}
/* Returns the size, in bits, of the corresponding HEK field,
 * or -1 for an unknown field.
 */
static int mvpp2_cls_hek_field_size(u32 field)
{
	switch (field) {
	case MVPP22_CLS_HEK_OPT_MAC_DA:
		return 48;
	case MVPP22_CLS_HEK_OPT_VLAN:
		return 12;
	case MVPP22_CLS_HEK_OPT_VLAN_PRI:
		return 3;
	case MVPP22_CLS_HEK_OPT_IP4SA:
	case MVPP22_CLS_HEK_OPT_IP4DA:
		return 32;
	case MVPP22_CLS_HEK_OPT_IP6SA:
	case MVPP22_CLS_HEK_OPT_IP6DA:
		return 128;
	case MVPP22_CLS_HEK_OPT_L4SIP:
	case MVPP22_CLS_HEK_OPT_L4DIP:
		return 16;
	default:
		return -1;
	}
}
/* Set the hash generation options for the given traffic flow.
 * One traffic flow (in the ethtool sense) has multiple classification flows,
 * to handle specific cases such as fragmentation, or the presence of a
 * VLAN / DSA Tag.
 *
 * Each of these individual flows has different constraints, for example we
 * can't hash fragmented packets on L4 data (else we would risk having packet
 * re-ordering), so each classification flows masks the options with their
 * supported ones.
 */
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
					u16 requested_opts)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, engine, flow_index;
	u16 hash_opts;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return -EINVAL;

		/* NOTE(review): the three statements below were dropped from
		 * the original text, leaving 'fe' and 'hash_opts' used
		 * uninitialized; restored from the mainline driver.
		 */
		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts = flow->supported_hash_opts & requested_opts;

		/* Use C3HB engine to access L4 infos. This adds L4 infos to
		 * the hash parameters
		 */
		if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
			engine = MVPP22_CLS_ENGINE_C3HB;
		else
			engine = MVPP22_CLS_ENGINE_C3HA;

		if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
			return -EINVAL;

		mvpp2_cls_flow_eng_set(&fe, engine);

		mvpp2_cls_flow_write(port->priv, &fe);
	}

	return 0;
}
/* Returns the hash-options bitmap corresponding to the HEK fields currently
 * configured in the given flow entry. Unknown fields are silently ignored.
 */
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
{
	u16 hash_opts = 0;
	int n_fields, i, field;

	n_fields = mvpp2_cls_flow_hek_num_get(fe);

	for (i = 0; i < n_fields; i++) {
		field = mvpp2_cls_flow_hek_get(fe, i);

		switch (field) {
		case MVPP22_CLS_FIELD_MAC_DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
			break;
		case MVPP22_CLS_FIELD_VLAN:
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
			break;
		case MVPP22_CLS_FIELD_VLAN_PRI:
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
			break;
		case MVPP22_CLS_FIELD_L3_PROTO:
			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
			break;
		case MVPP22_CLS_FIELD_IP4SA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
			break;
		case MVPP22_CLS_FIELD_IP4DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
			break;
		case MVPP22_CLS_FIELD_IP6SA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
			break;
		case MVPP22_CLS_FIELD_IP6DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
			break;
		case MVPP22_CLS_FIELD_L4SIP:
			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
			break;
		case MVPP22_CLS_FIELD_L4DIP:
			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
			break;
		default:
			break;
		}
	}
	return hash_opts;
}
/* Returns the hash opts for this flow. There are several classifier flows
 * for one traffic flow, this returns an aggregation of all configurations.
 */
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, flow_index;
	u16 hash_opts = 0;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		/* NOTE(review): the loop body and function tail below were
		 * dropped from the original text; restored from the mainline
		 * driver — please confirm against upstream mvpp2_cls.c.
		 */
		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id,
						      flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts |= mvpp2_flow_get_hek_fields(&fe);
	}

	return hash_opts;
}
/* Match on Lookup Type */
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
/* Update RSS status after matching this entry */
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
/* Mark packet as "forwarded to software", needed for RSS */
c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
/* Configure the default rx queue : Update Queue Low and Queue High, but * don't lock, since the rx queue selection might be overridden by RSS
*/
c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
/* Disable the FIFO stages in C2 engine, which are only used in BIST * mode
*/
mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
/* Set way for the port */
val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
/* Pick the entry to be accessed in lookup ID decoding table * according to the way and lkpid.
*/
le.lkpid = port->id;
le.way = 0;
le.data = 0;
/* Set initial CPU queue for receiving packets */
le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
le.data |= port->first_rxq;
/* The RxQ number is used to select the RSS table. It that case, we set * it to be the ctx number.
*/
qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
/* Match on Lookup Type */
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
if (act->id == FLOW_ACTION_DROP) {
c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
} else { /* We want to keep the default color derived from the Header * Parser drop entries, for VLAN and MAC filtering. This will * assign a default color of Green or Red, and we want matches * with a non-drop action to keep that color.
*/
c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
/* Update RSS status after matching this entry */ if (act->queue.ctx)
c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
/* Always lock the RSS_EN decision. We might have high prio * rules steering to an RXQ, and a lower one steering to RSS, * we don't want the low prio RSS rule overwriting this flag.
*/
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
/* Mark packet as "forwarded to software", needed for RSS */
c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
/* The order of insertion in C2 tcam must match the order in which * the fields are found in the header
*/ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match;
flow_rule_match_vlan(flow, &match); if (match.mask->vlan_id) {
rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;
if (match.mask->vlan_priority) {
rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
/* VLAN pri is always at offset 13 relative to the * current offset
*/
rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
(offs + 13);
rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
(offs + 13);
}
if (match.mask->vlan_dei) return -EOPNOTSUPP;
/* vlan id and prio always seem to take a full 16-bit slot in * the Header Extracted Key.
*/
offs += 16;
}
if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match;
flow_rule_match_ports(flow, &match); if (match.mask->src) {
rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
/* When both an RSS context and an queue index are set, the index * is considered as an offset to be added to the indirection table * entries. We don't support this, so reject this rule.
*/ if (act->queue.ctx && act->queue.index) return -EOPNOTSUPP;
/* For now, only use the C2 engine which has a HEK size limited to 64 * bits for TCAM matching.
*/
rule->engine = MVPP22_CLS_ENGINE_C2;
if (mvpp2_cls_c2_build_match(rule)) return -EINVAL;
if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) return -EINVAL;
efs = port->rfs_rules[rxnfc->fs.location]; if (!efs) return -ENOENT;
memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
return 0;
}
int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{ struct ethtool_rx_flow_spec_input input = {}; struct ethtool_rx_flow_rule *ethtool_rule; struct mvpp2_ethtool_fs *efs, *old_efs; int ret = 0;
if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) return -EINVAL;
efs = kzalloc(sizeof(*efs), GFP_KERNEL); if (!efs) return -ENOMEM;
input.fs = &info->fs;
/* We need to manually set the rss_ctx, since this info isn't present * in info->fs
*/ if (info->fs.flow_type & FLOW_RSS)
input.rss_ctx = info->rss_context;
ethtool_rule = ethtool_rx_flow_rule_create(&input); if (IS_ERR(ethtool_rule)) {
ret = PTR_ERR(ethtool_rule); goto clean_rule;
}
efs->rule.flow = ethtool_rule->rule;
efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); if (efs->rule.flow_type < 0) {
ret = efs->rule.flow_type; goto clean_rule;
}
ret = mvpp2_cls_rfs_parse_rule(&efs->rule); if (ret) goto clean_eth_rule;
efs->rule.loc = info->fs.location;
/* Replace an already existing rule */ if (port->rfs_rules[efs->rule.loc]) {
old_efs = port->rfs_rules[efs->rule.loc];
ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule); if (ret) goto clean_eth_rule;
kfree(old_efs);
port->n_rfs_rules--;
}
ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule); if (ret) goto clean_eth_rule;
/* Number of RXQs per CPU */
nrxqs = port->nrxqs / cpus;
/* CPU that will handle this rx queue */
cpu = rxq / nrxqs;
if (!cpu_online(cpu)) return port->first_rxq;
/* Indirection to better distribute the paquets on the CPUs when * configuring the RSS queues.
*/ return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
MVPP22_RSS_INDEX_TABLE_ENTRY(i);
mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
/* Find the first free RSS table */ for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) { if (!priv->rss_tables[ctx]) break;
}
if (ctx == MVPP22_N_RSS_TABLES) return -EINVAL;
priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
GFP_KERNEL); if (!priv->rss_tables[ctx]) return -ENOMEM;
*rss_ctx = ctx;
/* Set the table width: replace the whole classifier Rx queue number * with the ones configured in RSS table entries.
*/
mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
/* Creates a global RSS context and binds it to the port-local context slot
 * @port_ctx. Returns 0 on success, -EINVAL if the slot is already in use,
 * or the error from the context allocation.
 */
int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 port_ctx)
{
	u32 rss_ctx;
	int ret;

	ret = mvpp22_rss_context_create(port, &rss_ctx);
	if (ret)
		return ret;

	/* The port-local slot must be free (negative means unused) */
	if (WARN_ON_ONCE(port->rss_ctx[port_ctx] >= 0))
		return -EINVAL;

	port->rss_ctx[port_ctx] = rss_ctx;
	return 0;
}
staticstruct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv, int rss_ctx)
{ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES) return NULL;
return priv->rss_tables[rss_ctx];
}
/* Deletes the RSS context bound to the port-local slot @port_ctx.
 *
 * First removes any active classification rule steering to this context,
 * then releases the underlying RSS table and clears the port-local binding.
 * Returns 0 on success or -EINVAL for an invalid context.
 */
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
{
	struct mvpp2 *priv = port->priv;
	struct ethtool_rxnfc *rxnfc;
	int i, rss_ctx, ret;

	rss_ctx = mvpp22_rss_ctx(port, port_ctx);

	if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
		return -EINVAL;

	/* Invalidate any active classification rule that use this context */
	for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
		if (!port->rfs_rules[i])
			continue;

		/* NOTE(review): the rxnfc assignment and FLOW_RSS filter
		 * below were dropped from the original text, leaving 'rxnfc'
		 * used uninitialized; restored from the mainline driver.
		 */
		rxnfc = &port->rfs_rules[i]->rxnfc;
		if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
		    rxnfc->rss_context != port_ctx)
			continue;

		ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
		if (ret) {
			netdev_warn(port->dev,
				    "couldn't remove classification rule %d associated to this context",
				    rxnfc->fs.location);
		}
	}

	/* NOTE(review): tail restored from the mainline driver — the
	 * original text was truncated before the table was released.
	 */
	kfree(priv->rss_tables[rss_ctx]);

	priv->rss_tables[rss_ctx] = NULL;
	port->rss_ctx[port_ctx] = -1;

	return 0;
}
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { switch (BIT(i)) { case MVPP22_CLS_HEK_OPT_MAC_DA:
info->data |= RXH_L2DA; break; case MVPP22_CLS_HEK_OPT_VLAN:
info->data |= RXH_VLAN; break; case MVPP22_CLS_HEK_OPT_L3_PROTO:
info->data |= RXH_L3_PROTO; break; case MVPP22_CLS_HEK_OPT_IP4SA: case MVPP22_CLS_HEK_OPT_IP6SA:
info->data |= RXH_IP_SRC; break; case MVPP22_CLS_HEK_OPT_IP4DA: case MVPP22_CLS_HEK_OPT_IP6DA:
info->data |= RXH_IP_DST; break; case MVPP22_CLS_HEK_OPT_L4SIP:
info->data |= RXH_L4_B_0_1; break; case MVPP22_CLS_HEK_OPT_L4DIP:
info->data |= RXH_L4_B_2_3; break; default: return -EINVAL;
}
} return 0;
}
int mvpp22_port_rss_init(struct mvpp2_port *port)
{ struct mvpp2_rss_table *table;
u32 context = 0; int i, ret;
for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
port->rss_ctx[i] = -1;
ret = mvpp22_rss_context_create(port, &context); if (ret) return ret;
table = mvpp22_rss_table_get(port->priv, context); if (!table) return -EINVAL;
port->rss_ctx[0] = context;
/* Configure the first table to evenly distribute the packets across * real Rx Queues. The table entries map a hash to a port Rx Queue.
*/ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.