        /* Reuse path - other offloaded program is already tracking this map. */
        record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
                                        nfp_bpf_maps_neutral_params);
        if (record) {
                nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
                record->count++;
                return 0;
        }

        /* Grab a single ref to the map for our record.  The prog destroy ndo
         * happens after free_used_maps().
         */
        bpf_map_inc(map);

        record = kmalloc(sizeof(*record), GFP_KERNEL);
        if (!record) {
                err = -ENOMEM;
                goto err_map_put;
        }

        for (i = 0; i < nfp_prog->map_records_cnt; i++)
                if (nfp_prog->map_records[i]) {
                        bpf_map_put(nfp_prog->map_records[i]->ptr);
                        kfree(nfp_prog->map_records[i]);
                }
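
/* Context sketch (illustrative, not part of the excerpt above): the lookups
 * key the neutral-map records by BPF map ID.  A record layout and rhashtable
 * keying roughly along these lines is assumed:
 */
struct nfp_bpf_neutral_map {
        struct rhash_head l;    /* rhashtable linkage */
        struct bpf_map *ptr;    /* map we hold a single reference on */
        u32 map_id;             /* lookup key: bpf_map->id */
        u32 count;              /* offloaded programs sharing this map */
};

const struct rhashtable_params nfp_bpf_maps_neutral_params = {
        .key_len                = sizeof_field(struct bpf_map, id),
        .key_offset             = offsetof(struct nfp_bpf_neutral_map, map_id),
        .head_offset            = offsetof(struct nfp_bpf_neutral_map, l),
        .automatic_shrinking    = true,
};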

static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                    struct bpf_prog *prog)
{
        int i, cnt, err = 0;

        mutex_lock(&prog->aux->used_maps_mutex);

        /* Quickly count the maps we will have to remember */
        cnt = 0;
        for (i = 0; i < prog->aux->used_map_cnt; i++)
                if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
                        cnt++;
        if (!cnt)
                goto out;
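
/* For reference (paraphrased from the core BPF headers as an illustration):
 * "offload neutral" maps behave the same whether the program runs on the
 * host or on the device - currently only perf event arrays - so they are
 * the only used_maps entries the driver has to remember here.
 */
static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
        return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}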

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
        u32 *word = value;
        unsigned int i;

        for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
                if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
                        word[i] = (__force u32)cpu_to_be32(word[i]);
}
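
/* Worked example (illustrative): on a little-endian host an atomic counter
 * word holding the value 1 sits in memory as 01 00 00 00.  cpu_to_be32()
 * rewrites it to the big-endian layout 00 00 00 01 that the NFP atomic
 * engine expects; words not used with xadd are left untouched.
 */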

/* Mark value as unsafely initialized in case it becomes atomic later
 * and we didn't byte swap something non-byte swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
        u32 *word = value;
        unsigned int i;

        for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
                if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
                    word[i] != (__force u32)cpu_to_be32(word[i]))
                        nfp_map->use_map[i].non_zero_update = 1;
}
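
/* Sketch of how the two helpers above are typically wired into the offloaded
 * map's lookup/update callbacks (illustrative; nfp_bpf_ctrl_lookup_entry()
 * and nfp_bpf_ctrl_update_entry() stand in for the driver's control-message
 * helpers):
 */
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
                         void *key, void *value)
{
        int err;

        err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
        if (err)
                return err;

        /* Device returns atomic words big endian - swap them for the host. */
        nfp_map_bpf_byte_swap(offmap->dev_priv, value);
        return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
                         void *key, void *value, u64 flags)
{
        /* Swap atomic words to device byte order and remember any word
         * written with a value that is not byte-swap neutral.
         */
        nfp_map_bpf_byte_swap(offmap->dev_priv, value);
        nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
        return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}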

        if (offmap->map.map_flags ||
            offmap->map.numa_node != NUMA_NO_NODE) {
                pr_info("map flags are not supported\n");
                return -EINVAL;
        }

        if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
                pr_info("map type not supported\n");
                return -EOPNOTSUPP;
        }
        if (bpf->maps.max_maps == bpf->maps_in_use) {
                pr_info("too many maps for a device\n");
                return -ENOMEM;
        }
        if (bpf->maps.max_elems - bpf->map_elems_in_use <
            offmap->map.max_entries) {
                pr_info("map with too many elements: %u, left: %u\n",
                        offmap->map.max_entries,
                        bpf->maps.max_elems - bpf->map_elems_in_use);
                return -ENOMEM;
        }

        if (round_up(offmap->map.key_size, 8) +
            round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
                pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
                        round_up(offmap->map.key_size, 8) +
                        round_up(offmap->map.value_size, 8),
                        bpf->maps.max_elem_sz);
                return -ENOMEM;
        }
        if (offmap->map.key_size > bpf->maps.max_key_sz) {
                pr_info("map key size %u, FW max is %u\n",
                        offmap->map.key_size, bpf->maps.max_key_sz);
                return -ENOMEM;
        }
        if (offmap->map.value_size > bpf->maps.max_val_sz) {
                pr_info("map value size %u, FW max is %u\n",
                        offmap->map.value_size, bpf->maps.max_val_sz);
                return -ENOMEM;
        }
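
/* Sizing example (illustrative): a map with key_size = 10 and value_size = 12
 * counts as round_up(10, 8) + round_up(12, 8) = 16 + 16 = 32 bytes per
 * element against bpf->maps.max_elem_sz, so it can be rejected on the
 * combined limit even when key and value individually fit max_key_sz and
 * max_val_sz.
 */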

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                NL_SET_ERR_MSG_MOD(extack,
                                   "FW command error while loading BPF");
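
/* The reconfig above is assumed to be preceded by handing the DMA-mapped JIT
 * image to the device, roughly along these lines (illustrative; dma_addr and
 * nfp_prog come from the surrounding, elided code):
 *
 *      nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
 *      nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
 *
 * NFP_NET_CFG_UPDATE_BPF then asks the FW to pick the new image up.
 */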

int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog, struct netlink_ext_ack *extack)
{
        int err;

        if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
                return -EINVAL;

        if (prog && old_prog) {
                u8 cap;

                cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
                if (!(cap & NFP_NET_BPF_CAP_RELO)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "FW does not support live reload");
                        return -EBUSY;
                }
        }

        /* Something else is loaded, different program type? */
        if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return -EBUSY;

        if (old_prog && !prog)
                return nfp_net_bpf_stop(nn);

        err = nfp_net_bpf_load(nn, prog, extack);
        if (err)
                return err;
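
/* Userspace-side sketch (illustrative, not part of the driver): offloading an
 * XDP program to the NIC with libbpf.  Setting the program's ifindex before
 * load is what steers verification and JIT to the device, and attaching with
 * XDP_FLAGS_HW_MODE is what eventually reaches nfp_net_bpf_offload() through
 * the ndo_bpf callback.  "prog.o" and "xdp_prog" are placeholder names; maps
 * used by the program would need their ifindex set as well.
 */
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int offload_xdp(const char *ifname)
{
        struct bpf_program *prog;
        struct bpf_object *obj;
        int ifindex, err = -1;

        ifindex = if_nametoindex(ifname);
        if (!ifindex)
                return -1;

        obj = bpf_object__open_file("prog.o", NULL);
        if (!obj)
                return -1;

        prog = bpf_object__find_program_by_name(obj, "xdp_prog");
        if (!prog)
                goto out;

        /* Route verification and JIT of this program to the target netdev. */
        bpf_program__set_ifindex(prog, ifindex);

        err = bpf_object__load(obj);
        if (err)
                goto out;

        /* HW mode: the device, not the host, runs the program. */
        err = bpf_xdp_attach(ifindex, bpf_program__fd(prog),
                             XDP_FLAGS_HW_MODE, NULL);
out:
        /* Once attached, the kernel holds its own reference to the program. */
        bpf_object__close(obj);
        return err ? -1 : 0;
}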