if (!bpf_map__is_internal(map)) {
snprintf(buf, buf_sz, "%s", name); returntrue;
}
for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) { constchar *sfx = sfxs[i], *p;
p = strstr(name, sfx); if (p) {
snprintf(buf, buf_sz, "%s", p + 1);
sanitize_identifier(buf); returntrue;
}
}
returnfalse;
}
staticbool get_datasec_ident(constchar *sec_name, char *buf, size_t buf_sz)
{ staticconstchar *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" }; int i, n;
/* recognize hard coded LLVM section name */ if (strcmp(sec_name, ".addr_space.1") == 0) { /* this is the name to use in skeleton */
snprintf(buf, buf_sz, "arena"); returntrue;
} for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) { constchar *pfx = pfxs[i];
/* static variables are not exposed through BPF skeleton */ if (btf_var(var)->linkage == BTF_VAR_STATIC) continue;
if (off > need_off) {
p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
sec_name, i, need_off, off); return -EINVAL;
}
align = btf__align_of(btf, var->type); if (align <= 0) {
p_err("Failed to determine alignment of variable '%s': %d",
var_name, align); return -EINVAL;
} /* Assume 32-bit architectures when generating data section * struct memory layout. Given bpftool can't know which target * host architecture it's emitting skeleton for, we need to be * conservative and assume 32-bit one to ensure enough padding * bytes are generated for pointer and long types. This will * still work correctly for 64-bit architectures, because in * the worst case we'll generate unnecessary padding field, * which on 64-bit architectures is not strictly necessary and * would be handled by natural 8-byte alignment. But it still * will be a correct memory layout, based on recorded offsets * in BTF.
*/ if (align > 4)
align = 4;
/* sanitize variable name, e.g., for static vars inside * a function, it's name is '<function name>.<variable name>', * which we'll turn into a '<function name>_<variable name>'
*/
var_ident[0] = '\0';
strncat(var_ident, var_name, sizeof(var_ident) - 1);
sanitize_identifier(var_ident);
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL); if (!d) return -errno;
bpf_object__for_each_map(map, obj) { /* only generate definitions for memory-mapped internal maps */ if (!is_mmapable_map(map, map_ident, sizeof(map_ident))) continue;
sec = find_type_for_map(btf, map_ident);
/* In some cases (e.g., sections like .rodata.cst16 containing * compiler allocated string constants only) there will be * special internal maps with no corresponding DATASEC BTF * type. In such case, generate empty structs for each such * map. It will still be memory-mapped and its contents * accessible from user-space through BPF skeleton.
*/ if (!sec) {
printf(" struct %s__%s {\n", obj_name, map_ident);
printf(" } *%s;\n", map_ident);
} else {
err = codegen_datasec_def(obj, btf, d, sec, obj_name); if (err) goto out;
}
}
/* static variables are not exposed through BPF skeleton */ if (btf_var(var)->linkage == BTF_VAR_STATIC) continue;
/* The datasec member has KIND_VAR but we want the * underlying type of the variable (e.g. KIND_INT).
*/
var = skip_mods_and_typedefs(btf, var->type, NULL);
printf("\t\t"); /* Func and array members require special handling. * Instead of producing `typename *var`, they produce * `typeof(typename) *var`. This allows us to keep a * similar syntax where the identifier is just prefixed * by *, allowing us to ignore C declaration minutiae.
*/
needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var); if (needs_typeof)
printf("__typeof__(");
err = btf_dump__emit_type_decl(d, var_type_id, &opts); if (err) goto out;
err = bpf_object__gen_loader(obj, &opts); if (err) return err;
err = bpf_object__load(obj); if (err) {
p_err("failed to load object file"); goto out;
} /* If there was no error during load then gen_loader_opts * are populated with the loader program.
*/
/* for backward compatibility with old libbpf versions that don't * handle new BPF skeleton with new struct bpf_map_skeleton definition * that includes link field, avoid specifying new increased size, * unless we absolutely have to (i.e., if there are struct_ops maps * present)
*/
map_sz = offsetof(struct bpf_map_skeleton, link); if (populate_links) {
bpf_object__for_each_map(map, obj) { if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
map_sz = sizeof(struct bpf_map_skeleton); break;
}
}
}
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL); if (!d) return -errno;
n = btf_vlen(map_type); for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
member_name = btf__name_by_offset(btf, m->name_off);
offset = m->offset / 8; if (next_offset < offset)
printf("\t\t\tchar __padding_%d[%u];\n", i, offset - next_offset);
switch (btf_kind(member_type)) { case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: /* scalar type */
printf("\t\t\t");
opts.field_name = member_name;
err = btf_dump__emit_type_decl(d, member_type_id, &opts); if (err) {
p_err("Failed to emit type declaration for %s: %d", member_name, err); goto out;
}
printf(";\n");
size = btf__resolve_size(btf, member_type_id); if (size < 0) {
p_err("Failed to resolve size of %s: %d\n", member_name, size);
err = size; goto out;
}
next_offset = offset + size; break;
case BTF_KIND_PTR: if (resolve_func_ptr(btf, m->type, NULL)) { /* Function pointer */
printf("\t\t\tstruct bpf_program *%s;\n", member_name);
next_offset = offset + sizeof(void *); break;
} /* All pointer types are unsupported except for * function pointers.
*/
fallthrough;
default: /* Unsupported types * * Types other than scalar types and function * pointers are currently not supported in order to * prevent conflicts in the generated code caused * by multiple definitions. For instance, if the * struct type FOO is used in a struct_ops map, * bpftool has to generate definitions for FOO, * which may result in conflicts if FOO is defined * in different skeleton files.
*/
size = btf__resolve_size(btf, member_type_id); if (size < 0) {
p_err("Failed to resolve size of %s: %d\n", member_name, size);
err = size; goto out;
}
printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);
next_offset = offset + size; break;
}
}
/* Cannot fail since it must be a struct type */
size = btf__resolve_size(btf, map_type_id); if (next_offset < (__u32)size)
printf("\t\t\tchar __padding_end[%u];\n", size - next_offset);
out:
btf_dump__free(d);
return err;
}
/* Generate the pointer of the shadow type for a struct_ops map. * * This function adds a pointer of the shadow type for a struct_ops map. * The members of a struct_ops map can be exported through a pointer to a * shadow type. The user can access these members through the pointer. * * A shadow type includes not all members, only members of some types. * They are scalar types and function pointers. The function pointers are * translated to the pointer of the struct bpf_program. The scalar types * are translated to the original type without any modifiers. * * Unsupported types will be translated to a char array to occupy the same * space as the original field, being renamed as __unsupported_*. The user * should treat these fields as opaque data.
*/ staticint gen_st_ops_shadow_type(constchar *obj_name, struct btf *btf, constchar *ident, conststruct bpf_map *map)
{ conststruct btf_type *map_type; constchar *type_name;
__u32 map_type_id; int err;
map_type_id = bpf_map__btf_value_type_id(map); if (map_type_id == 0) return -EINVAL;
map_type = btf__type_by_id(btf, map_type_id); if (!map_type) return -EINVAL;
/* Generate the pointers to shadow types of * struct_ops maps.
*/
bpf_object__for_each_map(map, obj) { if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS) continue; if (!get_map_ident(map, ident, sizeof(ident))) continue;
if (st_ops_cnt == 0) /* first struct_ops map */
printf("\tstruct {\n");
st_ops_cnt++;
/* Subskeletons are like skeletons, except they don't own the bpf_object, * associated maps, links, etc. Instead, they know about the existence of * variables, maps, programs and are able to find their locations * _at runtime_ from an already loaded bpf_object. * * This allows for library-like BPF objects to have userspace counterparts * with access to their own items without having to know anything about the * final BPF object that the library was linked into.
*/ staticint do_subskeleton(int argc, char **argv)
{ char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data; struct bpf_object *obj = NULL; constchar *file, *var_name; char ident[256]; int fd, err = -1, map_type_id; conststruct bpf_map *map; struct bpf_program *prog; struct btf *btf; conststruct btf_type *map_type, *var_type; conststruct btf_var_secinfo *var; struct stat st;
if (!REQ_ARGS(1)) {
usage(); return -1;
}
file = GET_ARG();
while (argc) { if (!REQ_ARGS(2)) return -1;
if (is_prefix(*argv, "name")) {
NEXT_ARG();
if (obj_name[0] != '\0') {
p_err("object name already specified"); return -1;
}
/* The empty object name allows us to use bpf_map__name and produce * ELF section names out of it. (".data" instead of "obj.data")
*/
opts.object_name = "";
obj = bpf_object__open_mem(obj_data, file_sz, &opts); if (!obj) { char err_buf[256];
libbpf_strerror(errno, err_buf, sizeof(err_buf));
p_err("failed to open BPF object file: %s", err_buf);
obj = NULL; goto out;
}
btf = bpf_object__btf(obj); if (!btf) {
err = -1;
p_err("need btf type information for %s", obj_name); goto out;
}
/* First, count how many variables we have to find. * We need this in advance so the subskel can allocate the right * amount of storage.
*/
bpf_object__for_each_map(map, obj) { if (!get_map_ident(map, ident, sizeof(ident))) continue;
/* Also count all maps that have a name */
map_cnt++;
if (!is_mmapable_map(map, ident, sizeof(ident))) continue;
/* walk through each symbol and emit the runtime representation */
bpf_object__for_each_map(map, obj) { if (!is_mmapable_map(map, ident, sizeof(ident))) continue;
map_type_id = bpf_map__btf_value_type_id(map); if (map_type_id <= 0) /* skip over internal maps with no type*/ continue;
map_type = btf__type_by_id(btf, map_type_id);
var = btf_var_secinfos(map_type);
len = btf_vlen(map_type); for (i = 0; i < len; i++, var++) {
var_type = btf__type_by_id(btf, var->type);
var_name = btf__name_by_offset(btf, var_type->name_off);
if (btf_var(var_type)->linkage == BTF_VAR_STATIC) continue;
/* Note that we use the dot prefix in .data as the * field access operator i.e. maps%s becomes maps.data
*/
codegen("\
\n\
\n\
s->vars[%3$d].name = \"%1$s\"; \n\
s->vars[%3$d].map = &obj->maps.%2$s; \n\
s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\ ", var_name, ident, var_idx);
/* mark type on cloned BTF as used */
cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
cloned_type->name_off = MARKED;
/* recursively mark other types needed by it */ switch (btf_kind(btf_type)) { case BTF_KIND_UNKN: case BTF_KIND_INT: case BTF_KIND_FLOAT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_STRUCT: case BTF_KIND_UNION: break; case BTF_KIND_PTR: if (follow_pointers) {
err = btfgen_mark_type(info, btf_type->type, follow_pointers); if (err) return err;
} break; case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VOLATILE: case BTF_KIND_TYPEDEF:
err = btfgen_mark_type(info, btf_type->type, follow_pointers); if (err) return err; break; case BTF_KIND_ARRAY:
array = btf_array(btf_type);
/* mark array type */
err = btfgen_mark_type(info, array->type, follow_pointers); /* mark array's index type */
err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers); if (err) return err; break; case BTF_KIND_FUNC_PROTO: /* mark ret type */
err = btfgen_mark_type(info, btf_type->type, follow_pointers); if (err) return err;
/* mark parameters types */
param = btf_params(btf_type); for (i = 0; i < btf_vlen(btf_type); i++) {
err = btfgen_mark_type(info, param->type, follow_pointers); if (err) return err;
param++;
} break; /* tells if some other type needs to be handled */ default:
p_err("unsupported kind: %s (%u)", btf_kind_str(btf_type), type_id); return -EINVAL;
}
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 *
 * The `behind_ptr` argument is used to stop marking of composite types reached
 * through a pointer. This way, we can keep BTF size in check while providing
 * reasonable match semantics.
 */
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
{
	const struct btf_type *btf_type;
	struct btf *btf = info->src_btf;
	struct btf_type *cloned_type;
	int i, err;

	/* id 0 is the implicit "void" type; nothing to mark */
	if (type_id == 0)
		return 0;

	btf_type = btf__type_by_id(btf, type_id);
	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		/* leaf kinds: marking the type itself is enough */
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(btf_type);
		__u16 vlen = btf_vlen(btf_type);

		/* composite types reached through a pointer are not expanded */
		if (behind_ptr)
			break;

		for (i = 0; i < vlen; i++, m++) {
			/* mark member */
			btfgen_mark_member(info, type_id, i);

			/* mark member's type */
			err = btfgen_mark_type_match(info, m->type, false);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_CONST:
	case BTF_KIND_FWD:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
		/* modifiers are transparent: follow them, preserving behind_ptr */
		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
	case BTF_KIND_PTR:
		/* crossing a pointer boundary stops composite expansion below */
		return btfgen_mark_type_match(info, btf_type->type, true);
	case BTF_KIND_ARRAY: {
		struct btf_array *array;

		array = btf_array(btf_type);
		/* mark array type */
		err = btfgen_mark_type_match(info, array->type, false);
		/* mark array's index type */
		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		__u16 vlen = btf_vlen(btf_type);
		struct btf_param *param;

		/* mark ret type */
		err = btfgen_mark_type_match(info, btf_type->type, false);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < vlen; i++) {
			err = btfgen_mark_type_match(info, param->type, false);
			if (err)
				return err;
			param++;
		}
		break;
	}
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%u)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 */
static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	/* walk the relocation's root type graph; the "behind a pointer"
	 * restriction is not engaged at the root
	 */
	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
}
staticint btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{ switch (res->relo_kind) { case BPF_CORE_FIELD_BYTE_OFFSET: case BPF_CORE_FIELD_BYTE_SIZE: case BPF_CORE_FIELD_EXISTS: case BPF_CORE_FIELD_SIGNED: case BPF_CORE_FIELD_LSHIFT_U64: case BPF_CORE_FIELD_RSHIFT_U64: return btfgen_record_field_relo(info, res); case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */ return 0; case BPF_CORE_TYPE_ID_TARGET: case BPF_CORE_TYPE_EXISTS: case BPF_CORE_TYPE_SIZE: return btfgen_record_type_relo(info, res); case BPF_CORE_TYPE_MATCHES: return btfgen_record_type_match_relo(info, res); case BPF_CORE_ENUMVAL_EXISTS: case BPF_CORE_ENUMVAL_VALUE: return btfgen_record_enumval_relo(info, res); default: return -EINVAL;
}
}
/* first pass: add all marked types to btf_new and add their new ids to the ids map */ for (i = 1; i < n; i++) { conststruct btf_type *cloned_type, *type; constchar *name; int new_id;
cloned_m = btf_members(cloned_type);
m = btf_members(type);
vlen = btf_vlen(cloned_type); for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) { /* add only members that are marked as used */ if (cloned_m->name_off != MARKED) continue;
/* Create minimized BTF file for a set of BPF objects. * * The BTFGen algorithm is divided in two main parts: (1) collect the * BTF types that are involved in relocations and (2) generate the BTF * object using the collected types. * * In order to collect the types involved in the relocations, we parse * the BTF and BTF.ext sections of the BPF objects and use * bpf_core_calc_relo_insn() to get the target specification, this * indicates how the types and fields are used in a relocation. * * Types are recorded in different ways according to the kind of the * relocation. For field-based relocations only the members that are * actually used are saved in order to reduce the size of the generated * BTF file. For type-based relocations empty struct / unions are * generated and for enum-based relocations the whole type is saved. * * The second part of the algorithm generates the BTF object. It creates * an empty BTF object and fills it with the types recorded in the * previous step. This function takes care of only adding the structure * and union members that were marked as used and it also fixes up the * type IDs on the generated BTF object.
*/ staticint minimize_btf(constchar *src_btf, constchar *dst_btf, constchar *objspaths[])
{ struct btfgen_info *info; struct btf *btf_new = NULL; int err, i;
info = btfgen_new_info(src_btf); if (!info) {
err = -errno;
p_err("failed to allocate info structure: %s", strerror(errno)); goto out;
}
for (i = 0; objspaths[i] != NULL; i++) {
err = btfgen_record_obj(info, objspaths[i]); if (err) {
p_err("error recording relocations for %s: %s", objspaths[i],
strerror(errno)); goto out;
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.