struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, type_data, and, strs_data
	 * point inside that memory region to their respective parts of BTF
	 * representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions to be able to modify them independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to total size of base BTF's string section size.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* whether base_btf should be freed in btf_free for this instance */
	bool owns_base;

	/* whether raw_data is a (read-only) mmap */
	bool raw_data_is_mmap;

	/* BTF object FD, if loaded into kernel */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF */
	int ptr_sz;
};
/* Ensure given dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can be ever allocated.
 * If necessary, memory is reallocated and all existing data is copied over,
 * new pointer to the memory region is stored at *data, new memory region
 * capacity (in number of elements) is stored in *cap.
 * On success, memory pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	/* enough capacity already, just hand out the unused tail */
	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit */
	if (cur_cnt + add_cnt > max_cnt)
		return NULL;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	/* commit new pointer and capacity, return pointer to first unused slot */
	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
/* Ensure given dynamically allocated memory region has enough allocated space
 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
 */
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
{
	void *p;

	if (need_cnt <= *cap_cnt)
		return 0;

	/* grow without an upper bound (SIZE_MAX) up to the needed count */
	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
	if (!p)
		return -ENOMEM;

	return 0;
}
if ((longlong)hdr->type_off + hdr->type_len > hdr->str_off) {
pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len); return -EINVAL;
}
if (hdr->type_off % 4) {
pr_debug("BTF type section is not aligned to 4 bytes\n"); return -EINVAL;
}
switch (kind) { case BTF_KIND_UNKN: case BTF_KIND_INT: case BTF_KIND_FWD: case BTF_KIND_FLOAT: break; case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG:
err = btf_validate_id(btf, t->type, id); if (err) return err; break; case BTF_KIND_ARRAY: { conststruct btf_array *a = btf_array(t);
err = btf_validate_id(btf, a->type, id);
err = err ?: btf_validate_id(btf, a->index_type, id); if (err) return err; break;
} case BTF_KIND_STRUCT: case BTF_KIND_UNION: { conststruct btf_member *m = btf_members(t);
n = btf_vlen(t); for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "field name", id);
err = err ?: btf_validate_id(btf, m->type, id); if (err) return err;
} break;
} case BTF_KIND_ENUM: { conststruct btf_enum *m = btf_enum(t);
n = btf_vlen(t); for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "enum name", id); if (err) return err;
} break;
} case BTF_KIND_ENUM64: { conststruct btf_enum64 *m = btf_enum64(t);
n = btf_vlen(t); for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "enum name", id); if (err) return err;
} break;
} case BTF_KIND_FUNC: { conststruct btf_type *ft;
err = btf_validate_id(btf, t->type, id); if (err) return err;
ft = btf__type_by_id(btf, t->type); if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type); return -EINVAL;
} break;
} case BTF_KIND_FUNC_PROTO: { conststruct btf_param *m = btf_params(t);
n = btf_vlen(t); for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "param name", id);
err = err ?: btf_validate_id(btf, m->type, id); if (err) return err;
} break;
} case BTF_KIND_DATASEC: { conststruct btf_var_secinfo *m = btf_var_secinfos(t);
n = btf_vlen(t); for (i = 0; i < n; i++, m++) {
err = btf_validate_id(btf, m->type, id); if (err) return err;
} break;
} default:
pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind); return -EINVAL;
} return 0;
}
/* Validate basic sanity of BTF. It's intentionally less thorough than * kernel's validation and validates only properties of BTF that libbpf relies * on to be correct (e.g., valid type IDs, valid string offsets, etc)
*/ staticint btf_sanity_check(conststruct btf *btf)
{ conststruct btf_type *t;
__u32 i, n = btf__type_cnt(btf); int err;
for (i = btf->start_id; i < n; i++) {
t = btf_type_by_id(btf, i);
err = btf_validate_type(btf, t, i); if (err) return err;
} return 0;
}
/* Return pointer size this BTF instance assumes. The size is heuristically * determined by looking for 'long' or 'unsigned long' integer type and * recording its size in bytes. If BTF type information doesn't have any such * type, this function returns 0. In the latter case, native architecture's * pointer size is assumed, so will be either 4 or 8, depending on * architecture that libbpf was compiled for. It's possible to override * guessed value by using btf__set_pointer_size() API.
*/
size_t btf__pointer_size(conststruct btf *btf)
{ if (!btf->ptr_sz)
((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
if (btf->ptr_sz < 0) /* not enough BTF type info to guess */ return 0;
return btf->ptr_sz;
}
/* Override or set pointer size in bytes. Only values of 4 and 8 are
 * supported.
 */
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
	if (ptr_sz == 4 || ptr_sz == 8) {
		btf->ptr_sz = ptr_sz;
		return 0;
	}

	/* any other pointer size is rejected */
	return libbpf_err(-EINVAL);
}
t = btf__type_by_id(btf, type_id); for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { switch (btf_kind(t)) { case BTF_KIND_INT: case BTF_KIND_STRUCT: case BTF_KIND_UNION: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_DATASEC: case BTF_KIND_FLOAT:
size = t->size; goto done; case BTF_KIND_PTR:
size = btf_ptr_sz(btf); goto done; case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: case BTF_KIND_TYPE_TAG:
type_id = t->type; break; case BTF_KIND_ARRAY:
array = btf_array(t); if (nelems && array->nelems > UINT32_MAX / nelems) return libbpf_err(-E2BIG);
nelems *= array->nelems;
type_id = array->type; break; default: return libbpf_err(-EINVAL);
}
t = btf__type_by_id(btf, type_id);
}
done: if (size < 0) return libbpf_err(-EINVAL); if (nelems && size > UINT32_MAX / nelems) return libbpf_err(-E2BIG);
switch (kind) { case BTF_KIND_INT: case BTF_KIND_ENUM: case BTF_KIND_ENUM64: case BTF_KIND_FLOAT: return min(btf_ptr_sz(btf), (size_t)t->size); case BTF_KIND_PTR: return btf_ptr_sz(btf); case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_TYPE_TAG: return btf__align_of(btf, t->type); case BTF_KIND_ARRAY: return btf__align_of(btf, btf_array(t)->type); case BTF_KIND_STRUCT: case BTF_KIND_UNION: { conststruct btf_member *m = btf_members(t);
__u16 vlen = btf_vlen(t); int i, max_align = 1, align;
for (i = 0; i < vlen; i++, m++) {
align = btf__align_of(btf, m->type); if (align <= 0) return libbpf_err(align);
max_align = max(max_align, align);
/* if field offset isn't aligned according to field * type's alignment, then struct must be packed
*/ if (btf_member_bitfield_size(t, i) == 0 &&
(m->offset % (8 * align)) != 0) return 1;
}
/* if struct/union size isn't a multiple of its alignment, * then struct must be packed
*/ if ((t->size % max_align) != 0) return 1;
/* Free BTF instance and all memory it owns. Accepts NULL/ERR_PTR safely. */
void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}

	btf_free_raw_data(btf);
	free(btf->type_offs);
	free(btf->raw_data_swapped);

	/* recursively release base BTF only if this instance owns it */
	if (btf->owns_base)
		btf__free(btf->base_btf);

	free(btf);
}
idx++; if (gelf_getshdr(scn, &sh) != &sh) {
pr_warn("failed to get section(%d) header from %s\n",
idx, path); goto err;
}
name = elf_strptr(elf, shstrndx, sh.sh_name); if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path); goto err;
}
if (strcmp(name, BTF_ELF_SEC) == 0)
field = &secs->btf_data; elseif (strcmp(name, BTF_EXT_ELF_SEC) == 0)
field = &secs->btf_ext_data; elseif (strcmp(name, BTF_BASE_ELF_SEC) == 0)
field = &secs->btf_base_data; else continue;
if (sh.sh_type != SHT_PROGBITS) {
pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n",
sh.sh_type, idx, name, path); goto err;
}
data = elf_getdata(scn, 0); if (!data) {
pr_warn("failed to get section(%d, %s) data from %s\n",
idx, name, path); goto err;
}
*field = data;
}
switch (gelf_getclass(elf)) { case ELFCLASS32:
btf__set_pointer_size(btf, 4); break; case ELFCLASS64:
btf__set_pointer_size(btf, 8); break; default:
pr_warn("failed to get ELF class (bitness) for %s\n", path); break;
}
if (btf->fd >= 0) return libbpf_err(-EEXIST); if (log_sz && !log_buf) return libbpf_err(-EINVAL);
/* cache native raw data representation */
raw_data = btf_get_raw_data(btf, &raw_size, false); if (!raw_data) {
err = -ENOMEM; goto done;
}
btf->raw_size = raw_size;
btf->raw_data = raw_data;
retry_load: /* if log_level is 0, we won't provide log_buf/log_size to the kernel, * initially. Only if BTF loading fails, we bump log_level to 1 and * retry, using either auto-allocated or custom log_buf. This way * non-NULL custom log_buf provides a buffer just in case, but hopes * for successful load and no need for log_buf.
*/ if (log_level) { /* if caller didn't provide custom log_buf, we'll keep * allocating our own progressively bigger buffers for BTF * verification log
*/ if (!log_buf) {
buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
tmp = realloc(buf, buf_sz); if (!tmp) {
err = -ENOMEM; goto done;
}
buf = tmp;
buf[0] = '\0';
}
opts.token_fd = token_fd; if (token_fd)
opts.btf_flags |= BPF_F_TOKEN_FD;
btf->fd = bpf_btf_load(raw_data, raw_size, &opts); if (btf->fd < 0) { /* time to turn on verbose mode and try again */ if (log_level == 0) {
log_level = 1; goto retry_load;
} /* only retry if caller didn't provide custom log_buf, but * make sure we can never overflow buf_sz
*/ if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2) goto retry_load;
err = -errno;
pr_warn("BTF loading error: %s\n", errstr(err)); /* don't print out contents of custom log_buf */ if (!log_buf && buf[0])
pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
}
data = swap_endian ? btf->raw_data_swapped : btf->raw_data; if (data) {
*size = btf->raw_size; return data;
}
data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
data = calloc(1, data_sz); if (!data) return NULL;
p = data;
memcpy(p, hdr, hdr->hdr_len); if (swap_endian)
btf_bswap_hdr(p);
p += hdr->hdr_len;
memcpy(p, btf->types_data, hdr->type_len); if (swap_endian) { for (i = 0; i < btf->nr_types; i++) {
t = p + btf->type_offs[i]; /* btf_bswap_type_rest() relies on native t->info, so * we swap base type info after we swapped all the * additional information
*/ if (btf_bswap_type_rest(t)) goto err_out;
btf_bswap_type_base(t);
}
}
p += hdr->type_len;
memcpy(p, btf_strs_data(btf), hdr->str_len);
p += hdr->str_len;
/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so * let's start with a sane default - 4KiB here - and resize it only if * bpf_btf_get_info_by_fd() needs a bigger buffer.
*/
last_size = 4096;
ptr = malloc(last_size); if (!ptr) return ERR_PTR(-ENOMEM);
/* Drop any cached contiguous raw data representation (both native and
 * swapped-endian copies); it will be rebuilt on demand by btf__raw_data().
 */
static void btf_invalidate_raw_data(struct btf *btf)
{
	if (btf->raw_data)
		btf_free_raw_data(btf);
	if (btf->raw_data_swapped) {
		free(btf->raw_data_swapped);
		btf->raw_data_swapped = NULL;
	}
}
/* Ensure BTF is ready to be modified (by splitting into a three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL;
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}
/* Find an offset in BTF string section that corresponds to a given string *s*. * Returns: * - >0 offset into string section, if string is found; * - -ENOENT, if string is not in the string section; * - <0, on any other error.
*/ int btf__find_str(struct btf *btf, constchar *s)
{ int off;
if (btf->base_btf) {
off = btf__find_str(btf->base_btf, s); if (off != -ENOENT) return off;
}
/* BTF needs to be in a modifiable state to build string lookup index */ if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM);
off = strset__find_str(btf->strs_set, s); if (off < 0) return libbpf_err(off);
return btf->start_str_off + off;
}
/* Add a string s to the BTF string section. * Returns: * - > 0 offset into string section, on success; * - < 0, on error.
*/ int btf__add_str(struct btf *btf, constchar *s)
{ int off;
if (btf->base_btf) {
off = btf__find_str(btf->base_btf, s); if (off != -ENOENT) return off;
}
if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM);
off = strset__add_str(btf->strs_set, s); if (off < 0) return libbpf_err(off);
/* pre-allocate enough memory for new types */
t = btf_add_type_mem(btf, data_sz); if (!t) return libbpf_err(-ENOMEM);
/* pre-allocate enough memory for type offset index for new types */
off = btf_add_type_offs_mem(btf, cnt); if (!off) return libbpf_err(-ENOMEM);
/* Map the string offsets from src_btf to the offsets from btf to improve performance */
p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); if (IS_ERR(p.str_off_map)) return libbpf_err(-ENOMEM);
/* bulk copy types data for all types from src_btf */
memcpy(t, src_btf->types_data, data_sz);
for (i = 0; i < cnt; i++) { struct btf_field_iter it;
__u32 *type_id, *str_off;
sz = btf_type_size(t); if (sz < 0) { /* unlikely, has to be corrupted src_btf */
err = sz; goto err_out;
}
/* fill out type ID to type offset mapping for lookups by type ID */
*off = t - btf->types_data;
/* add, dedup, and remap strings referenced by this BTF type */
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); if (err) goto err_out; while ((str_off = btf_field_iter_next(&it))) {
err = btf_rewrite_str(&p, str_off); if (err) goto err_out;
}
/* remap all type IDs referenced from this BTF type */
err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (err) goto err_out;
while ((type_id = btf_field_iter_next(&it))) { if (!*type_id) /* nothing to do for VOID references */ continue;
/* we haven't updated btf's type count yet, so * btf->start_id + btf->nr_types - 1 is the type ID offset we should * add to all newly added BTF types
*/
*type_id += btf->start_id + btf->nr_types - 1;
}
/* go to next type data and type offset index entry */
t += sz;
off++;
}
/* Up until now any of the copied type data was effectively invisible, * so if we exited early before this point due to error, BTF would be * effectively unmodified. There would be extra internal memory * pre-allocated, but it would not be available for querying. But now * that we've copied and rewritten all the data successfully, we can * update type count and various internal offsets and sizes to * "commit" the changes and made them visible to the outside world.
*/
btf->hdr->type_len += data_sz;
btf->hdr->str_off += data_sz;
btf->nr_types += cnt;
hashmap__free(p.str_off_map);
/* return type ID of the first added BTF type */ return btf->start_id + btf->nr_types - cnt;
err_out: /* zero out preallocated memory as if it was just allocated with * libbpf_add_mem()
*/
memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
/* and now restore original strings section size; types data size * wasn't modified, so doesn't need restoring, see big comment above
*/
btf->hdr->str_len = old_strs_len;
hashmap__free(p.str_off_map);
return libbpf_err(err);
}
/* * Append new BTF_KIND_INT type with: * - *name* - non-empty, non-NULL type name; * - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes; * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL. * Returns: * - >0, type ID of newly added BTF type; * - <0, on error.
*/ int btf__add_int(struct btf *btf, constchar *name, size_t byte_sz, int encoding)
{ struct btf_type *t; int sz, name_off;
/* non-empty name */ if (!name || !name[0]) return libbpf_err(-EINVAL); /* byte_sz must be power of 2 */ if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16) return libbpf_err(-EINVAL); if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL)) return libbpf_err(-EINVAL);
/* deconstruct BTF, if necessary, and invalidate raw_data */ if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type) + sizeof(int);
t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM);
/* if something goes wrong later, we might end up with an extra string, * but that shouldn't be a problem, because BTF can't be constructed * completely anyway and will most probably be just discarded
*/
name_off = btf__add_str(btf, name); if (name_off < 0) return name_off;
t->name_off = name_off;
t->info = btf_type_info(BTF_KIND_INT, 0, 0);
t->size = byte_sz; /* set INT info, we don't allow setting legacy bit offset/size */
*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
return btf_commit_type(btf, sz);
}
/* * Append new BTF_KIND_FLOAT type with: * - *name* - non-empty, non-NULL type name; * - *sz* - size of the type, in bytes; * Returns: * - >0, type ID of newly added BTF type; * - <0, on error.
*/ int btf__add_float(struct btf *btf, constchar *name, size_t byte_sz)
{ struct btf_type *t; int sz, name_off;
/* non-empty name */ if (!name || !name[0]) return libbpf_err(-EINVAL);
/* byte_sz must be one of the explicitly allowed values */ if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
byte_sz != 16) return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM);
name_off = btf__add_str(btf, name); if (name_off < 0) return name_off;
/* it's completely legal to append BTF types with type IDs pointing forward to
 * types that haven't been appended yet, so we only make sure that id looks
 * sane, we can't guarantee that ID will always be valid
 */
static int validate_type_id(int id)
{
	if (id < 0 || id > BTF_MAX_NR_TYPES)
		return -EINVAL;
	return 0;
}
/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */ staticint btf_add_ref_kind(struct btf *btf, int kind, constchar *name, int ref_type_id, int kflag)
{ struct btf_type *t; int sz, name_off = 0;
if (validate_type_id(ref_type_id)) return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM);
if (name && name[0]) {
name_off = btf__add_str(btf, name); if (name_off < 0) return name_off;
}
/*
 * Append new BTF_KIND_PTR type with:
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_ptr(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id, 0);
}
/*
 * Append new BTF_KIND_ARRAY type with:
 *   - *index_type_id* - type ID of the type describing array index;
 *   - *elem_type_id* - type ID of the type describing array element;
 *   - *nr_elems* - the size of the array;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
{
	struct btf_type *t;
	struct btf_array *a;
	int sz;

	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* ARRAY type itself is anonymous and has no size of its own */
	t->name_off = 0;
	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
	t->size = 0;

	a = btf_array(t);
	a->type = elem_type_id;
	a->index_type = index_type_id;
	a->nelems = nr_elems;

	return btf_commit_type(btf, sz);
}
/* generic STRUCT/UNION append function */
static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* name is optional - anonymous structs/unions are allowed */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	/* start out with vlen=0 and no kflag; this will be adjusted when
	 * adding each member
	 */
	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->size = bytes_sz;

	return btf_commit_type(btf, sz);
}
/*
 * Append new BTF_KIND_STRUCT type with:
 *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
 *   - *byte_sz* - size of the struct, in bytes;
 *
 * Struct initially has no fields in it. Fields can be added by
 * btf__add_field() right after btf__add_struct() succeeds.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
{
	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
}
/*
 * Append new BTF_KIND_UNION type with:
 *   - *name* - name of the union, can be NULL or empty for anonymous union;
 *   - *byte_sz* - size of the union, in bytes;
 *
 * Union initially has no fields in it. Fields can be added by
 * btf__add_field() right after btf__add_union() succeeds. All fields
 * should have *bit_offset* of 0.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
{
	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
}
/*
 * Append new field for the current STRUCT/UNION type with:
 *   - *name* - name of the field, can be NULL or empty for anonymous field;
 *   - *type_id* - type ID for the type describing field type;
 *   - *bit_offset* - bit offset of the start of the field within struct/union;
 *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_field(struct btf *btf, const char *name, int type_id,
		   __u32 bit_offset, __u32 bit_size)
{
	struct btf_type *t;
	struct btf_member *m;
	bool is_bitfield;
	int sz, name_off = 0;

	/* last type should be union/struct */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_composite(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);
	/* best-effort bit field offset/size enforcement */
	is_bitfield = bit_size || (bit_offset % 8 != 0);
	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
		return libbpf_err(-EINVAL);

	/* only offset 0 is allowed for unions */
	if (btf_is_union(t) && bit_offset)
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_member);
	m = btf_add_type_mem(btf, sz);
	if (!m)
		return libbpf_err(-ENOMEM);

	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	m->name_off = name_off;
	m->type = type_id;
	m->offset = bit_offset | (bit_size << 24);

	/* btf_add_type_mem can invalidate t pointer */
	t = btf_last_type(btf);
	/* update parent type's vlen and kflag */
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
/* byte_sz must be power of 2 */ if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) return libbpf_err(-EINVAL);
if (btf_ensure_modifiable(btf)) return libbpf_err(-ENOMEM);
sz = sizeof(struct btf_type);
t = btf_add_type_mem(btf, sz); if (!t) return libbpf_err(-ENOMEM);
if (name && name[0]) {
name_off = btf__add_str(btf, name); if (name_off < 0) return name_off;
}
/* start out with vlen=0; it will be adjusted when adding enum values */
t->name_off = name_off;
t->info = btf_type_info(kind, 0, is_signed);
t->size = byte_sz;
return btf_commit_type(btf, sz);
}
/*
 * Append new BTF_KIND_ENUM type with:
 *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
 *   - *byte_sz* - size of the enum, in bytes.
 *
 * Enum initially has no enum values in it (and corresponds to enum forward
 * declaration). Enumerator values can be added by btf__add_enum_value()
 * immediately after btf__add_enum() succeeds.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
{
	/*
	 * set the signedness to be unsigned, it will change to signed
	 * if any later enumerator is negative.
	 */
	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
}
/*
 * Append new enum value for the current ENUM type with:
 *   - *name* - name of the enumerator value, can't be NULL or empty;
 *   - *value* - integer value corresponding to enum value *name*;
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
{
	struct btf_type *t;
	struct btf_enum *v;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* 32-bit ENUM can't represent values outside of [INT_MIN, UINT_MAX] */
	if (value < INT_MIN || value > UINT_MAX)
		return libbpf_err(-E2BIG);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	v->val = value;

	/* update parent type's vlen */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	/* if negative value, set signedness to signed */
	if (value < 0)
		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
/*
 * Append new BTF_KIND_ENUM64 type with:
 *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
 *   - *byte_sz* - size of the enum, in bytes.
 *   - *is_signed* - whether the enum values are signed or not;
 *
 * Enum initially has no enum values in it (and corresponds to enum forward
 * declaration). Enumerator values can be added by btf__add_enum64_value()
 * immediately after btf__add_enum64() succeeds.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz, bool is_signed)
{
	return btf_add_enum_common(btf, name, byte_sz, is_signed,
				   BTF_KIND_ENUM64);
}
/*
 * Append new enum value for the current ENUM64 type with:
 *   - *name* - name of the enumerator value, can't be NULL or empty;
 *   - *value* - integer value corresponding to enum value *name*;
 * Returns:
 *   -  0, on success;
 *   - <0, on error.
 */
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
{
	struct btf_enum64 *v;
	struct btf_type *t;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM64 */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum64(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum64);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* 64-bit value is split into low and high 32-bit halves */
	v->name_off = name_off;
	v->val_lo32 = (__u32)value;
	v->val_hi32 = value >> 32;

	/* update parent type's vlen */
	t = btf_last_type(btf);
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;
	return 0;
}
/* * Append new BTF_KIND_FWD type with: * - *name*, non-empty/non-NULL name; * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT, * BTF_FWD_UNION, or BTF_FWD_ENUM; * Returns: * - >0, type ID of newly added BTF type; * - <0, on error.
*/ int btf__add_fwd(struct btf *btf, constchar *name, enum btf_fwd_kind fwd_kind)
{ if (!name || !name[0]) return libbpf_err(-EINVAL);
switch (fwd_kind) { case BTF_FWD_STRUCT: case BTF_FWD_UNION: { struct btf_type *t; int id;
id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0, 0); if (id <= 0) return id;
t = btf_type_by_id(btf, id);
t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION); return id;
} case BTF_FWD_ENUM: /* enum forward in BTF currently is just an enum with no enum * values; we also assume a standard 4-byte size for it
*/ return btf__add_enum(btf, name, sizeof(int)); default: return libbpf_err(-EINVAL);
}
}
/* * Append new BTF_KING_TYPEDEF type with: * - *name*, non-empty/non-NULL name; * - *ref_type_id* - referenced type ID, it might not exist yet; * Returns: * - >0, type ID of newly added BTF type; * - <0, on error.
*/ int btf__add_typedef(struct btf *btf, constchar *name, int ref_type_id)
{ if (!name || !name[0]) return libbpf_err(-EINVAL);
/*
 * Append new BTF_KIND_VOLATILE type with:
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_volatile(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id, 0);
}
/*
 * Append new BTF_KIND_CONST type with:
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_const(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id, 0);
}
/*
 * Append new BTF_KIND_RESTRICT type with:
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_restrict(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id, 0);
}
/* * Append new BTF_KIND_TYPE_TAG type with: * - *value*, non-empty/non-NULL tag value; * - *ref_type_id* - referenced type ID, it might not exist yet; * Returns: * - >0, type ID of newly added BTF type; * - <0, on error.
*/ int btf__add_type_tag(struct btf *btf, constchar *value, int ref_type_id)
{ if (!value || !value[0]) return libbpf_err(-EINVAL);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.51 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.