Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/Build             |    4
-rw-r--r--  tools/lib/bpf/Makefile          |   12
-rw-r--r--  tools/lib/bpf/bpf.c             |    1
-rw-r--r--  tools/lib/bpf/bpf.h             |    1
-rw-r--r--  tools/lib/bpf/btf.c             |  329
-rw-r--r--  tools/lib/bpf/btf.h             |   19
-rw-r--r--  tools/lib/bpf/btf_dump.c        | 1336
-rw-r--r--  tools/lib/bpf/hashmap.c         |  229
-rw-r--r--  tools/lib/bpf/hashmap.h         |  173
-rw-r--r--  tools/lib/bpf/libbpf.c          |  175
-rw-r--r--  tools/lib/bpf/libbpf.h          |    7
-rw-r--r--  tools/lib/bpf/libbpf.map        |    9
-rw-r--r--  tools/lib/bpf/libbpf_internal.h |    2
13 files changed, 2085 insertions(+), 212 deletions(-)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index ee9d5362f35b..e3962cfbc9a6 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1,3 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
+ netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
+ btf_dump.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index f91639bf5650..9312066a1ae3 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -3,7 +3,7 @@
BPF_VERSION = 0
BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 3
+BPF_EXTRAVERSION = 4
MAKEFLAGS += --no-print-directory
@@ -204,6 +204,16 @@ check_abi: $(OUTPUT)libbpf.so
"versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
"Please make sure all LIBBPF_API symbols are" \
"versioned in $(VERSION_SCRIPT)." >&2; \
+ readelf -s --wide $(OUTPUT)libbpf-in.o | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \
+ sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
+ readelf -s --wide $(OUTPUT)libbpf.so | \
+ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
+ sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
+ diff -u $(OUTPUT)libbpf_global_syms.tmp \
+ $(OUTPUT)libbpf_versioned_syms.tmp; \
+ rm $(OUTPUT)libbpf_global_syms.tmp \
+ $(OUTPUT)libbpf_versioned_syms.tmp; \
exit 1; \
fi
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c4a48086dc9a..0d4b4fe10a84 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -256,6 +256,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
if (load_attr->name)
memcpy(attr.prog_name, load_attr->name,
min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
+ attr.prog_flags = load_attr->prog_flags;
fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 9593fec75652..ff42ca043dc8 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -87,6 +87,7 @@ struct bpf_load_program_attr {
const void *line_info;
__u32 line_info_cnt;
__u32 log_level;
+ __u32 prog_flags;
};
/* Flags to direct loading requirements */
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 03348c4d6bd4..b2478e98c367 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -4,14 +4,17 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
+#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
+#include "hashmap.h"
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
@@ -417,6 +420,132 @@ done:
return btf;
}
+static bool btf_check_endianness(const GElf_Ehdr *ehdr)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+}
+
+struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
+{
+ Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
+ int err = 0, fd = -1, idx = 0;
+ struct btf *btf = NULL;
+ Elf_Scn *scn = NULL;
+ Elf *elf = NULL;
+ GElf_Ehdr ehdr;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warning("failed to init libelf for %s\n", path);
+ return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
+ }
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ err = -errno;
+ pr_warning("failed to open %s: %s\n", path, strerror(errno));
+ return ERR_PTR(err);
+ }
+
+ err = -LIBBPF_ERRNO__FORMAT;
+
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+ if (!elf) {
+ pr_warning("failed to open %s as ELF file\n", path);
+ goto done;
+ }
+ if (!gelf_getehdr(elf, &ehdr)) {
+ pr_warning("failed to get EHDR from %s\n", path);
+ goto done;
+ }
+ if (!btf_check_endianness(&ehdr)) {
+ pr_warning("non-native ELF endianness is not supported\n");
+ goto done;
+ }
+ if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
+ pr_warning("failed to get e_shstrndx from %s\n", path);
+ goto done;
+ }
+
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ GElf_Shdr sh;
+ char *name;
+
+ idx++;
+ if (gelf_getshdr(scn, &sh) != &sh) {
+ pr_warning("failed to get section(%d) header from %s\n",
+ idx, path);
+ goto done;
+ }
+ name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
+ if (!name) {
+ pr_warning("failed to get section(%d) name from %s\n",
+ idx, path);
+ goto done;
+ }
+ if (strcmp(name, BTF_ELF_SEC) == 0) {
+ btf_data = elf_getdata(scn, 0);
+ if (!btf_data) {
+ pr_warning("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
+ goto done;
+ }
+ continue;
+ } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
+ btf_ext_data = elf_getdata(scn, 0);
+ if (!btf_ext_data) {
+ pr_warning("failed to get section(%d, %s) data from %s\n",
+ idx, name, path);
+ goto done;
+ }
+ continue;
+ }
+ }
+
+ err = 0;
+
+ if (!btf_data) {
+ err = -ENOENT;
+ goto done;
+ }
+ btf = btf__new(btf_data->d_buf, btf_data->d_size);
+ if (IS_ERR(btf))
+ goto done;
+
+ if (btf_ext && btf_ext_data) {
+ *btf_ext = btf_ext__new(btf_ext_data->d_buf,
+ btf_ext_data->d_size);
+ if (IS_ERR(*btf_ext))
+ goto done;
+ } else if (btf_ext) {
+ *btf_ext = NULL;
+ }
+done:
+ if (elf)
+ elf_end(elf);
+ close(fd);
+
+ if (err)
+ return ERR_PTR(err);
+ /*
+ * btf is always parsed before btf_ext, so no need to clean up
+ * btf_ext, if btf loading failed
+ */
+ if (IS_ERR(btf))
+ return btf;
+ if (btf_ext && IS_ERR(*btf_ext)) {
+ btf__free(btf);
+ err = PTR_ERR(*btf_ext);
+ return ERR_PTR(err);
+ }
+ return btf;
+}
+
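As an aside, a minimal caller sketch for btf__parse_elf() (illustrative, not part of the patch; "prog.o" is a placeholder object path):

	/* errors are ERR_PTR-encoded, not NULL */
	struct btf_ext *btf_ext = NULL;
	struct btf *btf = btf__parse_elf("prog.o", &btf_ext);

	if (IS_ERR(btf))
		return PTR_ERR(btf);	/* e.g., -ENOENT if there is no .BTF section */
	/* *btf_ext is set to NULL if the .BTF.ext section is absent */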
static int compare_vsi_off(const void *_a, const void *_b)
{
const struct btf_var_secinfo *a = _a;
@@ -1165,16 +1294,9 @@ done:
return err;
}
-#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
-#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)
-struct btf_dedup_node {
- struct btf_dedup_node *next;
- __u32 type_id;
-};
-
struct btf_dedup {
/* .BTF section to be deduped in-place */
struct btf *btf;
@@ -1190,7 +1312,7 @@ struct btf_dedup {
* candidates, which is fine because we rely on subsequent
* btf_xxx_equal() checks to authoritatively verify type equality.
*/
- struct btf_dedup_node **dedup_table;
+ struct hashmap *dedup_table;
/* Canonical types map */
__u32 *map;
/* Hypothetical mapping, used during type graph equivalence checks */
@@ -1215,30 +1337,18 @@ struct btf_str_ptrs {
__u32 cap;
};
-static inline __u32 hash_combine(__u32 h, __u32 value)
+static long hash_combine(long h, long value)
{
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
- return h * 37 + value * GOLDEN_RATIO_PRIME;
-#undef GOLDEN_RATIO_PRIME
+ return h * 31 + value;
}
-#define for_each_dedup_cand(d, hash, node) \
- for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
- node; \
- node = node->next)
+#define for_each_dedup_cand(d, node, hash) \
+ hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
-static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
+static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
- struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
- int bucket = hash & (d->opts.dedup_table_size - 1);
-
- if (!node)
- return -ENOMEM;
- node->type_id = type_id;
- node->next = d->dedup_table[bucket];
- d->dedup_table[bucket] = node;
- return 0;
+ return hashmap__append(d->dedup_table,
+ (void *)hash, (void *)(long)type_id);
}
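Note that the hashmap is used as a multimap here: hashmap__append() keeps every candidate sharing a hash instead of overwriting. A short sketch (hypothetical ids, not from the patch):

	long h = btf_hash_common(t);
	struct hashmap_entry *cur;

	btf_dedup_table_add(d, h, id1);	/* appends, never overwrites */
	btf_dedup_table_add(d, h, id2);	/* both ids stay in the table */
	for_each_dedup_cand(d, cur, h)	/* visits both candidates */
		cand = d->btf->types[(__u32)(long)cur->value];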
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
@@ -1267,36 +1377,10 @@ static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
d->hypot_cnt = 0;
}
-static void btf_dedup_table_free(struct btf_dedup *d)
-{
- struct btf_dedup_node *head, *tmp;
- int i;
-
- if (!d->dedup_table)
- return;
-
- for (i = 0; i < d->opts.dedup_table_size; i++) {
- while (d->dedup_table[i]) {
- tmp = d->dedup_table[i];
- d->dedup_table[i] = tmp->next;
- free(tmp);
- }
-
- head = d->dedup_table[i];
- while (head) {
- tmp = head;
- head = head->next;
- free(tmp);
- }
- }
-
- free(d->dedup_table);
- d->dedup_table = NULL;
-}
-
static void btf_dedup_free(struct btf_dedup *d)
{
- btf_dedup_table_free(d);
+ hashmap__free(d->dedup_table);
+ d->dedup_table = NULL;
free(d->map);
d->map = NULL;
@@ -1310,40 +1394,43 @@ static void btf_dedup_free(struct btf_dedup *d)
free(d);
}
-/* Find closest power of two >= to size, capped at 2^max_size_log */
-static __u32 roundup_pow2_max(__u32 size, int max_size_log)
+static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
{
- int i;
+ return (size_t)key;
+}
- for (i = 0; i < max_size_log && (1U << i) < size; i++)
- ;
- return 1U << i;
+static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
+{
+ return 0;
}
+static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
+{
+ return k1 == k2;
+}
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts)
{
struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
+ hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
int i, err = 0;
- __u32 sz;
if (!d)
return ERR_PTR(-ENOMEM);
d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
- sz = opts && opts->dedup_table_size ? opts->dedup_table_size
- : BTF_DEDUP_TABLE_DEFAULT_SIZE;
- sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
- d->opts.dedup_table_size = sz;
+ /* dedup_table_size is now used only to force collisions in tests */
+ if (opts && opts->dedup_table_size == 1)
+ hash_fn = btf_dedup_collision_hash_fn;
d->btf = btf;
d->btf_ext = btf_ext;
- d->dedup_table = calloc(d->opts.dedup_table_size,
- sizeof(struct btf_dedup_node *));
- if (!d->dedup_table) {
- err = -ENOMEM;
+ d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
+ if (IS_ERR(d->dedup_table)) {
+ err = PTR_ERR(d->dedup_table);
+ d->dedup_table = NULL;
goto done;
}
@@ -1662,9 +1749,9 @@ done:
return err;
}
-static __u32 btf_hash_common(struct btf_type *t)
+static long btf_hash_common(struct btf_type *t)
{
- __u32 h;
+ long h;
h = hash_combine(0, t->name_off);
h = hash_combine(h, t->info);
@@ -1680,10 +1767,10 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
}
/* Calculate type signature hash of INT. */
-static __u32 btf_hash_int(struct btf_type *t)
+static long btf_hash_int(struct btf_type *t)
{
__u32 info = *(__u32 *)(t + 1);
- __u32 h;
+ long h;
h = btf_hash_common(t);
h = hash_combine(h, info);
@@ -1703,9 +1790,9 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
}
/* Calculate type signature hash of ENUM. */
-static __u32 btf_hash_enum(struct btf_type *t)
+static long btf_hash_enum(struct btf_type *t)
{
- __u32 h;
+ long h;
/* don't hash vlen and enum members to support enum fwd resolving */
h = hash_combine(0, t->name_off);
@@ -1757,11 +1844,11 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
* as referenced type IDs equivalence is established separately during type
* graph equivalence check algorithm.
*/
-static __u32 btf_hash_struct(struct btf_type *t)
+static long btf_hash_struct(struct btf_type *t)
{
struct btf_member *member = (struct btf_member *)(t + 1);
__u32 vlen = BTF_INFO_VLEN(t->info);
- __u32 h = btf_hash_common(t);
+ long h = btf_hash_common(t);
int i;
for (i = 0; i < vlen; i++) {
@@ -1804,10 +1891,10 @@ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
* under assumption that they were already resolved to canonical type IDs and
* are not going to change.
*/
-static __u32 btf_hash_array(struct btf_type *t)
+static long btf_hash_array(struct btf_type *t)
{
struct btf_array *info = (struct btf_array *)(t + 1);
- __u32 h = btf_hash_common(t);
+ long h = btf_hash_common(t);
h = hash_combine(h, info->type);
h = hash_combine(h, info->index_type);
@@ -1858,11 +1945,11 @@ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
* under assumption that they were already resolved to canonical type IDs and
* are not going to change.
*/
-static inline __u32 btf_hash_fnproto(struct btf_type *t)
+static long btf_hash_fnproto(struct btf_type *t)
{
struct btf_param *member = (struct btf_param *)(t + 1);
__u16 vlen = BTF_INFO_VLEN(t->info);
- __u32 h = btf_hash_common(t);
+ long h = btf_hash_common(t);
int i;
for (i = 0; i < vlen; i++) {
@@ -1880,7 +1967,7 @@ static inline __u32 btf_hash_fnproto(struct btf_type *t)
* This function is called during reference types deduplication to compare
* FUNC_PROTO to potential canonical representative.
*/
-static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
+static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
{
struct btf_param *m1, *m2;
__u16 vlen;
@@ -1906,7 +1993,7 @@ static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
* IDs. This check is performed during type graph equivalence check and
* referenced types equivalence is checked separately.
*/
-static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
+static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
{
struct btf_param *m1, *m2;
__u16 vlen;
@@ -1937,11 +2024,12 @@ static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
struct btf_type *t = d->btf->types[type_id];
+ struct hashmap_entry *hash_entry;
struct btf_type *cand;
- struct btf_dedup_node *cand_node;
/* if we don't find equivalent type, then we are canonical */
__u32 new_id = type_id;
- __u32 h;
+ __u32 cand_id;
+ long h;
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_CONST:
@@ -1960,10 +2048,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_INT:
h = btf_hash_int(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_int(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -1971,10 +2060,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_ENUM:
h = btf_hash_enum(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_enum(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
if (d->opts.dont_resolve_fwds)
@@ -1982,21 +2072,22 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
if (btf_compat_enum(t, cand)) {
if (btf_is_enum_fwd(t)) {
/* resolve fwd to full enum */
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
/* resolve canonical enum fwd to full enum */
- d->map[cand_node->type_id] = type_id;
+ d->map[cand_id] = type_id;
}
}
break;
case BTF_KIND_FWD:
h = btf_hash_common(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_common(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -2397,12 +2488,12 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
*/
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
- struct btf_dedup_node *cand_node;
struct btf_type *cand_type, *t;
+ struct hashmap_entry *hash_entry;
/* if we don't find equivalent type, then we are canonical */
__u32 new_id = type_id;
__u16 kind;
- __u32 h;
+ long h;
/* already deduped or is in process of deduping (loop detected) */
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
@@ -2415,7 +2506,8 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
return 0;
h = btf_hash_struct(t);
- for_each_dedup_cand(d, h, cand_node) {
+ for_each_dedup_cand(d, hash_entry, h) {
+ __u32 cand_id = (__u32)(long)hash_entry->value;
int eq;
/*
@@ -2428,17 +2520,17 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
* creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
* FWD and compatible STRUCT/UNION are considered equivalent.
*/
- cand_type = d->btf->types[cand_node->type_id];
+ cand_type = d->btf->types[cand_id];
if (!btf_shallow_equal_struct(t, cand_type))
continue;
btf_dedup_clear_hypot_map(d);
- eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
+ eq = btf_dedup_is_equiv(d, type_id, cand_id);
if (eq < 0)
return eq;
if (!eq)
continue;
- new_id = cand_node->type_id;
+ new_id = cand_id;
btf_dedup_merge_hypot_map(d);
break;
}
@@ -2488,12 +2580,12 @@ static int btf_dedup_struct_types(struct btf_dedup *d)
*/
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
- struct btf_dedup_node *cand_node;
+ struct hashmap_entry *hash_entry;
+ __u32 new_id = type_id, cand_id;
struct btf_type *t, *cand;
/* if we don't find equivalent type, then we are representative type */
- __u32 new_id = type_id;
int ref_type_id;
- __u32 h;
+ long h;
if (d->map[type_id] == BTF_IN_PROGRESS_ID)
return -ELOOP;
@@ -2516,10 +2608,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
t->type = ref_type_id;
h = btf_hash_common(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_common(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -2539,10 +2632,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
info->index_type = ref_type_id;
h = btf_hash_array(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_array(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -2570,10 +2664,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
}
h = btf_hash_fnproto(t);
- for_each_dedup_cand(d, h, cand_node) {
- cand = d->btf->types[cand_node->type_id];
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = d->btf->types[cand_id];
if (btf_equal_fnproto(t, cand)) {
- new_id = cand_node->type_id;
+ new_id = cand_id;
break;
}
}
@@ -2600,7 +2695,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
if (err < 0)
return err;
}
- btf_dedup_table_free(d);
+ /* we won't need d->dedup_table anymore */
+ hashmap__free(d->dedup_table);
+ d->dedup_table = NULL;
return 0;
}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index c7b399e81fce..ba4ffa831aa4 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -4,6 +4,7 @@
#ifndef __LIBBPF_BTF_H
#define __LIBBPF_BTF_H
+#include <stdarg.h>
#include <linux/types.h>
#ifdef __cplusplus
@@ -59,6 +60,8 @@ struct btf_ext_header {
LIBBPF_API void btf__free(struct btf *btf);
LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API struct btf *btf__parse_elf(const char *path,
+ struct btf_ext **btf_ext);
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
@@ -100,6 +103,22 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts);
+struct btf_dump;
+
+struct btf_dump_opts {
+ void *ctx;
+};
+
+typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
+
+LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const struct btf_dump_opts *opts,
+ btf_dump_printf_fn_t printf_fn);
+LIBBPF_API void btf_dump__free(struct btf_dump *d);
+
+LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
new file mode 100644
index 000000000000..4b22db77e2cc
--- /dev/null
+++ b/tools/lib/bpf/btf_dump.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * BTF-to-C type converter.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/btf.h>
+#include "btf.h"
+#include "hashmap.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+#define min(x, y) ((x) < (y) ? (x) : (y))
+#define max(x, y) ((x) < (y) ? (y) : (x))
+
+static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
+static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
+
+static const char *pfx(int lvl)
+{
+ return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl];
+}
+
+enum btf_dump_type_order_state {
+ NOT_ORDERED,
+ ORDERING,
+ ORDERED,
+};
+
+enum btf_dump_type_emit_state {
+ NOT_EMITTED,
+ EMITTING,
+ EMITTED,
+};
+
+/* per-type auxiliary state */
+struct btf_dump_type_aux_state {
+ /* topological sorting state */
+ enum btf_dump_type_order_state order_state: 2;
+ /* emitting state used to determine the need for forward declaration */
+ enum btf_dump_type_emit_state emit_state: 2;
+ /* whether forward declaration was already emitted */
+ __u8 fwd_emitted: 1;
+ /* whether unique non-duplicate name was already assigned */
+ __u8 name_resolved: 1;
+};
+
+struct btf_dump {
+ const struct btf *btf;
+ const struct btf_ext *btf_ext;
+ btf_dump_printf_fn_t printf_fn;
+ struct btf_dump_opts opts;
+
+ /* per-type auxiliary state */
+ struct btf_dump_type_aux_state *type_states;
+ /* per-type optional cached unique name, must be freed, if present */
+ const char **cached_names;
+
+ /* topo-sorted list of dependent type definitions */
+ __u32 *emit_queue;
+ int emit_queue_cap;
+ int emit_queue_cnt;
+
+ /*
+ * stack of type declarations (e.g., chain of modifiers, arrays,
+ * funcs, etc)
+ */
+ __u32 *decl_stack;
+ int decl_stack_cap;
+ int decl_stack_cnt;
+
+ /* maps struct/union/enum name to a number of name occurrences */
+ struct hashmap *type_names;
+ /*
+ * maps typedef identifiers and enum value names to a number of such
+ * name occurrences
+ */
+ struct hashmap *ident_names;
+};
+
+static size_t str_hash_fn(const void *key, void *ctx)
+{
+ const char *s = key;
+ size_t h = 0;
+
+ while (*s) {
+ h = h * 31 + *s;
+ s++;
+ }
+ return h;
+}
+
+static bool str_equal_fn(const void *a, const void *b, void *ctx)
+{
+ return strcmp(a, b) == 0;
+}
+
+static __u16 btf_kind_of(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static __u16 btf_vlen_of(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static bool btf_kflag_of(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
+{
+ return btf__name_by_offset(d->btf, name_off);
+}
+
+static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ d->printf_fn(d->opts.ctx, fmt, args);
+ va_end(args);
+}
+
+struct btf_dump *btf_dump__new(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const struct btf_dump_opts *opts,
+ btf_dump_printf_fn_t printf_fn)
+{
+ struct btf_dump *d;
+ int err;
+
+ d = calloc(1, sizeof(struct btf_dump));
+ if (!d)
+ return ERR_PTR(-ENOMEM);
+
+ d->btf = btf;
+ d->btf_ext = btf_ext;
+ d->printf_fn = printf_fn;
+ d->opts.ctx = opts ? opts->ctx : NULL;
+
+ d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+ if (IS_ERR(d->type_names)) {
+ err = PTR_ERR(d->type_names);
+ d->type_names = NULL;
+ btf_dump__free(d);
+ return ERR_PTR(err);
+ }
+ d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+ if (IS_ERR(d->ident_names)) {
+ err = PTR_ERR(d->ident_names);
+ d->ident_names = NULL;
+ btf_dump__free(d);
+ return ERR_PTR(err);
+ }
+
+ return d;
+}
+
+void btf_dump__free(struct btf_dump *d)
+{
+ int i, cnt;
+
+ if (!d)
+ return;
+
+ free(d->type_states);
+ if (d->cached_names) {
+ /* any set cached name is owned by us and should be freed */
+ for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) {
+ if (d->cached_names[i])
+ free((void *)d->cached_names[i]);
+ }
+ }
+ free(d->cached_names);
+ free(d->emit_queue);
+ free(d->decl_stack);
+ hashmap__free(d->type_names);
+ hashmap__free(d->ident_names);
+
+ free(d);
+}
+
+static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
+static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
+
+/*
+ * Dump BTF type in a compilable C syntax, including all the dependent types
+ * necessary for compilation. If some of the dependent types were already
+ * emitted as part of a previous btf_dump__dump_type() invocation for another
+ * type, they won't be emitted again. This API allows callers to filter out
+ * BTF types according to user-defined criteria and emit only the minimal
+ * subset of types necessary to compile everything. Full struct/union
+ * definitions will still be emitted, even if the only usage is through a
+ * pointer and could be satisfied with just a forward declaration.
+ *
+ * Dumping is done in two high-level passes:
+ * 1. Topologically sort type definitions to satisfy C rules of compilation.
+ * 2. Emit type definitions in C syntax.
+ *
+ * Returns 0 on success; <0, otherwise.
+ */
+int btf_dump__dump_type(struct btf_dump *d, __u32 id)
+{
+ int err, i;
+
+ if (id > btf__get_nr_types(d->btf))
+ return -EINVAL;
+
+ /* type states are lazily allocated, as they might not be needed */
+ if (!d->type_states) {
+ d->type_states = calloc(1 + btf__get_nr_types(d->btf),
+ sizeof(d->type_states[0]));
+ if (!d->type_states)
+ return -ENOMEM;
+ d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
+ sizeof(d->cached_names[0]));
+ if (!d->cached_names)
+ return -ENOMEM;
+
+ /* VOID is special */
+ d->type_states[0].order_state = ORDERED;
+ d->type_states[0].emit_state = EMITTED;
+ }
+
+ d->emit_queue_cnt = 0;
+ err = btf_dump_order_type(d, id, false);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < d->emit_queue_cnt; i++)
+ btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);
+
+ return 0;
+}
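As an aside, a minimal, self-contained usage sketch of this API (illustrative, not part of the patch; "prog.o" is a placeholder object path):

#include <stdarg.h>
#include <stdio.h>
#include <linux/err.h>
#include "btf.h"

static void print_cb(void *ctx, const char *fmt, va_list args)
{
	vfprintf(stdout, fmt, args);
}

int main(void)
{
	struct btf_ext *btf_ext = NULL;
	struct btf *btf;
	struct btf_dump *d;
	__u32 i, n;

	btf = btf__parse_elf("prog.o", &btf_ext);	/* placeholder path */
	if (IS_ERR(btf))
		return 1;

	d = btf_dump__new(btf, btf_ext, NULL, print_cb);
	if (IS_ERR(d))
		return 1;

	n = btf__get_nr_types(btf);
	for (i = 1; i <= n; i++)
		btf_dump__dump_type(d, i);	/* each type and its deps emitted once */

	btf_dump__free(d);
	btf__free(btf);
	return 0;
}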
+
+static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
+{
+ __u32 *new_queue;
+ size_t new_cap;
+
+ if (d->emit_queue_cnt >= d->emit_queue_cap) {
+ new_cap = max(16, d->emit_queue_cap * 3 / 2);
+ new_queue = realloc(d->emit_queue,
+ new_cap * sizeof(new_queue[0]));
+ if (!new_queue)
+ return -ENOMEM;
+ d->emit_queue = new_queue;
+ d->emit_queue_cap = new_cap;
+ }
+
+ d->emit_queue[d->emit_queue_cnt++] = id;
+ return 0;
+}
+
+/*
+ * Determine order of emitting dependent types and specified type to satisfy
+ * C compilation rules. This is done through topological sorting with an
+ * additional complication which comes from C rules. The main idea for C is
+ * that if some type is "embedded" into a struct/union, its size needs to be
+ * known at the time of definition of the containing type. E.g., for:
+ *
+ * struct A {};
+ * struct B { struct A x; }
+ *
+ * struct A *HAS* to be defined before struct B, because it's "embedded",
+ * i.e., it is part of struct B layout. But in the following case:
+ *
+ * struct A;
+ * struct B { struct A *x; }
+ * struct A {};
+ *
+ * it's enough to just have a forward declaration of struct A at the time of
+ * struct B definition, as struct B has a pointer to struct A, so the size of
+ * field x is known without knowing struct A size: it's sizeof(void *).
+ *
+ * Unfortunately, there are some trickier cases we need to handle, e.g.:
+ *
+ * struct A {}; // if this was forward-declaration: compilation error
+ * struct B {
+ * struct { // anonymous struct
+ * struct A y;
+ * } *x;
+ * };
+ *
+ * In this case, struct B's field x is a pointer, so its size is known
+ * regardless of the size of (anonymous) struct it points to. But because this
+ * struct is anonymous and thus defined inline inside struct B, *and* it
+ * embeds struct A, compiler requires full definition of struct A to be known
+ * before struct B can be defined. This creates a transitive dependency
+ * between struct A and struct B. If struct A was forward-declared before
+ * struct B definition and fully defined after struct B definition, that would
+ * trigger compilation error.
+ *
+ * All this means that while we are doing topological sorting on BTF type
+ * graph, we need to determine relationships between different types (graph
+ * nodes):
+ * - weak link (relationship) between X and Y, if Y *CAN* be
+ * forward-declared at the point of X definition;
+ * - strong link, if Y *HAS* to be fully-defined before X can be defined.
+ *
+ * The rule is as follows. Given a chain of BTF types from X to Y, if there is
+ * BTF_KIND_PTR type in the chain and at least one non-anonymous type
+ * Z (excluding X, including Y), then link is weak. Otherwise, it's strong.
+ * Weak/strong relationship is determined recursively during DFS traversal and
+ * is returned as a result from btf_dump_order_type().
+ *
+ * btf_dump_order_type() tries to avoid unnecessary forward declarations, but
+ * it does not guarantee that no extraneous forward declarations will be
+ * emitted.
+ *
+ * To avoid extra work, the algorithm marks some BTF types as ORDERED when it
+ * is done with them, but not all of them (e.g., VOLATILE, CONST, RESTRICT,
+ * ARRAY, FUNC_PROTO), as weak/strong semantics for those depend on the
+ * entire graph path, so the same BTF type may yield weak or strong ordering
+ * depending on how it was reached. For types like STRUCT/UNION/INT/ENUM,
+ * once they are processed, there is no need to do it again, so they are
+ * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces a
+ * weak link, unless the subsequently referenced STRUCT/UNION/ENUM is
+ * anonymous. But in any case, once those are processed, there is no need to
+ * do it again, as the result won't change.
+ *
+ * Returns:
+ * - 1, if type is part of strong link (so there is strong topological
+ * ordering requirements);
+ * - 0, if type is part of weak link (so can be satisfied through forward
+ * declaration);
+ * - <0, on error (e.g., unsatisfiable type loop detected).
+ */
+static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
+{
+ /*
+ * Order state is used to detect strong link cycles, but only for BTF
+ * kinds that are or could be an independent definition (i.e.,
+ * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
+ * func_protos, modifiers are just means to get to these definitions.
+ * Int/void don't need definitions, they are assumed to be always
+ * properly defined. We also ignore datasec, var, and funcs for now.
+ * So for all non-defining kinds, we never even set ordering state;
+ * for defining kinds we set ORDERING and subsequently ORDERED if it
+ * forms a strong link.
+ */
+ struct btf_dump_type_aux_state *tstate = &d->type_states[id];
+ const struct btf_type *t;
+ __u16 kind, vlen;
+ int err, i;
+
+ /* return true, letting typedefs know that it's ok to be emitted */
+ if (tstate->order_state == ORDERED)
+ return 1;
+
+ t = btf__type_by_id(d->btf, id);
+ kind = btf_kind_of(t);
+
+ if (tstate->order_state == ORDERING) {
+ /* type loop, but resolvable through fwd declaration */
+ if ((kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION) &&
+ through_ptr && t->name_off != 0)
+ return 0;
+ pr_warning("unsatisfiable type cycle, id:[%u]\n", id);
+ return -ELOOP;
+ }
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ tstate->order_state = ORDERED;
+ return 0;
+
+ case BTF_KIND_PTR:
+ err = btf_dump_order_type(d, t->type, true);
+ tstate->order_state = ORDERED;
+ return err;
+
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *a = (void *)(t + 1);
+
+ return btf_dump_order_type(d, a->type, through_ptr);
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = (void *)(t + 1);
+ /*
+ * struct/union is part of strong link, only if it's embedded
+ * (so no ptr in a path) or it's anonymous (so has to be
+ * defined inline, even if declared through ptr)
+ */
+ if (through_ptr && t->name_off != 0)
+ return 0;
+
+ tstate->order_state = ORDERING;
+
+ vlen = btf_vlen_of(t);
+ for (i = 0; i < vlen; i++, m++) {
+ err = btf_dump_order_type(d, m->type, false);
+ if (err < 0)
+ return err;
+ }
+
+ if (t->name_off != 0) {
+ err = btf_dump_add_emit_queue_id(d, id);
+ if (err < 0)
+ return err;
+ }
+
+ tstate->order_state = ORDERED;
+ return 1;
+ }
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ if (t->name_off != 0) {
+ err = btf_dump_add_emit_queue_id(d, id);
+ if (err)
+ return err;
+ }
+ tstate->order_state = ORDERED;
+ return 1;
+
+ case BTF_KIND_TYPEDEF: {
+ int is_strong;
+
+ is_strong = btf_dump_order_type(d, t->type, through_ptr);
+ if (is_strong < 0)
+ return is_strong;
+
+ /* typedef is similar to struct/union w.r.t. fwd-decls */
+ if (through_ptr && !is_strong)
+ return 0;
+
+ /* typedef is always a named definition */
+ err = btf_dump_add_emit_queue_id(d, id);
+ if (err)
+ return err;
+
+ d->type_states[id].order_state = ORDERED;
+ return 1;
+ }
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return btf_dump_order_type(d, t->type, through_ptr);
+
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = (void *)(t + 1);
+ bool is_strong;
+
+ err = btf_dump_order_type(d, t->type, through_ptr);
+ if (err < 0)
+ return err;
+ is_strong = err > 0;
+
+ vlen = btf_vlen_of(t);
+ for (i = 0; i < vlen; i++, p++) {
+ err = btf_dump_order_type(d, p->type, through_ptr);
+ if (err < 0)
+ return err;
+ if (err > 0)
+ is_strong = true;
+ }
+ return is_strong;
+ }
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DATASEC:
+ d->type_states[id].order_state = ORDERED;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl);
+
+static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl);
+
+static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+
+static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl);
+
+/* a local view into a shared stack */
+struct id_stack {
+ const __u32 *ids;
+ int cnt;
+};
+
+static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
+ const char *fname, int lvl);
+static void btf_dump_emit_type_chain(struct btf_dump *d,
+ struct id_stack *decl_stack,
+ const char *fname, int lvl);
+
+static const char *btf_dump_type_name(struct btf_dump *d, __u32 id);
+static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id);
+static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
+ const char *orig_name);
+
+static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, id);
+
+ /* __builtin_va_list is a compiler built-in, which causes compilation
+ * errors when compiling with a different compiler than the one used
+ * to compile the original code (e.g., GCC to compile the kernel,
+ * Clang to consume the generated C header from BTF). As it is
+ * built-in, it should already be defined properly inside the
+ * compiler.
+ */
+ if (t->name_off == 0)
+ return false;
+ return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0;
+}
+
+/*
+ * Emit C-syntax definitions of types from chains of BTF types.
+ *
+ * High-level determination of the necessary forward declarations is handled
+ * by btf_dump_emit_type() itself, but all nitty-gritty details of emitting type
+ * declarations/definitions in C syntax are handled by a combo of
+ * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to
+ * corresponding btf_dump_emit_*_{def,fwd}() functions.
+ *
+ * We also keep track of "containing struct/union type ID" to determine when
+ * we reference it from inside and thus can avoid emitting unnecessary forward
+ * declaration.
+ *
+ * This algorithm is designed in such a way that even if some error occurs
+ * (either technical, e.g., out of memory, or logical, i.e., malformed BTF
+ * that doesn't comply with C rules completely), the algorithm will try to
+ * proceed and produce as much meaningful output as possible.
+ */
+static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
+{
+ struct btf_dump_type_aux_state *tstate = &d->type_states[id];
+ bool top_level_def = cont_id == 0;
+ const struct btf_type *t;
+ __u16 kind;
+
+ if (tstate->emit_state == EMITTED)
+ return;
+
+ t = btf__type_by_id(d->btf, id);
+ kind = btf_kind_of(t);
+
+ if (top_level_def && t->name_off == 0) {
+ pr_warning("unexpected nameless definition, id:[%u]\n", id);
+ return;
+ }
+
+ if (tstate->emit_state == EMITTING) {
+ if (tstate->fwd_emitted)
+ return;
+
+ switch (kind) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ /*
+ * if we are referencing a struct/union that we are
+ * part of - then no need for fwd declaration
+ */
+ if (id == cont_id)
+ return;
+ if (t->name_off == 0) {
+ pr_warning("anonymous struct/union loop, id:[%u]\n",
+ id);
+ return;
+ }
+ btf_dump_emit_struct_fwd(d, id, t);
+ btf_dump_printf(d, ";\n\n");
+ tstate->fwd_emitted = 1;
+ break;
+ case BTF_KIND_TYPEDEF:
+ /*
+ * for typedef, fwd_emitted means the typedef definition
+ * was emitted, but it can be used only for "weak"
+ * references through pointers, not for embedding
+ */
+ if (!btf_dump_is_blacklisted(d, id)) {
+ btf_dump_emit_typedef_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ }
+ tstate->fwd_emitted = 1;
+ break;
+ default:
+ break;
+ }
+
+ return;
+ }
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_ENUM:
+ if (top_level_def) {
+ btf_dump_emit_enum_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ }
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ btf_dump_emit_type(d, t->type, cont_id);
+ break;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *a = (void *)(t + 1);
+
+ btf_dump_emit_type(d, a->type, cont_id);
+ break;
+ }
+ case BTF_KIND_FWD:
+ btf_dump_emit_fwd_def(d, id, t);
+ btf_dump_printf(d, ";\n\n");
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_TYPEDEF:
+ tstate->emit_state = EMITTING;
+ btf_dump_emit_type(d, t->type, id);
+ /*
+ * typedef can serve as both a definition and a forward
+ * declaration; at this stage someone depends on
+ * typedef as a forward declaration (refers to it
+ * through pointer), so unless we already did it,
+ * emit typedef as a forward declaration
+ */
+ if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) {
+ btf_dump_emit_typedef_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ }
+ tstate->emit_state = EMITTED;
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ tstate->emit_state = EMITTING;
+ /* if it's a top-level struct/union definition or struct/union
+ * is anonymous, then in C we'll be emitting all fields and
+ * their types (as opposed to just `struct X`), so we need to
+ * make sure that all types, referenced from struct/union
+ * members have necessary forward-declarations, where
+ * applicable
+ */
+ if (top_level_def || t->name_off == 0) {
+ const struct btf_member *m = (void *)(t + 1);
+ __u16 vlen = btf_vlen_of(t);
+ int i, new_cont_id;
+
+ new_cont_id = t->name_off == 0 ? cont_id : id;
+ for (i = 0; i < vlen; i++, m++)
+ btf_dump_emit_type(d, m->type, new_cont_id);
+ } else if (!tstate->fwd_emitted && id != cont_id) {
+ btf_dump_emit_struct_fwd(d, id, t);
+ btf_dump_printf(d, ";\n\n");
+ tstate->fwd_emitted = 1;
+ }
+
+ if (top_level_def) {
+ btf_dump_emit_struct_def(d, id, t, 0);
+ btf_dump_printf(d, ";\n\n");
+ tstate->emit_state = EMITTED;
+ } else {
+ tstate->emit_state = NOT_EMITTED;
+ }
+ break;
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = (void *)(t + 1);
+ __u16 vlen = btf_vlen_of(t);
+ int i;
+
+ btf_dump_emit_type(d, t->type, cont_id);
+ for (i = 0; i < vlen; i++, p++)
+ btf_dump_emit_type(d, p->type, cont_id);
+
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int btf_align_of(const struct btf *btf, __u32 id)
+{
+ const struct btf_type *t = btf__type_by_id(btf, id);
+ __u16 kind = btf_kind_of(t);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ case BTF_KIND_ENUM:
+ return min(sizeof(void *), t->size);
+ case BTF_KIND_PTR:
+ return sizeof(void *);
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return btf_align_of(btf, t->type);
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *a = (void *)(t + 1);
+
+ return btf_align_of(btf, a->type);
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = (void *)(t + 1);
+ __u16 vlen = btf_vlen_of(t);
+ int i, align = 1;
+
+ for (i = 0; i < vlen; i++, m++)
+ align = max(align, btf_align_of(btf, m->type));
+
+ return align;
+ }
+ default:
+ pr_warning("unsupported BTF_KIND:%u\n", btf_kind_of(t));
+ return 1;
+ }
+}
+
+static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ const struct btf_type *t)
+{
+ const struct btf_member *m;
+ int align, i, bit_sz;
+ __u16 vlen;
+ bool kflag;
+
+ align = btf_align_of(btf, id);
+ /* size of a non-packed struct has to be a multiple of its alignment */
+ if (t->size % align)
+ return true;
+
+ m = (void *)(t + 1);
+ kflag = btf_kflag_of(t);
+ vlen = btf_vlen_of(t);
+ /* all non-bitfield fields have to be naturally aligned */
+ for (i = 0; i < vlen; i++, m++) {
+ align = btf_align_of(btf, m->type);
+ bit_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
+ if (bit_sz == 0 && m->offset % (8 * align) != 0)
+ return true;
+ }
+
+ /*
+ * if the original struct was marked as packed, but its layout is
+ * naturally aligned, we'll detect that it's not packed
+ */
+ return false;
+}
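For illustration (hypothetical struct, not from the patch), this heuristic flags the following layout as packed:

	struct s {
		char c;	/* bit offset 0 */
		int x;	/* bit offset 8: 8 % (8 * 4) != 0, not naturally aligned */
	} __attribute__((packed));	/* sizeof == 5, not a multiple of align 4 */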
+
+static int chip_away_bits(int total, int at_most)
+{
+ return total % at_most ? : at_most;
+}
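For example, a 100-bit gap on a 64-bit target is chipped away in two steps:

	chip_away_bits(100, 64);	/* -> 36, emitted as "long: 36;" */
	chip_away_bits(64, 64);		/* -> 64, emitted as "long: 64;" */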
+
+static void btf_dump_emit_bit_padding(const struct btf_dump *d,
+ int cur_off, int m_off, int m_bit_sz,
+ int align, int lvl)
+{
+ int off_diff = m_off - cur_off;
+ int ptr_bits = sizeof(void *) * 8;
+
+ if (off_diff <= 0)
+ /* no gap */
+ return;
+ if (m_bit_sz == 0 && off_diff < align * 8)
+ /* natural padding will take care of a gap */
+ return;
+
+ while (off_diff > 0) {
+ const char *pad_type;
+ int pad_bits;
+
+ if (ptr_bits > 32 && off_diff > 32) {
+ pad_type = "long";
+ pad_bits = chip_away_bits(off_diff, ptr_bits);
+ } else if (off_diff > 16) {
+ pad_type = "int";
+ pad_bits = chip_away_bits(off_diff, 32);
+ } else if (off_diff > 8) {
+ pad_type = "short";
+ pad_bits = chip_away_bits(off_diff, 16);
+ } else {
+ pad_type = "char";
+ pad_bits = chip_away_bits(off_diff, 8);
+ }
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
+ off_diff -= pad_bits;
+ }
+}
+
+static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ btf_dump_printf(d, "%s %s",
+ btf_kind_of(t) == BTF_KIND_STRUCT ? "struct" : "union",
+ btf_dump_type_name(d, id));
+}
+
+static void btf_dump_emit_struct_def(struct btf_dump *d,
+ __u32 id,
+ const struct btf_type *t,
+ int lvl)
+{
+ const struct btf_member *m = (void *)(t + 1);
+ bool kflag = btf_kflag_of(t), is_struct;
+ int align, i, packed, off = 0;
+ __u16 vlen = btf_vlen_of(t);
+
+ is_struct = btf_kind_of(t) == BTF_KIND_STRUCT;
+ packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
+ align = packed ? 1 : btf_align_of(d->btf, id);
+
+ btf_dump_printf(d, "%s%s%s {",
+ is_struct ? "struct" : "union",
+ t->name_off ? " " : "",
+ btf_dump_type_name(d, id));
+
+ for (i = 0; i < vlen; i++, m++) {
+ const char *fname;
+ int m_off, m_sz;
+
+ fname = btf_name_of(d, m->name_off);
+ m_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
+ m_off = kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset;
+ align = packed ? 1 : btf_align_of(d->btf, m->type);
+
+ btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
+ btf_dump_printf(d, "\n%s", pfx(lvl + 1));
+ btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
+
+ if (m_sz) {
+ btf_dump_printf(d, ": %d", m_sz);
+ off = m_off + m_sz;
+ } else {
+ m_sz = max(0, btf__resolve_size(d->btf, m->type));
+ off = m_off + m_sz * 8;
+ }
+ btf_dump_printf(d, ";");
+ }
+
+ if (vlen)
+ btf_dump_printf(d, "\n");
+ btf_dump_printf(d, "%s}", pfx(lvl));
+ if (packed)
+ btf_dump_printf(d, " __attribute__((packed))");
+}
+
+static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
+}
+
+static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t,
+ int lvl)
+{
+ const struct btf_enum *v = (void *)(t + 1);
+ __u16 vlen = btf_vlen_of(t);
+ const char *name;
+ size_t dup_cnt;
+ int i;
+
+ btf_dump_printf(d, "enum%s%s",
+ t->name_off ? " " : "",
+ btf_dump_type_name(d, id));
+
+ if (vlen) {
+ btf_dump_printf(d, " {");
+ for (i = 0; i < vlen; i++, v++) {
+ name = btf_name_of(d, v->name_off);
+ /* enumerators share namespace with typedef idents */
+ dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
+ if (dup_cnt > 1) {
+ btf_dump_printf(d, "\n%s%s___%zu = %d,",
+ pfx(lvl + 1), name, dup_cnt,
+ (__s32)v->val);
+ } else {
+ btf_dump_printf(d, "\n%s%s = %d,",
+ pfx(lvl + 1), name,
+ (__s32)v->val);
+ }
+ }
+ btf_dump_printf(d, "\n%s}", pfx(lvl));
+ }
+}
+
+static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ const char *name = btf_dump_type_name(d, id);
+
+ if (btf_kflag_of(t))
+ btf_dump_printf(d, "union %s", name);
+ else
+ btf_dump_printf(d, "struct %s", name);
+}
+
+static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t, int lvl)
+{
+ const char *name = btf_dump_ident_name(d, id);
+
+ btf_dump_printf(d, "typedef ");
+ btf_dump_emit_type_decl(d, t->type, name, lvl);
+}
+
+static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
+{
+ __u32 *new_stack;
+ size_t new_cap;
+
+ if (d->decl_stack_cnt >= d->decl_stack_cap) {
+ new_cap = max(16, d->decl_stack_cap * 3 / 2);
+ new_stack = realloc(d->decl_stack,
+ new_cap * sizeof(new_stack[0]));
+ if (!new_stack)
+ return -ENOMEM;
+ d->decl_stack = new_stack;
+ d->decl_stack_cap = new_cap;
+ }
+
+ d->decl_stack[d->decl_stack_cnt++] = id;
+
+ return 0;
+}
+
+/*
+ * Emit type declaration (e.g., field type declaration in a struct or argument
+ * declaration in function prototype) in correct C syntax.
+ *
+ * For most types it's trivial, but there are few quirky type declaration
+ * cases worth mentioning:
+ * - function prototypes (especially nesting of function prototypes);
+ * - arrays;
+ * - const/volatile/restrict for pointers vs other types.
+ *
+ * For a good discussion of *PARSING* C syntax (as a human), see
+ * Peter van der Linden's "Expert C Programming: Deep C Secrets",
+ * Ch.3 "Unscrambling Declarations in C".
+ *
+ * It won't help much with BTF-to-C conversion, though, as that is the
+ * opposite problem. So this algorithm works in reverse of van der Linden's
+ * parsing algorithm. It goes from a structured BTF representation of a type
+ * declaration to valid, compilable C syntax.
+ *
+ * For instance, consider this C typedef:
+ *	typedef const int * const *arr_t[10];
+ * It will be represented in BTF with this chain of BTF types:
+ *	[typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
+ *
+ * Notice how the [const] modifier always precedes the type it modifies in the
+ * BTF type graph, but in C syntax, const/volatile/restrict modifiers are
+ * written to the right of pointers and to the left of other types. There are
+ * also other quirks, like function pointers, arrays of them, functions
+ * returning other functions, etc.
+ *
+ * We handle that by pushing all the types to a stack, until we hit a
+ * "terminal" type (int/enum/struct/union/fwd). Then, depending on the kind of
+ * the type on top of the stack, modifiers are handled differently. Arrays and
+ * function pointers also have wildly different syntax, as does the way their
+ * nesting is expressed. See the code for the authoritative definition.
+ *
+ * To avoid allocating new stack for each independent chain of BTF types, we
+ * share one bigger stack, with each chain working only on its own local view
+ * of a stack frame. Some care is required to "pop" stack frames after
+ * processing type declaration chain.
+ */
+static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
+ const char *fname, int lvl)
+{
+ struct id_stack decl_stack;
+ const struct btf_type *t;
+ int err, stack_start;
+ __u16 kind;
+
+ stack_start = d->decl_stack_cnt;
+ for (;;) {
+ err = btf_dump_push_decl_stack_id(d, id);
+ if (err < 0) {
+ /*
+ * if we don't have enough memory for entire type decl
+ * chain, restore stack, emit warning, and try to
+ * proceed nevertheless
+ */
+ pr_warning("not enough memory for decl stack:%d", err);
+ d->decl_stack_cnt = stack_start;
+ return;
+ }
+
+ /* VOID */
+ if (id == 0)
+ break;
+
+ t = btf__type_by_id(d->btf, id);
+ kind = btf_kind_of(t);
+ switch (kind) {
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_FUNC_PROTO:
+ id = t->type;
+ break;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *a = (void *)(t + 1);
+
+ id = a->type;
+ break;
+ }
+ case BTF_KIND_INT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_TYPEDEF:
+ goto done;
+ default:
+ pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
+ kind, id);
+ goto done;
+ }
+ }
+done:
+ /*
+ * We might be inside a chain of declarations (e.g., array of function
+ * pointers returning anonymous (so inlined) structs, having another
+ * array field). Each of those needs its own "stack frame" to handle
+ * emitting of declarations. Those stack frames are non-overlapping
+ * portions of shared btf_dump->decl_stack. To make it a bit nicer to
+ * handle this set of nested stacks, we create a view corresponding to
+ * our own "stack frame" and work with it as an independent stack.
+ * We'll need to clean up after emit_type_chain() returns, though.
+ */
+ decl_stack.ids = d->decl_stack + stack_start;
+ decl_stack.cnt = d->decl_stack_cnt - stack_start;
+ btf_dump_emit_type_chain(d, &decl_stack, fname, lvl);
+ /*
+ * emit_type_chain() guarantees that it will pop its entire decl_stack
+ * frame before returning. But it works with a read-only view into
+ * decl_stack, so it doesn't actually pop anything from the
+ * perspective of shared btf_dump->decl_stack, per se. We need to
+ * reset decl_stack state to how it was before us to avoid it growing
+ * all the time.
+ */
+ d->decl_stack_cnt = stack_start;
+}
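To make the stack mechanics concrete, here is a trace (illustrative, not from the patch) for a member declared as const char *name:

	pushed chain:	[ptr] -> [const] -> [char]
	pop [char]:	emit_mods() consumes [const], prints "const ", then "char"
	pop [ptr]:	prints " *"
	stack empty:	emit_name() appends "name" with no space (last was ptr)
	output:		"const char *name"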
+
+static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
+{
+ const struct btf_type *t;
+ __u32 id;
+
+ while (decl_stack->cnt) {
+ id = decl_stack->ids[decl_stack->cnt - 1];
+ t = btf__type_by_id(d->btf, id);
+
+ switch (btf_kind_of(t)) {
+ case BTF_KIND_VOLATILE:
+ btf_dump_printf(d, "volatile ");
+ break;
+ case BTF_KIND_CONST:
+ btf_dump_printf(d, "const ");
+ break;
+ case BTF_KIND_RESTRICT:
+ btf_dump_printf(d, "restrict ");
+ break;
+ default:
+ return;
+ }
+ decl_stack->cnt--;
+ }
+}
+
+static bool btf_is_mod_kind(const struct btf *btf, __u32 id)
+{
+ const struct btf_type *t = btf__type_by_id(btf, id);
+
+ switch (btf_kind_of(t)) {
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void btf_dump_emit_name(const struct btf_dump *d,
+ const char *name, bool last_was_ptr)
+{
+ bool separate = name[0] && !last_was_ptr;
+
+ btf_dump_printf(d, "%s%s", separate ? " " : "", name);
+}
+
+static void btf_dump_emit_type_chain(struct btf_dump *d,
+ struct id_stack *decls,
+ const char *fname, int lvl)
+{
+ /*
+ * last_was_ptr is used to determine if we need to separate pointer
+ * asterisk (*) from previous part of type signature with space, so
+ * that we get `int ***`, instead of `int * * *`. We default to true
+ * for cases where we have single pointer in a chain. E.g., in ptr ->
+ * func_proto case. func_proto will start a new emit_type_chain call
+ * with just ptr, which should be emitted as (*) or (*<fname>), so we
+ * don't want to prepend space for that last pointer.
+ */
+ bool last_was_ptr = true;
+ const struct btf_type *t;
+ const char *name;
+ __u16 kind;
+ __u32 id;
+
+ while (decls->cnt) {
+ id = decls->ids[--decls->cnt];
+ if (id == 0) {
+ /* VOID is a special snowflake */
+ btf_dump_emit_mods(d, decls);
+ btf_dump_printf(d, "void");
+ last_was_ptr = false;
+ continue;
+ }
+
+ t = btf__type_by_id(d->btf, id);
+ kind = btf_kind_of(t);
+
+ switch (kind) {
+ case BTF_KIND_INT:
+ btf_dump_emit_mods(d, decls);
+ name = btf_name_of(d, t->name_off);
+ btf_dump_printf(d, "%s", name);
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ btf_dump_emit_mods(d, decls);
+ /* inline anonymous struct/union */
+ if (t->name_off == 0)
+ btf_dump_emit_struct_def(d, id, t, lvl);
+ else
+ btf_dump_emit_struct_fwd(d, id, t);
+ break;
+ case BTF_KIND_ENUM:
+ btf_dump_emit_mods(d, decls);
+ /* inline anonymous enum */
+ if (t->name_off == 0)
+ btf_dump_emit_enum_def(d, id, t, lvl);
+ else
+ btf_dump_emit_enum_fwd(d, id, t);
+ break;
+ case BTF_KIND_FWD:
+ btf_dump_emit_mods(d, decls);
+ btf_dump_emit_fwd_def(d, id, t);
+ break;
+ case BTF_KIND_TYPEDEF:
+ btf_dump_emit_mods(d, decls);
+ btf_dump_printf(d, "%s", btf_dump_ident_name(d, id));
+ break;
+ case BTF_KIND_PTR:
+ btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *");
+ break;
+ case BTF_KIND_VOLATILE:
+ btf_dump_printf(d, " volatile");
+ break;
+ case BTF_KIND_CONST:
+ btf_dump_printf(d, " const");
+ break;
+ case BTF_KIND_RESTRICT:
+ btf_dump_printf(d, " restrict");
+ break;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *a = (void *)(t + 1);
+ const struct btf_type *next_t;
+ __u32 next_id;
+ bool multidim;
+ /*
+ * GCC has a bug
+ * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
+ * which causes it to emit extra const/volatile
+ * modifiers for an array, if the array's element type has
+ * const/volatile modifiers. Clang doesn't do that.
+ * In general, it doesn't seem very meaningful to have
+ * a const/volatile modifier for an array, so we are
+ * going to silently skip them here.
+ */
+ while (decls->cnt) {
+ next_id = decls->ids[decls->cnt - 1];
+ if (btf_is_mod_kind(d->btf, next_id))
+ decls->cnt--;
+ else
+ break;
+ }
+
+ if (decls->cnt == 0) {
+ btf_dump_emit_name(d, fname, last_was_ptr);
+ btf_dump_printf(d, "[%u]", a->nelems);
+ return;
+ }
+
+ next_t = btf__type_by_id(d->btf, next_id);
+ multidim = btf_kind_of(next_t) == BTF_KIND_ARRAY;
+ /* we need a space if we have a named non-pointer */
+ if (fname[0] && !last_was_ptr)
+ btf_dump_printf(d, " ");
+ /* no parentheses for multi-dimensional array */
+ if (!multidim)
+ btf_dump_printf(d, "(");
+ btf_dump_emit_type_chain(d, decls, fname, lvl);
+ if (!multidim)
+ btf_dump_printf(d, ")");
+ btf_dump_printf(d, "[%u]", a->nelems);
+ return;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = (void *)(t + 1);
+ __u16 vlen = btf_vlen_of(t);
+ int i;
+
+ btf_dump_emit_mods(d, decls);
+ if (decls->cnt) {
+ btf_dump_printf(d, " (");
+ btf_dump_emit_type_chain(d, decls, fname, lvl);
+ btf_dump_printf(d, ")");
+ } else {
+ btf_dump_emit_name(d, fname, last_was_ptr);
+ }
+ btf_dump_printf(d, "(");
+ /*
+ * Clang for BPF target generates func_proto with no
+ * args as a func_proto with a single void arg (e.g.,
+ * `int (*f)(void)` vs just `int (*f)()`). We are
+ * going to pretend there are no args for such cases.
+ */
+ if (vlen == 1 && p->type == 0) {
+ btf_dump_printf(d, ")");
+ return;
+ }
+
+ for (i = 0; i < vlen; i++, p++) {
+ if (i > 0)
+ btf_dump_printf(d, ", ");
+
+ /* last arg of type void is vararg */
+ if (i == vlen - 1 && p->type == 0) {
+ btf_dump_printf(d, "...");
+ break;
+ }
+
+ name = btf_name_of(d, p->name_off);
+ btf_dump_emit_type_decl(d, p->type, name, lvl);
+ }
+
+ btf_dump_printf(d, ")");
+ return;
+ }
+ default:
+ pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
+ kind, id);
+ return;
+ }
+
+ last_was_ptr = kind == BTF_KIND_PTR;
+ }
+
+ btf_dump_emit_name(d, fname, last_was_ptr);
+}
+
+/* return number of duplicates (occurrences) of a given name */
+static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
+ const char *orig_name)
+{
+ size_t dup_cnt = 0;
+
+ hashmap__find(name_map, orig_name, (void **)&dup_cnt);
+ dup_cnt++;
+ hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);
+
+ return dup_cnt;
+}
+
+static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
+ struct hashmap *name_map)
+{
+ struct btf_dump_type_aux_state *s = &d->type_states[id];
+ const struct btf_type *t = btf__type_by_id(d->btf, id);
+ const char *orig_name = btf_name_of(d, t->name_off);
+ const char **cached_name = &d->cached_names[id];
+ size_t dup_cnt;
+
+ if (t->name_off == 0)
+ return "";
+
+ if (s->name_resolved)
+ return *cached_name ? *cached_name : orig_name;
+
+ dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
+ if (dup_cnt > 1) {
+ const size_t max_len = 256;
+ char new_name[max_len];
+
+ snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt);
+ *cached_name = strdup(new_name);
+ }
+
+ s->name_resolved = 1;
+ return *cached_name ? *cached_name : orig_name;
+}
+
+static const char *btf_dump_type_name(struct btf_dump *d, __u32 id)
+{
+ return btf_dump_resolve_name(d, id, d->type_names);
+}
+
+static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
+{
+ return btf_dump_resolve_name(d, id, d->ident_names);
+}
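
To make the name-resolution scheme above concrete: btf_dump_name_dups() keeps a per-name occurrence count directly in the hashmap's value pointer, and btf_dump_resolve_name() renames every occurrence after the first with a "___<N>" suffix. Below is a minimal standalone sketch of that counting idiom against the new hashmap API from this patch; the str_hash/str_equal callbacks are illustrative stand-ins, not part of the library, and error checks are omitted.

#include <stdio.h>
#include <string.h>
#include "hashmap.h"

static size_t str_hash(const void *key, void *ctx)
{
	const char *s = key;
	size_t h = 0;

	while (*s)
		h = h * 31 + *s++;
	return h;
}

static bool str_equal(const void *k1, const void *k2, void *ctx)
{
	return strcmp(k1, k2) == 0;
}

int main(void)
{
	/* NB: hashmap__new() returns ERR_PTR() on failure; check omitted */
	struct hashmap *names = hashmap__new(str_hash, str_equal, NULL);
	const char *types[] = { "foo", "bar", "foo", "foo" };
	size_t cnt, i;

	for (i = 0; i < 4; i++) {
		/* same value-as-counter trick as btf_dump_name_dups() */
		cnt = 0;
		hashmap__find(names, types[i], (void **)&cnt);
		cnt++;
		hashmap__set(names, types[i], (void *)cnt, NULL, NULL);
		if (cnt > 1)
			printf("%s -> %s___%zu\n", types[i], types[i], cnt);
		else
			printf("%s -> %s\n", types[i], types[i]);
	}
	/* prints: foo -> foo, bar -> bar, foo -> foo___2, foo -> foo___3 */
	hashmap__free(names);
	return 0;
}
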
diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
new file mode 100644
index 000000000000..6122272943e6
--- /dev/null
+++ b/tools/lib/bpf/hashmap.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * Generic non-thread-safe hash map implementation.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <linux/err.h>
+#include "hashmap.h"
+
+/* start with 4 buckets */
+#define HASHMAP_MIN_CAP_BITS 2
+
+static void hashmap_add_entry(struct hashmap_entry **pprev,
+ struct hashmap_entry *entry)
+{
+ entry->next = *pprev;
+ *pprev = entry;
+}
+
+static void hashmap_del_entry(struct hashmap_entry **pprev,
+ struct hashmap_entry *entry)
+{
+ *pprev = entry->next;
+ entry->next = NULL;
+}
+
+void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn, void *ctx)
+{
+ map->hash_fn = hash_fn;
+ map->equal_fn = equal_fn;
+ map->ctx = ctx;
+
+ map->buckets = NULL;
+ map->cap = 0;
+ map->cap_bits = 0;
+ map->sz = 0;
+}
+
+struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn,
+ void *ctx)
+{
+ struct hashmap *map = malloc(sizeof(struct hashmap));
+
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+ hashmap__init(map, hash_fn, equal_fn, ctx);
+ return map;
+}
+
+void hashmap__clear(struct hashmap *map)
+{
+ free(map->buckets);
+ map->cap = map->cap_bits = map->sz = 0;
+}
+
+void hashmap__free(struct hashmap *map)
+{
+ if (!map)
+ return;
+
+ hashmap__clear(map);
+ free(map);
+}
+
+size_t hashmap__size(const struct hashmap *map)
+{
+ return map->sz;
+}
+
+size_t hashmap__capacity(const struct hashmap *map)
+{
+ return map->cap;
+}
+
+static bool hashmap_needs_to_grow(struct hashmap *map)
+{
+ /* grow if empty or more than 75% filled */
+ return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
+}
+
+static int hashmap_grow(struct hashmap *map)
+{
+ struct hashmap_entry **new_buckets;
+ struct hashmap_entry *cur, *tmp;
+ size_t new_cap_bits, new_cap;
+ size_t h;
+ int bkt;
+
+ new_cap_bits = map->cap_bits + 1;
+ if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
+ new_cap_bits = HASHMAP_MIN_CAP_BITS;
+
+ new_cap = 1UL << new_cap_bits;
+ new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
+ if (!new_buckets)
+ return -ENOMEM;
+
+ hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
+ h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
+ hashmap_add_entry(&new_buckets[h], cur);
+ }
+
+ map->cap = new_cap;
+ map->cap_bits = new_cap_bits;
+ free(map->buckets);
+ map->buckets = new_buckets;
+
+ return 0;
+}
+
+static bool hashmap_find_entry(const struct hashmap *map,
+ const void *key, size_t hash,
+ struct hashmap_entry ***pprev,
+ struct hashmap_entry **entry)
+{
+ struct hashmap_entry *cur, **prev_ptr;
+
+ if (!map->buckets)
+ return false;
+
+ for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
+ cur;
+ prev_ptr = &cur->next, cur = cur->next) {
+ if (map->equal_fn(cur->key, key, map->ctx)) {
+ if (pprev)
+ *pprev = prev_ptr;
+ *entry = cur;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int hashmap__insert(struct hashmap *map, const void *key, void *value,
+ enum hashmap_insert_strategy strategy,
+ const void **old_key, void **old_value)
+{
+ struct hashmap_entry *entry;
+ size_t h;
+ int err;
+
+ if (old_key)
+ *old_key = NULL;
+ if (old_value)
+ *old_value = NULL;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (strategy != HASHMAP_APPEND &&
+ hashmap_find_entry(map, key, h, NULL, &entry)) {
+ if (old_key)
+ *old_key = entry->key;
+ if (old_value)
+ *old_value = entry->value;
+
+ if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
+ entry->key = key;
+ entry->value = value;
+ return 0;
+ } else if (strategy == HASHMAP_ADD) {
+ return -EEXIST;
+ }
+ }
+
+ if (strategy == HASHMAP_UPDATE)
+ return -ENOENT;
+
+ if (hashmap_needs_to_grow(map)) {
+ err = hashmap_grow(map);
+ if (err)
+ return err;
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ }
+
+ entry = malloc(sizeof(struct hashmap_entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key = key;
+ entry->value = value;
+ hashmap_add_entry(&map->buckets[h], entry);
+ map->sz++;
+
+ return 0;
+}
+
+bool hashmap__find(const struct hashmap *map, const void *key, void **value)
+{
+ struct hashmap_entry *entry;
+ size_t h;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (!hashmap_find_entry(map, key, h, NULL, &entry))
+ return false;
+
+ if (value)
+ *value = entry->value;
+ return true;
+}
+
+bool hashmap__delete(struct hashmap *map, const void *key,
+ const void **old_key, void **old_value)
+{
+ struct hashmap_entry **pprev, *entry;
+ size_t h;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (!hashmap_find_entry(map, key, h, &pprev, &entry))
+ return false;
+
+ if (old_key)
+ *old_key = entry->key;
+ if (old_value)
+ *old_value = entry->value;
+
+ hashmap_del_entry(pprev, entry);
+ free(entry);
+ map->sz--;
+
+ return true;
+}
+
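
Two implementation notes worth calling out before the header: hash_bits() multiplies by 11400714819323198485 (2^64 divided by the golden ratio, i.e. Fibonacci hashing) to mix the hash before taking its top bits, and hashmap_needs_to_grow() caps the load factor at 75%, doubling the table whenever (sz + 1) * 4 / 3 would exceed cap. A hedged sketch of the resulting growth schedule, with integer keys stored directly in the key pointer and made-up id_hash/id_equal callbacks:

#include <stdio.h>
#include "hashmap.h"

static size_t id_hash(const void *key, void *ctx)
{
	return (size_t)key;
}

static bool id_equal(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

int main(void)
{
	struct hashmap *map = hashmap__new(id_hash, id_equal, NULL);
	size_t i;

	for (i = 1; i <= 16; i++) {
		hashmap__add(map, (void *)i, (void *)i);
		printf("sz=%zu cap=%zu\n", hashmap__size(map),
		       hashmap__capacity(map));
	}
	/* cap goes 0 -> 4 on the 1st insert, 4 -> 8 on the 4th,
	 * 8 -> 16 on the 7th, and 16 -> 32 on the 13th */
	hashmap__free(map);
	return 0;
}
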
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
new file mode 100644
index 000000000000..03748a742146
--- /dev/null
+++ b/tools/lib/bpf/hashmap.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * Generic non-thread-safe hash map implementation.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+#ifndef __LIBBPF_HASHMAP_H
+#define __LIBBPF_HASHMAP_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include "libbpf_internal.h"
+
+static inline size_t hash_bits(size_t h, int bits)
+{
+ /* shuffle bits and return requested number of upper bits */
+ return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+}
+
+typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
+typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
+
+struct hashmap_entry {
+ const void *key;
+ void *value;
+ struct hashmap_entry *next;
+};
+
+struct hashmap {
+ hashmap_hash_fn hash_fn;
+ hashmap_equal_fn equal_fn;
+ void *ctx;
+
+ struct hashmap_entry **buckets;
+ size_t cap;
+ size_t cap_bits;
+ size_t sz;
+};
+
+#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \
+ .hash_fn = (hash_fn), \
+ .equal_fn = (equal_fn), \
+ .ctx = (ctx), \
+ .buckets = NULL, \
+ .cap = 0, \
+ .cap_bits = 0, \
+ .sz = 0, \
+}
+
+void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn, void *ctx);
+struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn,
+ void *ctx);
+void hashmap__clear(struct hashmap *map);
+void hashmap__free(struct hashmap *map);
+
+size_t hashmap__size(const struct hashmap *map);
+size_t hashmap__capacity(const struct hashmap *map);
+
+/*
+ * Hashmap insertion strategy:
+ * - HASHMAP_ADD - only add key/value if key doesn't exist yet;
+ * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise,
+ * update value;
+ * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do
+ * nothing and return -ENOENT;
+ * - HASHMAP_APPEND - always add key/value pair, even if key already exists.
+ * This turns the hashmap into a multimap by allowing multiple values to
+ * be associated with the same key. The most useful read API for such a
+ * hashmap is hashmap__for_each_key_entry() iteration. If hashmap__find()
+ * is still used, it will return the last inserted key/value entry (the
+ * first one in a bucket chain).
+ */
+enum hashmap_insert_strategy {
+ HASHMAP_ADD,
+ HASHMAP_SET,
+ HASHMAP_UPDATE,
+ HASHMAP_APPEND,
+};
+
+/*
+ * hashmap__insert() adds a key/value entry with various semantics,
+ * depending on the provided strategy value. If a given key/value pair
+ * replaces an already existing key/value pair, both the old key and the
+ * old value are returned through old_key and old_value to allow the
+ * calling code to do proper memory management.
+ */
+int hashmap__insert(struct hashmap *map, const void *key, void *value,
+ enum hashmap_insert_strategy strategy,
+ const void **old_key, void **old_value);
+
+static inline int hashmap__add(struct hashmap *map,
+ const void *key, void *value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
+}
+
+static inline int hashmap__set(struct hashmap *map,
+ const void *key, void *value,
+ const void **old_key, void **old_value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_SET,
+ old_key, old_value);
+}
+
+static inline int hashmap__update(struct hashmap *map,
+ const void *key, void *value,
+ const void **old_key, void **old_value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_UPDATE,
+ old_key, old_value);
+}
+
+static inline int hashmap__append(struct hashmap *map,
+ const void *key, void *value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
+}
+
+bool hashmap__delete(struct hashmap *map, const void *key,
+ const void **old_key, void **old_value);
+
+bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+
+/*
+ * hashmap__for_each_entry - iterate over all entries in hashmap
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @bkt: integer used as a bucket loop cursor
+ */
+#define hashmap__for_each_entry(map, cur, bkt) \
+ for (bkt = 0; bkt < map->cap; bkt++) \
+ for (cur = map->buckets[bkt]; cur; cur = cur->next)
+
+/*
+ * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
+ * against removals
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @tmp: struct hashmap_entry * used as a temporary next cursor storage
+ * @bkt: integer used as a bucket loop cursor
+ */
+#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
+ for (bkt = 0; bkt < map->cap; bkt++) \
+ for (cur = map->buckets[bkt]; \
+ cur && ({tmp = cur->next; true; }); \
+ cur = tmp)
+
+/*
+ * hashmap__for_each_key_entry - iterate over entries associated with given key
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @key: key to iterate entries for
+ */
+#define hashmap__for_each_key_entry(map, cur, _key) \
+ for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+ map->cap_bits); \
+ map->buckets ? map->buckets[bkt] : NULL; }); \
+ cur; \
+ cur = cur->next) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+
+#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
+ for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+ map->cap_bits); \
+ cur = map->buckets ? map->buckets[bkt] : NULL; }); \
+ cur && ({ tmp = cur->next; true; }); \
+ cur = tmp) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+
+#endif /* __LIBBPF_HASHMAP_H */
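
The HASHMAP_APPEND strategy plus hashmap__for_each_key_entry() documented above give multimap semantics. A minimal sketch (again with an illustrative string hash; entries come back newest-first because inserts push onto the head of the bucket chain, which is also why hashmap__find() returns the last inserted entry):

#include <stdio.h>
#include <string.h>
#include "hashmap.h"

static size_t str_hash(const void *key, void *ctx)
{
	const char *s = key;
	size_t h = 0;

	while (*s)
		h = h * 31 + *s++;
	return h;
}

static bool str_equal(const void *k1, const void *k2, void *ctx)
{
	return strcmp(k1, k2) == 0;
}

int main(void)
{
	struct hashmap *map = hashmap__new(str_hash, str_equal, NULL);
	struct hashmap_entry *cur;

	hashmap__append(map, "sec", (void *)1);
	hashmap__append(map, "sec", (void *)2);
	hashmap__append(map, "sec", (void *)3);

	hashmap__for_each_key_entry(map, cur, "sec")
		printf("%s -> %ld\n", (const char *)cur->key,
		       (long)cur->value);	/* 3, then 2, then 1 */

	hashmap__free(map);
	return 0;
}
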
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 151f7ac1882e..c8fbc050fd78 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -188,6 +188,7 @@ struct bpf_program {
void *line_info;
__u32 line_info_rec_size;
__u32 line_info_cnt;
+ __u32 prog_flags;
};
enum libbpf_map_type {
@@ -348,8 +349,11 @@ static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
struct bpf_program *prog)
{
- if (size < sizeof(struct bpf_insn)) {
- pr_warning("corrupted section '%s'\n", section_name);
+ const size_t bpf_insn_sz = sizeof(struct bpf_insn);
+
+ if (size == 0 || size % bpf_insn_sz) {
+ pr_warning("corrupted section '%s', size: %zu\n",
+ section_name, size);
return -EINVAL;
}
@@ -375,9 +379,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
section_name);
goto errout;
}
- prog->insns_cnt = size / sizeof(struct bpf_insn);
- memcpy(prog->insns, data,
- prog->insns_cnt * sizeof(struct bpf_insn));
+ prog->insns_cnt = size / bpf_insn_sz;
+ memcpy(prog->insns, data, size);
prog->idx = idx;
prog->instances.fds = NULL;
prog->instances.nr = -1;
@@ -494,15 +497,14 @@ static struct bpf_object *bpf_object__new(const char *path,
strcpy(obj->path, path);
/* Using basename() GNU version which doesn't modify arg. */
- strncpy(obj->name, basename((void *)path),
- sizeof(obj->name) - 1);
+ strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
end = strchr(obj->name, '.');
if (end)
*end = 0;
obj->efile.fd = -1;
/*
- * Caller of this function should also calls
+ * Caller of this function should also call
* bpf_object__elf_finish() after data collection to return
* obj_buf to user. If not, we should duplicate the buffer to
* avoid user freeing them before elf finish.
@@ -562,38 +564,35 @@ static int bpf_object__elf_init(struct bpf_object *obj)
} else {
obj->efile.fd = open(obj->path, O_RDONLY);
if (obj->efile.fd < 0) {
- char errmsg[STRERR_BUFSIZE];
- char *cp = libbpf_strerror_r(errno, errmsg,
- sizeof(errmsg));
+ char errmsg[STRERR_BUFSIZE], *cp;
+ err = -errno;
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warning("failed to open %s: %s\n", obj->path, cp);
- return -errno;
+ return err;
}
obj->efile.elf = elf_begin(obj->efile.fd,
- LIBBPF_ELF_C_READ_MMAP,
- NULL);
+ LIBBPF_ELF_C_READ_MMAP, NULL);
}
if (!obj->efile.elf) {
- pr_warning("failed to open %s as ELF file\n",
- obj->path);
+ pr_warning("failed to open %s as ELF file\n", obj->path);
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
- pr_warning("failed to get EHDR from %s\n",
- obj->path);
+ pr_warning("failed to get EHDR from %s\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
ep = &obj->efile.ehdr;
/* Old LLVM set e_machine to EM_NONE */
- if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
- pr_warning("%s is not an eBPF object file\n",
- obj->path);
+ if (ep->e_type != ET_REL ||
+ (ep->e_machine && ep->e_machine != EM_BPF)) {
+ pr_warning("%s is not an eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
@@ -604,47 +603,31 @@ errout:
return err;
}
-static int
-bpf_object__check_endianness(struct bpf_object *obj)
-{
- static unsigned int const endian = 1;
-
- switch (obj->efile.ehdr.e_ident[EI_DATA]) {
- case ELFDATA2LSB:
- /* We are big endian, BPF obj is little endian. */
- if (*(unsigned char const *)&endian != 1)
- goto mismatch;
- break;
-
- case ELFDATA2MSB:
- /* We are little endian, BPF obj is big endian. */
- if (*(unsigned char const *)&endian != 0)
- goto mismatch;
- break;
- default:
- return -LIBBPF_ERRNO__ENDIAN;
- }
-
- return 0;
-
-mismatch:
- pr_warning("Error: endianness mismatch.\n");
+static int bpf_object__check_endianness(struct bpf_object *obj)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return 0;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
+ return 0;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+ pr_warning("endianness mismatch.\n");
return -LIBBPF_ERRNO__ENDIAN;
}
static int
-bpf_object__init_license(struct bpf_object *obj,
- void *data, size_t size)
+bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
- memcpy(obj->license, data,
- min(size, sizeof(obj->license) - 1));
+ memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
pr_debug("license of %s is %s\n", obj->path, obj->license);
return 0;
}
static int
-bpf_object__init_kversion(struct bpf_object *obj,
- void *data, size_t size)
+bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
__u32 kver;
@@ -654,8 +637,7 @@ bpf_object__init_kversion(struct bpf_object *obj,
}
memcpy(&kver, data, sizeof(kver));
obj->kern_version = kver;
- pr_debug("kernel version of %s is %x\n", obj->path,
- obj->kern_version);
+ pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
return 0;
}
@@ -811,8 +793,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
def->key_size = sizeof(int);
def->value_size = data->d_size;
def->max_entries = 1;
- def->map_flags = type == LIBBPF_MAP_RODATA ?
- BPF_F_RDONLY_PROG : 0;
+ def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
if (data_buff) {
*data_buff = malloc(data->d_size);
if (!*data_buff) {
@@ -827,8 +808,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
return 0;
}
-static int
-bpf_object__init_maps(struct bpf_object *obj, int flags)
+static int bpf_object__init_maps(struct bpf_object *obj, int flags)
{
int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
bool strict = !(flags & MAPS_RELAX_COMPAT);
@@ -930,6 +910,11 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
map_name = elf_strptr(obj->efile.elf,
obj->efile.strtabidx,
sym.st_name);
+ if (!map_name) {
+ pr_warning("failed to get map #%d name sym string for obj %s\n",
+ map_idx, obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC;
obj->maps[map_idx].offset = sym.st_value;
@@ -1104,8 +1089,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
- pr_warning("failed to get e_shstrndx from %s\n",
- obj->path);
+ pr_warning("failed to get e_shstrndx from %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
@@ -1226,7 +1210,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
pr_warning("Corrupted ELF file: index of strtab invalid\n");
- return LIBBPF_ERRNO__FORMAT;
+ return -LIBBPF_ERRNO__FORMAT;
}
if (btf_data) {
obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
@@ -1346,8 +1330,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
size_t nr_maps = obj->nr_maps;
int i, nrels;
- pr_debug("collecting relocating info for: '%s'\n",
- prog->section_name);
+ pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
nrels = shdr->sh_size / shdr->sh_entsize;
prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
@@ -1372,9 +1355,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols,
- GELF_R_SYM(rel.r_info),
- &sym)) {
+ if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
pr_warning("relocation: symbol %"PRIx64" not found\n",
GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
@@ -1435,8 +1416,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
if (maps[map_idx].libbpf_type != type)
continue;
if (type != LIBBPF_MAP_UNSPEC ||
- (type == LIBBPF_MAP_UNSPEC &&
- maps[map_idx].offset == sym.st_value)) {
+ maps[map_idx].offset == sym.st_value) {
pr_debug("relocation: find map %zd (%s) for insn %u\n",
map_idx, maps[map_idx].name, insn_idx);
break;
@@ -1444,7 +1424,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
}
if (map_idx >= nr_maps) {
- pr_warning("bpf relocation: map_idx %d large than %d\n",
+ pr_warning("bpf relocation: map_idx %d larger than %d\n",
(int)map_idx, (int)nr_maps - 1);
return -LIBBPF_ERRNO__RELOC;
}
@@ -1760,7 +1740,7 @@ bpf_object__create_maps(struct bpf_object *obj)
create_attr.key_size = def->key_size;
create_attr.value_size = def->value_size;
create_attr.max_entries = def->max_entries;
- create_attr.btf_fd = 0;
+ create_attr.btf_fd = -1;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
if (bpf_map_type__is_map_in_map(def->type) &&
@@ -1774,11 +1754,11 @@ bpf_object__create_maps(struct bpf_object *obj)
}
*pfd = bpf_create_map_xattr(&create_attr);
- if (*pfd < 0 && create_attr.btf_key_type_id) {
+ if (*pfd < 0 && create_attr.btf_fd >= 0) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
map->name, cp, errno);
- create_attr.btf_fd = 0;
+ create_attr.btf_fd = -1;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
map->btf_key_type_id = 0;
@@ -1807,7 +1787,7 @@ err_out:
}
}
- pr_debug("create map %s: fd=%d\n", map->name, *pfd);
+ pr_debug("created map %s: fd=%d\n", map->name, *pfd);
}
return 0;
@@ -1828,18 +1808,14 @@ check_btf_ext_reloc_err(struct bpf_program *prog, int err,
if (btf_prog_info) {
/*
* Some info has already been found but has problem
- * in the last btf_ext reloc. Must have to error
- * out.
+ * in the last btf_ext reloc. Must have to error out.
*/
pr_warning("Error in relocating %s for sec %s.\n",
info_name, prog->section_name);
return err;
}
- /*
- * Have problem loading the very first info. Ignore
- * the rest.
- */
+ /* Had a problem loading the very first info. Ignore the rest. */
pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
info_name, prog->section_name, info_name);
return 0;
@@ -2043,9 +2019,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
return -LIBBPF_ERRNO__RELOC;
}
- err = bpf_program__collect_reloc(prog,
- shdr, data,
- obj);
+ err = bpf_program__collect_reloc(prog, shdr, data, obj);
if (err)
return err;
}
@@ -2062,6 +2036,9 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *log_buf;
int ret;
+ if (!insns || !insns_cnt)
+ return -EINVAL;
+
memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
load_attr.prog_type = prog->type;
load_attr.expected_attach_type = prog->expected_attach_type;
@@ -2072,7 +2049,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.license = license;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
- load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
+ load_attr.prog_btf_fd = prog->btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = prog->func_info_cnt;
@@ -2080,8 +2057,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.line_info_rec_size = prog->line_info_rec_size;
load_attr.line_info_cnt = prog->line_info_cnt;
load_attr.log_level = prog->log_level;
- if (!load_attr.insns || !load_attr.insns_cnt)
- return -EINVAL;
+ load_attr.prog_flags = prog->prog_flags;
retry_load:
log_buf = malloc(log_buf_size);
@@ -2226,7 +2202,7 @@ static bool bpf_program__is_function_storage(struct bpf_program *prog,
}
static int
-bpf_object__load_progs(struct bpf_object *obj)
+bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
size_t i;
int err;
@@ -2234,6 +2210,7 @@ bpf_object__load_progs(struct bpf_object *obj)
for (i = 0; i < obj->nr_programs; i++) {
if (bpf_program__is_function_storage(&obj->programs[i], obj))
continue;
+ obj->programs[i].log_level |= log_level;
err = bpf_program__load(&obj->programs[i],
obj->license,
obj->kern_version);
@@ -2360,11 +2337,9 @@ struct bpf_object *bpf_object__open_buffer(void *obj_buf,
snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
(unsigned long)obj_buf,
(unsigned long)obj_buf_sz);
- tmp_name[sizeof(tmp_name) - 1] = '\0';
name = tmp_name;
}
- pr_debug("loading object '%s' from buffer\n",
- name);
+ pr_debug("loading object '%s' from buffer\n", name);
return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
@@ -2385,10 +2360,14 @@ int bpf_object__unload(struct bpf_object *obj)
return 0;
}
-int bpf_object__load(struct bpf_object *obj)
+int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
+ struct bpf_object *obj;
int err;
+ if (!attr)
+ return -EINVAL;
+ obj = attr->obj;
if (!obj)
return -EINVAL;
@@ -2401,7 +2380,7 @@ int bpf_object__load(struct bpf_object *obj)
CHECK_ERR(bpf_object__create_maps(obj), err, out);
CHECK_ERR(bpf_object__relocate(obj), err, out);
- CHECK_ERR(bpf_object__load_progs(obj), err, out);
+ CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
return 0;
out:
@@ -2410,6 +2389,15 @@ out:
return err;
}
+int bpf_object__load(struct bpf_object *obj)
+{
+ struct bpf_object_load_attr attr = {
+ .obj = obj,
+ };
+
+ return bpf_object__load_xattr(&attr);
+}
+
static int check_path(const char *path)
{
char *cp, errmsg[STRERR_BUFSIZE];
@@ -3466,9 +3454,7 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
long libbpf_get_error(const void *ptr)
{
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
- return 0;
+ return PTR_ERR_OR_ZERO(ptr);
}
int bpf_prog_load(const char *file, enum bpf_prog_type type,
@@ -3529,6 +3515,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
expected_attach_type);
prog->log_level = attr->log_level;
+ prog->prog_flags = attr->prog_flags;
if (!first_prog)
first_prog = prog;
}
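
The new bpf_object__load_xattr() entry point threads attr->log_level into every program (ORed into each bpf_program's own log_level in bpf_object__load_progs()), while plain bpf_object__load() becomes a thin wrapper passing a zeroed attr. A minimal caller sketch, assuming an already-opened object:

#include "libbpf.h"

/* load all programs in obj with verbose verifier logging */
int load_verbose(struct bpf_object *obj)
{
	struct bpf_object_load_attr attr = {
		.obj = obj,
		.log_level = 1,	/* ORed into each program's log_level */
	};

	return bpf_object__load_xattr(&attr);
}
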
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c5ff00515ce7..1af0d48178c8 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -89,8 +89,14 @@ LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API void bpf_object__close(struct bpf_object *object);
+struct bpf_object_load_attr {
+ struct bpf_object *obj;
+ int log_level;
+};
+
/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
+LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
@@ -320,6 +326,7 @@ struct bpf_prog_load_attr {
enum bpf_attach_type expected_attach_type;
int ifindex;
int log_level;
+ int prog_flags;
};
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
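
The matching prog_flags field in bpf_prog_load_attr is copied into each program and from there into the kernel's load attributes (see the libbpf.c and bpf.c hunks). A hedged usage sketch; BPF_F_TEST_RND_HI32, the verifier-testing flag this plumbing was introduced for, is assumed to come from the kernel's linux/bpf.h UAPI header:

#include <linux/bpf.h>
#include "libbpf.h"

/* load every program in file with verifier testing mode enabled;
 * prog_fd receives the fd of the first program, errors trimmed */
int load_for_testing(const char *file, struct bpf_object **obj)
{
	struct bpf_prog_load_attr attr = {
		.file = file,
		.prog_type = BPF_PROG_TYPE_XDP,
		.prog_flags = BPF_F_TEST_RND_HI32,
	};
	int prog_fd;

	return bpf_prog_load_xattr(&attr, obj, &prog_fd);
}
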
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 673001787cba..46dcda89df21 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -164,3 +164,12 @@ LIBBPF_0.0.3 {
bpf_map_freeze;
btf__finalize_data;
} LIBBPF_0.0.2;
+
+LIBBPF_0.0.4 {
+ global:
+ btf_dump__dump_type;
+ btf_dump__free;
+ btf_dump__new;
+ btf__parse_elf;
+ bpf_object__load_xattr;
+} LIBBPF_0.0.3;
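
Together the four new symbols form a complete BTF-to-C pipeline: btf__parse_elf() pulls raw BTF out of an ELF binary, and btf_dump emits each type as compilable C. A sketch under the assumption that btf.h declares the API as introduced by this series (btf_dump__new() taking the btf, an optional btf_ext, optional opts, and a printf callback); error handling is abbreviated:

#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include "btf.h"
#include "libbpf.h"

static void print_fn(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

int dump_all_types(const char *path)
{
	struct btf *btf = btf__parse_elf(path, NULL);
	struct btf_dump *d;
	__u32 i;
	int err = 0;

	if (libbpf_get_error(btf))
		return -EINVAL;

	d = btf_dump__new(btf, NULL, NULL, print_fn);
	if (libbpf_get_error(d)) {
		btf__free(btf);
		return -EINVAL;
	}

	/* type IDs are 1..nr_types; 0 is reserved for void */
	for (i = 1; i <= btf__get_nr_types(btf) && !err; i++)
		err = btf_dump__dump_type(d, i);

	btf_dump__free(d);
	btf__free(btf);
	return err;
}
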
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index dfab8012185c..61d90eb82ee6 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -9,6 +9,8 @@
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H
+#include "libbpf.h"
+
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
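
These helpers let tests and the BTF sanitization code spell out raw BTF type records inline. A small sketch encoding a single 4-byte signed int; BTF_INT_ENC is assumed to be the companion helper from this same header that packs BTF_KIND_INT's extra 32-bit info word (encoding, bit offset, bit size):

#include <linux/btf.h>
#include "libbpf_internal.h"

/* raw BTF for one type: [1] int, 32 bits, signed; name_off 1 would
 * point at the string "int" in an accompanying string section */
static __u32 raw_types[] = {
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), 4),
	BTF_INT_ENC(BTF_INT_SIGNED, 0, 32),
};
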