Lines Matching +full:enum +full:- +full:cnt +full:- +full:name
1 // SPDX-License-Identifier: GPL-2.0
75 * NOTE that we cannot assume any reference-order.
81 * object describing "void *". This type-reference is done
88 * - Each line started with "[?]" is a btf_type object
89 * - [?] is the type_id of the btf_type object.
90 * - CONST/PTR is the BTF_KIND_XXX
91 * - "(anon)" is the name of the type. It just
92 * happens that CONST and PTR has no name.
93 * - type_id=XXX is the 'u32 type' in btf_type
107 * have a name.
117 * an array: "btf->types".
127 * check this type-reference in the first pass.
130 * checking the name is a valid offset to the string section).
138 * 1) does exist in the BTF (i.e. in btf->types[])
171 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
173 * +-----------------------------------------+
178 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
208 enum btf_kfunc_hook {
226 enum {
243 u32 cnt;
248 u32 cnt;
276 char name[MODULE_NAME_LEN];
278 __u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
281 enum verifier_phase {
292 enum visit_state {
298 enum resolve_mode {
320 enum verifier_phase phase;
321 enum resolve_mode resolve_mode;
331 [BTF_KIND_ENUM] = "ENUM",
349 return btf_kind_str[BTF_INFO_KIND(t->info)];
357 * 128-bit int); if we are at the end of our safe buffer and have
364 /* Type name size */
378 * One challenge with showing nested data is we want to skip 0-valued
383 * pass is signalled by show->state.depth_check being set, and if we
384 * encounter a non-zero value we set show->state.depth_to_show to
402 * as we traverse the object's data. skbuff-like semantics are
405 * - obj.head points to the start of the toplevel object for display
406 * - obj.size is the size of the toplevel object
407 * - obj.data points to the current point in the original data at
430 int status; /* non-zero for error */
433 char name[BTF_SHOW_NAME_SIZE]; /* space for member name/type */
479 * type through t->type AND its size cannot
480 * be determined without following the t->type.
485 switch (BTF_INFO_KIND(t->info)) {
504 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
509 return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
532 return btf->kernel_btf && !btf->base_btf;
540 total += btf->nr_types;
541 btf = btf->base_btf;
547 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
556 if (BTF_INFO_KIND(t->info) != kind)
559 tname = btf_name_by_offset(btf, t->name_off);
560 if (!strcmp(tname, name))
564 return -ENOENT;
567 s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
577 return -EINVAL;
579 ret = btf_find_by_name_kind(btf, name, kind);
589 /* If name is not found in vmlinux's BTF then search in module's BTFs */
599 ret = btf_find_by_name_kind(btf, name, kind);
618 id = t->type;
619 t = btf_type_by_id(btf, t->type);
637 return btf_type_skip_modifiers(btf, t->type, res_id);
667 * another type (through member->type).
673 * btf_type_is_array() because its element (array->type)
676 * member-type repeated by array->nelems of times.
690 /* t->size can be used */
693 switch (BTF_INFO_KIND(t->info)) {
753 return kind_ops[BTF_INFO_KIND(t->info)];
761 while (offset < btf->start_str_off)
762 btf = btf->base_btf;
764 offset -= btf->start_str_off;
765 return offset < btf->hdr.str_len;
780 while (offset < btf->start_str_off)
781 btf = btf->base_btf;
783 offset -= btf->start_str_off;
784 if (offset < btf->hdr.str_len)
785 return &btf->strings[offset];
834 const char *name;
839 name = btf_str_by_offset(btf, offset);
840 return name ?: "(invalid-name-offset)";
850 while (type_id < btf->start_id)
851 btf = btf->base_btf;
853 type_id -= btf->start_id;
854 if (type_id >= btf->nr_types)
856 return btf->types[type_id];
895 id = m->type;
903 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
904 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
915 BITS_PER_BYTE_MASKED(m->offset) ||
916 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
931 BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
932 t = btf_type_by_id(btf, t->type);
943 * Populate show->state.name with type name information.
944 * Format of type name is
956 const char *name = NULL, *prefix = "", *parens = "";
957 const struct btf_member *m = show->state.member;
960 u32 id = show->state.type_id;
966 show->state.name[0] = '\0';
969 * Don't show type name if we're showing an array member;
973 if (show->state.array_member)
976 /* Retrieve member name, if any. */
978 member = btf_name_by_offset(show->btf, m->name_off);
980 id = m->type;
988 * in our show->state points at the resolved type of the typedef.
990 t = btf_type_by_id(show->btf, id);
996 * array suffixes while ensuring the type name for a typedef
1012 * We also want to get typedef name while proceeding to resolve
1018 switch (BTF_INFO_KIND(t->info)) {
1020 if (!name)
1021 name = btf_name_by_offset(show->btf,
1022 t->name_off);
1024 id = t->type;
1033 array_suffix -= 2;
1034 id = array->type;
1039 ptr_suffix -= 1;
1040 id = t->type;
1048 t = btf_type_skip_qualifiers(show->btf, id);
1054 if (!name)
1055 name = btf_name_by_offset(show->btf, t->name_off);
1057 switch (BTF_INFO_KIND(t->info)) {
1060 prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1068 prefix = "enum";
1077 /* typedef does not require struct/union/enum prefix */
1081 if (!name)
1082 name = "";
1084 /* Even if we don't want type name info, we want parentheses etc */
1085 if (show->flags & BTF_SHOW_NONAME)
1086 snprintf(show->state.name, sizeof(show->state.name), "%s",
1089 snprintf(show->state.name, sizeof(show->state.name),
1095 /* ...next is our prefix (struct, enum, etc) */
1097 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1098 /* ...this is the type name itself */
1099 name,
1104 return show->state.name;
1112 if ((indent - show->state.depth) >= indents)
1113 return indent - show->state.depth;
1119 return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1124 return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1129 if (show->state.depth == 0)
1132 if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1133 BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1143 if (!show->state.depth_check) {
1145 show->showfn(show, fmt, args);
1158 (show->flags & BTF_SHOW_ZERO) || \
1159 show->state.depth == 0) { \
1165 if (show->state.depth > show->state.depth_to_show) \
1166 show->state.depth_to_show = show->state.depth; \
1176 if (show->state.depth > show->state.depth_to_show) \
1177 show->state.depth_to_show = show->state.depth; \
1183 return show->obj.head + show->obj.size - data;
1189 return data >= show->obj.data &&
1190 (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1202 return show->obj.safe + (data - show->obj.data);
1207 * Return a safe-to-access version of data pointed to by @data.
1211 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1230 * We use stack data as opposed to per-CPU buffers because the
1242 if (show->flags & BTF_SHOW_UNSAFE)
1245 rt = btf_resolve_size(show->btf, t, &size);
1247 show->state.status = PTR_ERR(rt);
1256 if (show->state.depth == 0) {
1257 show->obj.size = size;
1258 show->obj.head = data;
1277 * - the current type size is within the safe buffer; or
1278 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1295 show->state.status = copy_from_kernel_nofault(show->obj.safe,
1297 if (!show->state.status) {
1298 show->obj.data = data;
1299 safe = show->obj.safe;
1314 show->state.type = t;
1315 show->state.type_id = type_id;
1316 show->state.name[0] = '\0';
1323 show->state.type = NULL;
1324 show->state.type_id = 0;
1325 show->state.name[0] = '\0';
1340 show->state.depth++;
1347 show->state.depth--;
1356 show->state.member = m;
1361 show->state.array_member = 1;
1367 show->state.member = NULL;
1372 show->state.array_member = 0;
1382 show->state.array_encoding = array_encoding;
1383 show->state.array_terminated = 0;
1389 show->state.array_encoding = 0;
1390 show->state.array_terminated = 0;
1420 struct bpf_verifier_log *log = &env->log;
1436 struct bpf_verifier_log *log = &env->log;
1437 struct btf *btf = env->btf;
1443 if (log->level == BPF_LOG_KERNEL) {
1446 * Skip those prints for in-kernel BTF verification.
1452 if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1457 env->log_type_id,
1459 __btf_name_by_offset(btf, t->name_off),
1463 btf_type_ops(t)->log_details(env, t);
1486 struct bpf_verifier_log *log = &env->log;
1487 struct btf *btf = env->btf;
1493 if (log->level == BPF_LOG_KERNEL) {
1498 if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1508 if (env->phase != CHECK_META)
1514 __btf_name_by_offset(btf, member->name_off),
1515 member->type,
1516 BTF_MEMBER_BITFIELD_SIZE(member->offset),
1517 BTF_MEMBER_BIT_OFFSET(member->offset));
1520 __btf_name_by_offset(btf, member->name_off),
1521 member->type, member->offset);
1539 struct bpf_verifier_log *log = &env->log;
1544 if (log->level == BPF_LOG_KERNEL && !fmt)
1546 if (env->phase != CHECK_META)
1550 vsi->type, vsi->offset, vsi->size);
1564 struct bpf_verifier_log *log = &env->log;
1565 const struct btf *btf = env->btf;
1571 if (log->level == BPF_LOG_KERNEL)
1573 hdr = &btf->hdr;
1574 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1575 __btf_verifier_log(log, "version: %u\n", hdr->version);
1576 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1577 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1578 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1579 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1580 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1581 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1587 struct btf *btf = env->btf;
1589 if (btf->types_size == btf->nr_types) {
1595 if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1597 return -E2BIG;
1600 expand_by = max_t(u32, btf->types_size >> 2, 16);
1602 btf->types_size + expand_by);
1607 return -ENOMEM;
1609 if (btf->nr_types == 0) {
1610 if (!btf->base_btf) {
1613 btf->nr_types++;
1616 memcpy(new_types, btf->types,
1617 sizeof(*btf->types) * btf->nr_types);
1620 kvfree(btf->types);
1621 btf->types = new_types;
1622 btf->types_size = new_size;
1625 btf->types[btf->nr_types++] = t;
1638 btf->id = id;
1643 return -ENOSPC;
1653 * In map-in-map, calling map_delete_elem() on outer
1662 idr_remove(&btf_idr, btf->id);
1668 struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1673 for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1674 kfree(tab->sets[hook]);
1676 btf->kfunc_set_tab = NULL;
1681 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1686 btf->dtor_kfunc_tab = NULL;
1695 for (i = 0; i < tab->cnt; i++)
1696 btf_record_free(tab->types[i].record);
1702 struct btf_struct_metas *tab = btf->struct_meta_tab;
1705 btf->struct_meta_tab = NULL;
1710 struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
1716 for (i = 0; i < tab->cnt; i++)
1717 bpf_struct_ops_desc_release(&tab->ops[i]);
1720 btf->struct_ops_tab = NULL;
1729 kvfree(btf->types);
1730 kvfree(btf->resolved_sizes);
1731 kvfree(btf->resolved_ids);
1732 /* vmlinux does not allocate btf->data, it simply points it at
1736 kvfree(btf->data);
1737 kvfree(btf->base_id_map);
1750 return btf->name;
1755 refcount_inc(&btf->refcnt);
1760 if (btf && refcount_dec_and_test(&btf->refcnt)) {
1762 call_rcu(&btf->rcu, btf_free_rcu);
1768 return btf->base_btf;
1773 return &btf->hdr;
1778 btf->base_btf = (struct btf *)base_btf;
1779 btf->start_id = btf_nr_types(base_btf);
1780 btf->start_str_off = base_btf->hdr.str_len;
1785 struct btf *btf = env->btf;
1786 u32 nr_types = btf->nr_types;
1806 btf->resolved_sizes = resolved_sizes;
1807 btf->resolved_ids = resolved_ids;
1808 env->visit_states = visit_states;
1816 return -ENOMEM;
1821 kvfree(env->visit_states);
1828 switch (env->resolve_mode) {
1830 /* int, enum or void is a sink */
1833 /* int, enum, void, struct, array, func or func_proto is a sink
1839 /* int, enum, void, ptr, func or func_proto is a sink
1854 if (type_id < env->btf->start_id)
1857 return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1863 const struct btf *btf = env->btf;
1866 if (env->top_stack == MAX_RESOLVE_DEPTH)
1867 return -E2BIG;
1869 if (type_id < btf->start_id
1870 || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1871 return -EEXIST;
1873 env->visit_states[type_id - btf->start_id] = VISITED;
1875 v = &env->stack[env->top_stack++];
1876 v->t = t;
1877 v->type_id = type_id;
1878 v->next_member = 0;
1880 if (env->resolve_mode == RESOLVE_TBD) {
1882 env->resolve_mode = RESOLVE_PTR;
1884 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1893 env->stack[env->top_stack - 1].next_member = next_member;
1900 u32 type_id = env->stack[--(env->top_stack)].type_id;
1901 struct btf *btf = env->btf;
1903 type_id -= btf->start_id; /* adjust to local type id */
1904 btf->resolved_sizes[type_id] = resolved_size;
1905 btf->resolved_ids[type_id] = resolved_type_id;
1906 env->visit_states[type_id] = RESOLVED;
1911 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1914 /* Resolve the size of a passed-in "type"
1944 switch (BTF_INFO_KIND(type->info)) {
1945 /* type->size can be used */
1952 size = type->size;
1965 id = type->type;
1966 type = btf_type_by_id(btf, type->type);
1973 if (nelems && array->nelems > U32_MAX / nelems)
1974 return ERR_PTR(-EINVAL);
1975 nelems *= array->nelems;
1976 type = btf_type_by_id(btf, array->type);
1981 return ERR_PTR(-EINVAL);
1985 return ERR_PTR(-EINVAL);
1989 return ERR_PTR(-EINVAL);
1997 *elem_id = array ? array->type : 0;
2013 while (type_id < btf->start_id)
2014 btf = btf->base_btf;
2016 return btf->resolved_ids[type_id - btf->start_id];
2029 while (type_id < btf->start_id)
2030 btf = btf->base_btf;
2032 return btf->resolved_sizes[type_id - btf->start_id];
2047 size = size_type->size;
2062 size = size_type->size;
2085 return -EINVAL;
2095 return -EINVAL;
2099 * int, enum and modifier types have their specific callback functions.
2106 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2109 return -EINVAL;
2112 /* bitfield size is 0, so member->offset represents bit offset only.
2115 return btf_type_ops(member_type)->check_member(env, struct_type,
2123 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2124 return -EINVAL;
2131 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2140 u32 struct_bits_off = member->offset;
2141 u32 struct_size = struct_type->size;
2145 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2148 return -EINVAL;
2159 return -EINVAL;
2163 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2166 return -EINVAL;
2179 u32 struct_size = struct_type->size;
2186 return -EINVAL;
2190 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2191 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2200 return -EINVAL;
2207 return -EINVAL;
2215 return -EINVAL;
2219 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2222 return -EINVAL;
2239 return -EINVAL;
2244 return -EINVAL;
2249 return -EINVAL;
2256 return -EINVAL;
2264 return -EINVAL;
2267 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2269 return -EINVAL;
2284 return -ENOTSUPP;
2299 t->size, BTF_INT_OFFSET(int_data),
2342 /* shake out un-needed bits by shift/or operations */
2344 upper_num = lower_num << (left_shift_bits - 64);
2348 (lower_num >> (64 - left_shift_bits));
2353 lower_num = upper_num >> (right_shift_bits - 64);
2357 (upper_num << (64 - right_shift_bits));
2386 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2388 right_shift_bits = BITS_PER_U128 - nr_bits;
2457 if (show->state.array_encoding == BTF_INT_CHAR) {
2459 if (show->state.array_terminated)
2462 show->state.array_terminated = 1;
2499 u32 resolved_type_id = member->type;
2501 struct btf *btf = env->btf;
2507 return -EINVAL;
2513 return btf_type_ops(resolved_type)->check_member(env, struct_type,
2524 u32 resolved_type_id = member->type;
2526 struct btf *btf = env->btf;
2532 return -EINVAL;
2538 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2550 struct_size = struct_type->size;
2551 struct_bits_off = member->offset;
2557 return -EINVAL;
2560 if (struct_size - bytes_offset < sizeof(void *)) {
2563 return -EINVAL;
2577 return -EINVAL;
2582 return -EINVAL;
2585 if (!BTF_TYPE_ID_VALID(t->type)) {
2587 return -EINVAL;
2590 /* typedef/type_tag type must have a valid name, and other ref types,
2591 * volatile, const, restrict, should have a null name.
2593 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2594 if (!t->name_off ||
2595 !btf_name_valid_identifier(env->btf, t->name_off)) {
2596 btf_verifier_log_type(env, t, "Invalid name");
2597 return -EINVAL;
2599 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2600 value = btf_name_by_offset(env->btf, t->name_off);
2602 btf_verifier_log_type(env, t, "Invalid name");
2603 return -EINVAL;
2606 if (t->name_off) {
2607 btf_verifier_log_type(env, t, "Invalid name");
2608 return -EINVAL;
2620 const struct btf_type *t = v->t;
2622 u32 next_type_id = t->type;
2623 struct btf *btf = env->btf;
2627 btf_verifier_log_type(env, v->t, "Invalid type_id");
2628 return -EINVAL;
2638 * save us a few type-following when we use it later (e.g. in
2649 btf_verifier_log_type(env, v->t, "Invalid type_id");
2650 return -EINVAL;
2663 const struct btf_type *t = v->t;
2664 u32 next_type_id = t->type;
2665 struct btf *btf = env->btf;
2669 btf_verifier_log_type(env, v->t, "Invalid type_id");
2670 return -EINVAL;
2696 btf_verifier_log_type(env, v->t, "Invalid type_id");
2697 return -EINVAL;
2709 const struct btf_type *t = v->t;
2710 u32 next_type_id = t->type;
2711 struct btf *btf = env->btf;
2715 btf_verifier_log_type(env, v->t, "Invalid type_id");
2716 return -EINVAL;
2725 * to a ptr (last-resolved-ptr).
2727 * We now need to continue from the last-resolved-ptr to
2728 * ensure the last-resolved-ptr will not referring back to
2752 btf_verifier_log_type(env, v->t, "Invalid type_id");
2753 return -EINVAL;
2767 if (btf->resolved_ids)
2772 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2781 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2795 if (show->flags & BTF_SHOW_PTR_RAW)
2805 btf_verifier_log(env, "type_id=%u", t->type);
2832 return -EINVAL;
2835 if (t->type) {
2837 return -EINVAL;
2840 /* fwd type must have a valid name */
2841 if (!t->name_off ||
2842 !btf_name_valid_identifier(env->btf, t->name_off)) {
2843 btf_verifier_log_type(env, t, "Invalid name");
2844 return -EINVAL;
2872 u32 struct_bits_off = member->offset;
2875 struct btf *btf = env->btf;
2880 return -EINVAL;
2883 array_type_id = member->type;
2885 struct_size = struct_type->size;
2887 if (struct_size - bytes_offset < array_size) {
2890 return -EINVAL;
2907 return -EINVAL;
2910 /* array type should not have a name */
2911 if (t->name_off) {
2912 btf_verifier_log_type(env, t, "Invalid name");
2913 return -EINVAL;
2918 return -EINVAL;
2923 return -EINVAL;
2926 if (t->size) {
2928 return -EINVAL;
2932 * so !array->type and !array->index_type are not allowed.
2934 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2936 return -EINVAL;
2939 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2941 return -EINVAL;
2952 const struct btf_array *array = btf_type_array(v->t);
2955 struct btf *btf = env->btf;
2958 /* Check array->index_type */
2959 index_type_id = array->index_type;
2963 btf_verifier_log_type(env, v->t, "Invalid index");
2964 return -EINVAL;
2974 btf_verifier_log_type(env, v->t, "Invalid index");
2975 return -EINVAL;
2978 /* Check array->type */
2979 elem_type_id = array->type;
2983 btf_verifier_log_type(env, v->t,
2985 return -EINVAL;
2994 btf_verifier_log_type(env, v->t, "Invalid elem");
2995 return -EINVAL;
2999 btf_verifier_log_type(env, v->t, "Invalid array of int");
3000 return -EINVAL;
3003 if (array->nelems && elem_size > U32_MAX / array->nelems) {
3004 btf_verifier_log_type(env, v->t,
3006 return -EINVAL;
3009 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
3020 array->type, array->index_type, array->nelems);
3033 elem_type_id = array->type;
3036 elem_size = elem_type->size;
3059 for (i = 0; i < array->nelems; i++) {
3063 elem_ops->show(btf, elem_type, elem_type_id, data,
3069 if (show->state.array_terminated)
3080 const struct btf_member *m = show->state.member;
3083 * First check if any members would be shown (are non-zero).
3085 * details on how this works at a high-level.
3087 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3088 if (!show->state.depth_check) {
3089 show->state.depth_check = show->state.depth + 1;
3090 show->state.depth_to_show = 0;
3093 show->state.member = m;
3095 if (show->state.depth_check != show->state.depth + 1)
3097 show->state.depth_check = 0;
3099 if (show->state.depth_to_show <= show->state.depth)
3103 * non-zero array member(s).
3123 u32 struct_bits_off = member->offset;
3129 return -EINVAL;
3132 struct_size = struct_type->size;
3134 if (struct_size - bytes_offset < member_type->size) {
3137 return -EINVAL;
3147 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3150 struct btf *btf = env->btf;
3151 u32 struct_size = t->size;
3160 return -EINVAL;
3163 /* struct type either no name or a valid one */
3164 if (t->name_off &&
3165 !btf_name_valid_identifier(env->btf, t->name_off)) {
3166 btf_verifier_log_type(env, t, "Invalid name");
3167 return -EINVAL;
3174 if (!btf_name_offset_valid(btf, member->name_off)) {
3177 member->name_off);
3178 return -EINVAL;
3181 /* struct member either no name or a valid one */
3182 if (member->name_off &&
3183 !btf_name_valid_identifier(btf, member->name_off)) {
3184 btf_verifier_log_member(env, t, member, "Invalid name");
3185 return -EINVAL;
3188 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3191 return -EINVAL;
3198 return -EINVAL;
3208 return -EINVAL;
3214 return -EINVAL;
3235 if (v->next_member) {
3240 last_member = btf_type_member(v->t) + v->next_member - 1;
3241 last_member_type_id = last_member->type;
3244 return -EINVAL;
3246 last_member_type = btf_type_by_id(env->btf,
3248 if (btf_type_kflag(v->t))
3249 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3253 err = btf_type_ops(last_member_type)->check_member(env, v->t,
3260 for_each_member_from(i, v->next_member, v->t, member) {
3261 u32 member_type_id = member->type;
3262 const struct btf_type *member_type = btf_type_by_id(env->btf,
3267 btf_verifier_log_member(env, v->t, member,
3269 return -EINVAL;
3278 if (btf_type_kflag(v->t))
3279 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3283 err = btf_type_ops(member_type)->check_member(env, v->t,
3298 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3301 enum {
3307 enum btf_field_type type;
3321 u32 off, int sz, enum btf_field_type field_type,
3326 if (t->size != sz)
3328 info->type = field_type;
3329 info->off = off;
3336 enum btf_field_type type;
3343 t = btf_type_by_id(btf, t->type);
3347 t = btf_type_by_id(btf, t->type);
3352 if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3353 return -EINVAL;
3354 tag_value = __btf_name_by_offset(btf, t->name_off);
3364 return -EINVAL;
3370 t = btf_type_skip_modifiers(btf, t->type, &res_id);
3373 return -EINVAL;
3375 info->type = type;
3376 info->off = off;
3377 info->kptr.type_id = res_id;
3392 if (pt != btf_type_by_id(btf, t->type))
3394 if (btf_type_decl_tag(t)->component_idx != comp_idx)
3396 if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3400 return -ENOENT;
3416 value = __btf_name_by_offset(btf, t->name_off) + len;
3421 return ERR_PTR(-EEXIST);
3430 enum btf_field_type head_type)
3438 if (t->size != sz)
3442 return -EINVAL;
3445 return -EINVAL;
3446 value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
3448 return -ENOMEM;
3455 return -EINVAL;
3456 info->type = head_type;
3457 info->off = off;
3458 info->graph_root.value_btf_id = id;
3459 info->graph_root.node_name = node_field_name;
3464 if (field_mask & field_type && !strcmp(name, field_type_str)) { \
3474 const char *name = __btf_name_by_offset(btf, var_type->name_off);
3477 if (!strcmp(name, "bpf_spin_lock")) {
3479 return -E2BIG;
3486 if (!strcmp(name, "bpf_res_spin_lock")) {
3488 return -E2BIG;
3495 if (!strcmp(name, "bpf_timer")) {
3497 return -E2BIG;
3504 if (!strcmp(name, "bpf_wq")) {
3506 return -E2BIG;
3557 return -EINVAL;
3565 return -E2BIG;
3597 return -E2BIG;
3611 err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3640 nelems *= array->nelems;
3641 var_type = btf_type_by_id(btf, array->type);
3644 return -E2BIG;
3652 sz = var_type->size;
3702 return -EFAULT;
3708 return -E2BIG;
3710 ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3728 member->type);
3733 return -EINVAL;
3739 &info[idx], info_cnt - idx, level);
3756 const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3757 const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3759 off = vsi->offset;
3760 ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
3762 &info[idx], info_cnt - idx,
3779 return -EINVAL;
3796 * in vmlinux or module BTFs, by name and kind.
3798 t = btf_type_by_id(btf, info->kptr.type_id);
3799 id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3801 if (id == -ENOENT) {
3808 field->kptr.dtor = NULL;
3809 id = info->kptr.type_id;
3819 if (info->type == BPF_KPTR_REF) {
3837 ret = -ENOENT;
3844 ret = -ENXIO;
3852 dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
3855 ret = -EINVAL;
3858 field->kptr.dtor = (void *)addr;
3862 field->kptr.btf_id = id;
3863 field->kptr.btf = kptr_btf;
3864 field->kptr.module = mod;
3884 t = btf_type_by_id(btf, info->graph_root.value_btf_id);
3890 if (strcmp(info->graph_root.node_name,
3891 __btf_name_by_offset(btf, member->name_off)))
3893 /* Invalid BTF, two members with same name */
3895 return -EINVAL;
3896 n = btf_type_by_id(btf, member->type);
3898 return -EINVAL;
3899 if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
3900 return -EINVAL;
3903 return -EINVAL;
3906 return -EINVAL;
3908 field->graph_root.btf = (struct btf *)btf;
3909 field->graph_root.value_btf_id = info->graph_root.value_btf_id;
3910 field->graph_root.node_offset = offset;
3913 return -ENOENT;
3936 if (a->offset < b->offset)
3937 return -1;
3938 else if (a->offset > b->offset)
3949 int ret, i, cnt;
3957 cnt = ret;
3961 rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL | __GFP_NOWARN);
3963 return ERR_PTR(-ENOMEM);
3965 rec->spin_lock_off = -EINVAL;
3966 rec->res_spin_lock_off = -EINVAL;
3967 rec->timer_off = -EINVAL;
3968 rec->wq_off = -EINVAL;
3969 rec->refcount_off = -EINVAL;
3970 for (i = 0; i < cnt; i++) {
3974 ret = -EFAULT;
3978 ret = -EEXIST;
3983 rec->field_mask |= info_arr[i].type;
3984 rec->fields[i].offset = info_arr[i].off;
3985 rec->fields[i].type = info_arr[i].type;
3986 rec->fields[i].size = field_type_size;
3990 WARN_ON_ONCE(rec->spin_lock_off >= 0);
3992 rec->spin_lock_off = rec->fields[i].offset;
3995 WARN_ON_ONCE(rec->spin_lock_off >= 0);
3997 rec->res_spin_lock_off = rec->fields[i].offset;
4000 WARN_ON_ONCE(rec->timer_off >= 0);
4002 rec->timer_off = rec->fields[i].offset;
4005 WARN_ON_ONCE(rec->wq_off >= 0);
4007 rec->wq_off = rec->fields[i].offset;
4010 WARN_ON_ONCE(rec->refcount_off >= 0);
4012 rec->refcount_off = rec->fields[i].offset;
4018 ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
4023 ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4028 ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4036 ret = -EFAULT;
4039 rec->cnt++;
4042 if (rec->spin_lock_off >= 0 && rec->res_spin_lock_off >= 0) {
4043 ret = -EINVAL;
4050 (rec->spin_lock_off < 0 && rec->res_spin_lock_off < 0)) {
4051 ret = -EINVAL;
4055 if (rec->refcount_off < 0 &&
4058 ret = -EINVAL;
4062 sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
4083 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4085 for (i = 0; i < rec->cnt; i++) {
4090 if (rec->fields[i].type == BPF_UPTR) {
4094 if (btf_is_kernel(rec->fields[i].kptr.btf))
4095 return -EINVAL;
4096 t = btf_type_by_id(rec->fields[i].kptr.btf,
4097 rec->fields[i].kptr.btf_id);
4098 if (!t->size)
4099 return -EINVAL;
4100 if (t->size > PAGE_SIZE)
4101 return -E2BIG;
4105 if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
4107 btf_id = rec->fields[i].graph_root.value_btf_id;
4110 return -EFAULT;
4111 rec->fields[i].graph_root.value_rec = meta->record;
4117 if (!(rec->field_mask & BPF_GRAPH_NODE))
4126 * - A type can only be owned by another type in user BTF if it
4128 * - A type can only _own_ another type in user BTF if it has a
4138 * A -> B -> C
4140 * - A is an root, e.g. has bpf_rb_root.
4141 * - B is both a root and node, e.g. has bpf_rb_node and
4143 * - C is only an root, e.g. has bpf_list_node
4148 * A -> B
4150 * - A is both an root and node.
4151 * - B is only an node.
4153 if (meta->record->field_mask & BPF_GRAPH_ROOT)
4154 return -ELOOP;
4173 member->type);
4187 member->type,
4196 ops->show(btf, member_type, member->type,
4210 const struct btf_member *m = show->state.member;
4213 * First check if any members would be shown (are non-zero).
4215 * details on how this works at a high-level.
4217 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
4218 if (!show->state.depth_check) {
4219 show->state.depth_check = show->state.depth + 1;
4220 show->state.depth_to_show = 0;
4224 show->state.member = m;
4225 if (show->state.depth_check != show->state.depth + 1)
4227 show->state.depth_check = 0;
4229 if (show->state.depth_to_show <= show->state.depth)
4233 * non-zero child values.
4254 u32 struct_bits_off = member->offset;
4260 return -EINVAL;
4263 struct_size = struct_type->size;
4265 if (struct_size - bytes_offset < member_type->size) {
4268 return -EINVAL;
4282 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
4283 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
4288 return -EINVAL;
4295 return -EINVAL;
4298 struct_size = struct_type->size;
4303 return -EINVAL;
4314 struct btf *btf = env->btf;
4326 return -EINVAL;
4329 if (t->size > 8 || !is_power_of_2(t->size)) {
4331 return -EINVAL;
4334 /* enum type either no name or a valid one */
4335 if (t->name_off &&
4336 !btf_name_valid_identifier(env->btf, t->name_off)) {
4337 btf_verifier_log_type(env, t, "Invalid name");
4338 return -EINVAL;
4347 return -EINVAL;
4350 /* enum member must have a valid name */
4353 btf_verifier_log_type(env, t, "Invalid name");
4354 return -EINVAL;
4357 if (env->log.level == BPF_LOG_KERNEL)
4371 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4422 struct btf *btf = env->btf;
4434 return -EINVAL;
4437 if (t->size > 8 || !is_power_of_2(t->size)) {
4439 return -EINVAL;
4442 /* enum type either no name or a valid one */
4443 if (t->name_off &&
4444 !btf_name_valid_identifier(env->btf, t->name_off)) {
4445 btf_verifier_log_type(env, t, "Invalid name");
4446 return -EINVAL;
4455 return -EINVAL;
4458 /* enum member must have a valid name */
4461 btf_verifier_log_type(env, t, "Invalid name");
4462 return -EINVAL;
4465 if (env->log.level == BPF_LOG_KERNEL)
4530 return -EINVAL;
4533 if (t->name_off) {
4534 btf_verifier_log_type(env, t, "Invalid name");
4535 return -EINVAL;
4540 return -EINVAL;
4554 btf_verifier_log(env, "return=%u args=(", t->type);
4567 __btf_name_by_offset(env->btf,
4569 for (i = 1; i < nr_args - 1; i++)
4571 __btf_name_by_offset(env->btf,
4575 const struct btf_param *last_arg = &args[nr_args - 1];
4577 if (last_arg->type)
4578 btf_verifier_log(env, ", %u %s", last_arg->type,
4579 __btf_name_by_offset(env->btf,
4580 last_arg->name_off));
4597 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4611 if (!t->name_off ||
4612 !btf_name_valid_identifier(env->btf, t->name_off)) {
4613 btf_verifier_log_type(env, t, "Invalid name");
4614 return -EINVAL;
4619 return -EINVAL;
4624 return -EINVAL;
4635 const struct btf_type *t = v->t;
4636 u32 next_type_id = t->type;
4667 return -EINVAL;
4672 return -EINVAL;
4677 return -EINVAL;
4680 if (!t->name_off ||
4681 !btf_name_valid_identifier(env->btf, t->name_off)) {
4682 btf_verifier_log_type(env, t, "Invalid name");
4683 return -EINVAL;
4687 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4689 return -EINVAL;
4693 if (var->linkage != BTF_VAR_STATIC &&
4694 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4696 return -EINVAL;
4708 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4733 return -EINVAL;
4736 if (!t->size) {
4738 return -EINVAL;
4743 return -EINVAL;
4746 if (!t->name_off ||
4747 !btf_name_valid_section(env->btf, t->name_off)) {
4748 btf_verifier_log_type(env, t, "Invalid name");
4749 return -EINVAL;
4756 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4759 return -EINVAL;
4762 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4765 return -EINVAL;
4768 if (!vsi->size || vsi->size > t->size) {
4771 return -EINVAL;
4774 last_vsi_end_off = vsi->offset + vsi->size;
4775 if (last_vsi_end_off > t->size) {
4778 return -EINVAL;
4782 sum += vsi->size;
4785 if (t->size < sum) {
4787 return -EINVAL;
4797 struct btf *btf = env->btf;
4800 env->resolve_mode = RESOLVE_TBD;
4801 for_each_vsi_from(i, v->next_member, v->t, vsi) {
4802 u32 var_type_id = vsi->type, type_id, type_size = 0;
4803 const struct btf_type *var_type = btf_type_by_id(env->btf,
4806 btf_verifier_log_vsi(env, v->t, vsi,
4808 return -EINVAL;
4817 type_id = var_type->type;
4819 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4820 return -EINVAL;
4823 if (vsi->size < type_size) {
4824 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4825 return -EINVAL;
4836 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4852 __btf_name_by_offset(btf, t->name_off));
4854 var = btf_type_by_id(btf, vsi->type);
4857 btf_type_ops(var)->show(btf, var, vsi->type,
4858 data + vsi->offset, bits_offset, show);
4878 return -EINVAL;
4883 return -EINVAL;
4886 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4887 t->size != 16) {
4889 return -EINVAL;
4910 * that types after CO-RE can pass the kernel BTF verifier.
4912 align_bytes = min_t(u64, sizeof(void *), member_type->size);
4914 div64_u64_rem(member->offset, align_bits, &misalign_bits);
4918 return -EINVAL;
4921 start_offset_bytes = member->offset / BITS_PER_BYTE;
4922 end_offset_bytes = start_offset_bytes + member_type->size;
4923 if (end_offset_bytes > struct_type->size) {
4926 return -EINVAL;
4935 btf_verifier_log(env, "size=%u", t->size);
4960 return -EINVAL;
4963 value = btf_name_by_offset(env->btf, t->name_off);
4966 return -EINVAL;
4971 return -EINVAL;
4974 component_idx = btf_type_decl_tag(t)->component_idx;
4975 if (component_idx < -1) {
4977 return -EINVAL;
4989 const struct btf_type *t = v->t;
4990 u32 next_type_id = t->type;
4991 struct btf *btf = env->btf;
4997 btf_verifier_log_type(env, v->t, "Invalid type_id");
4998 return -EINVAL;
5005 component_idx = btf_type_decl_tag(t)->component_idx;
5006 if (component_idx != -1) {
5008 btf_verifier_log_type(env, v->t, "Invalid component_idx");
5009 return -EINVAL;
5016 next_type = btf_type_by_id(btf, next_type->type);
5021 btf_verifier_log_type(env, v->t, "Invalid component_idx");
5022 return -EINVAL;
5033 btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5034 btf_type_decl_tag(t)->component_idx);
5055 btf = env->btf;
5059 /* Check func return type which could be "void" (t->type == 0) */
5060 if (t->type) {
5061 u32 ret_type_id = t->type;
5066 return -EINVAL;
5071 return -EINVAL;
5084 return -EINVAL;
5092 if (!args[nr_args - 1].type) {
5093 if (args[nr_args - 1].name_off) {
5096 return -EINVAL;
5098 nr_args--;
5109 return -EINVAL;
5114 return -EINVAL;
5122 return -EINVAL;
5134 return -EINVAL;
5149 btf = env->btf;
5150 proto_type = btf_type_by_id(btf, t->type);
5154 return -EINVAL;
5162 return -EINVAL;
5200 env->log_type_id, meta_left, sizeof(*t));
5201 return -EINVAL;
5203 meta_left -= sizeof(*t);
5205 if (t->info & ~BTF_INFO_MASK) {
5207 env->log_type_id, t->info);
5208 return -EINVAL;
5211 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
5212 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
5214 env->log_type_id, BTF_INFO_KIND(t->info));
5215 return -EINVAL;
5218 if (!btf_name_offset_valid(env->btf, t->name_off)) {
5220 env->log_type_id, t->name_off);
5221 return -EINVAL;
5224 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
5228 meta_left -= var_meta_size;
5230 return saved_meta_left - meta_left;
5235 struct btf *btf = env->btf;
5239 hdr = &btf->hdr;
5240 cur = btf->nohdr_data + hdr->type_off;
5241 end = cur + hdr->type_len;
5243 env->log_type_id = btf->base_btf ? btf->start_id : 1;
5248 meta_size = btf_check_meta(env, t, end - cur);
5254 env->log_type_id++;
5264 struct btf *btf = env->btf;
5289 u32 elem_type_id = array->type;
5294 (array->nelems * elem_size ==
5304 u32 save_log_type_id = env->log_type_id;
5308 env->resolve_mode = RESOLVE_TBD;
5311 env->log_type_id = v->type_id;
5312 err = btf_type_ops(v->t)->resolve(env, v);
5315 env->log_type_id = type_id;
5316 if (err == -E2BIG) {
5320 } else if (err == -EEXIST) {
5327 err = -EINVAL;
5330 env->log_type_id = save_log_type_id;
5336 struct btf *btf = env->btf;
5345 env->phase++;
5346 for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5347 type_id = btf->start_id + i;
5350 env->log_type_id = type_id;
5370 const struct btf_header *hdr = &env->btf->hdr;
5374 if (hdr->type_off & (sizeof(u32) - 1)) {
5376 return -EINVAL;
5379 if (!env->btf->base_btf && !hdr->type_len) {
5381 return -EINVAL;
5394 struct btf *btf = env->btf;
5397 hdr = &btf->hdr;
5398 start = btf->nohdr_data + hdr->str_off;
5399 end = start + hdr->str_len;
5401 if (end != btf->data + btf->data_size) {
5403 return -EINVAL;
5406 btf->strings = start;
5408 if (btf->base_btf && !hdr->str_len)
5410 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5412 return -EINVAL;
5414 if (!btf->base_btf && start[0]) {
5416 return -EINVAL;
5432 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5443 btf = env->btf;
5444 hdr = &btf->hdr;
5456 expected_total = btf_data_size - hdr->hdr_len;
5460 return -EINVAL;
5465 return -EINVAL;
5469 return -EINVAL;
5471 if (expected_total - total < secs[i].len) {
5474 return -EINVAL;
5482 return -EINVAL;
5494 btf = env->btf;
5495 btf_data_size = btf->data_size;
5499 return -EINVAL;
5502 hdr = btf->data;
5503 hdr_len = hdr->hdr_len;
5506 return -EINVAL;
5510 if (hdr_len > sizeof(btf->hdr)) {
5511 u8 *expected_zero = btf->data + sizeof(btf->hdr);
5512 u8 *end = btf->data + hdr_len;
5517 return -E2BIG;
5522 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5523 memcpy(&btf->hdr, btf->data, hdr_copy);
5525 hdr = &btf->hdr;
5529 if (hdr->magic != BTF_MAGIC) {
5531 return -EINVAL;
5534 if (hdr->version != BTF_VERSION) {
5536 return -ENOTSUPP;
5539 if (hdr->flags) {
5541 return -ENOTSUPP;
5544 if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5546 return -EINVAL;
5568 BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
5573 return ERR_PTR(-ENOMEM);
5574 aof->cnt = 0;
5587 new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5590 ret = -ENOMEM;
5594 aof->ids[aof->cnt++] = id;
5606 ret = -EINVAL;
5614 new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5617 ret = -ENOMEM;
5621 aof->ids[aof->cnt++] = i;
5624 if (!aof->cnt) {
5628 sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5645 if (btf_id_set_contains(aof, member->type))
5650 tab_cnt = tab ? tab->cnt : 0;
5654 ret = -ENOMEM;
5658 new_tab->cnt = 0;
5661 type = &tab->types[tab->cnt];
5662 type->btf_id = i;
5665 BPF_KPTR, t->size);
5668 ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5671 type->record = record;
5672 tab->cnt++;
5688 tab = btf->struct_meta_tab;
5691 return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5697 int i, n, good_id = start_id - 1;
5708 return -EINVAL;
5716 if (!chain_limit--) {
5718 return -ELOOP;
5723 return -EINVAL;
5731 cur_id = t->type;
5734 return -EINVAL;
5751 err = -EFAULT;
5758 bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5759 char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
5766 if (attr->btf_size > BTF_MAX_SIZE)
5767 return ERR_PTR(-E2BIG);
5771 return ERR_PTR(-ENOMEM);
5776 err = bpf_vlog_init(&env->log, attr->btf_log_level,
5777 log_ubuf, attr->btf_log_size);
5783 err = -ENOMEM;
5786 env->btf = btf;
5788 data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5790 err = -ENOMEM;
5794 btf->data = data;
5795 btf->data_size = attr->btf_size;
5797 if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5798 err = -EFAULT;
5806 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5820 struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
5825 btf->struct_meta_tab = struct_meta_tab;
5830 for (i = 0; i < struct_meta_tab->cnt; i++) {
5831 err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5837 err = finalize_log(&env->log, uattr, uattr_size);
5842 refcount_set(&btf->refcnt, 1);
5848 /* overwrite err with -ENOSPC or -EFAULT */
5849 ret = finalize_log(&env->log, uattr, uattr_size);
5876 enum {
5881 __ctx_convert_unused, /* to avoid empty enum in extreme .config */
5893 static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
5906 return btf_type_by_id(btf_vmlinux, ctx_type->type);
5909 static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
5916 return -EFAULT;
5922 return ctx_type->type;
5935 const struct btf_type *t, enum bpf_prog_type prog_type,
5941 t = btf_type_by_id(btf, t->type);
5948 t = btf_type_by_id(btf, t->type);
5951 tname = btf_name_by_offset(btf, t->name_off);
5958 t = btf_type_by_id(btf, t->type);
5967 tname = btf_name_by_offset(btf, t->name_off);
5969 bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5980 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
5989 /* only compare that prog's ctx type name is the same as
6000 * underlying struct and check name again
6005 ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6011 /* forward declarations for arch-specific underlying types of
6012 * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
6022 enum bpf_prog_type prog_type,
6023 enum bpf_attach_type attach_type)
6030 return -EINVAL;
6032 t = btf_type_by_id(btf, t->type);
6037 t = btf_type_by_id(btf, t->type);
6040 tname = btf_name_by_offset(btf, t->name_off);
6048 t = btf_type_by_id(btf, t->type);
6054 tname = btf_name_by_offset(btf, t->name_off);
6056 bpf_log(log, "arg#%d type doesn't have a name\n", arg);
6057 return -EINVAL;
6080 if (btf_is_int(t) && t->size == 8)
6091 if (btf_is_int(t) && t->size == 8)
6097 strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6104 if (btf_is_int(t) && t->size == 8)
6114 if (btf_is_int(t) && t->size == 8)
6129 return -EINVAL;
6134 ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6142 return -EINVAL;
6145 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6148 return -EINVAL;
6157 enum bpf_prog_type prog_type,
6161 return -ENOENT;
6165 int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
6175 kctx_type_id = kctx_member->type;
6179 return -EINVAL;
6188 static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
6195 return ERR_PTR(-ENOENT);
6199 err = -ENOMEM;
6202 env->btf = btf;
6204 btf->data = data;
6205 btf->data_size = data_size;
6206 btf->kernel_btf = true;
6207 snprintf(btf->name, sizeof(btf->name), "%s", name);
6213 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6227 refcount_set(&btf->refcnt, 1);
6233 kvfree(btf->types);
6248 return ERR_PTR(-ENOMEM);
6250 log = &env->log;
6251 log->level = BPF_LOG_KERNEL;
6252 btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
6274 if (!btf->base_btf || !btf->base_id_map)
6276 return btf->base_id_map[id];
6294 return ERR_PTR(-EINVAL);
6298 return ERR_PTR(-ENOMEM);
6300 log = &env->log;
6301 log->level = BPF_LOG_KERNEL;
6315 err = -ENOMEM;
6318 env->btf = btf;
6320 btf->base_btf = base_btf;
6321 btf->start_id = base_btf->nr_types;
6322 btf->start_str_off = base_btf->hdr.str_len;
6323 btf->kernel_btf = true;
6324 snprintf(btf->name, sizeof(btf->name), "%s", module_name);
6326 btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
6327 if (!btf->data) {
6328 err = -ENOMEM;
6331 btf->data_size = data_size;
6337 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6352 err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
6360 refcount_set(&btf->refcnt, 1);
6368 kvfree(btf->data);
6369 kvfree(btf->types);
6379 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6382 return tgt_prog->aux->btf;
6384 return prog->aux->attach_btf;
6390 t = btf_type_skip_modifiers(btf, t->type, NULL);
6409 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6414 t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6415 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6424 enum bpf_attach_type atype = prog->expected_attach_type;
6426 switch (prog->type) {
6655 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
6659 const struct btf_type *t = prog->aux->attach_func_proto;
6660 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6662 const char *tname = prog->aux->attach_func_name;
6663 struct bpf_verifier_log *log = info->log;
6681 if (prog->aux->attach_btf_trace) {
6682 /* skip first 'void *__data' argument in btf_trace_##name typedef */
6684 nr_args--;
6688 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6694 switch (prog->expected_attach_type) {
6697 info->is_retval = true;
6705 * While the LSM programs are BPF_MODIFY_RETURN-like
6709 * return -EINVAL;
6716 t = btf_type_by_id(btf, t->type);
6725 t = btf_type_skip_modifiers(btf, t->type, NULL);
6734 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6747 t = btf_type_by_id(btf, t->type);
6755 __btf_name_by_offset(btf, t->name_off),
6767 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6768 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6771 type = base_type(ctx_arg_info->reg_type);
6772 flag = type_flag(ctx_arg_info->reg_type);
6773 if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6775 info->reg_type = ctx_arg_info->reg_type;
6788 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6789 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6791 if (ctx_arg_info->offset == off) {
6792 if (!ctx_arg_info->btf_id) {
6797 info->reg_type = ctx_arg_info->reg_type;
6798 info->btf = ctx_arg_info->btf ? : btf_vmlinux;
6799 info->btf_id = ctx_arg_info->btf_id;
6800 info->ref_obj_id = ctx_arg_info->ref_obj_id;
6805 info->reg_type = PTR_TO_BTF_ID;
6807 info->reg_type |= PTR_TRUSTED;
6810 info->reg_type |= PTR_MAYBE_NULL;
6812 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6813 struct btf *btf = prog->aux->attach_btf;
6818 t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6821 tname = btf_name_by_offset(btf, t->name_off);
6825 tname += sizeof("btf_trace_") - 1;
6831 info->reg_type |= PTR_MAYBE_NULL;
6837 /* If we don't know NULL-ness specification and the tracepoint
6842 info->reg_type |= PTR_MAYBE_NULL;
6846 enum bpf_prog_type tgt_type;
6848 if (tgt_prog->type == BPF_PROG_TYPE_EXT)
6849 tgt_type = tgt_prog->aux->saved_dst_prog_type;
6851 tgt_type = tgt_prog->type;
6855 info->btf = btf_vmlinux;
6856 info->btf_id = ret;
6863 info->btf = btf;
6864 info->btf_id = t->type;
6865 t = btf_type_by_id(btf, t->type);
6868 tag_value = __btf_name_by_offset(btf, t->name_off);
6870 info->reg_type |= MEM_USER;
6872 info->reg_type |= MEM_PERCPU;
6877 info->btf_id = t->type;
6878 t = btf_type_by_id(btf, t->type);
6887 tname, arg, info->btf_id, btf_type_str(t),
6888 __btf_name_by_offset(btf, t->name_off));
6896 info->reg_type = SCALAR_VALUE;
6902 enum bpf_struct_walk_result {
6911 u32 *next_btf_id, enum bpf_type_flag *flag,
6922 t = btf_type_skip_modifiers(btf, t->type, NULL);
6923 tname = __btf_name_by_offset(btf, t->name_off);
6926 return -EINVAL;
6930 if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
6938 if (off + size > t->size) {
6947 member = btf_type_member(t) + vlen - 1;
6948 mtype = btf_type_skip_modifiers(btf, member->type,
6954 if (array_elem->nelems != 0)
6962 t = btf_type_skip_modifiers(btf, array_elem->type,
6971 off = (off - moff) % t->size;
6977 return -EACCES;
7023 mid = member->type;
7024 mtype = btf_type_by_id(btf, member->type);
7025 mname = __btf_name_by_offset(btf, member->name_off);
7032 return -EFAULT;
7044 * linearize a multi-dimensional array.
7062 * When accessing outer->array[1][0], it moves
7088 elem_idx = (off - moff) / msize;
7108 off -= moff;
7114 enum bpf_type_flag tmp_flag = 0;
7121 return -EACCES;
7125 t = btf_type_by_id(btf, mtype->type);
7127 tag_value = __btf_name_by_offset(btf, t->name_off);
7139 stype = btf_type_skip_modifiers(btf, mtype->type, &id);
7153 * space. e.g. skb->cb[].
7159 return -EACCES;
7165 return -EINVAL;
7170 int off, int size, enum bpf_access_type atype __maybe_unused,
7171 u32 *next_btf_id, enum bpf_type_flag *flag,
7174 const struct btf *btf = reg->btf;
7175 enum bpf_type_flag tmp_flag = 0;
7177 u32 id = reg->btf_id;
7180 while (type_is_alloc(reg->type)) {
7188 rec = meta->record;
7189 for (i = 0; i < rec->cnt; i++) {
7190 struct btf_field *field = &rec->fields[i];
7191 u32 offset = field->offset;
7192 if (off < offset + field->size && offset < off + size) {
7195 btf_field_type_name(field->type));
7196 return -EACCES;
7211 if (type_is_alloc(reg->type))
7234 return -EINVAL;
7239 return -EINVAL;
7263 enum bpf_type_flag flag = 0;
7307 t = btf_type_by_id(btf, t->type);
7309 return -EINVAL;
7315 return t->size;
7316 return -EINVAL;
7347 m->arg_size[i] = 8;
7348 m->arg_flags[i] = 0;
7350 m->ret_size = 8;
7351 m->ret_flags = 0;
7352 m->nr_args = MAX_BPF_FUNC_REG_ARGS;
7361 return -EINVAL;
7363 ret = __get_type_size(btf, func->type, &t);
7368 return -EINVAL;
7370 m->ret_size = ret;
7371 m->ret_flags = __get_type_fmodel_flags(t);
7374 if (i == nargs - 1 && args[i].type == 0) {
7378 return -EINVAL;
7387 return -EINVAL;
7393 return -EINVAL;
7395 m->arg_size[i] = ret;
7396 m->arg_flags[i] = __get_type_fmodel_flags(t);
7398 m->nr_args = nargs;
7406 * EINVAL - function prototype mismatch
7407 * EFAULT - verifier bug
7408 * 0 - 99% match. The last 1% is validated by the verifier.
7418 fn1 = btf_name_by_offset(btf1, t1->name_off);
7419 fn2 = btf_name_by_offset(btf2, t2->name_off);
7423 return -EINVAL;
7427 return -EINVAL;
7430 t1 = btf_type_by_id(btf1, t1->type);
7432 return -EFAULT;
7433 t2 = btf_type_by_id(btf2, t2->type);
7435 return -EFAULT;
7445 return -EINVAL;
7448 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7449 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7450 if (t1->info != t2->info) {
7455 return -EINVAL;
7462 if (t1->info != t2->info) {
7466 return -EINVAL;
7468 if (btf_type_has_size(t1) && t1->size != t2->size) {
7471 i, fn1, t1->size,
7472 fn2, t2->size);
7473 return -EINVAL;
7486 return -EINVAL;
7488 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7489 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7494 return -EINVAL;
7500 return -EINVAL;
7508 s1 = btf_name_by_offset(btf1, t1->name_off);
7509 s2 = btf_name_by_offset(btf2, t2->name_off);
7514 return -EINVAL;
7524 struct btf *btf1 = prog->aux->btf;
7528 if (!prog->aux->func_info) {
7530 return -EINVAL;
7533 btf_id = prog->aux->func_info[0].type_id;
7535 return -EFAULT;
7539 return -EFAULT;
7546 const char *name;
7548 t = btf_type_by_id(btf, t->type); /* skip PTR */
7551 t = btf_type_by_id(btf, t->type);
7556 name = btf_str_by_offset(btf, t->name_off);
7557 return name && strcmp(name, "bpf_dynptr") == 0;
7564 const char *name;
7567 u16 cnt;
7591 type_id = t->type;
7592 t = btf_type_by_id(btf, t->type);
7594 type_id = t->type;
7595 t = btf_type_by_id(btf, t->type);
7603 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7607 if (cc->cnt != 1) {
7609 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7610 cc->cnt == 0 ? "has no matches" : "is ambiguous");
7611 err = cc->cnt == 0 ? -ENOENT : -ESRCH;
7614 if (btf_is_module(cc->cands[0].btf)) {
7616 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7617 err = -EOPNOTSUPP;
7620 kern_type_id = cc->cands[0].id;
7630 enum btf_arg_tag {
7638 /* Process BTF of a function to produce high-level expectation of function
7642 * EFAULT - there is a verifier bug. Abort verification.
7643 * EINVAL - cannot convert BTF.
7644 * 0 - Successfully processed BTF and constructed argument expectations.
7648 bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
7650 struct bpf_verifier_log *log = &env->log;
7651 struct bpf_prog *prog = env->prog;
7652 enum bpf_prog_type prog_type = prog->type;
7653 struct btf *btf = prog->aux->btf;
7659 if (sub->args_cached)
7662 if (!prog->aux->func_info) {
7664 return -EFAULT;
7667 btf_id = prog->aux->func_info[subprog].type_id;
7670 return -EINVAL;
7672 return -EFAULT;
7682 return -EFAULT;
7684 tname = btf_name_by_offset(btf, fn_t->name_off);
7686 if (prog->aux->func_info_aux[subprog].unreliable) {
7688 return -EFAULT;
7691 prog_type = prog->aux->dst_prog->type;
7693 t = btf_type_by_id(btf, fn_t->type);
7696 return -EFAULT;
7702 return -EINVAL;
7705 return -EINVAL;
7708 t = btf_type_by_id(btf, t->type);
7710 t = btf_type_by_id(btf, t->type);
7713 return -EINVAL;
7717 return -EINVAL;
7731 const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7736 return -EOPNOTSUPP;
7751 return -EOPNOTSUPP;
7754 if (id != -ENOENT) {
7761 t = btf_type_by_id(btf, t->type);
7768 return -EINVAL;
7772 prog->expected_attach_type))
7773 return -EINVAL;
7774 sub->args[i].arg_type = ARG_PTR_TO_CTX;
7780 return -EINVAL;
7782 sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
7790 return -EINVAL;
7797 sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
7799 sub->args[i].arg_type |= PTR_MAYBE_NULL;
7800 sub->args[i].btf_id = kern_type_id;
7806 return -EINVAL;
7808 sub->args[i].arg_type = ARG_PTR_TO_ARENA;
7816 return -EINVAL;
7819 t = btf_type_skip_modifiers(btf, t->type, NULL);
7823 i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7825 return -EINVAL;
7828 sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
7830 sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
7831 sub->args[i].mem_size = mem_size;
7838 return -EINVAL;
7841 sub->args[i].arg_type = ARG_ANYTHING;
7845 return -EINVAL;
7848 return -EINVAL;
7851 sub->arg_cnt = nargs;
7852 sub->args_cached = true;
7862 show->btf = btf;
7863 memset(&show->state, 0, sizeof(show->state));
7864 memset(&show->obj, 0, sizeof(show->obj));
7866 btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
7872 seq_vprintf((struct seq_file *)show->target, fmt, args);
7909 len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
7912 ssnprintf->len_left = 0;
7913 ssnprintf->len = len;
7914 } else if (len >= ssnprintf->len_left) {
7916 ssnprintf->len_left = 0;
7917 ssnprintf->len += len;
7919 ssnprintf->len_left -= len;
7920 ssnprintf->len += len;
7921 show->target += len;
7949 const struct btf *btf = filp->private_data;
7951 seq_printf(m, "btf_id:\t%u\n", btf->id);
7957 btf_put(filp->private_data);
8008 refcount_inc(&btf->refcnt);
8025 uinfo = u64_to_user_ptr(attr->info.info);
8026 uinfo_len = attr->info.info_len;
8031 return -EFAULT;
8033 info.id = btf->id;
8035 btf_copy = min_t(u32, btf->data_size, info.btf_size);
8036 if (copy_to_user(ubtf, btf->data, btf_copy))
8037 return -EFAULT;
8038 info.btf_size = btf->data_size;
8040 info.kernel_btf = btf->kernel_btf;
8042 uname = u64_to_user_ptr(info.name);
8045 return -EINVAL;
8047 name_len = strlen(btf->name);
8052 if (copy_to_user(uname, btf->name, name_len + 1))
8053 return -EFAULT;
8057 if (copy_to_user(uname, btf->name, uname_len - 1))
8058 return -EFAULT;
8059 if (put_user(zero, uname + uname_len - 1))
8060 return -EFAULT;
8061 /* let user-space know about too short buffer */
8062 ret = -ENOSPC;
8067 put_user(info_copy, &uattr->info.info_len))
8068 return -EFAULT;
8080 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
8081 btf = ERR_PTR(-ENOENT);
8096 return btf->id;
8101 return btf->kernel_btf;
8106 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
8109 enum {
8135 if (mod->btf_data_size == 0 ||
8144 err = -ENOMEM;
8147 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
8148 mod->btf_base_data, mod->btf_base_data_size);
8153 mod->name, PTR_ERR(btf));
8169 btf_mod->module = module;
8170 btf_mod->btf = btf;
8171 list_add(&btf_mod->list, &btf_modules);
8182 attr->attr.name = btf->name;
8183 attr->attr.mode = 0444;
8184 attr->size = btf->data_size;
8185 attr->private = btf->data;
8186 attr->read_new = sysfs_bin_attr_simple_read;
8191 mod->name, err);
8197 btf_mod->sysfs_attr = attr;
8204 if (btf_mod->module != module)
8207 btf_mod->flags |= BTF_MODULE_F_LIVE;
8215 if (btf_mod->module != module)
8218 list_del(&btf_mod->list);
8219 if (btf_mod->sysfs_attr)
8220 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
8221 purge_cand_cache(btf_mod->btf);
8222 btf_put(btf_mod->btf);
8223 kfree(btf_mod->sysfs_attr);
8255 if (btf_mod->btf != btf)
8263 if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
8264 res = btf_mod->module;
8294 if (btf_mod->module != module)
8297 btf_get(btf_mod->btf);
8298 btf = btf_mod->btf;
8311 return -ENOENT;
8318 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
8325 return -EINVAL;
8327 if (name_sz <= 1 || name[name_sz - 1])
8328 return -EINVAL;
8330 ret = bpf_find_btf_id(name, kind, &btf);
8355 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
8359 /* Validate well-formedness of iter argument type.
8367 const char *name;
8371 return -EINVAL;
8374 t = btf_type_skip_modifiers(btf, arg->type, NULL);
8376 return -EINVAL;
8377 t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8379 return -EINVAL;
8381 name = btf_name_by_offset(btf, t->name_off);
8382 if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
8383 return -EINVAL;
8399 if (!flags || (flags & (flags - 1)))
8400 return -EINVAL;
8405 return -EINVAL;
8415 if (t->size == 0 || (t->size % 8))
8416 return -EINVAL;
8421 iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8431 return -EINVAL;
8435 return -EINVAL;
8439 t = btf_type_skip_modifiers(btf, func->type, NULL);
8441 return -EINVAL;
8446 t = btf_type_by_id(btf, func->type);
8448 return -EINVAL;
8460 /* any kfunc should be FUNC -> FUNC_PROTO */
8463 return -EINVAL;
8465 /* sanity check kfunc name */
8466 func_name = btf_name_by_offset(btf, func->name_off);
8468 return -EINVAL;
8470 func = btf_type_by_id(btf, func->type);
8472 return -EINVAL;
8485 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
8489 struct btf_id_set8 *add_set = kset->set;
8491 bool add_filter = !!kset->filter;
8498 ret = -EINVAL;
8502 if (!add_set->cnt)
8505 tab = btf->kfunc_set_tab;
8510 hook_filter = &tab->hook_filters[hook];
8511 for (i = 0; i < hook_filter->nr_filters; i++) {
8512 if (hook_filter->filters[i] == kset->filter) {
8518 if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
8519 ret = -E2BIG;
8527 return -ENOMEM;
8528 btf->kfunc_set_tab = tab;
8531 set = tab->sets[hook];
8536 ret = -EINVAL;
8544 * hence re-sorting the final set again is required to make binary
8550 set_cnt = set ? set->cnt : 0;
8552 if (set_cnt > U32_MAX - add_set->cnt) {
8553 ret = -EOVERFLOW;
8557 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
8558 ret = -E2BIG;
8563 set = krealloc(tab->sets[hook],
8564 struct_size(set, pairs, set_cnt + add_set->cnt),
8567 ret = -ENOMEM;
8571 /* For newly allocated set, initialize set->cnt to 0 */
8572 if (!tab->sets[hook])
8573 set->cnt = 0;
8574 tab->sets[hook] = set;
8577 memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
8579 for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
8580 set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8582 set->cnt += add_set->cnt;
8584 sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8587 hook_filter = &tab->hook_filters[hook];
8588 hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
8597 enum btf_kfunc_hook hook,
8607 if (!btf->kfunc_set_tab)
8609 hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
8610 for (i = 0; i < hook_filter->nr_filters; i++) {
8611 if (hook_filter->filters[i](prog, kfunc_btf_id))
8614 set = btf->kfunc_set_tab->sets[hook];
8624 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
8675 * protection for looking up a well-formed btf->kfunc_set_tab.
8681 enum bpf_prog_type prog_type = resolve_prog_type(prog);
8682 enum btf_kfunc_hook hook;
8699 static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
8705 btf = btf_get_module_btf(kset->owner);
8707 return check_btf_kconfigs(kset->owner, "kfunc");
8711 for (i = 0; i < kset->set->cnt; i++) {
8712 ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8713 kset->set->pairs[i].flags);
8726 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
8729 enum btf_kfunc_hook hook;
8734 if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
8735 WARN_ON(!kset->owner);
8736 return -EINVAL;
8753 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
8757 return -ENOENT;
8758 /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
8762 dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8764 return -ENOENT;
8765 return dtor->kfunc_btf_id;
8768 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
8775 for (i = 0; i < cnt; i++) {
8780 return -EINVAL;
8782 dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
8784 return -EINVAL;
8787 t = btf_type_by_id(btf, dtor_func_proto->type);
8789 return -EINVAL;
8793 return -EINVAL;
8800 return -EINVAL;
8822 ret = -E2BIG;
8831 tab = btf->dtor_kfunc_tab;
8834 ret = -EINVAL;
8838 tab_cnt = tab ? tab->cnt : 0;
8839 if (tab_cnt > U32_MAX - add_cnt) {
8840 ret = -EOVERFLOW;
8845 ret = -E2BIG;
8849 tab = krealloc(btf->dtor_kfunc_tab,
8853 ret = -ENOMEM;
8857 if (!btf->dtor_kfunc_tab)
8858 tab->cnt = 0;
8859 btf->dtor_kfunc_tab = tab;
8861 memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
8865 tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
8866 tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
8869 tab->cnt += add_cnt;
8871 sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
8884 * type-based CO-RE relocations and follow slightly different rules than
8885 * field-based relocations. This function assumes that root types were already
8886 * checked for name match. Beyond that initial root-level name check, names
8888 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
8891 * - for ENUMs/ENUM64s, the size is ignored;
8892 * - for INT, size and signedness are ignored;
8893 * - for ARRAY, dimensionality is ignored, element types are checked for
8895 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
8896 * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
8897 * - FUNC_PROTOs are compatible if they have compatible signature: same
8900 * more experience with using BPF CO-RE relocations.
8920 /* check X___Y name pattern, where X and Y are not underscores */
8926 size_t bpf_core_essential_name_len(const char *name)
8928 size_t n = strlen(name);
8931 for (i = n - 5; i >= 0; i--) {
8932 if (bpf_core_is_flavor_sep(name + i))
8940 if (!cands->cnt)
8948 kfree(cands->name);
8969 bpf_log(log, "[%d]%s(", i, cc->name);
8970 for (j = 0; j < cc->cnt; j++) {
8971 bpf_log(log, "%d", cc->cands[j].id);
8972 if (j < cc->cnt - 1)
8992 return jhash(cands->name, cands->name_len, 0);
9001 if (cc && cc->name_len == cands->name_len &&
9002 !strncmp(cc->name, cands->name, cands->name_len))
9007 static size_t sizeof_cands(int cnt)
9009 return offsetof(struct bpf_cand_cache, cands[cnt]);
9022 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
9025 return ERR_PTR(-ENOMEM);
9027 /* strdup the name, since it will stay in cache.
9028 * the cands->name points to strings in prog's BTF and the prog can be unloaded.
9030 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
9032 if (!new_cands->name) {
9034 return ERR_PTR(-ENOMEM);
9053 * since new module might have candidates with the name
9063 for (j = 0; j < cc->cnt; j++)
9064 if (cc->cands[j].btf == btf) {
9094 if (btf_kind(t) != cands->kind)
9097 targ_name = btf_name_by_offset(targ_btf, t->name_off);
9102 * for non-existing name will have a chance to schedule().
9106 if (strncmp(cands->name, targ_name, cands->name_len) != 0)
9110 if (targ_essent_len != cands->name_len)
9113 /* most of the time there is only one candidate for a given kind+name pair */
9114 new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
9117 return ERR_PTR(-ENOMEM);
9120 memcpy(new_cands, cands, sizeof_cands(cands->cnt));
9123 cands->cands[cands->cnt].btf = targ_btf;
9124 cands->cands[cands->cnt].id = i;
9125 cands->cnt++;
9134 const struct btf *local_btf = ctx->btf;
9139 const char *name;
9146 return ERR_PTR(-EINVAL);
9150 return ERR_PTR(-EINVAL);
9152 name = btf_name_by_offset(local_btf, local_type->name_off);
9153 if (str_is_empty(name))
9154 return ERR_PTR(-EINVAL);
9155 local_essent_len = bpf_core_essential_name_len(name);
9158 cands->name = name;
9159 cands->kind = btf_kind(local_type);
9160 cands->name_len = local_essent_len;
9165 if (cc->cnt)
9175 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
9177 /* populate cache even when cands->cnt == 0 */
9183 if (cc->cnt)
9187 /* cands is a pointer to stack here and cands->cnt == 0 */
9190 /* if cache has it return it even if cc->cnt == 0 */
9210 /* cands is a pointer to kmalloced memory here if cands->cnt > 0
9211 * or pointer to stack if cands->cnt == 0.
9212 * Copy it into the cache even when cands->cnt == 0 and
9221 bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
9233 return -ENOMEM;
9235 type = btf_type_by_id(ctx->btf, relo->type_id);
9237 bpf_log(ctx->log, "relo #%u: bad type id %u\n",
9238 relo_idx, relo->type_id);
9240 return -EINVAL;
9248 cc = bpf_core_find_cands(ctx, relo->type_id);
9250 bpf_log(ctx->log, "target candidate search failed for %d\n",
9251 relo->type_id);
9255 if (cc->cnt) {
9256 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
9258 err = -ENOMEM;
9262 for (i = 0; i < cc->cnt; i++) {
9263 bpf_log(ctx->log,
9264 "CO-RE relocating %s %s: found target candidate [%d]\n",
9265 btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
9266 cands.cands[i].btf = cc->cands[i].btf;
9267 cands.cands[i].id = cc->cands[i].id;
9269 cands.len = cc->cnt;
9277 err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9282 err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
9290 if (ctx->log->level & BPF_LOG_LEVEL2)
9291 print_cand_cache(ctx->log);
9300 struct btf *btf = reg->btf;
9308 walk_type = btf_type_by_id(btf, reg->btf_id);
9312 tname = btf_name_by_offset(btf, walk_type->name_off);
9318 safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
9327 const char *m_name = __btf_name_by_offset(btf, member->name_off);
9328 const struct btf_type *mtype = btf_type_by_id(btf, member->type);
9334 btf_type_skip_modifiers(btf, mtype->type, &id);
9335 /* If we match on both type and name, the field is considered trusted. */
9350 size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);
9360 reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
9361 arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
9367 * if the strings are the same size, they can't possibly be no-cast
9370 * because they are _not_ no-cast aliases, they are the same type.
9375 /* Either of the two names must be the other name, suffixed with ___init. */
9391 /* ___init suffix must come at the end of the name */
9406 tab = btf->struct_ops_tab;
9410 return -ENOMEM;
9411 tab->capacity = 4;
9412 btf->struct_ops_tab = tab;
9415 for (i = 0; i < tab->cnt; i++)
9416 if (tab->ops[i].st_ops == st_ops)
9417 return -EEXIST;
9419 if (tab->cnt == tab->capacity) {
9421 struct_size(tab, ops, tab->capacity * 2),
9424 return -ENOMEM;
9426 tab->capacity *= 2;
9427 btf->struct_ops_tab = tab;
9430 tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops;
9432 err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
9436 btf->struct_ops_tab->cnt++;
9446 u32 cnt;
9450 if (!btf->struct_ops_tab)
9453 cnt = btf->struct_ops_tab->cnt;
9454 st_ops_list = btf->struct_ops_tab->ops;
9455 for (i = 0; i < cnt; i++) {
9468 u32 cnt;
9472 if (!btf->struct_ops_tab)
9475 cnt = btf->struct_ops_tab->cnt;
9476 st_ops_list = btf->struct_ops_tab->ops;
9477 for (i = 0; i < cnt; i++) {
9491 btf = btf_get_module_btf(st_ops->owner);
9493 return check_btf_kconfigs(st_ops->owner, "struct_ops");
9499 err = -ENOMEM;
9503 log->level = BPF_LOG_KERNEL;
9524 param_name = btf_name_by_offset(btf, arg->name_off);
9530 param_name += len - suffix_len;