1 // SPDX-License-Identifier: GPL-2.0
75 * NOTE that we cannot assume any reference-order.
81 * object describing "void *". This type-reference is done
88 * - Each line started with "[?]" is a btf_type object
89 * - [?] is the type_id of the btf_type object.
90 * - CONST/PTR is the BTF_KIND_XXX
91 * - "(anon)" is the name of the type. It just
93 * - type_id=XXX is the 'u32 type' in btf_type
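For concreteness, here is a hand-rolled sketch (illustrative only, using the UAPI definitions from include/uapi/linux/btf.h) of the two records behind the "const void *" example above; type_id 0 is the implicit "void", and the kind is encoded in bits 24-28 of "info":

#include <linux/btf.h>

/* [1] CONST (anon) type_id=2  -- refers forward to the PTR record
 * [2] PTR   (anon) type_id=0  -- "void *"
 */
static const struct btf_type demo_const_void_ptr[] = {
	{
		.name_off = 0,			/* (anon) */
		.info = BTF_KIND_CONST << 24,	/* kind in bits 24-28 */
		.type = 2,			/* -> [2] */
	},
	{
		.name_off = 0,
		.info = BTF_KIND_PTR << 24,
		.type = 0,			/* -> void */
	},
};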
117 * an array: "btf->types".
127 * check this type-reference in the first pass.
138 * 1) does exist in the BTF (i.e. in btf->types[])
171 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
173 * +-----------------------------------------+
178 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
278 __u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
349 return btf_kind_str[BTF_INFO_KIND(t->info)];
357 * 128-bit int); if we are at the end of our safe buffer and have
378 * One challenge with showing nested data is that we want to skip 0-valued
383 * pass is signalled by show->state.depth_check being set, and if we
384 * encounter a non-zero value we set show->state.depth_to_show to
402 * as we traverse the object's data. skbuff-like semantics are
405 * - obj.head points to the start of the toplevel object for display
406 * - obj.size is the size of the toplevel object
407 * - obj.data points to the current point in the original data at
430 int status; /* non-zero for error */
479 * type through t->type AND its size cannot
480 * be determined without following the t->type.
485 switch (BTF_INFO_KIND(t->info)) {
504 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
509 return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
532 return btf->kernel_btf && !btf->base_btf;
540 total += btf->nr_types;
541 btf = btf->base_btf;
556 if (BTF_INFO_KIND(t->info) != kind)
559 tname = btf_name_by_offset(btf, t->name_off);
564 return -ENOENT;
577 return -EINVAL;
618 id = t->type;
619 t = btf_type_by_id(btf, t->type);
637 return btf_type_skip_modifiers(btf, t->type, res_id);
667 * another type (through member->type).
673 * btf_type_is_array() because its element (array->type)
676 * member-type repeated array->nelems times.
690 /* t->size can be used */
693 switch (BTF_INFO_KIND(t->info)) {
753 return kind_ops[BTF_INFO_KIND(t->info)];
761 while (offset < btf->start_str_off)
762 btf = btf->base_btf;
764 offset -= btf->start_str_off;
765 return offset < btf->hdr.str_len;
780 while (offset < btf->start_str_off)
781 btf = btf->base_btf;
783 offset -= btf->start_str_off;
784 if (offset < btf->hdr.str_len)
785 return &btf->strings[offset];
840 return name ?: "(invalid-name-offset)";
850 while (type_id < btf->start_id)
851 btf = btf->base_btf;
853 type_id -= btf->start_id;
854 if (type_id >= btf->nr_types)
856 return btf->types[type_id];
912 id = m->type;
920 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
921 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
932 BITS_PER_BYTE_MASKED(m->offset) ||
933 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
948 BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
949 t = btf_type_by_id(btf, t->type);
960 * Populate show->state.name with type name information.
974 const struct btf_member *m = show->state.member;
977 u32 id = show->state.type_id;
983 show->state.name[0] = '\0';
990 if (show->state.array_member)
995 member = btf_name_by_offset(show->btf, m->name_off);
997 id = m->type;
1005 * in our show->state points at the resolved type of the typedef.
1007 t = btf_type_by_id(show->btf, id);
1035 switch (BTF_INFO_KIND(t->info)) {
1038 name = btf_name_by_offset(show->btf,
1039 t->name_off);
1041 id = t->type;
1050 array_suffix -= 2;
1051 id = array->type;
1056 ptr_suffix -= 1;
1057 id = t->type;
1065 t = btf_type_skip_qualifiers(show->btf, id);
1072 name = btf_name_by_offset(show->btf, t->name_off);
1074 switch (BTF_INFO_KIND(t->info)) {
1077 prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1102 if (show->flags & BTF_SHOW_NONAME)
1103 snprintf(show->state.name, sizeof(show->state.name), "%s",
1106 snprintf(show->state.name, sizeof(show->state.name),
1121 return show->state.name;
1129 if ((indent - show->state.depth) >= indents)
1130 return indent - show->state.depth;
1136 return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1141 return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1146 if (show->state.depth == 0)
1149 if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1150 BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1160 if (!show->state.depth_check) {
1162 show->showfn(show, fmt, args);
1175 (show->flags & BTF_SHOW_ZERO) || \
1176 show->state.depth == 0) { \
1182 if (show->state.depth > show->state.depth_to_show) \
1183 show->state.depth_to_show = show->state.depth; \
1193 if (show->state.depth > show->state.depth_to_show) \
1194 show->state.depth_to_show = show->state.depth; \
1200 return show->obj.head + show->obj.size - data;
1206 return data >= show->obj.data &&
1207 (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1219 return show->obj.safe + (data - show->obj.data);
1224 * Return a safe-to-access version of data pointed to by @data.
1228 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1247 * We use stack data as opposed to per-CPU buffers because the
1259 if (show->flags & BTF_SHOW_UNSAFE)
1262 rt = btf_resolve_size(show->btf, t, &size);
1264 show->state.status = PTR_ERR(rt);
1273 if (show->state.depth == 0) {
1274 show->obj.size = size;
1275 show->obj.head = data;
1294 * - the current type size is within the safe buffer; or
1295 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1312 show->state.status = copy_from_kernel_nofault(show->obj.safe,
1314 if (!show->state.status) {
1315 show->obj.data = data;
1316 safe = show->obj.safe;
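Stripped of the surrounding state machine, the snapshot step above amounts to the following (an illustrative helper, not a kernel API): take one bounded copy with copy_from_kernel_nofault() and serve all later reads from the local buffer, so pretty-printing itself can never fault:

#include <linux/uaccess.h>

static void *demo_snapshot(void *safe_buf, const void *data, size_t n)
{
	/* copy_from_kernel_nofault() returns 0 on success and a
	 * negative errno for an unreadable kernel address */
	if (copy_from_kernel_nofault(safe_buf, data, n))
		return NULL;	/* caller must avoid dereferencing data */
	return safe_buf;
}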
1331 show->state.type = t;
1332 show->state.type_id = type_id;
1333 show->state.name[0] = '\0';
1340 show->state.type = NULL;
1341 show->state.type_id = 0;
1342 show->state.name[0] = '\0';
1357 show->state.depth++;
1364 show->state.depth--;
1373 show->state.member = m;
1378 show->state.array_member = 1;
1384 show->state.member = NULL;
1389 show->state.array_member = 0;
1399 show->state.array_encoding = array_encoding;
1400 show->state.array_terminated = 0;
1406 show->state.array_encoding = 0;
1407 show->state.array_terminated = 0;
1437 struct bpf_verifier_log *log = &env->log;
1453 struct bpf_verifier_log *log = &env->log;
1454 struct btf *btf = env->btf;
1460 if (log->level == BPF_LOG_KERNEL) {
1463 * Skip those prints for in-kernel BTF verification.
1469 if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1474 env->log_type_id,
1476 __btf_name_by_offset(btf, t->name_off),
1480 btf_type_ops(t)->log_details(env, t);
1503 struct bpf_verifier_log *log = &env->log;
1504 struct btf *btf = env->btf;
1510 if (log->level == BPF_LOG_KERNEL) {
1515 if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1525 if (env->phase != CHECK_META)
1531 __btf_name_by_offset(btf, member->name_off),
1532 member->type,
1533 BTF_MEMBER_BITFIELD_SIZE(member->offset),
1534 BTF_MEMBER_BIT_OFFSET(member->offset));
1537 __btf_name_by_offset(btf, member->name_off),
1538 member->type, member->offset);
1556 struct bpf_verifier_log *log = &env->log;
1561 if (log->level == BPF_LOG_KERNEL && !fmt)
1563 if (env->phase != CHECK_META)
1567 vsi->type, vsi->offset, vsi->size);
1581 struct bpf_verifier_log *log = &env->log;
1582 const struct btf *btf = env->btf;
1588 if (log->level == BPF_LOG_KERNEL)
1590 hdr = &btf->hdr;
1591 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1592 __btf_verifier_log(log, "version: %u\n", hdr->version);
1593 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1594 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1595 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1596 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1597 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1598 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
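For reference, these prints walk the fields of struct btf_header in declaration order; the layout (from include/uapi/linux/btf.h) is:

struct btf_header {
	__u16	magic;		/* BTF_MAGIC (0xeB9F) */
	__u8	version;
	__u8	flags;
	__u32	hdr_len;
	/* All offsets are in bytes relative to the end of this header */
	__u32	type_off;	/* offset of type section	*/
	__u32	type_len;	/* length of type section	*/
	__u32	str_off;	/* offset of string section	*/
	__u32	str_len;	/* length of string section	*/
};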
1604 struct btf *btf = env->btf;
1606 if (btf->types_size == btf->nr_types) {
1612 if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1614 return -E2BIG;
1617 expand_by = max_t(u32, btf->types_size >> 2, 16);
1619 btf->types_size + expand_by);
1624 return -ENOMEM;
1626 if (btf->nr_types == 0) {
1627 if (!btf->base_btf) {
1630 btf->nr_types++;
1633 memcpy(new_types, btf->types,
1634 sizeof(*btf->types) * btf->nr_types);
1637 kvfree(btf->types);
1638 btf->types = new_types;
1639 btf->types_size = new_size;
1642 btf->types[btf->nr_types++] = t;
1655 btf->id = id;
1660 return -ENOSPC;
1670 * In map-in-map, calling map_delete_elem() on outer
1679 idr_remove(&btf_idr, btf->id);
1685 struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1690 for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1691 kfree(tab->sets[hook]);
1693 btf->kfunc_set_tab = NULL;
1698 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1703 btf->dtor_kfunc_tab = NULL;
1712 for (i = 0; i < tab->cnt; i++)
1713 btf_record_free(tab->types[i].record);
1719 struct btf_struct_metas *tab = btf->struct_meta_tab;
1722 btf->struct_meta_tab = NULL;
1727 struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
1733 for (i = 0; i < tab->cnt; i++)
1734 bpf_struct_ops_desc_release(&tab->ops[i]);
1737 btf->struct_ops_tab = NULL;
1746 kvfree(btf->types);
1747 kvfree(btf->resolved_sizes);
1748 kvfree(btf->resolved_ids);
1749 /* vmlinux does not allocate btf->data, it simply points it at
1753 kvfree(btf->data);
1754 kvfree(btf->base_id_map);
1767 return btf->name;
1772 refcount_inc(&btf->refcnt);
1777 if (btf && refcount_dec_and_test(&btf->refcnt)) {
1779 call_rcu(&btf->rcu, btf_free_rcu);
1785 return btf->base_btf;
1790 return &btf->hdr;
1795 btf->base_btf = (struct btf *)base_btf;
1796 btf->start_id = btf_nr_types(base_btf);
1797 btf->start_str_off = base_btf->hdr.str_len;
1802 struct btf *btf = env->btf;
1803 u32 nr_types = btf->nr_types;
1823 btf->resolved_sizes = resolved_sizes;
1824 btf->resolved_ids = resolved_ids;
1825 env->visit_states = visit_states;
1833 return -ENOMEM;
1838 kvfree(env->visit_states);
1845 switch (env->resolve_mode) {
1871 if (type_id < env->btf->start_id)
1874 return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1880 const struct btf *btf = env->btf;
1883 if (env->top_stack == MAX_RESOLVE_DEPTH)
1884 return -E2BIG;
1886 if (type_id < btf->start_id
1887 || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1888 return -EEXIST;
1890 env->visit_states[type_id - btf->start_id] = VISITED;
1892 v = &env->stack[env->top_stack++];
1893 v->t = t;
1894 v->type_id = type_id;
1895 v->next_member = 0;
1897 if (env->resolve_mode == RESOLVE_TBD) {
1899 env->resolve_mode = RESOLVE_PTR;
1901 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1910 env->stack[env->top_stack - 1].next_member = next_member;
1917 u32 type_id = env->stack[--(env->top_stack)].type_id;
1918 struct btf *btf = env->btf;
1920 type_id -= btf->start_id; /* adjust to local type id */
1921 btf->resolved_sizes[type_id] = resolved_size;
1922 btf->resolved_ids[type_id] = resolved_type_id;
1923 env->visit_states[type_id] = RESOLVED;
1928 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1931 /* Resolve the size of a passed-in "type"
1961 switch (BTF_INFO_KIND(type->info)) {
1962 /* type->size can be used */
1969 size = type->size;
1982 id = type->type;
1983 type = btf_type_by_id(btf, type->type);
1990 if (nelems && array->nelems > U32_MAX / nelems)
1991 return ERR_PTR(-EINVAL);
1992 nelems *= array->nelems;
1993 type = btf_type_by_id(btf, array->type);
1998 return ERR_PTR(-EINVAL);
2002 return ERR_PTR(-EINVAL);
2006 return ERR_PTR(-EINVAL);
2014 *elem_id = array ? array->type : 0;
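A hedged sketch of how callers consume the resolver (the btf_show code earlier does exactly this dance): btf_resolve_size() hands back the concrete type after skipping modifiers/typedefs and fills in the byte size, with arrays contributing elem_size * nelems:

/* demo_size_of() is illustrative, not kernel code */
static int demo_size_of(const struct btf *btf, const struct btf_type *t)
{
	const struct btf_type *rt;
	u32 size = 0;

	rt = btf_resolve_size(btf, t, &size);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	/* rt is the concrete type; size is its total byte size,
	 * i.e. elem_size * nelems when an array was resolved */
	return size;
}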
2030 while (type_id < btf->start_id)
2031 btf = btf->base_btf;
2033 return btf->resolved_ids[type_id - btf->start_id];
2046 while (type_id < btf->start_id)
2047 btf = btf->base_btf;
2049 return btf->resolved_sizes[type_id - btf->start_id];
2064 size = size_type->size;
2079 size = size_type->size;
2102 return -EINVAL;
2112 return -EINVAL;
2123 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2126 return -EINVAL;
2129 /* bitfield size is 0, so member->offset represents bit offset only.
2132 return btf_type_ops(member_type)->check_member(env, struct_type,
2140 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2141 return -EINVAL;
2148 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2157 u32 struct_bits_off = member->offset;
2158 u32 struct_size = struct_type->size;
2162 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2165 return -EINVAL;
2176 return -EINVAL;
2180 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2183 return -EINVAL;
2196 u32 struct_size = struct_type->size;
2203 return -EINVAL;
2207 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2208 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2217 return -EINVAL;
2224 return -EINVAL;
2232 return -EINVAL;
2236 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2239 return -EINVAL;
2256 return -EINVAL;
2261 return -EINVAL;
2266 return -EINVAL;
2273 return -EINVAL;
2281 return -EINVAL;
2284 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2286 return -EINVAL;
2301 return -ENOTSUPP;
2316 t->size, BTF_INT_OFFSET(int_data),
2359 /* shake out unneeded bits by shift/or operations */
2361 upper_num = lower_num << (left_shift_bits - 64);
2365 (lower_num >> (64 - left_shift_bits));
2370 lower_num = upper_num >> (right_shift_bits - 64);
2374 (upper_num << (64 - right_shift_bits));
2403 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2405 right_shift_bits = BITS_PER_U128 - nr_bits;
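The pair of 128-bit shifts above is the wide version of the usual bitfield shake-out. As a minimal 64-bit analogue (assuming LSB-first bit numbering, an unsigned field, and 1 <= nr_bits, bit_offset + nr_bits <= 64):

static u64 demo_get_bits(u64 val, u8 bit_offset, u8 nr_bits)
{
	val <<= 64 - (bit_offset + nr_bits);	/* drop bits above the field */
	return val >> (64 - nr_bits);		/* drop bits below, zero-fill */
}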
2474 if (show->state.array_encoding == BTF_INT_CHAR) {
2476 if (show->state.array_terminated)
2479 show->state.array_terminated = 1;
2516 u32 resolved_type_id = member->type;
2518 struct btf *btf = env->btf;
2524 return -EINVAL;
2530 return btf_type_ops(resolved_type)->check_member(env, struct_type,
2541 u32 resolved_type_id = member->type;
2543 struct btf *btf = env->btf;
2549 return -EINVAL;
2555 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2567 struct_size = struct_type->size;
2568 struct_bits_off = member->offset;
2574 return -EINVAL;
2577 if (struct_size - bytes_offset < sizeof(void *)) {
2580 return -EINVAL;
2594 return -EINVAL;
2599 return -EINVAL;
2602 if (!BTF_TYPE_ID_VALID(t->type)) {
2604 return -EINVAL;
2610 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2611 if (!t->name_off ||
2612 !btf_name_valid_identifier(env->btf, t->name_off)) {
2614 return -EINVAL;
2616 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2617 value = btf_name_by_offset(env->btf, t->name_off);
2620 return -EINVAL;
2623 if (t->name_off) {
2625 return -EINVAL;
2637 const struct btf_type *t = v->t;
2639 u32 next_type_id = t->type;
2640 struct btf *btf = env->btf;
2644 btf_verifier_log_type(env, v->t, "Invalid type_id");
2645 return -EINVAL;
2655 * save us some type-following when we use it later (e.g. in
2666 btf_verifier_log_type(env, v->t, "Invalid type_id");
2667 return -EINVAL;
2680 const struct btf_type *t = v->t;
2681 u32 next_type_id = t->type;
2682 struct btf *btf = env->btf;
2686 btf_verifier_log_type(env, v->t, "Invalid type_id");
2687 return -EINVAL;
2713 btf_verifier_log_type(env, v->t, "Invalid type_id");
2714 return -EINVAL;
2726 const struct btf_type *t = v->t;
2727 u32 next_type_id = t->type;
2728 struct btf *btf = env->btf;
2732 btf_verifier_log_type(env, v->t, "Invalid type_id");
2733 return -EINVAL;
2742 * to a ptr (last-resolved-ptr).
2744 * We now need to continue from the last-resolved-ptr to
2745 * ensure the last-resolved-ptr will not refer back to
2769 btf_verifier_log_type(env, v->t, "Invalid type_id");
2770 return -EINVAL;
2784 if (btf->resolved_ids)
2789 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2798 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2812 if (show->flags & BTF_SHOW_PTR_RAW)
2822 btf_verifier_log(env, "type_id=%u", t->type);
2849 return -EINVAL;
2852 if (t->type) {
2854 return -EINVAL;
2858 if (!t->name_off ||
2859 !btf_name_valid_identifier(env->btf, t->name_off)) {
2861 return -EINVAL;
2889 u32 struct_bits_off = member->offset;
2892 struct btf *btf = env->btf;
2897 return -EINVAL;
2900 array_type_id = member->type;
2902 struct_size = struct_type->size;
2904 if (struct_size - bytes_offset < array_size) {
2907 return -EINVAL;
2924 return -EINVAL;
2928 if (t->name_off) {
2930 return -EINVAL;
2935 return -EINVAL;
2940 return -EINVAL;
2943 if (t->size) {
2945 return -EINVAL;
2949 * so !array->type and !array->index_type are not allowed.
2951 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2953 return -EINVAL;
2956 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2958 return -EINVAL;
2969 const struct btf_array *array = btf_type_array(v->t);
2972 struct btf *btf = env->btf;
2975 /* Check array->index_type */
2976 index_type_id = array->index_type;
2980 btf_verifier_log_type(env, v->t, "Invalid index");
2981 return -EINVAL;
2991 btf_verifier_log_type(env, v->t, "Invalid index");
2992 return -EINVAL;
2995 /* Check array->type */
2996 elem_type_id = array->type;
3000 btf_verifier_log_type(env, v->t,
3002 return -EINVAL;
3011 btf_verifier_log_type(env, v->t, "Invalid elem");
3012 return -EINVAL;
3016 btf_verifier_log_type(env, v->t, "Invalid array of int");
3017 return -EINVAL;
3020 if (array->nelems && elem_size > U32_MAX / array->nelems) {
3021 btf_verifier_log_type(env, v->t,
3023 return -EINVAL;
3026 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
3037 array->type, array->index_type, array->nelems);
3050 elem_type_id = array->type;
3053 elem_size = elem_type->size;
3076 for (i = 0; i < array->nelems; i++) {
3080 elem_ops->show(btf, elem_type, elem_type_id, data,
3086 if (show->state.array_terminated)
3097 const struct btf_member *m = show->state.member;
3100 * First check if any members would be shown (are non-zero).
3102 * details on how this works at a high-level.
3104 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3105 if (!show->state.depth_check) {
3106 show->state.depth_check = show->state.depth + 1;
3107 show->state.depth_to_show = 0;
3110 show->state.member = m;
3112 if (show->state.depth_check != show->state.depth + 1)
3114 show->state.depth_check = 0;
3116 if (show->state.depth_to_show <= show->state.depth)
3120 * non-zero array member(s).
3140 u32 struct_bits_off = member->offset;
3146 return -EINVAL;
3149 struct_size = struct_type->size;
3151 if (struct_size - bytes_offset < member_type->size) {
3154 return -EINVAL;
3164 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3167 struct btf *btf = env->btf;
3168 u32 struct_size = t->size;
3177 return -EINVAL;
3181 if (t->name_off &&
3182 !btf_name_valid_identifier(env->btf, t->name_off)) {
3184 return -EINVAL;
3191 if (!btf_name_offset_valid(btf, member->name_off)) {
3194 member->name_off);
3195 return -EINVAL;
3199 if (member->name_off &&
3200 !btf_name_valid_identifier(btf, member->name_off)) {
3202 return -EINVAL;
3205 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3208 return -EINVAL;
3215 return -EINVAL;
3225 return -EINVAL;
3231 return -EINVAL;
3252 if (v->next_member) {
3257 last_member = btf_type_member(v->t) + v->next_member - 1;
3258 last_member_type_id = last_member->type;
3261 return -EINVAL;
3263 last_member_type = btf_type_by_id(env->btf,
3265 if (btf_type_kflag(v->t))
3266 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3270 err = btf_type_ops(last_member_type)->check_member(env, v->t,
3277 for_each_member_from(i, v->next_member, v->t, member) {
3278 u32 member_type_id = member->type;
3279 const struct btf_type *member_type = btf_type_by_id(env->btf,
3284 btf_verifier_log_member(env, v->t, member,
3286 return -EINVAL;
3295 if (btf_type_kflag(v->t))
3296 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3300 err = btf_type_ops(member_type)->check_member(env, v->t,
3315 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3343 if (t->size != sz)
3345 info->type = field_type;
3346 info->off = off;
3360 t = btf_type_by_id(btf, t->type);
3364 t = btf_type_by_id(btf, t->type);
3369 if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3370 return -EINVAL;
3371 tag_value = __btf_name_by_offset(btf, t->name_off);
3381 return -EINVAL;
3387 t = btf_type_skip_modifiers(btf, t->type, &res_id);
3390 return -EINVAL;
3392 info->type = type;
3393 info->off = off;
3394 info->kptr.type_id = res_id;
3409 if (pt != btf_type_by_id(btf, t->type))
3411 if (btf_type_decl_tag(t)->component_idx != comp_idx)
3413 if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3417 return -ENOENT;
3433 value = __btf_name_by_offset(btf, t->name_off) + len;
3438 return ERR_PTR(-EEXIST);
3455 if (t->size != sz)
3459 return -EINVAL;
3462 return -EINVAL;
3463 value_type = kstrndup(value_type, node_field_name - value_type,
3466 return -ENOMEM;
3473 return -EINVAL;
3474 info->type = head_type;
3475 info->off = off;
3476 info->graph_root.value_btf_id = id;
3477 info->graph_root.node_name = node_field_name;
3501 const char *name = __btf_name_by_offset(btf, var_type->name_off);
3514 return -E2BIG;
3558 return -EINVAL;
3566 return -E2BIG;
3598 return -E2BIG;
3612 err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3641 nelems *= array->nelems;
3642 var_type = btf_type_by_id(btf, array->type);
3645 return -E2BIG;
3653 sz = var_type->size;
3704 return -EFAULT;
3710 return -E2BIG;
3712 ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3730 member->type);
3735 return -EINVAL;
3741 &info[idx], info_cnt - idx, level);
3758 const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3759 const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3761 off = vsi->offset;
3762 ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
3764 &info[idx], info_cnt - idx,
3781 return -EINVAL;
3800 t = btf_type_by_id(btf, info->kptr.type_id);
3801 id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3803 if (id == -ENOENT) {
3810 field->kptr.dtor = NULL;
3811 id = info->kptr.type_id;
3821 if (info->type == BPF_KPTR_REF) {
3839 ret = -ENOENT;
3846 ret = -ENXIO;
3854 dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
3857 ret = -EINVAL;
3860 field->kptr.dtor = (void *)addr;
3864 field->kptr.btf_id = id;
3865 field->kptr.btf = kptr_btf;
3866 field->kptr.module = mod;
3886 t = btf_type_by_id(btf, info->graph_root.value_btf_id);
3892 if (strcmp(info->graph_root.node_name,
3893 __btf_name_by_offset(btf, member->name_off)))
3897 return -EINVAL;
3898 n = btf_type_by_id(btf, member->type);
3900 return -EINVAL;
3901 if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
3902 return -EINVAL;
3905 return -EINVAL;
3908 return -EINVAL;
3910 field->graph_root.btf = (struct btf *)btf;
3911 field->graph_root.value_btf_id = info->graph_root.value_btf_id;
3912 field->graph_root.node_offset = offset;
3915 return -ENOENT;
3938 if (a->offset < b->offset)
3939 return -1;
3940 else if (a->offset > b->offset)
3965 return ERR_PTR(-ENOMEM);
3967 rec->spin_lock_off = -EINVAL;
3968 rec->res_spin_lock_off = -EINVAL;
3969 rec->timer_off = -EINVAL;
3970 rec->wq_off = -EINVAL;
3971 rec->refcount_off = -EINVAL;
3972 rec->task_work_off = -EINVAL;
3977 ret = -EFAULT;
3981 ret = -EEXIST;
3986 rec->field_mask |= info_arr[i].type;
3987 rec->fields[i].offset = info_arr[i].off;
3988 rec->fields[i].type = info_arr[i].type;
3989 rec->fields[i].size = field_type_size;
3993 WARN_ON_ONCE(rec->spin_lock_off >= 0);
3995 rec->spin_lock_off = rec->fields[i].offset;
3998 WARN_ON_ONCE(rec->spin_lock_off >= 0);
4000 rec->res_spin_lock_off = rec->fields[i].offset;
4003 WARN_ON_ONCE(rec->timer_off >= 0);
4005 rec->timer_off = rec->fields[i].offset;
4008 WARN_ON_ONCE(rec->wq_off >= 0);
4010 rec->wq_off = rec->fields[i].offset;
4013 WARN_ON_ONCE(rec->task_work_off >= 0);
4014 rec->task_work_off = rec->fields[i].offset;
4017 WARN_ON_ONCE(rec->refcount_off >= 0);
4019 rec->refcount_off = rec->fields[i].offset;
4025 ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
4030 ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4035 ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4043 ret = -EFAULT;
4046 rec->cnt++;
4049 if (rec->spin_lock_off >= 0 && rec->res_spin_lock_off >= 0) {
4050 ret = -EINVAL;
4057 (rec->spin_lock_off < 0 && rec->res_spin_lock_off < 0)) {
4058 ret = -EINVAL;
4062 if (rec->refcount_off < 0 &&
4065 ret = -EINVAL;
4069 sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
4090 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4092 for (i = 0; i < rec->cnt; i++) {
4097 if (rec->fields[i].type == BPF_UPTR) {
4101 if (btf_is_kernel(rec->fields[i].kptr.btf))
4102 return -EINVAL;
4103 t = btf_type_by_id(rec->fields[i].kptr.btf,
4104 rec->fields[i].kptr.btf_id);
4105 if (!t->size)
4106 return -EINVAL;
4107 if (t->size > PAGE_SIZE)
4108 return -E2BIG;
4112 if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
4114 btf_id = rec->fields[i].graph_root.value_btf_id;
4117 return -EFAULT;
4118 rec->fields[i].graph_root.value_rec = meta->record;
4124 if (!(rec->field_mask & BPF_GRAPH_NODE))
4133 * - A type can only be owned by another type in user BTF if it
4135 * - A type can only _own_ another type in user BTF if it has a
4145 * A -> B -> C
4147 * - A is a root, e.g. has bpf_rb_root.
4148 * - B is both a root and node, e.g. has bpf_rb_node and
4150 * - C is only a node, e.g. has bpf_list_node
4155 * A -> B
4157 * - A is both a root and node.
4158 * - B is only a node.
4160 if (meta->record->field_mask & BPF_GRAPH_ROOT)
4161 return -ELOOP;
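In BPF-program terms the permitted A -> B shape above looks like the following (names are illustrative; __contains is the decl-tag macro from the selftests' bpf_experimental.h):

struct demo_node {			/* "B": only a node */
	struct bpf_list_node node;
	int payload;
};

struct demo_root {			/* "A": only a root */
	struct bpf_spin_lock lock;
	struct bpf_list_head head __contains(demo_node, node);
};

/* Giving demo_node a bpf_list_head of demo_root values would close
 * an ownership cycle, which is what the -ELOOP above rejects. */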
4180 member->type);
4194 member->type,
4203 ops->show(btf, member_type, member->type,
4217 const struct btf_member *m = show->state.member;
4220 * First check if any members would be shown (are non-zero).
4222 * details on how this works at a high-level.
4224 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
4225 if (!show->state.depth_check) {
4226 show->state.depth_check = show->state.depth + 1;
4227 show->state.depth_to_show = 0;
4231 show->state.member = m;
4232 if (show->state.depth_check != show->state.depth + 1)
4234 show->state.depth_check = 0;
4236 if (show->state.depth_to_show <= show->state.depth)
4240 * non-zero child values.
4261 u32 struct_bits_off = member->offset;
4267 return -EINVAL;
4270 struct_size = struct_type->size;
4272 if (struct_size - bytes_offset < member_type->size) {
4275 return -EINVAL;
4289 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
4290 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
4295 return -EINVAL;
4302 return -EINVAL;
4305 struct_size = struct_type->size;
4310 return -EINVAL;
4321 struct btf *btf = env->btf;
4333 return -EINVAL;
4336 if (t->size > 8 || !is_power_of_2(t->size)) {
4338 return -EINVAL;
4342 if (t->name_off &&
4343 !btf_name_valid_identifier(env->btf, t->name_off)) {
4345 return -EINVAL;
4354 return -EINVAL;
4361 return -EINVAL;
4364 if (env->log.level == BPF_LOG_KERNEL)
4378 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4429 struct btf *btf = env->btf;
4441 return -EINVAL;
4444 if (t->size > 8 || !is_power_of_2(t->size)) {
4446 return -EINVAL;
4450 if (t->name_off &&
4451 !btf_name_valid_identifier(env->btf, t->name_off)) {
4453 return -EINVAL;
4462 return -EINVAL;
4469 return -EINVAL;
4472 if (env->log.level == BPF_LOG_KERNEL)
4537 return -EINVAL;
4540 if (t->name_off) {
4542 return -EINVAL;
4547 return -EINVAL;
4561 btf_verifier_log(env, "return=%u args=(", t->type);
4574 __btf_name_by_offset(env->btf,
4576 for (i = 1; i < nr_args - 1; i++)
4578 __btf_name_by_offset(env->btf,
4582 const struct btf_param *last_arg = &args[nr_args - 1];
4584 if (last_arg->type)
4585 btf_verifier_log(env, ", %u %s", last_arg->type,
4586 __btf_name_by_offset(env->btf,
4587 last_arg->name_off));
4604 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4618 if (!t->name_off ||
4619 !btf_name_valid_identifier(env->btf, t->name_off)) {
4621 return -EINVAL;
4626 return -EINVAL;
4631 return -EINVAL;
4642 const struct btf_type *t = v->t;
4643 u32 next_type_id = t->type;
4674 return -EINVAL;
4679 return -EINVAL;
4684 return -EINVAL;
4687 if (!t->name_off ||
4688 !btf_name_valid_identifier(env->btf, t->name_off)) {
4690 return -EINVAL;
4694 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4696 return -EINVAL;
4700 if (var->linkage != BTF_VAR_STATIC &&
4701 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4703 return -EINVAL;
4715 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4740 return -EINVAL;
4743 if (!t->size) {
4745 return -EINVAL;
4750 return -EINVAL;
4753 if (!t->name_off ||
4754 !btf_name_valid_section(env->btf, t->name_off)) {
4756 return -EINVAL;
4763 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4766 return -EINVAL;
4769 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4772 return -EINVAL;
4775 if (!vsi->size || vsi->size > t->size) {
4778 return -EINVAL;
4781 last_vsi_end_off = vsi->offset + vsi->size;
4782 if (last_vsi_end_off > t->size) {
4785 return -EINVAL;
4789 sum += vsi->size;
4792 if (t->size < sum) {
4794 return -EINVAL;
4804 struct btf *btf = env->btf;
4807 env->resolve_mode = RESOLVE_TBD;
4808 for_each_vsi_from(i, v->next_member, v->t, vsi) {
4809 u32 var_type_id = vsi->type, type_id, type_size = 0;
4810 const struct btf_type *var_type = btf_type_by_id(env->btf,
4813 btf_verifier_log_vsi(env, v->t, vsi,
4815 return -EINVAL;
4824 type_id = var_type->type;
4826 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4827 return -EINVAL;
4830 if (vsi->size < type_size) {
4831 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4832 return -EINVAL;
4843 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4859 __btf_name_by_offset(btf, t->name_off));
4861 var = btf_type_by_id(btf, vsi->type);
4864 btf_type_ops(var)->show(btf, var, vsi->type,
4865 data + vsi->offset, bits_offset, show);
4885 return -EINVAL;
4890 return -EINVAL;
4893 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4894 t->size != 16) {
4896 return -EINVAL;
4917 * that types after CO-RE can pass the kernel BTF verifier.
4919 align_bytes = min_t(u64, sizeof(void *), member_type->size);
4921 div64_u64_rem(member->offset, align_bits, &misalign_bits);
4925 return -EINVAL;
4928 start_offset_bytes = member->offset / BITS_PER_BYTE;
4929 end_offset_bytes = start_offset_bytes + member_type->size;
4930 if (end_offset_bytes > struct_type->size) {
4933 return -EINVAL;
4942 btf_verifier_log(env, "size=%u", t->size);
4967 return -EINVAL;
4970 value = btf_name_by_offset(env->btf, t->name_off);
4973 return -EINVAL;
4978 return -EINVAL;
4981 component_idx = btf_type_decl_tag(t)->component_idx;
4982 if (component_idx < -1) {
4984 return -EINVAL;
4996 const struct btf_type *t = v->t;
4997 u32 next_type_id = t->type;
4998 struct btf *btf = env->btf;
5004 btf_verifier_log_type(env, v->t, "Invalid type_id");
5005 return -EINVAL;
5012 component_idx = btf_type_decl_tag(t)->component_idx;
5013 if (component_idx != -1) {
5015 btf_verifier_log_type(env, v->t, "Invalid component_idx");
5016 return -EINVAL;
5023 next_type = btf_type_by_id(btf, next_type->type);
5028 btf_verifier_log_type(env, v->t, "Invalid component_idx");
5029 return -EINVAL;
5040 btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5041 btf_type_decl_tag(t)->component_idx);
5062 btf = env->btf;
5066 /* Check func return type which could be "void" (t->type == 0) */
5067 if (t->type) {
5068 u32 ret_type_id = t->type;
5073 return -EINVAL;
5078 return -EINVAL;
5091 return -EINVAL;
5099 if (!args[nr_args - 1].type) {
5100 if (args[nr_args - 1].name_off) {
5103 return -EINVAL;
5105 nr_args--;
5116 return -EINVAL;
5121 return -EINVAL;
5129 return -EINVAL;
5141 return -EINVAL;
5156 btf = env->btf;
5157 proto_type = btf_type_by_id(btf, t->type);
5161 return -EINVAL;
5169 return -EINVAL;
5207 env->log_type_id, meta_left, sizeof(*t));
5208 return -EINVAL;
5210 meta_left -= sizeof(*t);
5212 if (t->info & ~BTF_INFO_MASK) {
5214 env->log_type_id, t->info);
5215 return -EINVAL;
5218 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
5219 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
5221 env->log_type_id, BTF_INFO_KIND(t->info));
5222 return -EINVAL;
5225 if (!btf_name_offset_valid(env->btf, t->name_off)) {
5227 env->log_type_id, t->name_off);
5228 return -EINVAL;
5231 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
5235 meta_left -= var_meta_size;
5237 return saved_meta_left - meta_left;
5242 struct btf *btf = env->btf;
5246 hdr = &btf->hdr;
5247 cur = btf->nohdr_data + hdr->type_off;
5248 end = cur + hdr->type_len;
5250 env->log_type_id = btf->base_btf ? btf->start_id : 1;
5255 meta_size = btf_check_meta(env, t, end - cur);
5261 env->log_type_id++;
5271 struct btf *btf = env->btf;
5296 u32 elem_type_id = array->type;
5301 (array->nelems * elem_size ==
5311 u32 save_log_type_id = env->log_type_id;
5315 env->resolve_mode = RESOLVE_TBD;
5318 env->log_type_id = v->type_id;
5319 err = btf_type_ops(v->t)->resolve(env, v);
5322 env->log_type_id = type_id;
5323 if (err == -E2BIG) {
5327 } else if (err == -EEXIST) {
5334 err = -EINVAL;
5337 env->log_type_id = save_log_type_id;
5343 struct btf *btf = env->btf;
5352 env->phase++;
5353 for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5354 type_id = btf->start_id + i;
5357 env->log_type_id = type_id;
5377 const struct btf_header *hdr = &env->btf->hdr;
5381 if (hdr->type_off & (sizeof(u32) - 1)) {
5383 return -EINVAL;
5386 if (!env->btf->base_btf && !hdr->type_len) {
5388 return -EINVAL;
5401 struct btf *btf = env->btf;
5404 hdr = &btf->hdr;
5405 start = btf->nohdr_data + hdr->str_off;
5406 end = start + hdr->str_len;
5408 if (end != btf->data + btf->data_size) {
5410 return -EINVAL;
5413 btf->strings = start;
5415 if (btf->base_btf && !hdr->str_len)
5417 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5419 return -EINVAL;
5421 if (!btf->base_btf && start[0]) {
5423 return -EINVAL;
5439 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5450 btf = env->btf;
5451 hdr = &btf->hdr;
5463 expected_total = btf_data_size - hdr->hdr_len;
5467 return -EINVAL;
5472 return -EINVAL;
5476 return -EINVAL;
5478 if (expected_total - total < secs[i].len) {
5481 return -EINVAL;
5489 return -EINVAL;
5501 btf = env->btf;
5502 btf_data_size = btf->data_size;
5506 return -EINVAL;
5509 hdr = btf->data;
5510 hdr_len = hdr->hdr_len;
5513 return -EINVAL;
5517 if (hdr_len > sizeof(btf->hdr)) {
5518 u8 *expected_zero = btf->data + sizeof(btf->hdr);
5519 u8 *end = btf->data + hdr_len;
5524 return -E2BIG;
5529 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5530 memcpy(&btf->hdr, btf->data, hdr_copy);
5532 hdr = &btf->hdr;
5536 if (hdr->magic != BTF_MAGIC) {
5538 return -EINVAL;
5541 if (hdr->version != BTF_VERSION) {
5543 return -ENOTSUPP;
5546 if (hdr->flags) {
5548 return -ENOTSUPP;
5551 if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5553 return -EINVAL;
5580 return ERR_PTR(-ENOMEM);
5581 aof->cnt = 0;
5594 new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5597 ret = -ENOMEM;
5601 aof->ids[aof->cnt++] = id;
5613 ret = -EINVAL;
5621 new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5624 ret = -ENOMEM;
5628 aof->ids[aof->cnt++] = i;
5631 if (!aof->cnt) {
5635 sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5652 if (btf_id_set_contains(aof, member->type))
5657 tab_cnt = tab ? tab->cnt : 0;
5661 ret = -ENOMEM;
5665 new_tab->cnt = 0;
5668 type = &tab->types[tab->cnt];
5669 type->btf_id = i;
5672 BPF_KPTR, t->size);
5675 ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5678 type->record = record;
5679 tab->cnt++;
5695 tab = btf->struct_meta_tab;
5698 return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5704 int i, n, good_id = start_id - 1;
5715 return -EINVAL;
5723 if (!chain_limit--) {
5725 return -ELOOP;
5730 return -EINVAL;
5738 cur_id = t->type;
5741 return -EINVAL;
5758 err = -EFAULT;
5765 bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5766 char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
5773 if (attr->btf_size > BTF_MAX_SIZE)
5774 return ERR_PTR(-E2BIG);
5778 return ERR_PTR(-ENOMEM);
5783 err = bpf_vlog_init(&env->log, attr->btf_log_level,
5784 log_ubuf, attr->btf_log_size);
5790 err = -ENOMEM;
5793 env->btf = btf;
5795 data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5797 err = -ENOMEM;
5801 btf->data = data;
5802 btf->data_size = attr->btf_size;
5804 if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5805 err = -EFAULT;
5813 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5827 struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
5832 btf->struct_meta_tab = struct_meta_tab;
5837 for (i = 0; i < struct_meta_tab->cnt; i++) {
5838 err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5844 err = finalize_log(&env->log, uattr, uattr_size);
5849 refcount_set(&btf->refcnt, 1);
5855 /* overwrite err with -ENOSPC or -EFAULT */
5856 ret = finalize_log(&env->log, uattr, uattr_size);
5913 return btf_type_by_id(btf_vmlinux, ctx_type->type);
5923 return -EFAULT;
5929 return ctx_type->type;
5948 t = btf_type_by_id(btf, t->type);
5955 t = btf_type_by_id(btf, t->type);
5958 tname = btf_name_by_offset(btf, t->name_off);
5965 t = btf_type_by_id(btf, t->type);
5974 tname = btf_name_by_offset(btf, t->name_off);
5987 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6012 ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6018 /* forward declarations for arch-specific underlying types of
6019 * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
6037 return -EINVAL;
6039 t = btf_type_by_id(btf, t->type);
6044 t = btf_type_by_id(btf, t->type);
6047 tname = btf_name_by_offset(btf, t->name_off);
6055 t = btf_type_by_id(btf, t->type);
6061 tname = btf_name_by_offset(btf, t->name_off);
6064 return -EINVAL;
6087 if (btf_is_int(t) && t->size == 8)
6098 if (btf_is_int(t) && t->size == 8)
6104 strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6111 if (btf_is_int(t) && t->size == 8)
6121 if (btf_is_int(t) && t->size == 8)
6136 return -EINVAL;
6141 ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6149 return -EINVAL;
6152 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6155 return -EINVAL;
6168 return -ENOENT;
6182 kctx_type_id = kctx_member->type;
6186 return -EINVAL;
6201 return ERR_PTR(-ENOENT);
6205 err = -ENOMEM;
6208 env->btf = btf;
6210 btf->data = data;
6211 btf->data_size = data_size;
6212 btf->kernel_btf = true;
6213 snprintf(btf->name, sizeof(btf->name), "%s", name);
6219 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6233 refcount_set(&btf->refcnt, 1);
6239 kvfree(btf->types);
6254 return ERR_PTR(-ENOMEM);
6256 log = &env->log;
6257 log->level = BPF_LOG_KERNEL;
6258 btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
6280 if (!btf->base_btf || !btf->base_id_map)
6282 return btf->base_id_map[id];
6300 return ERR_PTR(-EINVAL);
6304 return ERR_PTR(-ENOMEM);
6306 log = &env->log;
6307 log->level = BPF_LOG_KERNEL;
6321 err = -ENOMEM;
6324 env->btf = btf;
6326 btf->base_btf = base_btf;
6327 btf->start_id = base_btf->nr_types;
6328 btf->start_str_off = base_btf->hdr.str_len;
6329 btf->kernel_btf = true;
6330 snprintf(btf->name, sizeof(btf->name), "%s", module_name);
6332 btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
6333 if (!btf->data) {
6334 err = -ENOMEM;
6337 btf->data_size = data_size;
6343 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6358 err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
6366 refcount_set(&btf->refcnt, 1);
6374 kvfree(btf->data);
6375 kvfree(btf->types);
6385 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6388 return tgt_prog->aux->btf;
6390 return prog->aux->attach_btf;
6396 t = btf_type_skip_modifiers(btf, t->type, NULL);
6415 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6420 t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6421 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6430 enum bpf_attach_type atype = prog->expected_attach_type;
6432 switch (prog->type) {
6472 /* ... from sched_numa_pair_template event class */
6499 /* ext4, from ext4__mballoc event class */
6505 /* ... from filelock_lock event class */
6510 /* ... from filelock_lease event class */
6516 /* host1x */
6526 /* ... from mm_page event class */
6543 /* ... from xprt_cong_event event class */
6555 /* writeback, from writeback_folio_template event class */
6665 const struct btf_type *t = prog->aux->attach_func_proto;
6666 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6668 const char *tname = prog->aux->attach_func_name;
6669 struct bpf_verifier_log *log = info->log;
6687 if (prog->aux->attach_btf_trace) {
6690 nr_args--;
6694 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6700 switch (prog->expected_attach_type) {
6703 info->is_retval = true;
6711 * While the LSM programs are BPF_MODIFY_RETURN-like
6715 * return -EINVAL;
6722 t = btf_type_by_id(btf, t->type);
6731 t = btf_type_skip_modifiers(btf, t->type, NULL);
6740 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6753 t = btf_type_by_id(btf, t->type);
6761 __btf_name_by_offset(btf, t->name_off),
6773 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6774 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6777 type = base_type(ctx_arg_info->reg_type);
6778 flag = type_flag(ctx_arg_info->reg_type);
6779 if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6781 info->reg_type = ctx_arg_info->reg_type;
6794 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6795 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6797 if (ctx_arg_info->offset == off) {
6798 if (!ctx_arg_info->btf_id) {
6803 info->reg_type = ctx_arg_info->reg_type;
6804 info->btf = ctx_arg_info->btf ? : btf_vmlinux;
6805 info->btf_id = ctx_arg_info->btf_id;
6806 info->ref_obj_id = ctx_arg_info->ref_obj_id;
6811 info->reg_type = PTR_TO_BTF_ID;
6813 info->reg_type |= PTR_TRUSTED;
6816 info->reg_type |= PTR_MAYBE_NULL;
6818 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6819 struct btf *btf = prog->aux->attach_btf;
6824 t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6827 tname = btf_name_by_offset(btf, t->name_off);
6831 tname += sizeof("btf_trace_") - 1;
6837 info->reg_type |= PTR_MAYBE_NULL;
6843 /* If we don't know NULL-ness specification and the tracepoint
6848 info->reg_type |= PTR_MAYBE_NULL;
6854 if (tgt_prog->type == BPF_PROG_TYPE_EXT)
6855 tgt_type = tgt_prog->aux->saved_dst_prog_type;
6857 tgt_type = tgt_prog->type;
6861 info->btf = btf_vmlinux;
6862 info->btf_id = ret;
6869 info->btf = btf;
6870 info->btf_id = t->type;
6871 t = btf_type_by_id(btf, t->type);
6874 tag_value = __btf_name_by_offset(btf, t->name_off);
6876 info->reg_type |= MEM_USER;
6878 info->reg_type |= MEM_PERCPU;
6883 info->btf_id = t->type;
6884 t = btf_type_by_id(btf, t->type);
6893 tname, arg, info->btf_id, btf_type_str(t),
6894 __btf_name_by_offset(btf, t->name_off));
6902 info->reg_type = SCALAR_VALUE;
6929 t = btf_type_skip_modifiers(btf, t->type, NULL);
6930 tname = __btf_name_by_offset(btf, t->name_off);
6933 return -EINVAL;
6937 if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
6945 if (off + size > t->size) {
6954 member = btf_type_member(t) + vlen - 1;
6955 mtype = btf_type_skip_modifiers(btf, member->type,
6961 if (array_elem->nelems != 0)
6969 t = btf_type_skip_modifiers(btf, array_elem->type,
6978 off = (off - moff) % t->size;
6984 return -EACCES;
7030 mid = member->type;
7031 mtype = btf_type_by_id(btf, member->type);
7032 mname = __btf_name_by_offset(btf, member->name_off);
7039 return -EFAULT;
7051 * linearize a multi-dimensional array.
7069 * When accessing outer->array[1][0], it moves
7095 elem_idx = (off - moff) / msize;
7115 off -= moff;
7128 return -EACCES;
7132 t = btf_type_by_id(btf, mtype->type);
7134 tag_value = __btf_name_by_offset(btf, t->name_off);
7146 stype = btf_type_skip_modifiers(btf, mtype->type, &id);
7162 * space. e.g. skb->cb[].
7168 return -EACCES;
7174 return -EINVAL;
7183 const struct btf *btf = reg->btf;
7186 u32 id = reg->btf_id;
7189 while (type_is_alloc(reg->type)) {
7197 rec = meta->record;
7198 for (i = 0; i < rec->cnt; i++) {
7199 struct btf_field *field = &rec->fields[i];
7200 u32 offset = field->offset;
7201 if (off < offset + field->size && offset < off + size) {
7204 btf_field_type_name(field->type));
7205 return -EACCES;
7220 if (type_is_alloc(reg->type))
7246 return -EINVAL;
7251 return -EINVAL;
7319 t = btf_type_by_id(btf, t->type);
7321 return -EINVAL;
7327 return t->size;
7328 return -EINVAL;
7359 m->arg_size[i] = 8;
7360 m->arg_flags[i] = 0;
7362 m->ret_size = 8;
7363 m->ret_flags = 0;
7364 m->nr_args = MAX_BPF_FUNC_REG_ARGS;
7373 return -EINVAL;
7375 ret = __get_type_size(btf, func->type, &t);
7380 return -EINVAL;
7382 m->ret_size = ret;
7383 m->ret_flags = __get_type_fmodel_flags(t);
7386 if (i == nargs - 1 && args[i].type == 0) {
7390 return -EINVAL;
7399 return -EINVAL;
7405 return -EINVAL;
7407 m->arg_size[i] = ret;
7408 m->arg_flags[i] = __get_type_fmodel_flags(t);
7410 m->nr_args = nargs;
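The model filled in here is the small fixed-size summary that the arch trampoline/JIT code consumes; for reference, its layout in recent kernels (include/linux/bpf.h) is:

struct btf_func_model {
	u8 ret_size;
	u8 ret_flags;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
	u8 arg_flags[MAX_BPF_FUNC_ARGS];
};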
7418 * EINVAL - function prototype mismatch
7419 * EFAULT - verifier bug
7420 * 0 - 99% match. The last 1% is validated by the verifier.
7430 fn1 = btf_name_by_offset(btf1, t1->name_off);
7431 fn2 = btf_name_by_offset(btf2, t2->name_off);
7435 return -EINVAL;
7439 return -EINVAL;
7442 t1 = btf_type_by_id(btf1, t1->type);
7444 return -EFAULT;
7445 t2 = btf_type_by_id(btf2, t2->type);
7447 return -EFAULT;
7457 return -EINVAL;
7460 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7461 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7462 if (t1->info != t2->info) {
7467 return -EINVAL;
7474 if (t1->info != t2->info) {
7478 return -EINVAL;
7480 if (btf_type_has_size(t1) && t1->size != t2->size) {
7483 i, fn1, t1->size,
7484 fn2, t2->size);
7485 return -EINVAL;
7498 return -EINVAL;
7500 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7501 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7506 return -EINVAL;
7512 return -EINVAL;
7520 s1 = btf_name_by_offset(btf1, t1->name_off);
7521 s2 = btf_name_by_offset(btf2, t2->name_off);
7526 return -EINVAL;
7536 struct btf *btf1 = prog->aux->btf;
7540 if (!prog->aux->func_info) {
7542 return -EINVAL;
7545 btf_id = prog->aux->func_info[0].type_id;
7547 return -EFAULT;
7551 return -EFAULT;
7560 t = btf_type_by_id(btf, t->type); /* skip PTR */
7563 t = btf_type_by_id(btf, t->type);
7568 name = btf_str_by_offset(btf, t->name_off);
7603 type_id = t->type;
7604 t = btf_type_by_id(btf, t->type);
7606 type_id = t->type;
7607 t = btf_type_by_id(btf, t->type);
7615 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7619 if (cc->cnt != 1) {
7621 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7622 cc->cnt == 0 ? "has no matches" : "is ambiguous");
7623 err = cc->cnt == 0 ? -ENOENT : -ESRCH;
7626 if (btf_is_module(cc->cands[0].btf)) {
7628 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7629 err = -EOPNOTSUPP;
7632 kern_type_id = cc->cands[0].id;
7651 /* Process BTF of a function to produce high-level expectation of function
7655 * EFAULT - there is a verifier bug. Abort verification.
7656 * EINVAL - cannot convert BTF.
7657 * 0 - Successfully processed BTF and constructed argument expectations.
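For a feel of the input, a hedged example of a global subprog whose BTF drives this classification; __arg_ctx and __arg_trusted are the libbpf bpf_helpers.h annotations that become the "arg:..." decl tags parsed below:

/* BPF program side (compiled against vmlinux.h + bpf_helpers.h) */
__noinline int demo_subprog(struct __sk_buff *skb __arg_ctx,
			    struct task_struct *task __arg_trusted)
{
	/* skb is verified as ARG_PTR_TO_CTX, task as a trusted
	 * PTR_TO_BTF_ID argument */
	return skb && task ? 0 : 1;
}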
7661 bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
7663 struct bpf_verifier_log *log = &env->log;
7664 struct bpf_prog *prog = env->prog;
7665 enum bpf_prog_type prog_type = prog->type;
7666 struct btf *btf = prog->aux->btf;
7672 if (sub->args_cached)
7675 if (!prog->aux->func_info) {
7677 return -EFAULT;
7680 btf_id = prog->aux->func_info[subprog].type_id;
7683 return -EINVAL;
7685 return -EFAULT;
7695 return -EFAULT;
7697 tname = btf_name_by_offset(btf, fn_t->name_off);
7699 if (prog->aux->func_info_aux[subprog].unreliable) {
7701 return -EFAULT;
7704 prog_type = prog->aux->dst_prog->type;
7706 t = btf_type_by_id(btf, fn_t->type);
7709 return -EFAULT;
7715 return -EINVAL;
7718 return -EINVAL;
7721 t = btf_type_by_id(btf, t->type);
7723 t = btf_type_by_id(btf, t->type);
7726 return -EINVAL;
7730 return -EINVAL;
7744 const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7749 return -EOPNOTSUPP;
7766 return -EOPNOTSUPP;
7769 if (id != -ENOENT) {
7776 t = btf_type_by_id(btf, t->type);
7783 return -EINVAL;
7787 prog->expected_attach_type))
7788 return -EINVAL;
7789 sub->args[i].arg_type = ARG_PTR_TO_CTX;
7795 return -EINVAL;
7797 sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
7805 return -EINVAL;
7812 sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
7814 sub->args[i].arg_type |= PTR_MAYBE_NULL;
7815 sub->args[i].btf_id = kern_type_id;
7824 return -EINVAL;
7827 ref_t = btf_type_skip_modifiers(btf, t->type, NULL);
7829 sub->args[i].arg_type = ARG_PTR_TO_MEM | MEM_RDONLY | PTR_UNTRUSTED;
7830 sub->args[i].mem_size = 0;
7841 tname = __btf_name_by_offset(vmlinux_btf, t->name_off);
7844 return -EINVAL;
7846 sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_UNTRUSTED;
7847 sub->args[i].btf_id = kern_type_id;
7853 return -EINVAL;
7855 sub->args[i].arg_type = ARG_PTR_TO_ARENA;
7863 return -EINVAL;
7866 t = btf_type_skip_modifiers(btf, t->type, NULL);
7870 i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7872 return -EINVAL;
7875 sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
7877 sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
7878 sub->args[i].mem_size = mem_size;
7885 return -EINVAL;
7888 sub->args[i].arg_type = ARG_ANYTHING;
7892 return -EINVAL;
7895 return -EINVAL;
7898 sub->arg_cnt = nargs;
7899 sub->args_cached = true;
7909 show->btf = btf;
7910 memset(&show->state, 0, sizeof(show->state));
7911 memset(&show->obj, 0, sizeof(show->obj));
7913 btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
7919 seq_vprintf((struct seq_file *)show->target, fmt, args);
7956 len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
7959 ssnprintf->len_left = 0;
7960 ssnprintf->len = len;
7961 } else if (len >= ssnprintf->len_left) {
7963 ssnprintf->len_left = 0;
7964 ssnprintf->len += len;
7966 ssnprintf->len_left -= len;
7967 ssnprintf->len += len;
7968 show->target += len;
7996 const struct btf *btf = filp->private_data;
7998 seq_printf(m, "btf_id:\t%u\n", btf->id);
8004 btf_put(filp->private_data);
8051 CLASS(fd, f)(fd);
8055 refcount_inc(&btf->refcnt);
8072 uinfo = u64_to_user_ptr(attr->info.info);
8073 uinfo_len = attr->info.info_len;
8078 return -EFAULT;
8080 info.id = btf->id;
8082 btf_copy = min_t(u32, btf->data_size, info.btf_size);
8083 if (copy_to_user(ubtf, btf->data, btf_copy))
8084 return -EFAULT;
8085 info.btf_size = btf->data_size;
8087 info.kernel_btf = btf->kernel_btf;
8092 return -EINVAL;
8094 name_len = strlen(btf->name);
8099 if (copy_to_user(uname, btf->name, name_len + 1))
8100 return -EFAULT;
8104 if (copy_to_user(uname, btf->name, uname_len - 1))
8105 return -EFAULT;
8106 if (put_user(zero, uname + uname_len - 1))
8107 return -EFAULT;
8108 /* let user-space know the buffer was too short */
8109 ret = -ENOSPC;
8114 put_user(info_copy, &uattr->info.info_len))
8115 return -EFAULT;
8127 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
8128 btf = ERR_PTR(-ENOENT);
8143 return btf->id;
8148 return btf->kernel_btf;
8153 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
8182 if (mod->btf_data_size == 0 ||
8191 err = -ENOMEM;
8194 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
8195 mod->btf_base_data, mod->btf_base_data_size);
8200 mod->name, PTR_ERR(btf));
8216 btf_mod->module = module;
8217 btf_mod->btf = btf;
8218 list_add(&btf_mod->list, &btf_modules);
8229 attr->attr.name = btf->name;
8230 attr->attr.mode = 0444;
8231 attr->size = btf->data_size;
8232 attr->private = btf->data;
8233 attr->read = sysfs_bin_attr_simple_read;
8238 mod->name, err);
8244 btf_mod->sysfs_attr = attr;
8251 if (btf_mod->module != module)
8254 btf_mod->flags |= BTF_MODULE_F_LIVE;
8262 if (btf_mod->module != module)
8265 list_del(&btf_mod->list);
8266 if (btf_mod->sysfs_attr)
8267 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
8268 purge_cand_cache(btf_mod->btf);
8269 btf_put(btf_mod->btf);
8270 kfree(btf_mod->sysfs_attr);
8302 if (btf_mod->btf != btf)
8310 if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
8311 res = btf_mod->module;
8341 if (btf_mod->module != module)
8344 btf_get(btf_mod->btf);
8345 btf = btf_mod->btf;
8358 return -ENOENT;
8372 return -EINVAL;
8374 if (name_sz <= 1 || name[name_sz - 1])
8375 return -EINVAL;
8406 /* Validate well-formedness of iter argument type.
8418 return -EINVAL;
8421 t = btf_type_skip_modifiers(btf, arg->type, NULL);
8423 return -EINVAL;
8424 t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8426 return -EINVAL;
8428 name = btf_name_by_offset(btf, t->name_off);
8429 if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
8430 return -EINVAL;
8446 if (!flags || (flags & (flags - 1)))
8447 return -EINVAL;
8452 return -EINVAL;
8462 if (t->size == 0 || (t->size % 8))
8463 return -EINVAL;
8468 iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8478 return -EINVAL;
8482 return -EINVAL;
8486 t = btf_type_skip_modifiers(btf, func->type, NULL);
8488 return -EINVAL;
8493 t = btf_type_by_id(btf, func->type);
8495 return -EINVAL;
8507 /* any kfunc should be FUNC -> FUNC_PROTO */
8510 return -EINVAL;
8513 func_name = btf_name_by_offset(btf, func->name_off);
8515 return -EINVAL;
8517 func = btf_type_by_id(btf, func->type);
8519 return -EINVAL;
8536 struct btf_id_set8 *add_set = kset->set;
8538 bool add_filter = !!kset->filter;
8545 ret = -EINVAL;
8549 if (!add_set->cnt)
8552 tab = btf->kfunc_set_tab;
8557 hook_filter = &tab->hook_filters[hook];
8558 for (i = 0; i < hook_filter->nr_filters; i++) {
8559 if (hook_filter->filters[i] == kset->filter) {
8565 if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
8566 ret = -E2BIG;
8574 return -ENOMEM;
8575 btf->kfunc_set_tab = tab;
8578 set = tab->sets[hook];
8583 ret = -EINVAL;
8591 * hence re-sorting the final set is required to make binary
8597 set_cnt = set ? set->cnt : 0;
8599 if (set_cnt > U32_MAX - add_set->cnt) {
8600 ret = -EOVERFLOW;
8604 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
8605 ret = -E2BIG;
8610 set = krealloc(tab->sets[hook],
8611 struct_size(set, pairs, set_cnt + add_set->cnt),
8614 ret = -ENOMEM;
8618 /* For newly allocated set, initialize set->cnt to 0 */
8619 if (!tab->sets[hook])
8620 set->cnt = 0;
8621 tab->sets[hook] = set;
8624 memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
8626 for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
8627 set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8629 set->cnt += add_set->cnt;
8631 sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8634 hook_filter = &tab->hook_filters[hook];
8635 hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
8654 if (!btf->kfunc_set_tab)
8656 hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
8657 for (i = 0; i < hook_filter->nr_filters; i++) {
8658 if (hook_filter->filters[i](prog, kfunc_btf_id))
8661 set = btf->kfunc_set_tab->sets[hook];
8722 * protection for looking up a well-formed btf->kfunc_set_tab.
8752 btf = btf_get_module_btf(kset->owner);
8754 return check_btf_kconfigs(kset->owner, "kfunc");
8758 for (i = 0; i < kset->set->cnt; i++) {
8759 ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8760 kset->set->pairs[i].flags);
8781 if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
8782 WARN_ON(!kset->owner);
8783 return -EINVAL;
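A minimal sketch of the registration pattern that lands in this function (demo_* names are illustrative; the macros come from include/linux/btf_ids.h, and BTF_KFUNCS_START sets the BTF_SET8_KFUNCS flag checked above):

BTF_KFUNCS_START(demo_kfunc_ids)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_KFUNCS_END(demo_kfunc_ids)

static const struct btf_kfunc_id_set demo_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &demo_kfunc_ids,
};

static int __init demo_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &demo_kfunc_set);
}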
8800 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
8804 return -ENOENT;
8805 /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
8809 dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8811 return -ENOENT;
8812 return dtor->kfunc_btf_id;
8827 return -EINVAL;
8829 dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
8831 return -EINVAL;
8834 t = btf_type_by_id(btf, dtor_func_proto->type);
8836 return -EINVAL;
8840 return -EINVAL;
8847 return -EINVAL;
8869 ret = -E2BIG;
8878 tab = btf->dtor_kfunc_tab;
8881 ret = -EINVAL;
8885 tab_cnt = tab ? tab->cnt : 0;
8886 if (tab_cnt > U32_MAX - add_cnt) {
8887 ret = -EOVERFLOW;
8892 ret = -E2BIG;
8896 tab = krealloc(btf->dtor_kfunc_tab,
8900 ret = -ENOMEM;
8904 if (!btf->dtor_kfunc_tab)
8905 tab->cnt = 0;
8906 btf->dtor_kfunc_tab = tab;
8908 memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
8912 tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
8913 tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
8916 tab->cnt += add_cnt;
8918 sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
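The table above is populated through register_btf_id_dtor_kfuncs(); a caller, modeled on kernel/bpf/helpers.c (demo names illustrative), looks like:

BTF_ID_LIST(demo_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release_dtor)

static const struct btf_id_dtor_kfunc demo_dtors[] = {
	{
		.btf_id       = demo_dtor_ids[0],
		.kfunc_btf_id = demo_dtor_ids[1],
	},
};

static int __init demo_dtor_init(void)
{
	return register_btf_id_dtor_kfuncs(demo_dtors,
					   ARRAY_SIZE(demo_dtors),
					   THIS_MODULE);
}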
8931 * type-based CO-RE relocations and follow slightly different rules than
8932 * field-based relocations. This function assumes that root types were already
8933 * checked for name match. Beyond that initial root-level name check, names
8935 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
8938 * - for ENUMs/ENUM64s, the size is ignored;
8939 * - for INT, size and signedness are ignored;
8940 * - for ARRAY, dimensionality is ignored, element types are checked for
8942 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
8943 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
8944 * - FUNC_PROTOs are compatible if they have compatible signature: same
8947 * more experience with using BPF CO-RE relocations.
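In practice these rules are what let a BPF object carry a minimal local "flavor" of a kernel type and still match the real definition; the ___suffix is stripped by the essential-name logic just below. A hypothetical local view:

/* BPF program side; only names must line up with the kernel's
 * struct task_struct, per the rules above. */
struct task_struct___local {
	int pid;	/* INT: size/signedness ignored */
	char comm[1];	/* ARRAY: dimensionality ignored */
} __attribute__((preserve_access_index));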
8978 for (i = n - 5; i >= 0; i--) {
8987 if (!cands->cnt)
8995 kfree(cands->name);
9016 bpf_log(log, "[%d]%s(", i, cc->name);
9017 for (j = 0; j < cc->cnt; j++) {
9018 bpf_log(log, "%d", cc->cands[j].id);
9019 if (j < cc->cnt - 1)
9039 return jhash(cands->name, cands->name_len, 0);
9048 if (cc && cc->name_len == cands->name_len &&
9049 !strncmp(cc->name, cands->name, cands->name_len))
9069 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL_ACCOUNT);
9072 return ERR_PTR(-ENOMEM);
9075 * cands->name points to strings in prog's BTF and the prog can be unloaded.
9077 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL_ACCOUNT);
9079 if (!new_cands->name) {
9081 return ERR_PTR(-ENOMEM);
9110 for (j = 0; j < cc->cnt; j++)
9111 if (cc->cands[j].btf == btf) {
9141 if (btf_kind(t) != cands->kind)
9144 targ_name = btf_name_by_offset(targ_btf, t->name_off);
9149 * for non-existing name will have a chance to schedule().
9153 if (strncmp(cands->name, targ_name, cands->name_len) != 0)
9157 if (targ_essent_len != cands->name_len)
9161 new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL_ACCOUNT);
9164 return ERR_PTR(-ENOMEM);
9167 memcpy(new_cands, cands, sizeof_cands(cands->cnt));
9170 cands->cands[cands->cnt].btf = targ_btf;
9171 cands->cands[cands->cnt].id = i;
9172 cands->cnt++;
9181 const struct btf *local_btf = ctx->btf;
9193 return ERR_PTR(-EINVAL);
9197 return ERR_PTR(-EINVAL);
9199 name = btf_name_by_offset(local_btf, local_type->name_off);
9201 return ERR_PTR(-EINVAL);
9205 cands->name = name;
9206 cands->kind = btf_kind(local_type);
9207 cands->name_len = local_essent_len;
9212 if (cc->cnt)
9222 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
9224 /* populate cache even when cands->cnt == 0 */
9230 if (cc->cnt)
9234 /* cands is a pointer to stack here and cands->cnt == 0 */
9237 /* if cache has it return it even if cc->cnt == 0 */
9257 /* cands is a pointer to kmalloced memory here if cands->cnt > 0
9258 * or pointer to stack if cands->cnt == 0.
9259 * Copy it into the cache even when cands->cnt == 0 and
9268 bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
9280 return -ENOMEM;
9282 type = btf_type_by_id(ctx->btf, relo->type_id);
9284 bpf_log(ctx->log, "relo #%u: bad type id %u\n",
9285 relo_idx, relo->type_id);
9287 return -EINVAL;
9295 cc = bpf_core_find_cands(ctx, relo->type_id);
9297 bpf_log(ctx->log, "target candidate search failed for %d\n",
9298 relo->type_id);
9302 if (cc->cnt) {
9303 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL_ACCOUNT);
9305 err = -ENOMEM;
9309 for (i = 0; i < cc->cnt; i++) {
9310 bpf_log(ctx->log,
9311 "CO-RE relocating %s %s: found target candidate [%d]\n",
9312 btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
9313 cands.cands[i].btf = cc->cands[i].btf;
9314 cands.cands[i].id = cc->cands[i].id;
9316 cands.len = cc->cnt;
9324 err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9329 err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
9337 if (ctx->log->level & BPF_LOG_LEVEL2)
9338 print_cand_cache(ctx->log);
9347 struct btf *btf = reg->btf;
9355 walk_type = btf_type_by_id(btf, reg->btf_id);
9359 tname = btf_name_by_offset(btf, walk_type->name_off);
9365 safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
9374 const char *m_name = __btf_name_by_offset(btf, member->name_off);
9375 const struct btf_type *mtype = btf_type_by_id(btf, member->type);
9381 btf_type_skip_modifiers(btf, mtype->type, &id);
9397 size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);
9407 reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
9408 arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
9414 * if the strings are the same size, they can't possibly be no-cast
9417 * because they are _not_ no-cast aliases, they are the same type.
9453 tab = btf->struct_ops_tab;
9457 return -ENOMEM;
9458 tab->capacity = 4;
9459 btf->struct_ops_tab = tab;
9462 for (i = 0; i < tab->cnt; i++)
9463 if (tab->ops[i].st_ops == st_ops)
9464 return -EEXIST;
9466 if (tab->cnt == tab->capacity) {
9468 struct_size(tab, ops, tab->capacity * 2),
9471 return -ENOMEM;
9473 tab->capacity *= 2;
9474 btf->struct_ops_tab = tab;
9477 tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops;
9479 err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
9483 btf->struct_ops_tab->cnt++;
9497 if (!btf->struct_ops_tab)
9500 cnt = btf->struct_ops_tab->cnt;
9501 st_ops_list = btf->struct_ops_tab->ops;
9519 if (!btf->struct_ops_tab)
9522 cnt = btf->struct_ops_tab->cnt;
9523 st_ops_list = btf->struct_ops_tab->ops;
9538 btf = btf_get_module_btf(st_ops->owner);
9540 return check_btf_kconfigs(st_ops->owner, "struct_ops");
9546 err = -ENOMEM;
9550 log->level = BPF_LOG_KERNEL;
9571 param_name = btf_name_by_offset(btf, arg->name_off);
9577 param_name += len - suffix_len;