xref: /linux/kernel/bpf/btf.c (revision 876f5ebd58a9ac42f48a7ead3d5b274a314e0ace)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/bpf.h>
23 #include <linux/bpf_lsm.h>
24 #include <linux/skmsg.h>
25 #include <linux/perf_event.h>
26 #include <linux/bsearch.h>
27 #include <linux/kobject.h>
28 #include <linux/sysfs.h>
29 #include <linux/overflow.h>
30 
31 #include <net/netfilter/nf_bpf_link.h>
32 
33 #include <net/sock.h>
34 #include <net/xdp.h>
35 #include "../tools/lib/bpf/relo_core.h"
36 
37 /* BTF (BPF Type Format) is the metadata format which describes
38  * the data types of BPF programs/maps.  Hence, it basically focuses
39  * on the C programming language, which modern BPF programs primarily
40  * use.
41  *
42  * ELF Section:
43  * ~~~~~~~~~~~
44  * The BTF data is stored under the ".BTF" ELF section
45  *
46  * struct btf_type:
47  * ~~~~~~~~~~~~~~~
48  * Each 'struct btf_type' object describes a C data type.
49  * Depending on the type it is describing, a 'struct btf_type'
50  * object may be followed by more data.  E.g., to describe
51  * an array, 'struct btf_type' is followed by
52  * 'struct btf_array'.
53  *
54  * 'struct btf_type' and any extra data following it are
55  * 4-byte aligned.
56  *
57  * Type section:
58  * ~~~~~~~~~~~~~
59  * The BTF type section contains a list of 'struct btf_type' objects.
60  * Each one describes a C type.  Recall from the above section
61  * that a 'struct btf_type' object could be immediately followed by extra
62  * data in order to describe some particular C types.
63  *
64  * type_id:
65  * ~~~~~~~
66  * Each btf_type object is identified by a type_id.  The type_id
67  * is implied by the location of the btf_type object in
68  * the BTF type section.  The first one has type_id 1.  The second
69  * one has type_id 2...etc.  Hence, an earlier btf_type has
70  * a smaller type_id.
71  *
72  * A btf_type object may refer to another btf_type object by using
73  * type_id (i.e. the "type" in the "struct btf_type").
74  *
75  * NOTE that we cannot assume any reference-order.
76  * A btf_type object can refer to an earlier btf_type object
77  * but it can also refer to a later btf_type object.
78  *
79  * For example, to describe "const void *".  A btf_type
80  * object describing "const" may refer to another btf_type
81  * object describing "void *".  This type-reference is done
82  * by specifying type_id:
83  *
84  * [1] CONST (anon) type_id=2
85  * [2] PTR (anon) type_id=0
86  *
87  * The above is the btf_verifier debug log:
88  *   - Each line starting with "[?]" is a btf_type object
89  *   - [?] is the type_id of the btf_type object.
90  *   - CONST/PTR is the BTF_KIND_XXX
91  *   - "(anon)" is the name of the type.  It just
92  *     happens that CONST and PTR have no name.
93  *   - type_id=XXX is the 'u32 type' in btf_type
94  *
95  * NOTE: "void" has type_id 0
96  *
97  * String section:
98  * ~~~~~~~~~~~~~~
99  * The BTF string section contains the names used by the type section.
100  * Each string is referred to by an "offset" from the beginning of the
101  * string section.
102  *
103  * Each string is '\0' terminated.
104  *
105  * The first character in the string section must be '\0'
106  * which is used to mean 'anonymous'.  Some btf_type objects may
107  * not have a name.
108  */
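
/* Editor's illustration (not part of the original file): the "extra
 * data" described above sits immediately after the fixed-size header,
 * so decoding e.g. a BTF_KIND_ARRAY payload is plain pointer
 * arithmetic, exactly what the btf_type_array() helper later in this
 * file does.
 */
static inline const struct btf_array *
example_array_payload(const struct btf_type *t)
{
	/* 'struct btf_array' starts right after the btf_type header */
	return (const struct btf_array *)(t + 1);
}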
109 
110 /* BTF verification:
111  *
112  * To verify BTF data, two passes are needed.
113  *
114  * Pass #1
115  * ~~~~~~~
116  * The first pass is to collect all btf_type objects to
117  * an array: "btf->types".
118  *
119  * Depending on the C type that a btf_type is describing,
120  * a btf_type may be followed by extra data.  We don't know
121  * how many btf_types there are, and more importantly we don't
122  * know where each btf_type is located in the type section.
123  *
124  * Without knowing the location of each type_id, most verifications
125  * cannot be done.  e.g. an earlier btf_type may refer to a later
126  * btf_type (recall the "const void *" above), so we cannot
127  * check this type-reference in the first pass.
128  *
129  * The first pass still does some verification (e.g.
130  * checking that the name is a valid offset into the string section).
131  *
132  * Pass #2
133  * ~~~~~~~
134  * The main focus is to resolve a btf_type that is referring
135  * to another type.
136  *
137  * We have to ensure the type being referred to:
138  * 1) does exist in the BTF (i.e. in btf->types[])
139  * 2) does not cause a loop:
140  *	struct A {
141  *		struct B b;
142  *	};
143  *
144  *	struct B {
145  *		struct A a;
146  *	};
147  *
148  * btf_type_needs_resolve() decides if a btf_type needs
149  * to be resolved.
150  *
151  * A type that needs resolving implements the "resolve()" ops,
152  * which essentially does a DFS and detects backedges.
153  *
154  * During resolve (or DFS), different C types have different
155  * "RESOLVED" conditions.
156  *
157  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
158  * members because a member is always referring to another
159  * type.  A struct's member can be treated as "RESOLVED" if
160  * it is referring to a BTF_KIND_PTR.  Otherwise, the
161  * following valid C struct would be rejected:
162  *
163  *	struct A {
164  *		int m;
165  *		struct A *a;
166  *	};
167  *
168  * When resolving a BTF_KIND_PTR, it needs to keep resolving if
169  * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
170  * detect a pointer loop, e.g.:
171  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
172  *                        ^                                         |
173  *                        +-----------------------------------------+
174  *
175  */
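
/* Editor's sketch of the pass-2 loop detection described above
 * (hypothetical and simplified to one reference per type; the real
 * resolver below uses an explicit stack and enum visit_state).  Kept
 * under #if 0 so it is documentation only:
 */
#if 0
static int example_resolve(u8 *state, u32 (*ref_of)(u32), u32 id)
{
	if (state[id] == 2)		/* already RESOLVED */
		return 0;
	if (state[id] == 1)		/* VISITED again: a backedge, */
		return -ELOOP;		/* i.e. the struct A <-> B loop */
	state[id] = 1;			/* mark VISITED */
	if (ref_of(id) && example_resolve(state, ref_of, ref_of(id)))
		return -ELOOP;
	state[id] = 2;			/* mark RESOLVED */
	return 0;
}
#endif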
176 
177 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
178 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
179 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
180 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
181 #define BITS_ROUNDUP_BYTES(bits) \
182 	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
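
/* Worked examples for the helpers above (editor's addition; assumes
 * static_assert() is usable here, as <linux/build_bug.h> provides):
 * 12 bits straddle two bytes once rounded up, 16 bits fill exactly two.
 */
static_assert(BITS_ROUNDUP_BYTES(12) == 2);
static_assert(BITS_ROUNDUP_BYTES(16) == 2);
static_assert(BITS_PER_BYTE_MASKED(12) == 4);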
183 
184 #define BTF_INFO_MASK 0x9f00ffff
185 #define BTF_INT_MASK 0x0fffffff
186 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
187 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
188 
189 /* 16MB allows for 64k structs with 16 members each, plus
190  * a few MB of space for the string section.
191  * The hard limit is S32_MAX.
192  */
193 #define BTF_MAX_SIZE (16 * 1024 * 1024)
194 
195 #define for_each_member_from(i, from, struct_type, member)		\
196 	for (i = from, member = btf_type_member(struct_type) + from;	\
197 	     i < btf_type_vlen(struct_type);				\
198 	     i++, member++)
199 
200 #define for_each_vsi_from(i, from, struct_type, member)				\
201 	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
202 	     i < btf_type_vlen(struct_type);					\
203 	     i++, member++)
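
/* Hypothetical use of the iterator above (editor's addition): the
 * resolver resumes a partially-verified struct from v->next_member
 * instead of rescanning members it has already resolved.
 */
#if 0
	for_each_member_from(i, v->next_member, struct_type, member) {
		/* member->type refers to another btf_type to resolve */
	}
#endif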
204 
205 DEFINE_IDR(btf_idr);
206 DEFINE_SPINLOCK(btf_idr_lock);
207 
208 enum btf_kfunc_hook {
209 	BTF_KFUNC_HOOK_COMMON,
210 	BTF_KFUNC_HOOK_XDP,
211 	BTF_KFUNC_HOOK_TC,
212 	BTF_KFUNC_HOOK_STRUCT_OPS,
213 	BTF_KFUNC_HOOK_TRACING,
214 	BTF_KFUNC_HOOK_SYSCALL,
215 	BTF_KFUNC_HOOK_FMODRET,
216 	BTF_KFUNC_HOOK_CGROUP,
217 	BTF_KFUNC_HOOK_SCHED_ACT,
218 	BTF_KFUNC_HOOK_SK_SKB,
219 	BTF_KFUNC_HOOK_SOCKET_FILTER,
220 	BTF_KFUNC_HOOK_LWT,
221 	BTF_KFUNC_HOOK_NETFILTER,
222 	BTF_KFUNC_HOOK_KPROBE,
223 	BTF_KFUNC_HOOK_MAX,
224 };
225 
226 enum {
227 	BTF_KFUNC_SET_MAX_CNT = 256,
228 	BTF_DTOR_KFUNC_MAX_CNT = 256,
229 	BTF_KFUNC_FILTER_MAX_CNT = 16,
230 };
231 
232 struct btf_kfunc_hook_filter {
233 	btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
234 	u32 nr_filters;
235 };
236 
237 struct btf_kfunc_set_tab {
238 	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
239 	struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
240 };
241 
242 struct btf_id_dtor_kfunc_tab {
243 	u32 cnt;
244 	struct btf_id_dtor_kfunc dtors[];
245 };
246 
247 struct btf_struct_ops_tab {
248 	u32 cnt;
249 	u32 capacity;
250 	struct bpf_struct_ops_desc ops[];
251 };
252 
253 struct btf {
254 	void *data;
255 	struct btf_type **types;
256 	u32 *resolved_ids;
257 	u32 *resolved_sizes;
258 	const char *strings;
259 	void *nohdr_data;
260 	struct btf_header hdr;
261 	u32 nr_types; /* includes VOID for base BTF */
262 	u32 types_size;
263 	u32 data_size;
264 	refcount_t refcnt;
265 	u32 id;
266 	struct rcu_head rcu;
267 	struct btf_kfunc_set_tab *kfunc_set_tab;
268 	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
269 	struct btf_struct_metas *struct_meta_tab;
270 	struct btf_struct_ops_tab *struct_ops_tab;
271 
272 	/* split BTF support */
273 	struct btf *base_btf;
274 	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
275 	u32 start_str_off; /* first string offset (0 for base BTF) */
276 	char name[MODULE_NAME_LEN];
277 	bool kernel_btf;
278 	__u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
279 };
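
/* Editor's sketch (not part of the original file): with split BTF the
 * fields above partition the type-ID space, so deciding whether an ID
 * belongs to this BTF rather than its base is a range check:
 */
static inline bool example_id_is_local(const struct btf *btf, u32 id)
{
	return id >= btf->start_id && id - btf->start_id < btf->nr_types;
}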
280 
281 enum verifier_phase {
282 	CHECK_META,
283 	CHECK_TYPE,
284 };
285 
286 struct resolve_vertex {
287 	const struct btf_type *t;
288 	u32 type_id;
289 	u16 next_member;
290 };
291 
292 enum visit_state {
293 	NOT_VISITED,
294 	VISITED,
295 	RESOLVED,
296 };
297 
298 enum resolve_mode {
299 	RESOLVE_TBD,	/* To Be Determined */
300 	RESOLVE_PTR,	/* Resolving for Pointer */
301 	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
302 					 * or array
303 					 */
304 };
305 
306 #define MAX_RESOLVE_DEPTH 32
307 
308 struct btf_sec_info {
309 	u32 off;
310 	u32 len;
311 };
312 
313 struct btf_verifier_env {
314 	struct btf *btf;
315 	u8 *visit_states;
316 	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
317 	struct bpf_verifier_log log;
318 	u32 log_type_id;
319 	u32 top_stack;
320 	enum verifier_phase phase;
321 	enum resolve_mode resolve_mode;
322 };
323 
324 static const char * const btf_kind_str[NR_BTF_KINDS] = {
325 	[BTF_KIND_UNKN]		= "UNKNOWN",
326 	[BTF_KIND_INT]		= "INT",
327 	[BTF_KIND_PTR]		= "PTR",
328 	[BTF_KIND_ARRAY]	= "ARRAY",
329 	[BTF_KIND_STRUCT]	= "STRUCT",
330 	[BTF_KIND_UNION]	= "UNION",
331 	[BTF_KIND_ENUM]		= "ENUM",
332 	[BTF_KIND_FWD]		= "FWD",
333 	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
334 	[BTF_KIND_VOLATILE]	= "VOLATILE",
335 	[BTF_KIND_CONST]	= "CONST",
336 	[BTF_KIND_RESTRICT]	= "RESTRICT",
337 	[BTF_KIND_FUNC]		= "FUNC",
338 	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
339 	[BTF_KIND_VAR]		= "VAR",
340 	[BTF_KIND_DATASEC]	= "DATASEC",
341 	[BTF_KIND_FLOAT]	= "FLOAT",
342 	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
343 	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
344 	[BTF_KIND_ENUM64]	= "ENUM64",
345 };
346 
347 const char *btf_type_str(const struct btf_type *t)
348 {
349 	return btf_kind_str[BTF_INFO_KIND(t->info)];
350 }
351 
352 /* Chunk size we use in safe copy of data to be shown. */
353 #define BTF_SHOW_OBJ_SAFE_SIZE		32
354 
355 /*
356  * This is the maximum size of a base type value (equivalent to a
357  * 128-bit int); if we are at the end of our safe buffer and have
358  * less than 16 bytes of space we can't be assured of being able
359  * to copy the next type safely, so in such cases we will initiate
360  * a new copy.
361  */
362 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE	16
363 
364 /* Type name size */
365 #define BTF_SHOW_NAME_SIZE		80
366 
367 /*
368  * The suffix of a type that indicates it cannot alias another type when
369  * comparing BTF IDs for kfunc invocations.
370  */
371 #define NOCAST_ALIAS_SUFFIX		"___init"
372 
373 /*
374  * Common data to all BTF show operations. Private show functions can add
375  * their own data to a structure containing a struct btf_show and consult it
376  * in the show callback.  See btf_type_show() below.
377  *
378  * One challenge with showing nested data is we want to skip 0-valued
379  * data, but in order to figure out whether a nested object is all zeros
380  * we need to walk through it.  As a result, we need to make two passes
381  * when handling structs, unions and arrays; the first pass simply looks
382  * for nonzero data, while the second actually does the display.  The first
383  * pass is signalled by show->state.depth_check being set, and if we
384  * encounter a non-zero value we set show->state.depth_to_show to
385  * the depth at which we encountered it.  When we have completed the
386  * first pass, we will know if anything needs to be displayed if
387  * depth_to_show > depth.  See btf_[struct,array]_show() for the
388  * implementation of this.
389  *
390  * Another problem is we want to ensure the data for display is safe to
391  * access.  To support this, the anonymous "struct {} obj" tracks the data
392  * object and our safe copy of it.  We copy portions of the data needed
393  * to the object "safe" buffer, but because its size is limited to
394  * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
395  * traverse larger objects for display.
396  *
397  * The various data type show functions all start with a call to
398  * btf_show_start_type() which returns a pointer to the safe copy
399  * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
400  * raw data itself).  btf_show_obj_safe() is responsible for
401  * using copy_from_kernel_nofault() to update the safe data if necessary
402  * as we traverse the object's data.  skbuff-like semantics are
403  * used:
404  *
405  * - obj.head points to the start of the toplevel object for display
406  * - obj.size is the size of the toplevel object
407  * - obj.data points to the current point in the original data at
408  *   which our safe data starts.  obj.data will advance as we copy
409  *   portions of the data.
410  *
411  * In most cases a single copy will suffice, but larger data structures
412  * such as "struct task_struct" will require many copies.  The logic in
413  * btf_show_obj_safe() determines if a new
414  * copy_from_kernel_nofault() is needed.
415  */
416 struct btf_show {
417 	u64 flags;
418 	void *target;	/* target of show operation (seq file, buffer) */
419 	__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
420 	const struct btf *btf;
421 	/* below are used during iteration */
422 	struct {
423 		u8 depth;
424 		u8 depth_to_show;
425 		u8 depth_check;
426 		u8 array_member:1,
427 		   array_terminated:1;
428 		u16 array_encoding;
429 		u32 type_id;
430 		int status;			/* non-zero for error */
431 		const struct btf_type *type;
432 		const struct btf_member *member;
433 		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
434 	} state;
435 	struct {
436 		u32 size;
437 		void *head;
438 		void *data;
439 		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
440 	} obj;
441 };
442 
443 struct btf_kind_operations {
444 	s32 (*check_meta)(struct btf_verifier_env *env,
445 			  const struct btf_type *t,
446 			  u32 meta_left);
447 	int (*resolve)(struct btf_verifier_env *env,
448 		       const struct resolve_vertex *v);
449 	int (*check_member)(struct btf_verifier_env *env,
450 			    const struct btf_type *struct_type,
451 			    const struct btf_member *member,
452 			    const struct btf_type *member_type);
453 	int (*check_kflag_member)(struct btf_verifier_env *env,
454 				  const struct btf_type *struct_type,
455 				  const struct btf_member *member,
456 				  const struct btf_type *member_type);
457 	void (*log_details)(struct btf_verifier_env *env,
458 			    const struct btf_type *t);
459 	void (*show)(const struct btf *btf, const struct btf_type *t,
460 			 u32 type_id, void *data, u8 bits_offsets,
461 			 struct btf_show *show);
462 };
463 
464 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
465 static struct btf_type btf_void;
466 
467 static int btf_resolve(struct btf_verifier_env *env,
468 		       const struct btf_type *t, u32 type_id);
469 
470 static int btf_func_check(struct btf_verifier_env *env,
471 			  const struct btf_type *t);
472 
473 static bool btf_type_is_modifier(const struct btf_type *t)
474 {
475 	/* Some of them are not strictly C modifiers,
476 	 * but they are grouped into the same bucket
477 	 * as far as BTF is concerned:
478 	 *   A type (t) that refers to another
479 	 *   type through t->type AND its size cannot
480 	 *   be determined without following the t->type.
481 	 *
482 	 * ptr does not fall into this bucket
483 	 * because its size is always sizeof(void *).
484 	 */
485 	switch (BTF_INFO_KIND(t->info)) {
486 	case BTF_KIND_TYPEDEF:
487 	case BTF_KIND_VOLATILE:
488 	case BTF_KIND_CONST:
489 	case BTF_KIND_RESTRICT:
490 	case BTF_KIND_TYPE_TAG:
491 		return true;
492 	}
493 
494 	return false;
495 }
496 
497 bool btf_type_is_void(const struct btf_type *t)
498 {
499 	return t == &btf_void;
500 }
501 
502 static bool btf_type_is_datasec(const struct btf_type *t)
503 {
504 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
505 }
506 
507 static bool btf_type_is_decl_tag(const struct btf_type *t)
508 {
509 	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
510 }
511 
512 static bool btf_type_nosize(const struct btf_type *t)
513 {
514 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
515 	       btf_type_is_func(t) || btf_type_is_func_proto(t) ||
516 	       btf_type_is_decl_tag(t);
517 }
518 
519 static bool btf_type_nosize_or_null(const struct btf_type *t)
520 {
521 	return !t || btf_type_nosize(t);
522 }
523 
524 static bool btf_type_is_decl_tag_target(const struct btf_type *t)
525 {
526 	return btf_type_is_func(t) || btf_type_is_struct(t) ||
527 	       btf_type_is_var(t) || btf_type_is_typedef(t);
528 }
529 
530 bool btf_is_vmlinux(const struct btf *btf)
531 {
532 	return btf->kernel_btf && !btf->base_btf;
533 }
534 
535 u32 btf_nr_types(const struct btf *btf)
536 {
537 	u32 total = 0;
538 
539 	while (btf) {
540 		total += btf->nr_types;
541 		btf = btf->base_btf;
542 	}
543 
544 	return total;
545 }
546 
547 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
548 {
549 	const struct btf_type *t;
550 	const char *tname;
551 	u32 i, total;
552 
553 	total = btf_nr_types(btf);
554 	for (i = 1; i < total; i++) {
555 		t = btf_type_by_id(btf, i);
556 		if (BTF_INFO_KIND(t->info) != kind)
557 			continue;
558 
559 		tname = btf_name_by_offset(btf, t->name_off);
560 		if (!strcmp(tname, name))
561 			return i;
562 	}
563 
564 	return -ENOENT;
565 }
566 
567 s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
568 {
569 	struct btf *btf;
570 	s32 ret;
571 	int id;
572 
573 	btf = bpf_get_btf_vmlinux();
574 	if (IS_ERR(btf))
575 		return PTR_ERR(btf);
576 	if (!btf)
577 		return -EINVAL;
578 
579 	ret = btf_find_by_name_kind(btf, name, kind);
580 	/* ret is never zero, since btf_find_by_name_kind returns
581 	 * positive btf_id or negative error.
582 	 */
583 	if (ret > 0) {
584 		btf_get(btf);
585 		*btf_p = btf;
586 		return ret;
587 	}
588 
589 	/* If name is not found in vmlinux's BTF then search in module's BTFs */
590 	spin_lock_bh(&btf_idr_lock);
591 	idr_for_each_entry(&btf_idr, btf, id) {
592 		if (!btf_is_module(btf))
593 			continue;
594 		/* linear search could be slow hence unlock/lock
595 		 * the IDR to avoid holding it for too long
596 		 */
597 		btf_get(btf);
598 		spin_unlock_bh(&btf_idr_lock);
599 		ret = btf_find_by_name_kind(btf, name, kind);
600 		if (ret > 0) {
601 			*btf_p = btf;
602 			return ret;
603 		}
604 		btf_put(btf);
605 		spin_lock_bh(&btf_idr_lock);
606 	}
607 	spin_unlock_bh(&btf_idr_lock);
608 	return ret;
609 }
610 EXPORT_SYMBOL_GPL(bpf_find_btf_id);
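
/* Hypothetical caller of bpf_find_btf_id() (editor's addition): on
 * success it returns a positive ID plus a referenced btf, which the
 * caller must release with btf_put():
 */
#if 0
	struct btf *btf;
	s32 id = bpf_find_btf_id("task_struct", BTF_KIND_STRUCT, &btf);

	if (id > 0) {
		/* ... use btf_type_by_id(btf, id) ... */
		btf_put(btf);
	}
#endif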
611 
612 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
613 					       u32 id, u32 *res_id)
614 {
615 	const struct btf_type *t = btf_type_by_id(btf, id);
616 
617 	while (btf_type_is_modifier(t)) {
618 		id = t->type;
619 		t = btf_type_by_id(btf, t->type);
620 	}
621 
622 	if (res_id)
623 		*res_id = id;
624 
625 	return t;
626 }
627 
628 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
629 					    u32 id, u32 *res_id)
630 {
631 	const struct btf_type *t;
632 
633 	t = btf_type_skip_modifiers(btf, id, NULL);
634 	if (!btf_type_is_ptr(t))
635 		return NULL;
636 
637 	return btf_type_skip_modifiers(btf, t->type, res_id);
638 }
639 
640 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
641 						 u32 id, u32 *res_id)
642 {
643 	const struct btf_type *ptype;
644 
645 	ptype = btf_type_resolve_ptr(btf, id, res_id);
646 	if (ptype && btf_type_is_func_proto(ptype))
647 		return ptype;
648 
649 	return NULL;
650 }
651 
652 /* Types that act only as a source, not as a sink or intermediate
653  * type when resolving.
654  */
655 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
656 {
657 	return btf_type_is_var(t) ||
658 	       btf_type_is_decl_tag(t) ||
659 	       btf_type_is_datasec(t);
660 }
661 
662 /* What types need to be resolved?
663  *
664  * btf_type_is_modifier() is an obvious one.
665  *
666  * btf_type_is_struct() because its member refers to
667  * another type (through member->type).
668  *
669  * btf_type_is_var() because the variable refers to
670  * another type. btf_type_is_datasec() holds multiple
671  * btf_type_is_var() types that need resolving.
672  *
673  * btf_type_is_array() because its element (array->type)
674  * refers to another type.  An array can be thought of as a
675  * special case of a struct whose single member type is
676  * repeated array->nelems times.
677  */
678 static bool btf_type_needs_resolve(const struct btf_type *t)
679 {
680 	return btf_type_is_modifier(t) ||
681 	       btf_type_is_ptr(t) ||
682 	       btf_type_is_struct(t) ||
683 	       btf_type_is_array(t) ||
684 	       btf_type_is_var(t) ||
685 	       btf_type_is_func(t) ||
686 	       btf_type_is_decl_tag(t) ||
687 	       btf_type_is_datasec(t);
688 }
689 
690 /* t->size can be used */
691 static bool btf_type_has_size(const struct btf_type *t)
692 {
693 	switch (BTF_INFO_KIND(t->info)) {
694 	case BTF_KIND_INT:
695 	case BTF_KIND_STRUCT:
696 	case BTF_KIND_UNION:
697 	case BTF_KIND_ENUM:
698 	case BTF_KIND_DATASEC:
699 	case BTF_KIND_FLOAT:
700 	case BTF_KIND_ENUM64:
701 		return true;
702 	}
703 
704 	return false;
705 }
706 
707 static const char *btf_int_encoding_str(u8 encoding)
708 {
709 	if (encoding == 0)
710 		return "(none)";
711 	else if (encoding == BTF_INT_SIGNED)
712 		return "SIGNED";
713 	else if (encoding == BTF_INT_CHAR)
714 		return "CHAR";
715 	else if (encoding == BTF_INT_BOOL)
716 		return "BOOL";
717 	else
718 		return "UNKN";
719 }
720 
721 static u32 btf_type_int(const struct btf_type *t)
722 {
723 	return *(u32 *)(t + 1);
724 }
725 
726 static const struct btf_array *btf_type_array(const struct btf_type *t)
727 {
728 	return (const struct btf_array *)(t + 1);
729 }
730 
731 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
732 {
733 	return (const struct btf_enum *)(t + 1);
734 }
735 
736 static const struct btf_var *btf_type_var(const struct btf_type *t)
737 {
738 	return (const struct btf_var *)(t + 1);
739 }
740 
741 static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
742 {
743 	return (const struct btf_decl_tag *)(t + 1);
744 }
745 
746 static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
747 {
748 	return (const struct btf_enum64 *)(t + 1);
749 }
750 
751 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
752 {
753 	return kind_ops[BTF_INFO_KIND(t->info)];
754 }
755 
756 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
757 {
758 	if (!BTF_STR_OFFSET_VALID(offset))
759 		return false;
760 
761 	while (offset < btf->start_str_off)
762 		btf = btf->base_btf;
763 
764 	offset -= btf->start_str_off;
765 	return offset < btf->hdr.str_len;
766 }
767 
768 static bool __btf_name_char_ok(char c, bool first)
769 {
770 	if ((first ? !isalpha(c) :
771 		     !isalnum(c)) &&
772 	    c != '_' &&
773 	    c != '.')
774 		return false;
775 	return true;
776 }
777 
778 const char *btf_str_by_offset(const struct btf *btf, u32 offset)
779 {
780 	while (offset < btf->start_str_off)
781 		btf = btf->base_btf;
782 
783 	offset -= btf->start_str_off;
784 	if (offset < btf->hdr.str_len)
785 		return &btf->strings[offset];
786 
787 	return NULL;
788 }
789 
790 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
791 {
792 	/* offset must be valid */
793 	const char *src = btf_str_by_offset(btf, offset);
794 	const char *src_limit;
795 
796 	if (!__btf_name_char_ok(*src, true))
797 		return false;
798 
799 	/* set a limit on identifier length */
800 	src_limit = src + KSYM_NAME_LEN;
801 	src++;
802 	while (*src && src < src_limit) {
803 		if (!__btf_name_char_ok(*src, false))
804 			return false;
805 		src++;
806 	}
807 
808 	return !*src;
809 }
810 
811 /* Allow any printable character in DATASEC names */
812 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
813 {
814 	/* offset must be valid */
815 	const char *src = btf_str_by_offset(btf, offset);
816 	const char *src_limit;
817 
818 	if (!*src)
819 		return false;
820 
821 	/* set a limit on identifier length */
822 	src_limit = src + KSYM_NAME_LEN;
823 	while (*src && src < src_limit) {
824 		if (!isprint(*src))
825 			return false;
826 		src++;
827 	}
828 
829 	return !*src;
830 }
831 
832 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
833 {
834 	const char *name;
835 
836 	if (!offset)
837 		return "(anon)";
838 
839 	name = btf_str_by_offset(btf, offset);
840 	return name ?: "(invalid-name-offset)";
841 }
842 
843 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
844 {
845 	return btf_str_by_offset(btf, offset);
846 }
847 
848 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
849 {
850 	while (type_id < btf->start_id)
851 		btf = btf->base_btf;
852 
853 	type_id -= btf->start_id;
854 	if (type_id >= btf->nr_types)
855 		return NULL;
856 	return btf->types[type_id];
857 }
858 EXPORT_SYMBOL_GPL(btf_type_by_id);
859 
860 /*
861  * Check that the type @t is a regular int. This means that @t is not
862  * a bit field and it has the same size as one of u8/u16/u32/u64
863  * or __int128.  If @expected_size is not zero, then the size of @t should
864  * be the same. A caller should already have checked that the type @t
865  * is an integer.
866  */
867 static bool __btf_type_int_is_regular(const struct btf_type *t, size_t expected_size)
868 {
869 	u32 int_data = btf_type_int(t);
870 	u8 nr_bits = BTF_INT_BITS(int_data);
871 	u8 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
872 
873 	return BITS_PER_BYTE_MASKED(nr_bits) == 0 &&
874 	       BTF_INT_OFFSET(int_data) == 0 &&
875 	       (nr_bytes <= 16 && is_power_of_2(nr_bytes)) &&
876 	       (expected_size == 0 || nr_bytes == expected_size);
877 }
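
/* Example (editor's addition): a plain u32 encodes BTF_INT_BITS == 32
 * and BTF_INT_OFFSET == 0, so nr_bytes is 4 and both
 * btf_type_int_is_regular() and the i32 helper below accept it; a
 * 12-bit bitfield fails the BITS_PER_BYTE_MASKED() test above.
 */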
878 
879 static bool btf_type_int_is_regular(const struct btf_type *t)
880 {
881 	return __btf_type_int_is_regular(t, 0);
882 }
883 
884 bool btf_type_is_i32(const struct btf_type *t)
885 {
886 	return btf_type_is_int(t) && __btf_type_int_is_regular(t, 4);
887 }
888 
889 bool btf_type_is_i64(const struct btf_type *t)
890 {
891 	return btf_type_is_int(t) && __btf_type_int_is_regular(t, 8);
892 }
893 
894 /*
895  * Check that a given struct member is a regular int with the expected
896  * offset and size.
897  */
898 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
899 			   const struct btf_member *m,
900 			   u32 expected_offset, u32 expected_size)
901 {
902 	const struct btf_type *t;
903 	u32 id, int_data;
904 	u8 nr_bits;
905 
906 	id = m->type;
907 	t = btf_type_id_size(btf, &id, NULL);
908 	if (!t || !btf_type_is_int(t))
909 		return false;
910 
911 	int_data = btf_type_int(t);
912 	nr_bits = BTF_INT_BITS(int_data);
913 	if (btf_type_kflag(s)) {
914 		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
915 		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
916 
917 		/* if kflag set, int should be a regular int and
918 		 * bit offset should be at a byte boundary.
919 		 */
920 		return !bitfield_size &&
921 		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
922 		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
923 	}
924 
925 	if (BTF_INT_OFFSET(int_data) ||
926 	    BITS_PER_BYTE_MASKED(m->offset) ||
927 	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
928 	    BITS_PER_BYTE_MASKED(nr_bits) ||
929 	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
930 		return false;
931 
932 	return true;
933 }
934 
935 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
936 static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
937 						       u32 id)
938 {
939 	const struct btf_type *t = btf_type_by_id(btf, id);
940 
941 	while (btf_type_is_modifier(t) &&
942 	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
943 		t = btf_type_by_id(btf, t->type);
944 	}
945 
946 	return t;
947 }
948 
949 #define BTF_SHOW_MAX_ITER	10
950 
951 #define BTF_KIND_BIT(kind)	(1ULL << kind)
952 
953 /*
954  * Populate show->state.name with type name information.
955  * Format of type name is
956  *
957  * [.member_name = ] (type_name)
958  */
959 static const char *btf_show_name(struct btf_show *show)
960 {
961 	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
962 	const char *array_suffixes = "[][][][][][][][][][]";
963 	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
964 	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
965 	const char *ptr_suffixes = "**********";
966 	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
967 	const char *name = NULL, *prefix = "", *parens = "";
968 	const struct btf_member *m = show->state.member;
969 	const struct btf_type *t;
970 	const struct btf_array *array;
971 	u32 id = show->state.type_id;
972 	const char *member = NULL;
973 	bool show_member = false;
974 	u64 kinds = 0;
975 	int i;
976 
977 	show->state.name[0] = '\0';
978 
979 	/*
980 	 * Don't show type name if we're showing an array member;
981 	 * in that case we show the array type so we don't need to repeat
982 	 * ourselves for each member.
983 	 */
984 	if (show->state.array_member)
985 		return "";
986 
987 	/* Retrieve member name, if any. */
988 	if (m) {
989 		member = btf_name_by_offset(show->btf, m->name_off);
990 		show_member = strlen(member) > 0;
991 		id = m->type;
992 	}
993 
994 	/*
995 	 * Start with type_id, as we have resolved the struct btf_type *
996 	 * via btf_modifier_show() past the parent typedef to the child
997 	 * struct, int, etc. that it is defined as.  In such cases, the type_id
998 	 * still represents the starting type while the struct btf_type *
999 	 * in our show->state points at the resolved type of the typedef.
1000 	 */
1001 	t = btf_type_by_id(show->btf, id);
1002 	if (!t)
1003 		return "";
1004 
1005 	/*
1006 	 * The goal here is to build up the right number of pointer and
1007 	 * array suffixes while ensuring the type name for a typedef
1008 	 * is represented.  Along the way we accumulate a list of
1009 	 * BTF kinds we have encountered, since these will inform later
1010 	 * display; for example, pointer types will not require an
1011 	 * opening "{" for a struct; we will just display the pointer value.
1012 	 *
1013 	 * We also want to accumulate the right number of pointer or array
1014 	 * indices in the format string while iterating until we get to
1015 	 * the typedef/pointee/array member target type.
1016 	 *
1017 	 * We start by pointing at the end of pointer and array suffix
1018 	 * strings; as we accumulate pointers and arrays we move the pointer
1019 	 * or array string backwards so it will show the expected number of
1020 	 * '*' or '[]' for the type.  BTF_SHOW_MAX_ITER levels of nesting of
1021 	 * pointers, arrays and typedefs are supported as a precaution.
1022 	 *
1023 	 * We also want to get the typedef name while proceeding to resolve
1024 	 * the type it points to so that we can add parentheses if it is a
1025 	 * "typedef struct" etc.
1026 	 */
1027 	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
1028 
1029 		switch (BTF_INFO_KIND(t->info)) {
1030 		case BTF_KIND_TYPEDEF:
1031 			if (!name)
1032 				name = btf_name_by_offset(show->btf,
1033 							       t->name_off);
1034 			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
1035 			id = t->type;
1036 			break;
1037 		case BTF_KIND_ARRAY:
1038 			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
1039 			parens = "[";
1040 			if (!t)
1041 				return "";
1042 			array = btf_type_array(t);
1043 			if (array_suffix > array_suffixes)
1044 				array_suffix -= 2;
1045 			id = array->type;
1046 			break;
1047 		case BTF_KIND_PTR:
1048 			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
1049 			if (ptr_suffix > ptr_suffixes)
1050 				ptr_suffix -= 1;
1051 			id = t->type;
1052 			break;
1053 		default:
1054 			id = 0;
1055 			break;
1056 		}
1057 		if (!id)
1058 			break;
1059 		t = btf_type_skip_qualifiers(show->btf, id);
1060 	}
1061 	/* We may not be able to represent this type; bail to be safe */
1062 	if (i == BTF_SHOW_MAX_ITER)
1063 		return "";
1064 
1065 	if (!name)
1066 		name = btf_name_by_offset(show->btf, t->name_off);
1067 
1068 	switch (BTF_INFO_KIND(t->info)) {
1069 	case BTF_KIND_STRUCT:
1070 	case BTF_KIND_UNION:
1071 		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1072 			 "struct" : "union";
1073 		/* if it's an array of struct/union, parens is already set */
1074 		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
1075 			parens = "{";
1076 		break;
1077 	case BTF_KIND_ENUM:
1078 	case BTF_KIND_ENUM64:
1079 		prefix = "enum";
1080 		break;
1081 	default:
1082 		break;
1083 	}
1084 
1085 	/* pointer does not require parens */
1086 	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
1087 		parens = "";
1088 	/* typedef does not require struct/union/enum prefix */
1089 	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
1090 		prefix = "";
1091 
1092 	if (!name)
1093 		name = "";
1094 
1095 	/* Even if we don't want type name info, we want parentheses etc */
1096 	if (show->flags & BTF_SHOW_NONAME)
1097 		snprintf(show->state.name, sizeof(show->state.name), "%s",
1098 			 parens);
1099 	else
1100 		snprintf(show->state.name, sizeof(show->state.name),
1101 			 "%s%s%s(%s%s%s%s%s%s)%s",
1102 			 /* first 3 strings comprise ".member = " */
1103 			 show_member ? "." : "",
1104 			 show_member ? member : "",
1105 			 show_member ? " = " : "",
1106 			 /* ...next is our prefix (struct, enum, etc) */
1107 			 prefix,
1108 			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1109 			 /* ...this is the type name itself */
1110 			 name,
1111 			 /* ...suffixed by the appropriate '*', '[]' suffixes */
1112 			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
1113 			 array_suffix, parens);
1114 
1115 	return show->state.name;
1116 }
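
/* Example (editor's addition; output is approximate): for a member
 * 'struct foo **bar', the loop above accumulates two '*' suffixes and
 * the snprintf() yields ".bar = (struct foo **)".
 */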
1117 
1118 static const char *__btf_show_indent(struct btf_show *show)
1119 {
1120 	const char *indents = "                                ";
1121 	const char *indent = &indents[strlen(indents)];
1122 
1123 	if ((indent - show->state.depth) >= indents)
1124 		return indent - show->state.depth;
1125 	return indents;
1126 }
1127 
1128 static const char *btf_show_indent(struct btf_show *show)
1129 {
1130 	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1131 }
1132 
1133 static const char *btf_show_newline(struct btf_show *show)
1134 {
1135 	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1136 }
1137 
1138 static const char *btf_show_delim(struct btf_show *show)
1139 {
1140 	if (show->state.depth == 0)
1141 		return "";
1142 
1143 	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1144 		BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1145 		return "|";
1146 
1147 	return ",";
1148 }
1149 
1150 __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1151 {
1152 	va_list args;
1153 
1154 	if (!show->state.depth_check) {
1155 		va_start(args, fmt);
1156 		show->showfn(show, fmt, args);
1157 		va_end(args);
1158 	}
1159 }
1160 
1161 /* Macros are used here as btf_show_type_value[s]() prepends and appends
1162  * format specifiers to the format specifier passed in; these do the work of
1163  * adding indentation, delimiters etc while the caller simply has to specify
1164  * the type value(s) in the format specifier + value(s).
1165  */
1166 #define btf_show_type_value(show, fmt, value)				       \
1167 	do {								       \
1168 		if ((value) != (__typeof__(value))0 ||			       \
1169 		    (show->flags & BTF_SHOW_ZERO) ||			       \
1170 		    show->state.depth == 0) {				       \
1171 			btf_show(show, "%s%s" fmt "%s%s",		       \
1172 				 btf_show_indent(show),			       \
1173 				 btf_show_name(show),			       \
1174 				 value, btf_show_delim(show),		       \
1175 				 btf_show_newline(show));		       \
1176 			if (show->state.depth > show->state.depth_to_show)     \
1177 				show->state.depth_to_show = show->state.depth; \
1178 		}							       \
1179 	} while (0)
1180 
1181 #define btf_show_type_values(show, fmt, ...)				       \
1182 	do {								       \
1183 		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
1184 			 btf_show_name(show),				       \
1185 			 __VA_ARGS__, btf_show_delim(show),		       \
1186 			 btf_show_newline(show));			       \
1187 		if (show->state.depth > show->state.depth_to_show)	       \
1188 			show->state.depth_to_show = show->state.depth;	       \
1189 	} while (0)
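
/* Usage example (editor's addition, illustrative): for an int member,
 * btf_show_type_value(show, "%d", *(int *)safe_data) emits roughly
 * "  .count = (int)42,\n", with indent, delimiter and newline supplied
 * by the macro and the name part by btf_show_name().
 */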
1190 
1191 /* How much is left to copy to safe buffer after @data? */
1192 static int btf_show_obj_size_left(struct btf_show *show, void *data)
1193 {
1194 	return show->obj.head + show->obj.size - data;
1195 }
1196 
1197 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1198 static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1199 {
1200 	return data >= show->obj.data &&
1201 	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1202 }
1203 
1204 /*
1205  * If object pointed to by @data of @size falls within our safe buffer, return
1206  * the equivalent pointer to the same safe data.  Assumes
1207  * copy_from_kernel_nofault() has already happened and our safe buffer is
1208  * populated.
1209  */
1210 static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1211 {
1212 	if (btf_show_obj_is_safe(show, data, size))
1213 		return show->obj.safe + (data - show->obj.data);
1214 	return NULL;
1215 }
1216 
1217 /*
1218  * Return a safe-to-access version of data pointed to by @data.
1219  * We do this by copying the relevant amount of information
1220  * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1221  *
1222  * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1223  * safe copy is needed.
1224  *
1225  * Otherwise we need to determine if we have the required amount
1226  * of data, determined by the @data pointer and the size of the
1227  * largest base type we can encounter (represented by
1228  * BTF_SHOW_OBJ_BASE_TYPE_SIZE).  Having that much data ensures
1229  * that we will be able to print some of the current object,
1230  * and if more is needed a copy will be triggered.
1231  * Some objects such as structs will not fit into the buffer;
1232  * in such cases additional copies when we iterate over their
1233  * members may be needed.
1234  *
1235  * btf_show_obj_safe() is used to return a safe buffer for
1236  * btf_show_start_type(); this ensures that as we recurse into
1237  * nested types we always have safe data for the given type.
1238  * This approach is somewhat wasteful; it's possible for example
1239  * that when iterating over a large union we'll end up copying the
1240  * same data repeatedly, but the goal is safety not performance.
1241  * We use stack data as opposed to per-CPU buffers because the
1242  * iteration over a type can take some time, and preemption handling
1243  * would greatly complicate use of the safe buffer.
1244  */
1245 static void *btf_show_obj_safe(struct btf_show *show,
1246 			       const struct btf_type *t,
1247 			       void *data)
1248 {
1249 	const struct btf_type *rt;
1250 	int size_left, size;
1251 	void *safe = NULL;
1252 
1253 	if (show->flags & BTF_SHOW_UNSAFE)
1254 		return data;
1255 
1256 	rt = btf_resolve_size(show->btf, t, &size);
1257 	if (IS_ERR(rt)) {
1258 		show->state.status = PTR_ERR(rt);
1259 		return NULL;
1260 	}
1261 
1262 	/*
1263 	 * Is this the toplevel object? If so, set total object size and
1264 	 * initialize pointers.  Otherwise check if we still fall within
1265 	 * our safe object data.
1266 	 */
1267 	if (show->state.depth == 0) {
1268 		show->obj.size = size;
1269 		show->obj.head = data;
1270 	} else {
1271 		/*
1272 		 * If the size of the current object is > our remaining
1273 		 * safe buffer we _may_ need to do a new copy.  However
1274 		 * consider the case of a nested struct; its size pushes
1275 		 * us over the safe buffer limit, but showing any individual
1276 		 * struct members does not.  In such cases, we don't need
1277 		 * to initiate a fresh copy yet; however we definitely need
1278 		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1279 		 * in our buffer, regardless of the current object size.
1280 		 * The logic here is that as we resolve types we will
1281 		 * hit a base type at some point, and we need to be sure
1282 		 * the next chunk of data is safely available to display
1283 		 * that type info.  We cannot rely on the size of
1284 		 * the current object here because it may be much larger
1285 		 * than our current buffer (e.g. task_struct is 8k).
1286 		 * All we want to do here is ensure that we can print the
1287 		 * next basic type, which we can if either
1288 		 * - the current type size is within the safe buffer; or
1289 		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1290 		 *   the safe buffer.
1291 		 */
1292 		safe = __btf_show_obj_safe(show, data,
1293 					   min(size,
1294 					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1295 	}
1296 
1297 	/*
1298 	 * We need a new copy to our safe object, either because we haven't
1299 	 * yet copied and are initializing safe data, or because the data
1300 	 * we want falls outside the boundaries of the safe object.
1301 	 */
1302 	if (!safe) {
1303 		size_left = btf_show_obj_size_left(show, data);
1304 		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1305 			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1306 		show->state.status = copy_from_kernel_nofault(show->obj.safe,
1307 							      data, size_left);
1308 		if (!show->state.status) {
1309 			show->obj.data = data;
1310 			safe = show->obj.safe;
1311 		}
1312 	}
1313 
1314 	return safe;
1315 }
1316 
1317 /*
1318  * Set the type we are starting to show and return a safe data pointer
1319  * to be used for showing the associated data.
1320  */
1321 static void *btf_show_start_type(struct btf_show *show,
1322 				 const struct btf_type *t,
1323 				 u32 type_id, void *data)
1324 {
1325 	show->state.type = t;
1326 	show->state.type_id = type_id;
1327 	show->state.name[0] = '\0';
1328 
1329 	return btf_show_obj_safe(show, t, data);
1330 }
1331 
1332 static void btf_show_end_type(struct btf_show *show)
1333 {
1334 	show->state.type = NULL;
1335 	show->state.type_id = 0;
1336 	show->state.name[0] = '\0';
1337 }
1338 
1339 static void *btf_show_start_aggr_type(struct btf_show *show,
1340 				      const struct btf_type *t,
1341 				      u32 type_id, void *data)
1342 {
1343 	void *safe_data = btf_show_start_type(show, t, type_id, data);
1344 
1345 	if (!safe_data)
1346 		return safe_data;
1347 
1348 	btf_show(show, "%s%s%s", btf_show_indent(show),
1349 		 btf_show_name(show),
1350 		 btf_show_newline(show));
1351 	show->state.depth++;
1352 	return safe_data;
1353 }
1354 
1355 static void btf_show_end_aggr_type(struct btf_show *show,
1356 				   const char *suffix)
1357 {
1358 	show->state.depth--;
1359 	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1360 		 btf_show_delim(show), btf_show_newline(show));
1361 	btf_show_end_type(show);
1362 }
1363 
1364 static void btf_show_start_member(struct btf_show *show,
1365 				  const struct btf_member *m)
1366 {
1367 	show->state.member = m;
1368 }
1369 
1370 static void btf_show_start_array_member(struct btf_show *show)
1371 {
1372 	show->state.array_member = 1;
1373 	btf_show_start_member(show, NULL);
1374 }
1375 
1376 static void btf_show_end_member(struct btf_show *show)
1377 {
1378 	show->state.member = NULL;
1379 }
1380 
1381 static void btf_show_end_array_member(struct btf_show *show)
1382 {
1383 	show->state.array_member = 0;
1384 	btf_show_end_member(show);
1385 }
1386 
1387 static void *btf_show_start_array_type(struct btf_show *show,
1388 				       const struct btf_type *t,
1389 				       u32 type_id,
1390 				       u16 array_encoding,
1391 				       void *data)
1392 {
1393 	show->state.array_encoding = array_encoding;
1394 	show->state.array_terminated = 0;
1395 	return btf_show_start_aggr_type(show, t, type_id, data);
1396 }
1397 
1398 static void btf_show_end_array_type(struct btf_show *show)
1399 {
1400 	show->state.array_encoding = 0;
1401 	show->state.array_terminated = 0;
1402 	btf_show_end_aggr_type(show, "]");
1403 }
1404 
1405 static void *btf_show_start_struct_type(struct btf_show *show,
1406 					const struct btf_type *t,
1407 					u32 type_id,
1408 					void *data)
1409 {
1410 	return btf_show_start_aggr_type(show, t, type_id, data);
1411 }
1412 
1413 static void btf_show_end_struct_type(struct btf_show *show)
1414 {
1415 	btf_show_end_aggr_type(show, "}");
1416 }
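
/* Example (editor's addition, approximate): with BTF_SHOW_COMPACT the
 * start/end helpers above bracket the members, so a struct renders on
 * one line as "(struct foo){.a = (int)1,.b = (int)2,}".
 */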
1417 
1418 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1419 					      const char *fmt, ...)
1420 {
1421 	va_list args;
1422 
1423 	va_start(args, fmt);
1424 	bpf_verifier_vlog(log, fmt, args);
1425 	va_end(args);
1426 }
1427 
1428 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1429 					    const char *fmt, ...)
1430 {
1431 	struct bpf_verifier_log *log = &env->log;
1432 	va_list args;
1433 
1434 	if (!bpf_verifier_log_needed(log))
1435 		return;
1436 
1437 	va_start(args, fmt);
1438 	bpf_verifier_vlog(log, fmt, args);
1439 	va_end(args);
1440 }
1441 
1442 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1443 						   const struct btf_type *t,
1444 						   bool log_details,
1445 						   const char *fmt, ...)
1446 {
1447 	struct bpf_verifier_log *log = &env->log;
1448 	struct btf *btf = env->btf;
1449 	va_list args;
1450 
1451 	if (!bpf_verifier_log_needed(log))
1452 		return;
1453 
1454 	if (log->level == BPF_LOG_KERNEL) {
1455 		/* btf verifier prints all types it is processing via
1456 		 * btf_verifier_log_type(..., fmt = NULL).
1457 		 * Skip those prints for in-kernel BTF verification.
1458 		 */
1459 		if (!fmt)
1460 			return;
1461 
1462 		/* Skip logging when loading module BTF with mismatches permitted */
1463 		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1464 			return;
1465 	}
1466 
1467 	__btf_verifier_log(log, "[%u] %s %s%s",
1468 			   env->log_type_id,
1469 			   btf_type_str(t),
1470 			   __btf_name_by_offset(btf, t->name_off),
1471 			   log_details ? " " : "");
1472 
1473 	if (log_details)
1474 		btf_type_ops(t)->log_details(env, t);
1475 
1476 	if (fmt && *fmt) {
1477 		__btf_verifier_log(log, " ");
1478 		va_start(args, fmt);
1479 		bpf_verifier_vlog(log, fmt, args);
1480 		va_end(args);
1481 	}
1482 
1483 	__btf_verifier_log(log, "\n");
1484 }
1485 
1486 #define btf_verifier_log_type(env, t, ...) \
1487 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1488 #define btf_verifier_log_basic(env, t, ...) \
1489 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1490 
1491 __printf(4, 5)
1492 static void btf_verifier_log_member(struct btf_verifier_env *env,
1493 				    const struct btf_type *struct_type,
1494 				    const struct btf_member *member,
1495 				    const char *fmt, ...)
1496 {
1497 	struct bpf_verifier_log *log = &env->log;
1498 	struct btf *btf = env->btf;
1499 	va_list args;
1500 
1501 	if (!bpf_verifier_log_needed(log))
1502 		return;
1503 
1504 	if (log->level == BPF_LOG_KERNEL) {
1505 		if (!fmt)
1506 			return;
1507 
1508 		/* Skip logging when loading module BTF with mismatches permitted */
1509 		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1510 			return;
1511 	}
1512 
1513 	/* The CHECK_META phase already did a btf dump.
1514 	 *
1515 	 * If a member is logged again, it must have hit an error in
1516 	 * parsing this member.  It is useful to print out which
1517 	 * struct this member belongs to.
1518 	 */
1519 	if (env->phase != CHECK_META)
1520 		btf_verifier_log_type(env, struct_type, NULL);
1521 
1522 	if (btf_type_kflag(struct_type))
1523 		__btf_verifier_log(log,
1524 				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1525 				   __btf_name_by_offset(btf, member->name_off),
1526 				   member->type,
1527 				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
1528 				   BTF_MEMBER_BIT_OFFSET(member->offset));
1529 	else
1530 		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1531 				   __btf_name_by_offset(btf, member->name_off),
1532 				   member->type, member->offset);
1533 
1534 	if (fmt && *fmt) {
1535 		__btf_verifier_log(log, " ");
1536 		va_start(args, fmt);
1537 		bpf_verifier_vlog(log, fmt, args);
1538 		va_end(args);
1539 	}
1540 
1541 	__btf_verifier_log(log, "\n");
1542 }
1543 
1544 __printf(4, 5)
1545 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1546 				 const struct btf_type *datasec_type,
1547 				 const struct btf_var_secinfo *vsi,
1548 				 const char *fmt, ...)
1549 {
1550 	struct bpf_verifier_log *log = &env->log;
1551 	va_list args;
1552 
1553 	if (!bpf_verifier_log_needed(log))
1554 		return;
1555 	if (log->level == BPF_LOG_KERNEL && !fmt)
1556 		return;
1557 	if (env->phase != CHECK_META)
1558 		btf_verifier_log_type(env, datasec_type, NULL);
1559 
1560 	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1561 			   vsi->type, vsi->offset, vsi->size);
1562 	if (fmt && *fmt) {
1563 		__btf_verifier_log(log, " ");
1564 		va_start(args, fmt);
1565 		bpf_verifier_vlog(log, fmt, args);
1566 		va_end(args);
1567 	}
1568 
1569 	__btf_verifier_log(log, "\n");
1570 }
1571 
1572 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1573 				 u32 btf_data_size)
1574 {
1575 	struct bpf_verifier_log *log = &env->log;
1576 	const struct btf *btf = env->btf;
1577 	const struct btf_header *hdr;
1578 
1579 	if (!bpf_verifier_log_needed(log))
1580 		return;
1581 
1582 	if (log->level == BPF_LOG_KERNEL)
1583 		return;
1584 	hdr = &btf->hdr;
1585 	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1586 	__btf_verifier_log(log, "version: %u\n", hdr->version);
1587 	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1588 	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1589 	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1590 	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1591 	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1592 	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1593 	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1594 }
1595 
1596 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1597 {
1598 	struct btf *btf = env->btf;
1599 
1600 	if (btf->types_size == btf->nr_types) {
1601 		/* Expand 'types' array */
1602 
1603 		struct btf_type **new_types;
1604 		u32 expand_by, new_size;
1605 
1606 		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1607 			btf_verifier_log(env, "Exceeded max num of types");
1608 			return -E2BIG;
1609 		}
1610 
1611 		expand_by = max_t(u32, btf->types_size >> 2, 16);
1612 		new_size = min_t(u32, BTF_MAX_TYPE,
1613 				 btf->types_size + expand_by);
1614 
1615 		new_types = kvcalloc(new_size, sizeof(*new_types),
1616 				     GFP_KERNEL | __GFP_NOWARN);
1617 		if (!new_types)
1618 			return -ENOMEM;
1619 
1620 		if (btf->nr_types == 0) {
1621 			if (!btf->base_btf) {
1622 				/* lazily init VOID type */
1623 				new_types[0] = &btf_void;
1624 				btf->nr_types++;
1625 			}
1626 		} else {
1627 			memcpy(new_types, btf->types,
1628 			       sizeof(*btf->types) * btf->nr_types);
1629 		}
1630 
1631 		kvfree(btf->types);
1632 		btf->types = new_types;
1633 		btf->types_size = new_size;
1634 	}
1635 
1636 	btf->types[btf->nr_types++] = t;
1637 
1638 	return 0;
1639 }
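
/* Growth example (editor's addition): a full 64-slot types array
 * expands by max_t(u32, 64 >> 2, 16) == 16 slots to 80, i.e. roughly
 * 25% geometric growth with a floor of 16, capped at BTF_MAX_TYPE.
 */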
1640 
1641 static int btf_alloc_id(struct btf *btf)
1642 {
1643 	int id;
1644 
1645 	idr_preload(GFP_KERNEL);
1646 	spin_lock_bh(&btf_idr_lock);
1647 	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1648 	if (id > 0)
1649 		btf->id = id;
1650 	spin_unlock_bh(&btf_idr_lock);
1651 	idr_preload_end();
1652 
1653 	if (WARN_ON_ONCE(!id))
1654 		return -ENOSPC;
1655 
1656 	return id > 0 ? 0 : id;
1657 }
1658 
1659 static void btf_free_id(struct btf *btf)
1660 {
1661 	unsigned long flags;
1662 
1663 	/*
1664 	 * In map-in-map, calling map_delete_elem() on the outer
1665 	 * map will call bpf_map_put() on the inner map.
1666 	 * It will then eventually call btf_free_id()
1667 	 * on the inner map.  Some map_delete_elem()
1668 	 * implementations may have IRQs disabled, so
1669 	 * we need to use the _irqsave() version instead
1670 	 * of the _bh() version.
1671 	 */
1672 	spin_lock_irqsave(&btf_idr_lock, flags);
1673 	idr_remove(&btf_idr, btf->id);
1674 	spin_unlock_irqrestore(&btf_idr_lock, flags);
1675 }
1676 
1677 static void btf_free_kfunc_set_tab(struct btf *btf)
1678 {
1679 	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1680 	int hook;
1681 
1682 	if (!tab)
1683 		return;
1684 	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1685 		kfree(tab->sets[hook]);
1686 	kfree(tab);
1687 	btf->kfunc_set_tab = NULL;
1688 }
1689 
1690 static void btf_free_dtor_kfunc_tab(struct btf *btf)
1691 {
1692 	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1693 
1694 	if (!tab)
1695 		return;
1696 	kfree(tab);
1697 	btf->dtor_kfunc_tab = NULL;
1698 }
1699 
1700 static void btf_struct_metas_free(struct btf_struct_metas *tab)
1701 {
1702 	int i;
1703 
1704 	if (!tab)
1705 		return;
1706 	for (i = 0; i < tab->cnt; i++)
1707 		btf_record_free(tab->types[i].record);
1708 	kfree(tab);
1709 }
1710 
1711 static void btf_free_struct_meta_tab(struct btf *btf)
1712 {
1713 	struct btf_struct_metas *tab = btf->struct_meta_tab;
1714 
1715 	btf_struct_metas_free(tab);
1716 	btf->struct_meta_tab = NULL;
1717 }
1718 
1719 static void btf_free_struct_ops_tab(struct btf *btf)
1720 {
1721 	struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
1722 	u32 i;
1723 
1724 	if (!tab)
1725 		return;
1726 
1727 	for (i = 0; i < tab->cnt; i++)
1728 		bpf_struct_ops_desc_release(&tab->ops[i]);
1729 
1730 	kfree(tab);
1731 	btf->struct_ops_tab = NULL;
1732 }
1733 
1734 static void btf_free(struct btf *btf)
1735 {
1736 	btf_free_struct_meta_tab(btf);
1737 	btf_free_dtor_kfunc_tab(btf);
1738 	btf_free_kfunc_set_tab(btf);
1739 	btf_free_struct_ops_tab(btf);
1740 	kvfree(btf->types);
1741 	kvfree(btf->resolved_sizes);
1742 	kvfree(btf->resolved_ids);
1743 	/* vmlinux does not allocate btf->data; it simply points it at
1744 	 * __start_BTF.
1745 	 */
1746 	if (!btf_is_vmlinux(btf))
1747 		kvfree(btf->data);
1748 	kvfree(btf->base_id_map);
1749 	kfree(btf);
1750 }
1751 
1752 static void btf_free_rcu(struct rcu_head *rcu)
1753 {
1754 	struct btf *btf = container_of(rcu, struct btf, rcu);
1755 
1756 	btf_free(btf);
1757 }
1758 
1759 const char *btf_get_name(const struct btf *btf)
1760 {
1761 	return btf->name;
1762 }
1763 
1764 void btf_get(struct btf *btf)
1765 {
1766 	refcount_inc(&btf->refcnt);
1767 }
1768 
1769 void btf_put(struct btf *btf)
1770 {
1771 	if (btf && refcount_dec_and_test(&btf->refcnt)) {
1772 		btf_free_id(btf);
1773 		call_rcu(&btf->rcu, btf_free_rcu);
1774 	}
1775 }
1776 
1777 struct btf *btf_base_btf(const struct btf *btf)
1778 {
1779 	return btf->base_btf;
1780 }
1781 
1782 const struct btf_header *btf_header(const struct btf *btf)
1783 {
1784 	return &btf->hdr;
1785 }
1786 
1787 void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
1788 {
1789 	btf->base_btf = (struct btf *)base_btf;
1790 	btf->start_id = btf_nr_types(base_btf);
1791 	btf->start_str_off = base_btf->hdr.str_len;
1792 }
1793 
1794 static int env_resolve_init(struct btf_verifier_env *env)
1795 {
1796 	struct btf *btf = env->btf;
1797 	u32 nr_types = btf->nr_types;
1798 	u32 *resolved_sizes = NULL;
1799 	u32 *resolved_ids = NULL;
1800 	u8 *visit_states = NULL;
1801 
1802 	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1803 				  GFP_KERNEL | __GFP_NOWARN);
1804 	if (!resolved_sizes)
1805 		goto nomem;
1806 
1807 	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1808 				GFP_KERNEL | __GFP_NOWARN);
1809 	if (!resolved_ids)
1810 		goto nomem;
1811 
1812 	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1813 				GFP_KERNEL | __GFP_NOWARN);
1814 	if (!visit_states)
1815 		goto nomem;
1816 
1817 	btf->resolved_sizes = resolved_sizes;
1818 	btf->resolved_ids = resolved_ids;
1819 	env->visit_states = visit_states;
1820 
1821 	return 0;
1822 
1823 nomem:
1824 	kvfree(resolved_sizes);
1825 	kvfree(resolved_ids);
1826 	kvfree(visit_states);
1827 	return -ENOMEM;
1828 }
1829 
1830 static void btf_verifier_env_free(struct btf_verifier_env *env)
1831 {
1832 	kvfree(env->visit_states);
1833 	kfree(env);
1834 }
1835 
1836 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1837 				     const struct btf_type *next_type)
1838 {
1839 	switch (env->resolve_mode) {
1840 	case RESOLVE_TBD:
1841 		/* int, enum or void is a sink */
1842 		return !btf_type_needs_resolve(next_type);
1843 	case RESOLVE_PTR:
1844 		/* int, enum, void, struct, array, func or func_proto is a sink
1845 		 * for ptr
1846 		 */
1847 		return !btf_type_is_modifier(next_type) &&
1848 			!btf_type_is_ptr(next_type);
1849 	case RESOLVE_STRUCT_OR_ARRAY:
1850 		/* int, enum, void, ptr, func or func_proto is a sink
1851 		 * for struct and array
1852 		 */
1853 		return !btf_type_is_modifier(next_type) &&
1854 			!btf_type_is_array(next_type) &&
1855 			!btf_type_is_struct(next_type);
1856 	default:
1857 		BUG();
1858 	}
1859 }
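
/* Example (illustrative): when resolving "const struct foo *", pushing
 * the PTR sets resolve_mode to RESOLVE_PTR.  The CONST behind it is a
 * modifier, so it is not a sink and gets followed; the struct behind
 * the modifier is a sink for RESOLVE_PTR, so the pointer can be
 * resolved without first resolving the struct itself.
 */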
1860 
1861 static bool env_type_is_resolved(const struct btf_verifier_env *env,
1862 				 u32 type_id)
1863 {
1864 	/* base BTF types should be resolved by now */
1865 	if (type_id < env->btf->start_id)
1866 		return true;
1867 
1868 	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1869 }
1870 
1871 static int env_stack_push(struct btf_verifier_env *env,
1872 			  const struct btf_type *t, u32 type_id)
1873 {
1874 	const struct btf *btf = env->btf;
1875 	struct resolve_vertex *v;
1876 
1877 	if (env->top_stack == MAX_RESOLVE_DEPTH)
1878 		return -E2BIG;
1879 
1880 	if (type_id < btf->start_id
1881 	    || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1882 		return -EEXIST;
1883 
1884 	env->visit_states[type_id - btf->start_id] = VISITED;
1885 
1886 	v = &env->stack[env->top_stack++];
1887 	v->t = t;
1888 	v->type_id = type_id;
1889 	v->next_member = 0;
1890 
1891 	if (env->resolve_mode == RESOLVE_TBD) {
1892 		if (btf_type_is_ptr(t))
1893 			env->resolve_mode = RESOLVE_PTR;
1894 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1895 			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1896 	}
1897 
1898 	return 0;
1899 }
1900 
1901 static void env_stack_set_next_member(struct btf_verifier_env *env,
1902 				      u16 next_member)
1903 {
1904 	env->stack[env->top_stack - 1].next_member = next_member;
1905 }
1906 
1907 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1908 				   u32 resolved_type_id,
1909 				   u32 resolved_size)
1910 {
1911 	u32 type_id = env->stack[--(env->top_stack)].type_id;
1912 	struct btf *btf = env->btf;
1913 
1914 	type_id -= btf->start_id; /* adjust to local type id */
1915 	btf->resolved_sizes[type_id] = resolved_size;
1916 	btf->resolved_ids[type_id] = resolved_type_id;
1917 	env->visit_states[type_id] = RESOLVED;
1918 }
1919 
1920 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1921 {
1922 	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1923 }
1924 
1925 /* Resolve the size of a passed-in "type"
1926  *
1927  * type: is an array (e.g. u32 array[x][y])
1928  * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1929  * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
1930  *             corresponds to the return type.
1931  * *elem_type: u32
1932  * *elem_id: id of u32
1933  * *total_nelems: (x * y).  Hence, individual elem size is
1934  *                (*type_size / *total_nelems)
1935  * *type_id: id of type if it's changed within the function, 0 if not
1936  *
1937  * type: is not an array (e.g. const struct X)
1938  * return type: type "struct X"
1939  * *type_size: sizeof(struct X)
1940  * *elem_type: same as return type ("struct X")
1941  * *elem_id: 0
1942  * *total_nelems: 1
1943  * *type_id: id of type if it's changed within the function, 0 if not
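 *
 * Example (illustrative): for "u32 arr[2][3]", the return type is the
 * BTF_KIND_ARRAY type, *type_size is 2 * 3 * sizeof(u32) = 24,
 * *total_nelems is 6, *elem_type is the u32 type and *elem_id is the
 * type_id of u32.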
1944  */
1945 static const struct btf_type *
1946 __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1947 		   u32 *type_size, const struct btf_type **elem_type,
1948 		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
1949 {
1950 	const struct btf_type *array_type = NULL;
1951 	const struct btf_array *array = NULL;
1952 	u32 i, size, nelems = 1, id = 0;
1953 
1954 	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1955 		switch (BTF_INFO_KIND(type->info)) {
1956 		/* type->size can be used */
1957 		case BTF_KIND_INT:
1958 		case BTF_KIND_STRUCT:
1959 		case BTF_KIND_UNION:
1960 		case BTF_KIND_ENUM:
1961 		case BTF_KIND_FLOAT:
1962 		case BTF_KIND_ENUM64:
1963 			size = type->size;
1964 			goto resolved;
1965 
1966 		case BTF_KIND_PTR:
1967 			size = sizeof(void *);
1968 			goto resolved;
1969 
1970 		/* Modifiers */
1971 		case BTF_KIND_TYPEDEF:
1972 		case BTF_KIND_VOLATILE:
1973 		case BTF_KIND_CONST:
1974 		case BTF_KIND_RESTRICT:
1975 		case BTF_KIND_TYPE_TAG:
1976 			id = type->type;
1977 			type = btf_type_by_id(btf, type->type);
1978 			break;
1979 
1980 		case BTF_KIND_ARRAY:
1981 			if (!array_type)
1982 				array_type = type;
1983 			array = btf_type_array(type);
1984 			if (nelems && array->nelems > U32_MAX / nelems)
1985 				return ERR_PTR(-EINVAL);
1986 			nelems *= array->nelems;
1987 			type = btf_type_by_id(btf, array->type);
1988 			break;
1989 
1990 		/* type without size */
1991 		default:
1992 			return ERR_PTR(-EINVAL);
1993 		}
1994 	}
1995 
1996 	return ERR_PTR(-EINVAL);
1997 
1998 resolved:
1999 	if (nelems && size > U32_MAX / nelems)
2000 		return ERR_PTR(-EINVAL);
2001 
2002 	*type_size = nelems * size;
2003 	if (total_nelems)
2004 		*total_nelems = nelems;
2005 	if (elem_type)
2006 		*elem_type = type;
2007 	if (elem_id)
2008 		*elem_id = array ? array->type : 0;
2009 	if (type_id && id)
2010 		*type_id = id;
2011 
2012 	return array_type ? : type;
2013 }
2014 
2015 const struct btf_type *
2016 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
2017 		 u32 *type_size)
2018 {
2019 	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
2020 }
2021 
2022 static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
2023 {
2024 	while (type_id < btf->start_id)
2025 		btf = btf->base_btf;
2026 
2027 	return btf->resolved_ids[type_id - btf->start_id];
2028 }
2029 
2030 /* The input param "type_id" must point to a needs_resolve type */
2031 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
2032 						  u32 *type_id)
2033 {
2034 	*type_id = btf_resolved_type_id(btf, *type_id);
2035 	return btf_type_by_id(btf, *type_id);
2036 }
2037 
2038 static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
2039 {
2040 	while (type_id < btf->start_id)
2041 		btf = btf->base_btf;
2042 
2043 	return btf->resolved_sizes[type_id - btf->start_id];
2044 }
2045 
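/* Resolve *type_id to a type with a known size, following the
 * pre-computed resolved_ids for modifiers and variables.
 *
 * Example (illustrative): called with *type_id naming a "const u32",
 * this follows the resolved id of the CONST modifier, updates *type_id
 * to the id of the underlying u32 INT type and sets *ret_size to 4.
 */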
2046 const struct btf_type *btf_type_id_size(const struct btf *btf,
2047 					u32 *type_id, u32 *ret_size)
2048 {
2049 	const struct btf_type *size_type;
2050 	u32 size_type_id = *type_id;
2051 	u32 size = 0;
2052 
2053 	size_type = btf_type_by_id(btf, size_type_id);
2054 	if (btf_type_nosize_or_null(size_type))
2055 		return NULL;
2056 
2057 	if (btf_type_has_size(size_type)) {
2058 		size = size_type->size;
2059 	} else if (btf_type_is_array(size_type)) {
2060 		size = btf_resolved_type_size(btf, size_type_id);
2061 	} else if (btf_type_is_ptr(size_type)) {
2062 		size = sizeof(void *);
2063 	} else {
2064 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
2065 				 !btf_type_is_var(size_type)))
2066 			return NULL;
2067 
2068 		size_type_id = btf_resolved_type_id(btf, size_type_id);
2069 		size_type = btf_type_by_id(btf, size_type_id);
2070 		if (btf_type_nosize_or_null(size_type))
2071 			return NULL;
2072 		else if (btf_type_has_size(size_type))
2073 			size = size_type->size;
2074 		else if (btf_type_is_array(size_type))
2075 			size = btf_resolved_type_size(btf, size_type_id);
2076 		else if (btf_type_is_ptr(size_type))
2077 			size = sizeof(void *);
2078 		else
2079 			return NULL;
2080 	}
2081 
2082 	*type_id = size_type_id;
2083 	if (ret_size)
2084 		*ret_size = size;
2085 
2086 	return size_type;
2087 }
2088 
2089 static int btf_df_check_member(struct btf_verifier_env *env,
2090 			       const struct btf_type *struct_type,
2091 			       const struct btf_member *member,
2092 			       const struct btf_type *member_type)
2093 {
2094 	btf_verifier_log_basic(env, struct_type,
2095 			       "Unsupported check_member");
2096 	return -EINVAL;
2097 }
2098 
2099 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
2100 				     const struct btf_type *struct_type,
2101 				     const struct btf_member *member,
2102 				     const struct btf_type *member_type)
2103 {
2104 	btf_verifier_log_basic(env, struct_type,
2105 			       "Unsupported check_kflag_member");
2106 	return -EINVAL;
2107 }
2108 
/* Used for ptr, array, struct/union and float type members.
 * int, enum and modifier types have their own specific callback functions.
2111  */
2112 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
2113 					  const struct btf_type *struct_type,
2114 					  const struct btf_member *member,
2115 					  const struct btf_type *member_type)
2116 {
2117 	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2118 		btf_verifier_log_member(env, struct_type, member,
2119 					"Invalid member bitfield_size");
2120 		return -EINVAL;
2121 	}
2122 
	/* bitfield size is 0, so member->offset represents the bit offset only.
	 * It is safe to call the non-kflag check_member variants.
2125 	 */
2126 	return btf_type_ops(member_type)->check_member(env, struct_type,
2127 						       member,
2128 						       member_type);
2129 }
2130 
2131 static int btf_df_resolve(struct btf_verifier_env *env,
2132 			  const struct resolve_vertex *v)
2133 {
2134 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2135 	return -EINVAL;
2136 }
2137 
2138 static void btf_df_show(const struct btf *btf, const struct btf_type *t,
2139 			u32 type_id, void *data, u8 bits_offsets,
2140 			struct btf_show *show)
2141 {
2142 	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2143 }
2144 
2145 static int btf_int_check_member(struct btf_verifier_env *env,
2146 				const struct btf_type *struct_type,
2147 				const struct btf_member *member,
2148 				const struct btf_type *member_type)
2149 {
2150 	u32 int_data = btf_type_int(member_type);
2151 	u32 struct_bits_off = member->offset;
2152 	u32 struct_size = struct_type->size;
2153 	u32 nr_copy_bits;
2154 	u32 bytes_offset;
2155 
2156 	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2157 		btf_verifier_log_member(env, struct_type, member,
2158 					"bits_offset exceeds U32_MAX");
2159 		return -EINVAL;
2160 	}
2161 
2162 	struct_bits_off += BTF_INT_OFFSET(int_data);
2163 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2164 	nr_copy_bits = BTF_INT_BITS(int_data) +
2165 		BITS_PER_BYTE_MASKED(struct_bits_off);
2166 
2167 	if (nr_copy_bits > BITS_PER_U128) {
2168 		btf_verifier_log_member(env, struct_type, member,
2169 					"nr_copy_bits exceeds 128");
2170 		return -EINVAL;
2171 	}
2172 
2173 	if (struct_size < bytes_offset ||
2174 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2175 		btf_verifier_log_member(env, struct_type, member,
2176 					"Member exceeds struct_size");
2177 		return -EINVAL;
2178 	}
2179 
2180 	return 0;
2181 }
2182 
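/* With the struct's kind_flag set, member->offset encodes both the
 * bitfield size and the bit offset.  Example (illustrative): for a
 * member "int x : 5" starting at bit 3, member->offset is
 * (5 << 24) | 3, so BTF_MEMBER_BITFIELD_SIZE() yields 5 and
 * BTF_MEMBER_BIT_OFFSET() yields 3.
 */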
2183 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
2184 				      const struct btf_type *struct_type,
2185 				      const struct btf_member *member,
2186 				      const struct btf_type *member_type)
2187 {
2188 	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
2189 	u32 int_data = btf_type_int(member_type);
2190 	u32 struct_size = struct_type->size;
2191 	u32 nr_copy_bits;
2192 
2193 	/* a regular int type is required for the kflag int member */
2194 	if (!btf_type_int_is_regular(member_type)) {
2195 		btf_verifier_log_member(env, struct_type, member,
2196 					"Invalid member base type");
2197 		return -EINVAL;
2198 	}
2199 
2200 	/* check sanity of bitfield size */
2201 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2202 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2203 	nr_int_data_bits = BTF_INT_BITS(int_data);
2204 	if (!nr_bits) {
		/* Not a bitfield member, so the member offset must be at a
		 * byte boundary.
2207 		 */
2208 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2209 			btf_verifier_log_member(env, struct_type, member,
2210 						"Invalid member offset");
2211 			return -EINVAL;
2212 		}
2213 
2214 		nr_bits = nr_int_data_bits;
2215 	} else if (nr_bits > nr_int_data_bits) {
2216 		btf_verifier_log_member(env, struct_type, member,
2217 					"Invalid member bitfield_size");
2218 		return -EINVAL;
2219 	}
2220 
2221 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2222 	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2223 	if (nr_copy_bits > BITS_PER_U128) {
2224 		btf_verifier_log_member(env, struct_type, member,
2225 					"nr_copy_bits exceeds 128");
2226 		return -EINVAL;
2227 	}
2228 
2229 	if (struct_size < bytes_offset ||
2230 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2231 		btf_verifier_log_member(env, struct_type, member,
2232 					"Member exceeds struct_size");
2233 		return -EINVAL;
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 static s32 btf_int_check_meta(struct btf_verifier_env *env,
2240 			      const struct btf_type *t,
2241 			      u32 meta_left)
2242 {
2243 	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2244 	u16 encoding;
2245 
2246 	if (meta_left < meta_needed) {
2247 		btf_verifier_log_basic(env, t,
2248 				       "meta_left:%u meta_needed:%u",
2249 				       meta_left, meta_needed);
2250 		return -EINVAL;
2251 	}
2252 
2253 	if (btf_type_vlen(t)) {
2254 		btf_verifier_log_type(env, t, "vlen != 0");
2255 		return -EINVAL;
2256 	}
2257 
2258 	if (btf_type_kflag(t)) {
2259 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2260 		return -EINVAL;
2261 	}
2262 
2263 	int_data = btf_type_int(t);
2264 	if (int_data & ~BTF_INT_MASK) {
2265 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2266 				       int_data);
2267 		return -EINVAL;
2268 	}
2269 
2270 	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2271 
2272 	if (nr_bits > BITS_PER_U128) {
2273 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2274 				      BITS_PER_U128);
2275 		return -EINVAL;
2276 	}
2277 
2278 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2279 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2280 		return -EINVAL;
2281 	}
2282 
2283 	/*
	 * Only one of the encoding bits is allowed, and it
	 * should be sufficient for pretty printing (i.e. decoding).
	 * Multiple bits can be allowed later if that proves
	 * insufficient.
2288 	 */
2289 	encoding = BTF_INT_ENCODING(int_data);
2290 	if (encoding &&
2291 	    encoding != BTF_INT_SIGNED &&
2292 	    encoding != BTF_INT_CHAR &&
2293 	    encoding != BTF_INT_BOOL) {
2294 		btf_verifier_log_type(env, t, "Unsupported encoding");
2295 		return -ENOTSUPP;
2296 	}
2297 
2298 	btf_verifier_log_type(env, t, NULL);
2299 
2300 	return meta_needed;
2301 }
2302 
2303 static void btf_int_log(struct btf_verifier_env *env,
2304 			const struct btf_type *t)
2305 {
2306 	int int_data = btf_type_int(t);
2307 
2308 	btf_verifier_log(env,
2309 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2310 			 t->size, BTF_INT_OFFSET(int_data),
2311 			 BTF_INT_BITS(int_data),
2312 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2313 }
2314 
2315 static void btf_int128_print(struct btf_show *show, void *data)
2316 {
2317 	/* data points to a __int128 number.
2318 	 * Suppose
2319 	 *     int128_num = *(__int128 *)data;
	 * The formulas below show what upper_num and lower_num represent:
2321 	 *     upper_num = int128_num >> 64;
2322 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2323 	 */
2324 	u64 upper_num, lower_num;
2325 
2326 #ifdef __BIG_ENDIAN_BITFIELD
2327 	upper_num = *(u64 *)data;
2328 	lower_num = *(u64 *)(data + 8);
2329 #else
2330 	upper_num = *(u64 *)(data + 8);
2331 	lower_num = *(u64 *)data;
2332 #endif
2333 	if (upper_num == 0)
2334 		btf_show_type_value(show, "0x%llx", lower_num);
2335 	else
2336 		btf_show_type_values(show, "0x%llx%016llx", upper_num,
2337 				     lower_num);
2338 }
2339 
2340 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2341 			     u16 right_shift_bits)
2342 {
2343 	u64 upper_num, lower_num;
2344 
2345 #ifdef __BIG_ENDIAN_BITFIELD
2346 	upper_num = print_num[0];
2347 	lower_num = print_num[1];
2348 #else
2349 	upper_num = print_num[1];
2350 	lower_num = print_num[0];
2351 #endif
2352 
	/* shake out unneeded bits with shift/or operations */
2354 	if (left_shift_bits >= 64) {
2355 		upper_num = lower_num << (left_shift_bits - 64);
2356 		lower_num = 0;
2357 	} else {
2358 		upper_num = (upper_num << left_shift_bits) |
2359 			    (lower_num >> (64 - left_shift_bits));
2360 		lower_num = lower_num << left_shift_bits;
2361 	}
2362 
2363 	if (right_shift_bits >= 64) {
2364 		lower_num = upper_num >> (right_shift_bits - 64);
2365 		upper_num = 0;
2366 	} else {
2367 		lower_num = (lower_num >> right_shift_bits) |
2368 			    (upper_num << (64 - right_shift_bits));
2369 		upper_num = upper_num >> right_shift_bits;
2370 	}
2371 
2372 #ifdef __BIG_ENDIAN_BITFIELD
2373 	print_num[0] = upper_num;
2374 	print_num[1] = lower_num;
2375 #else
2376 	print_num[0] = lower_num;
2377 	print_num[1] = upper_num;
2378 #endif
2379 }
2380 
2381 static void btf_bitfield_show(void *data, u8 bits_offset,
2382 			      u8 nr_bits, struct btf_show *show)
2383 {
2384 	u16 left_shift_bits, right_shift_bits;
2385 	u8 nr_copy_bytes;
2386 	u8 nr_copy_bits;
2387 	u64 print_num[2] = {};
2388 
2389 	nr_copy_bits = nr_bits + bits_offset;
2390 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2391 
2392 	memcpy(print_num, data, nr_copy_bytes);
2393 
2394 #ifdef __BIG_ENDIAN_BITFIELD
2395 	left_shift_bits = bits_offset;
2396 #else
2397 	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2398 #endif
2399 	right_shift_bits = BITS_PER_U128 - nr_bits;
2400 
2401 	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2402 	btf_int128_print(show, print_num);
2403 }
2404 
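/* Example (illustrative, little-endian): for nr_bits = 3 and
 * bits_offset = 2, nr_copy_bits = 5 and one byte is copied into
 * print_num.  left_shift_bits = 128 - 5 = 123 pushes the three
 * bitfield bits to the top of the 128-bit value, and
 * right_shift_bits = 128 - 3 = 125 brings them back down to the
 * lowest bits, discarding everything else.
 */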
2405 
2406 static void btf_int_bits_show(const struct btf *btf,
2407 			      const struct btf_type *t,
2408 			      void *data, u8 bits_offset,
2409 			      struct btf_show *show)
2410 {
2411 	u32 int_data = btf_type_int(t);
2412 	u8 nr_bits = BTF_INT_BITS(int_data);
2413 	u8 total_bits_offset;
2414 
2415 	/*
2416 	 * bits_offset is at most 7.
2417 	 * BTF_INT_OFFSET() cannot exceed 128 bits.
2418 	 */
2419 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2420 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2421 	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2422 	btf_bitfield_show(data, bits_offset, nr_bits, show);
2423 }
2424 
2425 static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2426 			 u32 type_id, void *data, u8 bits_offset,
2427 			 struct btf_show *show)
2428 {
2429 	u32 int_data = btf_type_int(t);
2430 	u8 encoding = BTF_INT_ENCODING(int_data);
2431 	bool sign = encoding & BTF_INT_SIGNED;
2432 	u8 nr_bits = BTF_INT_BITS(int_data);
2433 	void *safe_data;
2434 
2435 	safe_data = btf_show_start_type(show, t, type_id, data);
2436 	if (!safe_data)
2437 		return;
2438 
2439 	if (bits_offset || BTF_INT_OFFSET(int_data) ||
2440 	    BITS_PER_BYTE_MASKED(nr_bits)) {
2441 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2442 		goto out;
2443 	}
2444 
2445 	switch (nr_bits) {
2446 	case 128:
2447 		btf_int128_print(show, safe_data);
2448 		break;
2449 	case 64:
2450 		if (sign)
2451 			btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2452 		else
2453 			btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2454 		break;
2455 	case 32:
2456 		if (sign)
2457 			btf_show_type_value(show, "%d", *(s32 *)safe_data);
2458 		else
2459 			btf_show_type_value(show, "%u", *(u32 *)safe_data);
2460 		break;
2461 	case 16:
2462 		if (sign)
2463 			btf_show_type_value(show, "%d", *(s16 *)safe_data);
2464 		else
2465 			btf_show_type_value(show, "%u", *(u16 *)safe_data);
2466 		break;
2467 	case 8:
2468 		if (show->state.array_encoding == BTF_INT_CHAR) {
2469 			/* check for null terminator */
2470 			if (show->state.array_terminated)
2471 				break;
2472 			if (*(char *)data == '\0') {
2473 				show->state.array_terminated = 1;
2474 				break;
2475 			}
2476 			if (isprint(*(char *)data)) {
2477 				btf_show_type_value(show, "'%c'",
2478 						    *(char *)safe_data);
2479 				break;
2480 			}
2481 		}
2482 		if (sign)
2483 			btf_show_type_value(show, "%d", *(s8 *)safe_data);
2484 		else
2485 			btf_show_type_value(show, "%u", *(u8 *)safe_data);
2486 		break;
2487 	default:
2488 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2489 		break;
2490 	}
2491 out:
2492 	btf_show_end_type(show);
2493 }
2494 
2495 static const struct btf_kind_operations int_ops = {
2496 	.check_meta = btf_int_check_meta,
2497 	.resolve = btf_df_resolve,
2498 	.check_member = btf_int_check_member,
2499 	.check_kflag_member = btf_int_check_kflag_member,
2500 	.log_details = btf_int_log,
2501 	.show = btf_int_show,
2502 };
2503 
2504 static int btf_modifier_check_member(struct btf_verifier_env *env,
2505 				     const struct btf_type *struct_type,
2506 				     const struct btf_member *member,
2507 				     const struct btf_type *member_type)
2508 {
2509 	const struct btf_type *resolved_type;
2510 	u32 resolved_type_id = member->type;
2511 	struct btf_member resolved_member;
2512 	struct btf *btf = env->btf;
2513 
2514 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2515 	if (!resolved_type) {
2516 		btf_verifier_log_member(env, struct_type, member,
2517 					"Invalid member");
2518 		return -EINVAL;
2519 	}
2520 
2521 	resolved_member = *member;
2522 	resolved_member.type = resolved_type_id;
2523 
2524 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
2525 							 &resolved_member,
2526 							 resolved_type);
2527 }
2528 
2529 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2530 					   const struct btf_type *struct_type,
2531 					   const struct btf_member *member,
2532 					   const struct btf_type *member_type)
2533 {
2534 	const struct btf_type *resolved_type;
2535 	u32 resolved_type_id = member->type;
2536 	struct btf_member resolved_member;
2537 	struct btf *btf = env->btf;
2538 
2539 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2540 	if (!resolved_type) {
2541 		btf_verifier_log_member(env, struct_type, member,
2542 					"Invalid member");
2543 		return -EINVAL;
2544 	}
2545 
2546 	resolved_member = *member;
2547 	resolved_member.type = resolved_type_id;
2548 
2549 	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2550 							       &resolved_member,
2551 							       resolved_type);
2552 }
2553 
2554 static int btf_ptr_check_member(struct btf_verifier_env *env,
2555 				const struct btf_type *struct_type,
2556 				const struct btf_member *member,
2557 				const struct btf_type *member_type)
2558 {
2559 	u32 struct_size, struct_bits_off, bytes_offset;
2560 
2561 	struct_size = struct_type->size;
2562 	struct_bits_off = member->offset;
2563 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2564 
2565 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2566 		btf_verifier_log_member(env, struct_type, member,
2567 					"Member is not byte aligned");
2568 		return -EINVAL;
2569 	}
2570 
2571 	if (struct_size - bytes_offset < sizeof(void *)) {
2572 		btf_verifier_log_member(env, struct_type, member,
2573 					"Member exceeds struct_size");
2574 		return -EINVAL;
2575 	}
2576 
2577 	return 0;
2578 }
2579 
2580 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2581 				   const struct btf_type *t,
2582 				   u32 meta_left)
2583 {
2584 	const char *value;
2585 
2586 	if (btf_type_vlen(t)) {
2587 		btf_verifier_log_type(env, t, "vlen != 0");
2588 		return -EINVAL;
2589 	}
2590 
2591 	if (btf_type_kflag(t) && !btf_type_is_type_tag(t)) {
2592 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2593 		return -EINVAL;
2594 	}
2595 
2596 	if (!BTF_TYPE_ID_VALID(t->type)) {
2597 		btf_verifier_log_type(env, t, "Invalid type_id");
2598 		return -EINVAL;
2599 	}
2600 
	/* A typedef/type_tag type must have a valid name, while the other
	 * ref types (volatile, const, restrict) must have a null name.
2603 	 */
2604 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2605 		if (!t->name_off ||
2606 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
2607 			btf_verifier_log_type(env, t, "Invalid name");
2608 			return -EINVAL;
2609 		}
2610 	} else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2611 		value = btf_name_by_offset(env->btf, t->name_off);
2612 		if (!value || !value[0]) {
2613 			btf_verifier_log_type(env, t, "Invalid name");
2614 			return -EINVAL;
2615 		}
2616 	} else {
2617 		if (t->name_off) {
2618 			btf_verifier_log_type(env, t, "Invalid name");
2619 			return -EINVAL;
2620 		}
2621 	}
2622 
2623 	btf_verifier_log_type(env, t, NULL);
2624 
2625 	return 0;
2626 }
2627 
2628 static int btf_modifier_resolve(struct btf_verifier_env *env,
2629 				const struct resolve_vertex *v)
2630 {
2631 	const struct btf_type *t = v->t;
2632 	const struct btf_type *next_type;
2633 	u32 next_type_id = t->type;
2634 	struct btf *btf = env->btf;
2635 
2636 	next_type = btf_type_by_id(btf, next_type_id);
2637 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2638 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2639 		return -EINVAL;
2640 	}
2641 
2642 	if (!env_type_is_resolve_sink(env, next_type) &&
2643 	    !env_type_is_resolved(env, next_type_id))
2644 		return env_stack_push(env, next_type, next_type_id);
2645 
2646 	/* Figure out the resolved next_type_id with size.
2647 	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes so that later users
	 * (e.g. pretty printing) can avoid repeated type-following.
2651 	 */
2652 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2653 		if (env_type_is_resolved(env, next_type_id))
2654 			next_type = btf_type_id_resolve(btf, &next_type_id);
2655 
2656 		/* "typedef void new_void", "const void"...etc */
2657 		if (!btf_type_is_void(next_type) &&
2658 		    !btf_type_is_fwd(next_type) &&
2659 		    !btf_type_is_func_proto(next_type)) {
2660 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2661 			return -EINVAL;
2662 		}
2663 	}
2664 
2665 	env_stack_pop_resolved(env, next_type_id, 0);
2666 
2667 	return 0;
2668 }
2669 
2670 static int btf_var_resolve(struct btf_verifier_env *env,
2671 			   const struct resolve_vertex *v)
2672 {
2673 	const struct btf_type *next_type;
2674 	const struct btf_type *t = v->t;
2675 	u32 next_type_id = t->type;
2676 	struct btf *btf = env->btf;
2677 
2678 	next_type = btf_type_by_id(btf, next_type_id);
2679 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2680 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2681 		return -EINVAL;
2682 	}
2683 
2684 	if (!env_type_is_resolve_sink(env, next_type) &&
2685 	    !env_type_is_resolved(env, next_type_id))
2686 		return env_stack_push(env, next_type, next_type_id);
2687 
2688 	if (btf_type_is_modifier(next_type)) {
2689 		const struct btf_type *resolved_type;
2690 		u32 resolved_type_id;
2691 
2692 		resolved_type_id = next_type_id;
2693 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2694 
2695 		if (btf_type_is_ptr(resolved_type) &&
2696 		    !env_type_is_resolve_sink(env, resolved_type) &&
2697 		    !env_type_is_resolved(env, resolved_type_id))
2698 			return env_stack_push(env, resolved_type,
2699 					      resolved_type_id);
2700 	}
2701 
	/* We must resolve to something concrete at this point; no
	 * forward types or similar that would resolve to a size of
	 * zero are allowed.
2705 	 */
2706 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2707 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2708 		return -EINVAL;
2709 	}
2710 
2711 	env_stack_pop_resolved(env, next_type_id, 0);
2712 
2713 	return 0;
2714 }
2715 
2716 static int btf_ptr_resolve(struct btf_verifier_env *env,
2717 			   const struct resolve_vertex *v)
2718 {
2719 	const struct btf_type *next_type;
2720 	const struct btf_type *t = v->t;
2721 	u32 next_type_id = t->type;
2722 	struct btf *btf = env->btf;
2723 
2724 	next_type = btf_type_by_id(btf, next_type_id);
2725 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2726 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2727 		return -EINVAL;
2728 	}
2729 
2730 	if (!env_type_is_resolve_sink(env, next_type) &&
2731 	    !env_type_is_resolved(env, next_type_id))
2732 		return env_stack_push(env, next_type, next_type_id);
2733 
2734 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2735 	 * the modifier may have stopped resolving when it was resolved
2736 	 * to a ptr (last-resolved-ptr).
2737 	 *
2738 	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr does not refer back to
2740 	 * the current ptr (t).
2741 	 */
2742 	if (btf_type_is_modifier(next_type)) {
2743 		const struct btf_type *resolved_type;
2744 		u32 resolved_type_id;
2745 
2746 		resolved_type_id = next_type_id;
2747 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2748 
2749 		if (btf_type_is_ptr(resolved_type) &&
2750 		    !env_type_is_resolve_sink(env, resolved_type) &&
2751 		    !env_type_is_resolved(env, resolved_type_id))
2752 			return env_stack_push(env, resolved_type,
2753 					      resolved_type_id);
2754 	}
2755 
2756 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2757 		if (env_type_is_resolved(env, next_type_id))
2758 			next_type = btf_type_id_resolve(btf, &next_type_id);
2759 
2760 		if (!btf_type_is_void(next_type) &&
2761 		    !btf_type_is_fwd(next_type) &&
2762 		    !btf_type_is_func_proto(next_type)) {
2763 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2764 			return -EINVAL;
2765 		}
2766 	}
2767 
2768 	env_stack_pop_resolved(env, next_type_id, 0);
2769 
2770 	return 0;
2771 }
2772 
2773 static void btf_modifier_show(const struct btf *btf,
2774 			      const struct btf_type *t,
2775 			      u32 type_id, void *data,
2776 			      u8 bits_offset, struct btf_show *show)
2777 {
2778 	if (btf->resolved_ids)
2779 		t = btf_type_id_resolve(btf, &type_id);
2780 	else
2781 		t = btf_type_skip_modifiers(btf, type_id, NULL);
2782 
2783 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2784 }
2785 
2786 static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2787 			 u32 type_id, void *data, u8 bits_offset,
2788 			 struct btf_show *show)
2789 {
2790 	t = btf_type_id_resolve(btf, &type_id);
2791 
2792 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2793 }
2794 
2795 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2796 			 u32 type_id, void *data, u8 bits_offset,
2797 			 struct btf_show *show)
2798 {
2799 	void *safe_data;
2800 
2801 	safe_data = btf_show_start_type(show, t, type_id, data);
2802 	if (!safe_data)
2803 		return;
2804 
2805 	/* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2806 	if (show->flags & BTF_SHOW_PTR_RAW)
2807 		btf_show_type_value(show, "0x%px", *(void **)safe_data);
2808 	else
2809 		btf_show_type_value(show, "0x%p", *(void **)safe_data);
2810 	btf_show_end_type(show);
2811 }
2812 
2813 static void btf_ref_type_log(struct btf_verifier_env *env,
2814 			     const struct btf_type *t)
2815 {
2816 	btf_verifier_log(env, "type_id=%u", t->type);
2817 }
2818 
2819 static const struct btf_kind_operations modifier_ops = {
2820 	.check_meta = btf_ref_type_check_meta,
2821 	.resolve = btf_modifier_resolve,
2822 	.check_member = btf_modifier_check_member,
2823 	.check_kflag_member = btf_modifier_check_kflag_member,
2824 	.log_details = btf_ref_type_log,
2825 	.show = btf_modifier_show,
2826 };
2827 
2828 static const struct btf_kind_operations ptr_ops = {
2829 	.check_meta = btf_ref_type_check_meta,
2830 	.resolve = btf_ptr_resolve,
2831 	.check_member = btf_ptr_check_member,
2832 	.check_kflag_member = btf_generic_check_kflag_member,
2833 	.log_details = btf_ref_type_log,
2834 	.show = btf_ptr_show,
2835 };
2836 
2837 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2838 			      const struct btf_type *t,
2839 			      u32 meta_left)
2840 {
2841 	if (btf_type_vlen(t)) {
2842 		btf_verifier_log_type(env, t, "vlen != 0");
2843 		return -EINVAL;
2844 	}
2845 
2846 	if (t->type) {
2847 		btf_verifier_log_type(env, t, "type != 0");
2848 		return -EINVAL;
2849 	}
2850 
2851 	/* fwd type must have a valid name */
2852 	if (!t->name_off ||
2853 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2854 		btf_verifier_log_type(env, t, "Invalid name");
2855 		return -EINVAL;
2856 	}
2857 
2858 	btf_verifier_log_type(env, t, NULL);
2859 
2860 	return 0;
2861 }
2862 
2863 static void btf_fwd_type_log(struct btf_verifier_env *env,
2864 			     const struct btf_type *t)
2865 {
2866 	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2867 }
2868 
2869 static const struct btf_kind_operations fwd_ops = {
2870 	.check_meta = btf_fwd_check_meta,
2871 	.resolve = btf_df_resolve,
2872 	.check_member = btf_df_check_member,
2873 	.check_kflag_member = btf_df_check_kflag_member,
2874 	.log_details = btf_fwd_type_log,
2875 	.show = btf_df_show,
2876 };
2877 
2878 static int btf_array_check_member(struct btf_verifier_env *env,
2879 				  const struct btf_type *struct_type,
2880 				  const struct btf_member *member,
2881 				  const struct btf_type *member_type)
2882 {
2883 	u32 struct_bits_off = member->offset;
2884 	u32 struct_size, bytes_offset;
2885 	u32 array_type_id, array_size;
2886 	struct btf *btf = env->btf;
2887 
2888 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2889 		btf_verifier_log_member(env, struct_type, member,
2890 					"Member is not byte aligned");
2891 		return -EINVAL;
2892 	}
2893 
2894 	array_type_id = member->type;
2895 	btf_type_id_size(btf, &array_type_id, &array_size);
2896 	struct_size = struct_type->size;
2897 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2898 	if (struct_size - bytes_offset < array_size) {
2899 		btf_verifier_log_member(env, struct_type, member,
2900 					"Member exceeds struct_size");
2901 		return -EINVAL;
2902 	}
2903 
2904 	return 0;
2905 }
2906 
2907 static s32 btf_array_check_meta(struct btf_verifier_env *env,
2908 				const struct btf_type *t,
2909 				u32 meta_left)
2910 {
2911 	const struct btf_array *array = btf_type_array(t);
2912 	u32 meta_needed = sizeof(*array);
2913 
2914 	if (meta_left < meta_needed) {
2915 		btf_verifier_log_basic(env, t,
2916 				       "meta_left:%u meta_needed:%u",
2917 				       meta_left, meta_needed);
2918 		return -EINVAL;
2919 	}
2920 
	/* An array type should not have a name */
2922 	if (t->name_off) {
2923 		btf_verifier_log_type(env, t, "Invalid name");
2924 		return -EINVAL;
2925 	}
2926 
2927 	if (btf_type_vlen(t)) {
2928 		btf_verifier_log_type(env, t, "vlen != 0");
2929 		return -EINVAL;
2930 	}
2931 
2932 	if (btf_type_kflag(t)) {
2933 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2934 		return -EINVAL;
2935 	}
2936 
2937 	if (t->size) {
2938 		btf_verifier_log_type(env, t, "size != 0");
2939 		return -EINVAL;
2940 	}
2941 
	/* Array elem type and index type cannot be void,
2943 	 * so !array->type and !array->index_type are not allowed.
2944 	 */
2945 	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2946 		btf_verifier_log_type(env, t, "Invalid elem");
2947 		return -EINVAL;
2948 	}
2949 
2950 	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2951 		btf_verifier_log_type(env, t, "Invalid index");
2952 		return -EINVAL;
2953 	}
2954 
2955 	btf_verifier_log_type(env, t, NULL);
2956 
2957 	return meta_needed;
2958 }
2959 
2960 static int btf_array_resolve(struct btf_verifier_env *env,
2961 			     const struct resolve_vertex *v)
2962 {
2963 	const struct btf_array *array = btf_type_array(v->t);
2964 	const struct btf_type *elem_type, *index_type;
2965 	u32 elem_type_id, index_type_id;
2966 	struct btf *btf = env->btf;
2967 	u32 elem_size;
2968 
2969 	/* Check array->index_type */
2970 	index_type_id = array->index_type;
2971 	index_type = btf_type_by_id(btf, index_type_id);
2972 	if (btf_type_nosize_or_null(index_type) ||
2973 	    btf_type_is_resolve_source_only(index_type)) {
2974 		btf_verifier_log_type(env, v->t, "Invalid index");
2975 		return -EINVAL;
2976 	}
2977 
2978 	if (!env_type_is_resolve_sink(env, index_type) &&
2979 	    !env_type_is_resolved(env, index_type_id))
2980 		return env_stack_push(env, index_type, index_type_id);
2981 
2982 	index_type = btf_type_id_size(btf, &index_type_id, NULL);
2983 	if (!index_type || !btf_type_is_int(index_type) ||
2984 	    !btf_type_int_is_regular(index_type)) {
2985 		btf_verifier_log_type(env, v->t, "Invalid index");
2986 		return -EINVAL;
2987 	}
2988 
2989 	/* Check array->type */
2990 	elem_type_id = array->type;
2991 	elem_type = btf_type_by_id(btf, elem_type_id);
2992 	if (btf_type_nosize_or_null(elem_type) ||
2993 	    btf_type_is_resolve_source_only(elem_type)) {
2994 		btf_verifier_log_type(env, v->t,
2995 				      "Invalid elem");
2996 		return -EINVAL;
2997 	}
2998 
2999 	if (!env_type_is_resolve_sink(env, elem_type) &&
3000 	    !env_type_is_resolved(env, elem_type_id))
3001 		return env_stack_push(env, elem_type, elem_type_id);
3002 
3003 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3004 	if (!elem_type) {
3005 		btf_verifier_log_type(env, v->t, "Invalid elem");
3006 		return -EINVAL;
3007 	}
3008 
3009 	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
3010 		btf_verifier_log_type(env, v->t, "Invalid array of int");
3011 		return -EINVAL;
3012 	}
3013 
3014 	if (array->nelems && elem_size > U32_MAX / array->nelems) {
3015 		btf_verifier_log_type(env, v->t,
3016 				      "Array size overflows U32_MAX");
3017 		return -EINVAL;
3018 	}
3019 
3020 	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
3021 
3022 	return 0;
3023 }
3024 
3025 static void btf_array_log(struct btf_verifier_env *env,
3026 			  const struct btf_type *t)
3027 {
3028 	const struct btf_array *array = btf_type_array(t);
3029 
3030 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
3031 			 array->type, array->index_type, array->nelems);
3032 }
3033 
3034 static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
3035 			     u32 type_id, void *data, u8 bits_offset,
3036 			     struct btf_show *show)
3037 {
3038 	const struct btf_array *array = btf_type_array(t);
3039 	const struct btf_kind_operations *elem_ops;
3040 	const struct btf_type *elem_type;
3041 	u32 i, elem_size = 0, elem_type_id;
3042 	u16 encoding = 0;
3043 
3044 	elem_type_id = array->type;
3045 	elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
3046 	if (elem_type && btf_type_has_size(elem_type))
3047 		elem_size = elem_type->size;
3048 
3049 	if (elem_type && btf_type_is_int(elem_type)) {
3050 		u32 int_type = btf_type_int(elem_type);
3051 
3052 		encoding = BTF_INT_ENCODING(int_type);
3053 
3054 		/*
3055 		 * BTF_INT_CHAR encoding never seems to be set for
		 * char arrays, so if the element size is 1 and the element
		 * is printable as a char, print it as a char.
3058 		 */
3059 		if (elem_size == 1)
3060 			encoding = BTF_INT_CHAR;
3061 	}
3062 
3063 	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
3064 		return;
3065 
3066 	if (!elem_type)
3067 		goto out;
3068 	elem_ops = btf_type_ops(elem_type);
3069 
3070 	for (i = 0; i < array->nelems; i++) {
3071 
3072 		btf_show_start_array_member(show);
3073 
3074 		elem_ops->show(btf, elem_type, elem_type_id, data,
3075 			       bits_offset, show);
3076 		data += elem_size;
3077 
3078 		btf_show_end_array_member(show);
3079 
3080 		if (show->state.array_terminated)
3081 			break;
3082 	}
3083 out:
3084 	btf_show_end_array_type(show);
3085 }
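
/* Example (illustrative): a "char buf[8]" holding "abc" is treated as
 * a char array (elem_size == 1), each element is printed with the
 * BTF_INT_CHAR rules from btf_int_show(), and printing stops once the
 * NUL terminator sets show->state.array_terminated.
 */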
3086 
3087 static void btf_array_show(const struct btf *btf, const struct btf_type *t,
3088 			   u32 type_id, void *data, u8 bits_offset,
3089 			   struct btf_show *show)
3090 {
3091 	const struct btf_member *m = show->state.member;
3092 
3093 	/*
3094 	 * First check if any members would be shown (are non-zero).
3095 	 * See comments above "struct btf_show" definition for more
3096 	 * details on how this works at a high-level.
3097 	 */
3098 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3099 		if (!show->state.depth_check) {
3100 			show->state.depth_check = show->state.depth + 1;
3101 			show->state.depth_to_show = 0;
3102 		}
3103 		__btf_array_show(btf, t, type_id, data, bits_offset, show);
3104 		show->state.member = m;
3105 
3106 		if (show->state.depth_check != show->state.depth + 1)
3107 			return;
3108 		show->state.depth_check = 0;
3109 
3110 		if (show->state.depth_to_show <= show->state.depth)
3111 			return;
3112 		/*
3113 		 * Reaching here indicates we have recursed and found
3114 		 * non-zero array member(s).
3115 		 */
3116 	}
3117 	__btf_array_show(btf, t, type_id, data, bits_offset, show);
3118 }
3119 
3120 static const struct btf_kind_operations array_ops = {
3121 	.check_meta = btf_array_check_meta,
3122 	.resolve = btf_array_resolve,
3123 	.check_member = btf_array_check_member,
3124 	.check_kflag_member = btf_generic_check_kflag_member,
3125 	.log_details = btf_array_log,
3126 	.show = btf_array_show,
3127 };
3128 
3129 static int btf_struct_check_member(struct btf_verifier_env *env,
3130 				   const struct btf_type *struct_type,
3131 				   const struct btf_member *member,
3132 				   const struct btf_type *member_type)
3133 {
3134 	u32 struct_bits_off = member->offset;
3135 	u32 struct_size, bytes_offset;
3136 
3137 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3138 		btf_verifier_log_member(env, struct_type, member,
3139 					"Member is not byte aligned");
3140 		return -EINVAL;
3141 	}
3142 
3143 	struct_size = struct_type->size;
3144 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3145 	if (struct_size - bytes_offset < member_type->size) {
3146 		btf_verifier_log_member(env, struct_type, member,
3147 					"Member exceeds struct_size");
3148 		return -EINVAL;
3149 	}
3150 
3151 	return 0;
3152 }
3153 
3154 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
3155 				 const struct btf_type *t,
3156 				 u32 meta_left)
3157 {
3158 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3159 	const struct btf_member *member;
3160 	u32 meta_needed, last_offset;
3161 	struct btf *btf = env->btf;
3162 	u32 struct_size = t->size;
3163 	u32 offset;
3164 	u16 i;
3165 
3166 	meta_needed = btf_type_vlen(t) * sizeof(*member);
3167 	if (meta_left < meta_needed) {
3168 		btf_verifier_log_basic(env, t,
3169 				       "meta_left:%u meta_needed:%u",
3170 				       meta_left, meta_needed);
3171 		return -EINVAL;
3172 	}
3173 
	/* A struct type has either no name or a valid one */
3175 	if (t->name_off &&
3176 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
3177 		btf_verifier_log_type(env, t, "Invalid name");
3178 		return -EINVAL;
3179 	}
3180 
3181 	btf_verifier_log_type(env, t, NULL);
3182 
3183 	last_offset = 0;
3184 	for_each_member(i, t, member) {
3185 		if (!btf_name_offset_valid(btf, member->name_off)) {
3186 			btf_verifier_log_member(env, t, member,
3187 						"Invalid member name_offset:%u",
3188 						member->name_off);
3189 			return -EINVAL;
3190 		}
3191 
		/* A struct member has either no name or a valid one */
3193 		if (member->name_off &&
3194 		    !btf_name_valid_identifier(btf, member->name_off)) {
3195 			btf_verifier_log_member(env, t, member, "Invalid name");
3196 			return -EINVAL;
3197 		}
		/* A member cannot be of type void */
3199 		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3200 			btf_verifier_log_member(env, t, member,
3201 						"Invalid type_id");
3202 			return -EINVAL;
3203 		}
3204 
3205 		offset = __btf_member_bit_offset(t, member);
3206 		if (is_union && offset) {
3207 			btf_verifier_log_member(env, t, member,
3208 						"Invalid member bits_offset");
3209 			return -EINVAL;
3210 		}
3211 
3212 		/*
3213 		 * ">" instead of ">=" because the last member could be
3214 		 * "char a[0];"
3215 		 */
3216 		if (last_offset > offset) {
3217 			btf_verifier_log_member(env, t, member,
3218 						"Invalid member bits_offset");
3219 			return -EINVAL;
3220 		}
3221 
3222 		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3223 			btf_verifier_log_member(env, t, member,
3224 						"Member bits_offset exceeds its struct size");
3225 			return -EINVAL;
3226 		}
3227 
3228 		btf_verifier_log_member(env, t, member, NULL);
3229 		last_offset = offset;
3230 	}
3231 
3232 	return meta_needed;
3233 }
3234 
3235 static int btf_struct_resolve(struct btf_verifier_env *env,
3236 			      const struct resolve_vertex *v)
3237 {
3238 	const struct btf_member *member;
3239 	int err;
3240 	u16 i;
3241 
	/* Before continuing to resolve the next_member,
3243 	 * ensure the last member is indeed resolved to a
3244 	 * type with size info.
3245 	 */
3246 	if (v->next_member) {
3247 		const struct btf_type *last_member_type;
3248 		const struct btf_member *last_member;
3249 		u32 last_member_type_id;
3250 
3251 		last_member = btf_type_member(v->t) + v->next_member - 1;
3252 		last_member_type_id = last_member->type;
3253 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
3254 						       last_member_type_id)))
3255 			return -EINVAL;
3256 
3257 		last_member_type = btf_type_by_id(env->btf,
3258 						  last_member_type_id);
3259 		if (btf_type_kflag(v->t))
3260 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3261 								last_member,
3262 								last_member_type);
3263 		else
3264 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
3265 								last_member,
3266 								last_member_type);
3267 		if (err)
3268 			return err;
3269 	}
3270 
3271 	for_each_member_from(i, v->next_member, v->t, member) {
3272 		u32 member_type_id = member->type;
3273 		const struct btf_type *member_type = btf_type_by_id(env->btf,
3274 								member_type_id);
3275 
3276 		if (btf_type_nosize_or_null(member_type) ||
3277 		    btf_type_is_resolve_source_only(member_type)) {
3278 			btf_verifier_log_member(env, v->t, member,
3279 						"Invalid member");
3280 			return -EINVAL;
3281 		}
3282 
3283 		if (!env_type_is_resolve_sink(env, member_type) &&
3284 		    !env_type_is_resolved(env, member_type_id)) {
3285 			env_stack_set_next_member(env, i + 1);
3286 			return env_stack_push(env, member_type, member_type_id);
3287 		}
3288 
3289 		if (btf_type_kflag(v->t))
3290 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3291 									    member,
3292 									    member_type);
3293 		else
3294 			err = btf_type_ops(member_type)->check_member(env, v->t,
3295 								      member,
3296 								      member_type);
3297 		if (err)
3298 			return err;
3299 	}
3300 
3301 	env_stack_pop_resolved(env, 0, 0);
3302 
3303 	return 0;
3304 }
3305 
3306 static void btf_struct_log(struct btf_verifier_env *env,
3307 			   const struct btf_type *t)
3308 {
3309 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3310 }
3311 
3312 enum {
3313 	BTF_FIELD_IGNORE = 0,
3314 	BTF_FIELD_FOUND  = 1,
3315 };
3316 
3317 struct btf_field_info {
3318 	enum btf_field_type type;
3319 	u32 off;
3320 	union {
3321 		struct {
3322 			u32 type_id;
3323 		} kptr;
3324 		struct {
3325 			const char *node_name;
3326 			u32 value_btf_id;
3327 		} graph_root;
3328 	};
3329 };
3330 
3331 static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3332 			   u32 off, int sz, enum btf_field_type field_type,
3333 			   struct btf_field_info *info)
3334 {
3335 	if (!__btf_type_is_struct(t))
3336 		return BTF_FIELD_IGNORE;
3337 	if (t->size != sz)
3338 		return BTF_FIELD_IGNORE;
3339 	info->type = field_type;
3340 	info->off = off;
3341 	return BTF_FIELD_FOUND;
3342 }
3343 
3344 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3345 			 u32 off, int sz, struct btf_field_info *info, u32 field_mask)
3346 {
3347 	enum btf_field_type type;
3348 	const char *tag_value;
3349 	bool is_type_tag;
3350 	u32 res_id;
3351 
3352 	/* Permit modifiers on the pointer itself */
3353 	if (btf_type_is_volatile(t))
3354 		t = btf_type_by_id(btf, t->type);
3355 	/* For PTR, sz is always == 8 */
3356 	if (!btf_type_is_ptr(t))
3357 		return BTF_FIELD_IGNORE;
3358 	t = btf_type_by_id(btf, t->type);
3359 	is_type_tag = btf_type_is_type_tag(t) && !btf_type_kflag(t);
3360 	if (!is_type_tag)
3361 		return BTF_FIELD_IGNORE;
3362 	/* Reject extra tags */
3363 	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3364 		return -EINVAL;
3365 	tag_value = __btf_name_by_offset(btf, t->name_off);
3366 	if (!strcmp("kptr_untrusted", tag_value))
3367 		type = BPF_KPTR_UNREF;
3368 	else if (!strcmp("kptr", tag_value))
3369 		type = BPF_KPTR_REF;
3370 	else if (!strcmp("percpu_kptr", tag_value))
3371 		type = BPF_KPTR_PERCPU;
3372 	else if (!strcmp("uptr", tag_value))
3373 		type = BPF_UPTR;
3374 	else
3375 		return -EINVAL;
3376 
3377 	if (!(type & field_mask))
3378 		return BTF_FIELD_IGNORE;
3379 
3380 	/* Get the base type */
3381 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
3382 	/* Only pointer to struct is allowed */
3383 	if (!__btf_type_is_struct(t))
3384 		return -EINVAL;
3385 
3386 	info->type = type;
3387 	info->off = off;
3388 	info->kptr.type_id = res_id;
3389 	return BTF_FIELD_FOUND;
3390 }
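
/* Example (illustrative): with the libbpf bpf_helpers.h convention of
 *   #define __kptr __attribute__((btf_type_tag("kptr")))
 * a map value declared as
 *   struct map_value { struct foo __kptr *ptr; };
 * carries a BTF_KIND_TYPE_TAG named "kptr" on the pointer's target,
 * which btf_find_kptr() matches to BPF_KPTR_REF.
 */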
3391 
3392 int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
3393 			   int comp_idx, const char *tag_key, int last_id)
3394 {
3395 	int len = strlen(tag_key);
3396 	int i, n;
3397 
3398 	for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) {
3399 		const struct btf_type *t = btf_type_by_id(btf, i);
3400 
3401 		if (!btf_type_is_decl_tag(t))
3402 			continue;
3403 		if (pt != btf_type_by_id(btf, t->type))
3404 			continue;
3405 		if (btf_type_decl_tag(t)->component_idx != comp_idx)
3406 			continue;
3407 		if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3408 			continue;
3409 		return i;
3410 	}
3411 	return -ENOENT;
3412 }
3413 
3414 const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
3415 				    int comp_idx, const char *tag_key)
3416 {
3417 	const char *value = NULL;
3418 	const struct btf_type *t;
3419 	int len, id;
3420 
3421 	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0);
3422 	if (id < 0)
3423 		return ERR_PTR(id);
3424 
3425 	t = btf_type_by_id(btf, id);
3426 	len = strlen(tag_key);
3427 	value = __btf_name_by_offset(btf, t->name_off) + len;
3428 
	/* Prevent duplicate entries for the same type */
3430 	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
3431 	if (id >= 0)
3432 		return ERR_PTR(-EEXIST);
3433 
3434 	return value;
3435 }
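
/* Example (illustrative): for a BTF_KIND_DECL_TAG named
 * "contains:foo:bar" and tag_key "contains:", the returned value
 * points at "foo:bar" within the tag name.
 */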
3436 
3437 static int
3438 btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
3439 		    const struct btf_type *t, int comp_idx, u32 off,
3440 		    int sz, struct btf_field_info *info,
3441 		    enum btf_field_type head_type)
3442 {
3443 	const char *node_field_name;
3444 	const char *value_type;
3445 	s32 id;
3446 
3447 	if (!__btf_type_is_struct(t))
3448 		return BTF_FIELD_IGNORE;
3449 	if (t->size != sz)
3450 		return BTF_FIELD_IGNORE;
3451 	value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
3452 	if (IS_ERR(value_type))
3453 		return -EINVAL;
3454 	node_field_name = strstr(value_type, ":");
3455 	if (!node_field_name)
3456 		return -EINVAL;
3457 	value_type = kstrndup(value_type, node_field_name - value_type,
3458 			      GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
3459 	if (!value_type)
3460 		return -ENOMEM;
3461 	id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
3462 	kfree(value_type);
3463 	if (id < 0)
3464 		return id;
3465 	node_field_name++;
3466 	if (str_is_empty(node_field_name))
3467 		return -EINVAL;
3468 	info->type = head_type;
3469 	info->off = off;
3470 	info->graph_root.value_btf_id = id;
3471 	info->graph_root.node_name = node_field_name;
3472 	return BTF_FIELD_FOUND;
3473 }
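
/* Example (illustrative): with the bpf_experimental.h convention of
 *   #define __contains(name, node) \
 *	__attribute__((btf_decl_tag("contains:" #name ":" #node)))
 * a root declared as
 *   struct bpf_list_head head __contains(foo, node);
 * yields value_type "foo" and node_field_name "node" above.
 */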
3474 
3475 #define field_mask_test_name(field_type, field_type_str) \
3476 	if (field_mask & field_type && !strcmp(name, field_type_str)) { \
3477 		type = field_type;					\
3478 		goto end;						\
3479 	}
3480 
3481 static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
3482 			      u32 field_mask, u32 *seen_mask,
3483 			      int *align, int *sz)
3484 {
3485 	int type = 0;
3486 	const char *name = __btf_name_by_offset(btf, var_type->name_off);
3487 
3488 	if (field_mask & BPF_SPIN_LOCK) {
3489 		if (!strcmp(name, "bpf_spin_lock")) {
3490 			if (*seen_mask & BPF_SPIN_LOCK)
3491 				return -E2BIG;
3492 			*seen_mask |= BPF_SPIN_LOCK;
3493 			type = BPF_SPIN_LOCK;
3494 			goto end;
3495 		}
3496 	}
3497 	if (field_mask & BPF_RES_SPIN_LOCK) {
3498 		if (!strcmp(name, "bpf_res_spin_lock")) {
3499 			if (*seen_mask & BPF_RES_SPIN_LOCK)
3500 				return -E2BIG;
3501 			*seen_mask |= BPF_RES_SPIN_LOCK;
3502 			type = BPF_RES_SPIN_LOCK;
3503 			goto end;
3504 		}
3505 	}
3506 	if (field_mask & BPF_TIMER) {
3507 		if (!strcmp(name, "bpf_timer")) {
3508 			if (*seen_mask & BPF_TIMER)
3509 				return -E2BIG;
3510 			*seen_mask |= BPF_TIMER;
3511 			type = BPF_TIMER;
3512 			goto end;
3513 		}
3514 	}
3515 	if (field_mask & BPF_WORKQUEUE) {
3516 		if (!strcmp(name, "bpf_wq")) {
3517 			if (*seen_mask & BPF_WORKQUEUE)
3518 				return -E2BIG;
3519 			*seen_mask |= BPF_WORKQUEUE;
3520 			type = BPF_WORKQUEUE;
3521 			goto end;
3522 		}
3523 	}
3524 	field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
3525 	field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
3526 	field_mask_test_name(BPF_RB_ROOT,   "bpf_rb_root");
3527 	field_mask_test_name(BPF_RB_NODE,   "bpf_rb_node");
3528 	field_mask_test_name(BPF_REFCOUNT,  "bpf_refcount");
3529 
3530 	/* Only return BPF_KPTR when all other types with matchable names fail */
3531 	if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
3532 		type = BPF_KPTR_REF;
3533 		goto end;
3534 	}
3535 	return 0;
3536 end:
3537 	*sz = btf_field_type_size(type);
3538 	*align = btf_field_type_align(type);
3539 	return type;
3540 }
3541 
3542 #undef field_mask_test_name
3543 
/* Repeat a number of fields a specified number of times.
 *
 * Copy the fields starting from the first field and repeat them
 * repeat_cnt times. The fields are repeated by adding
 *   (i + 1) * elem_size
 * to the offset of each field, where i is the repeat index and
 * elem_size is the size of an element.
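 *
 * Example (illustrative): one found field at offset 4 inside an
 * element of a 3-element array with elem_size 16 is repeated with
 * field_cnt = 1 and repeat_cnt = 2, producing copies at offsets
 * 4 + 16 = 20 and 4 + 32 = 36.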
3551  */
3552 static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
3553 			     u32 field_cnt, u32 repeat_cnt, u32 elem_size)
3554 {
3555 	u32 i, j;
3556 	u32 cur;
3557 
	/* Ensure we are not repeating fields that should not be repeated. */
3559 	for (i = 0; i < field_cnt; i++) {
3560 		switch (info[i].type) {
3561 		case BPF_KPTR_UNREF:
3562 		case BPF_KPTR_REF:
3563 		case BPF_KPTR_PERCPU:
3564 		case BPF_UPTR:
3565 		case BPF_LIST_HEAD:
3566 		case BPF_RB_ROOT:
3567 			break;
3568 		default:
3569 			return -EINVAL;
3570 		}
3571 	}
3572 
	/* Both struct size and variable size have type u32,
	 * so the multiplication will not overflow.
3575 	 */
3576 	if (field_cnt * (repeat_cnt + 1) > info_cnt)
3577 		return -E2BIG;
3578 
3579 	cur = field_cnt;
3580 	for (i = 0; i < repeat_cnt; i++) {
3581 		memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
3582 		for (j = 0; j < field_cnt; j++)
3583 			info[cur++].off += (i + 1) * elem_size;
3584 	}
3585 
3586 	return 0;
3587 }
3588 
3589 static int btf_find_struct_field(const struct btf *btf,
3590 				 const struct btf_type *t, u32 field_mask,
3591 				 struct btf_field_info *info, int info_cnt,
3592 				 u32 level);
3593 
3594 /* Find special fields in the struct type of a field.
3595  *
3596  * This function finds fields of special types that are neither a global
3597  * variable nor a direct field of a struct type. It also handles the
3598  * repetition when the struct is the element type of an array.
3599  */
3600 static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
3601 				  u32 off, u32 nelems,
3602 				  u32 field_mask, struct btf_field_info *info,
3603 				  int info_cnt, u32 level)
3604 {
3605 	int ret, err, i;
3606 
3607 	level++;
3608 	if (level >= MAX_RESOLVE_DEPTH)
3609 		return -E2BIG;
3610 
3611 	ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level);
3612 
3613 	if (ret <= 0)
3614 		return ret;
3615 
3616 	/* Shift the offsets of the nested struct fields so that they are
3617 	 * relative to the container.
3618 	 */
3619 	for (i = 0; i < ret; i++)
3620 		info[i].off += off;
3621 
3622 	if (nelems > 1) {
3623 		err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3624 		if (err == 0)
3625 			ret *= nelems;
3626 		else
3627 			ret = err;
3628 	}
3629 
3630 	return ret;
3631 }
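
/* Illustrative sketch (elem and map_value are hypothetical types): for
 *
 *	struct elem { struct bpf_list_head head; };
 *	struct map_value { struct elem arr[3]; };
 *
 * the recursive walk finds one BPF_LIST_HEAD at offset 0 inside elem;
 * the offset is then shifted by the offset of arr and repeated with
 * elem_size = sizeof(struct elem), yielding three fields in total.
 */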
3632 
3633 static int btf_find_field_one(const struct btf *btf,
3634 			      const struct btf_type *var,
3635 			      const struct btf_type *var_type,
3636 			      int var_idx,
3637 			      u32 off, u32 expected_size,
3638 			      u32 field_mask, u32 *seen_mask,
3639 			      struct btf_field_info *info, int info_cnt,
3640 			      u32 level)
3641 {
3642 	int ret, align, sz, field_type;
3643 	struct btf_field_info tmp;
3644 	const struct btf_array *array;
3645 	u32 i, nelems = 1;
3646 
3647 	/* Walk into array types to find the element type and the number of
3648 	 * elements in the (flattened) array.
3649 	 */
3650 	for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
3651 		array = btf_array(var_type);
3652 		nelems *= array->nelems;
3653 		var_type = btf_type_by_id(btf, array->type);
3654 	}
3655 	if (i == MAX_RESOLVE_DEPTH)
3656 		return -E2BIG;
3657 	if (nelems == 0)
3658 		return 0;
3659 
3660 	field_type = btf_get_field_type(btf, var_type,
3661 					field_mask, seen_mask, &align, &sz);
3662 	/* Look into variables of struct types */
3663 	if (!field_type && __btf_type_is_struct(var_type)) {
3664 		sz = var_type->size;
3665 		if (expected_size && expected_size != sz * nelems)
3666 			return 0;
3667 		ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
3668 					     &info[0], info_cnt, level);
3669 		return ret;
3670 	}
3671 
3672 	if (field_type == 0)
3673 		return 0;
3674 	if (field_type < 0)
3675 		return field_type;
3676 
3677 	if (expected_size && expected_size != sz * nelems)
3678 		return 0;
3679 	if (off % align)
3680 		return 0;
3681 
3682 	switch (field_type) {
3683 	case BPF_SPIN_LOCK:
3684 	case BPF_RES_SPIN_LOCK:
3685 	case BPF_TIMER:
3686 	case BPF_WORKQUEUE:
3687 	case BPF_LIST_NODE:
3688 	case BPF_RB_NODE:
3689 	case BPF_REFCOUNT:
3690 		ret = btf_find_struct(btf, var_type, off, sz, field_type,
3691 				      info_cnt ? &info[0] : &tmp);
3692 		if (ret < 0)
3693 			return ret;
3694 		break;
3695 	case BPF_KPTR_UNREF:
3696 	case BPF_KPTR_REF:
3697 	case BPF_KPTR_PERCPU:
3698 	case BPF_UPTR:
3699 		ret = btf_find_kptr(btf, var_type, off, sz,
3700 				    info_cnt ? &info[0] : &tmp, field_mask);
3701 		if (ret < 0)
3702 			return ret;
3703 		break;
3704 	case BPF_LIST_HEAD:
3705 	case BPF_RB_ROOT:
3706 		ret = btf_find_graph_root(btf, var, var_type,
3707 					  var_idx, off, sz,
3708 					  info_cnt ? &info[0] : &tmp,
3709 					  field_type);
3710 		if (ret < 0)
3711 			return ret;
3712 		break;
3713 	default:
3714 		return -EFAULT;
3715 	}
3716 
3717 	if (ret == BTF_FIELD_IGNORE)
3718 		return 0;
3719 	if (!info_cnt)
3720 		return -E2BIG;
3721 	if (nelems > 1) {
3722 		ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3723 		if (ret < 0)
3724 			return ret;
3725 	}
3726 	return nelems;
3727 }
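
/* Illustrative sketch (struct foo and "slots" are hypothetical): a
 * member declared as an array of kptrs, e.g.
 *
 *	struct foo __kptr *slots[2][3];
 *
 * is flattened by the loop above to nelems = 6, after which the one
 * discovered field is cloned five more times, each copy sz bytes
 * apart.  Field kinds not accepted by btf_repeat_fields() (bpf_timer,
 * bpf_spin_lock, ...) cannot be array elements and fail with -EINVAL.
 */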
3728 
3729 static int btf_find_struct_field(const struct btf *btf,
3730 				 const struct btf_type *t, u32 field_mask,
3731 				 struct btf_field_info *info, int info_cnt,
3732 				 u32 level)
3733 {
3734 	int ret, idx = 0;
3735 	const struct btf_member *member;
3736 	u32 i, off, seen_mask = 0;
3737 
3738 	for_each_member(i, t, member) {
3739 		const struct btf_type *member_type = btf_type_by_id(btf,
3740 								    member->type);
3741 
3742 		off = __btf_member_bit_offset(t, member);
3743 		if (off % 8)
3744 			/* valid C code cannot generate such BTF */
3745 			return -EINVAL;
3746 		off /= 8;
3747 
3748 		ret = btf_find_field_one(btf, t, member_type, i,
3749 					 off, 0,
3750 					 field_mask, &seen_mask,
3751 					 &info[idx], info_cnt - idx, level);
3752 		if (ret < 0)
3753 			return ret;
3754 		idx += ret;
3755 	}
3756 	return idx;
3757 }
3758 
3759 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3760 				u32 field_mask, struct btf_field_info *info,
3761 				int info_cnt, u32 level)
3762 {
3763 	int ret, idx = 0;
3764 	const struct btf_var_secinfo *vsi;
3765 	u32 i, off, seen_mask = 0;
3766 
3767 	for_each_vsi(i, t, vsi) {
3768 		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3769 		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3770 
3771 		off = vsi->offset;
3772 		ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
3773 					 field_mask, &seen_mask,
3774 					 &info[idx], info_cnt - idx,
3775 					 level);
3776 		if (ret < 0)
3777 			return ret;
3778 		idx += ret;
3779 	}
3780 	return idx;
3781 }
3782 
3783 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3784 			  u32 field_mask, struct btf_field_info *info,
3785 			  int info_cnt)
3786 {
3787 	if (__btf_type_is_struct(t))
3788 		return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
3789 	else if (btf_type_is_datasec(t))
3790 		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
3791 	return -EINVAL;
3792 }
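
/* Note: the two entry points mirror where special fields may live.  A
 * map value is a struct, so its members are scanned directly; global
 * variables sit in a DATASEC (e.g. ".bss"), so each VAR's type is
 * scanned with vsi->offset and vsi->size as the expected placement.
 */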
3793 
3794 /* Callers must ensure the lifetime of btf if it is program BTF */
3795 static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
3796 			  struct btf_field_info *info)
3797 {
3798 	struct module *mod = NULL;
3799 	const struct btf_type *t;
3800 	/* If a matching btf type is found in kernel or module BTFs, kptr_btf
3801 	 * is that BTF, otherwise it's the program BTF.
3802 	 */
3803 	struct btf *kptr_btf;
3804 	int ret;
3805 	s32 id;
3806 
3807 	/* Find type in map BTF, and use it to look up the matching type
3808 	 * in vmlinux or module BTFs, by name and kind.
3809 	 */
3810 	t = btf_type_by_id(btf, info->kptr.type_id);
3811 	id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3812 			     &kptr_btf);
3813 	if (id == -ENOENT) {
3814 		/* btf_parse_kptr should only be called w/ btf = program BTF */
3815 		WARN_ON_ONCE(btf_is_kernel(btf));
3816 
3817 		/* Type exists only in program BTF. Assume that it's a MEM_ALLOC
3818 		 * kptr allocated via bpf_obj_new
3819 		 */
3820 		field->kptr.dtor = NULL;
3821 		id = info->kptr.type_id;
3822 		kptr_btf = (struct btf *)btf;
3823 		goto found_dtor;
3824 	}
3825 	if (id < 0)
3826 		return id;
3827 
3828 	/* Find and stash the function pointer for the destruction function that
3829 	 * needs to be eventually invoked from the map free path.
3830 	 */
3831 	if (info->type == BPF_KPTR_REF) {
3832 		const struct btf_type *dtor_func;
3833 		const char *dtor_func_name;
3834 		unsigned long addr;
3835 		s32 dtor_btf_id;
3836 
3837 		/* This call also serves as a whitelist of allowed objects that
3838 		 * can be used as a referenced pointer and be stored in a map at
3839 		 * the same time.
3840 		 */
3841 		dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
3842 		if (dtor_btf_id < 0) {
3843 			ret = dtor_btf_id;
3844 			goto end_btf;
3845 		}
3846 
3847 		dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id);
3848 		if (!dtor_func) {
3849 			ret = -ENOENT;
3850 			goto end_btf;
3851 		}
3852 
3853 		if (btf_is_module(kptr_btf)) {
3854 			mod = btf_try_get_module(kptr_btf);
3855 			if (!mod) {
3856 				ret = -ENXIO;
3857 				goto end_btf;
3858 			}
3859 		}
3860 
3861 		/* We already verified dtor_func to be btf_type_is_func
3862 		 * in register_btf_id_dtor_kfuncs.
3863 		 */
3864 		dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
3865 		addr = kallsyms_lookup_name(dtor_func_name);
3866 		if (!addr) {
3867 			ret = -EINVAL;
3868 			goto end_mod;
3869 		}
3870 		field->kptr.dtor = (void *)addr;
3871 	}
3872 
3873 found_dtor:
3874 	field->kptr.btf_id = id;
3875 	field->kptr.btf = kptr_btf;
3876 	field->kptr.module = mod;
3877 	return 0;
3878 end_mod:
3879 	module_put(mod);
3880 end_btf:
3881 	btf_put(kptr_btf);
3882 	return ret;
3883 }
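
/* Illustrative sketch: for a map value member declared in a program as
 *
 *	struct task_struct __kptr *task;
 *
 * the type is found in vmlinux BTF, so kptr_btf above is the kernel
 * BTF and, for BPF_KPTR_REF, the dtor registered for that type is
 * resolved and stashed.  A type that exists only in program BTF is
 * instead treated as a bpf_obj_new() allocation with no dtor.
 */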
3884 
3885 static int btf_parse_graph_root(const struct btf *btf,
3886 				struct btf_field *field,
3887 				struct btf_field_info *info,
3888 				const char *node_type_name,
3889 				size_t node_type_align)
3890 {
3891 	const struct btf_type *t, *n = NULL;
3892 	const struct btf_member *member;
3893 	u32 offset;
3894 	int i;
3895 
3896 	t = btf_type_by_id(btf, info->graph_root.value_btf_id);
3897 	/* We've already checked that value_btf_id is a struct type. We
3898 	 * just need to figure out the offset of the list_node, and
3899 	 * verify its type.
3900 	 */
3901 	for_each_member(i, t, member) {
3902 		if (strcmp(info->graph_root.node_name,
3903 			   __btf_name_by_offset(btf, member->name_off)))
3904 			continue;
3905 		/* Invalid BTF, two members with same name */
3906 		if (n)
3907 			return -EINVAL;
3908 		n = btf_type_by_id(btf, member->type);
3909 		if (!__btf_type_is_struct(n))
3910 			return -EINVAL;
3911 		if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
3912 			return -EINVAL;
3913 		offset = __btf_member_bit_offset(n, member);
3914 		if (offset % 8)
3915 			return -EINVAL;
3916 		offset /= 8;
3917 		if (offset % node_type_align)
3918 			return -EINVAL;
3919 
3920 		field->graph_root.btf = (struct btf *)btf;
3921 		field->graph_root.value_btf_id = info->graph_root.value_btf_id;
3922 		field->graph_root.node_offset = offset;
3923 	}
3924 	if (!n)
3925 		return -ENOENT;
3926 	return 0;
3927 }
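
/* Illustrative sketch ("elem" is hypothetical; __contains() is the
 * convenience macro programs use to emit the "contains:..." decl tag):
 * for
 *
 *	struct elem { struct bpf_list_node node; };
 *	struct bpf_list_head head __contains(elem, node);
 *
 * info->graph_root.node_name is "node", so the loop above locates that
 * member inside elem, verifies it really is a bpf_list_node and
 * records its byte offset in field->graph_root.node_offset.
 */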
3928 
3929 static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
3930 			       struct btf_field_info *info)
3931 {
3932 	return btf_parse_graph_root(btf, field, info, "bpf_list_node",
3933 					    __alignof__(struct bpf_list_node));
3934 }
3935 
3936 static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
3937 			     struct btf_field_info *info)
3938 {
3939 	return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
3940 					    __alignof__(struct bpf_rb_node));
3941 }
3942 
3943 static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
3944 {
3945 	const struct btf_field *a = (const struct btf_field *)_a;
3946 	const struct btf_field *b = (const struct btf_field *)_b;
3947 
3948 	if (a->offset < b->offset)
3949 		return -1;
3950 	else if (a->offset > b->offset)
3951 		return 1;
3952 	return 0;
3953 }
3954 
3955 struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
3956 				    u32 field_mask, u32 value_size)
3957 {
3958 	struct btf_field_info info_arr[BTF_FIELDS_MAX];
3959 	u32 next_off = 0, field_type_size;
3960 	struct btf_record *rec;
3961 	int ret, i, cnt;
3962 
3963 	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
3964 	if (ret < 0)
3965 		return ERR_PTR(ret);
3966 	if (!ret)
3967 		return NULL;
3968 
3969 	cnt = ret;
3970 	/* This needs to be kzalloc to zero out padding and unused fields, see
3971 	 * comment in btf_record_equal.
3972 	 */
3973 	rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
3974 	if (!rec)
3975 		return ERR_PTR(-ENOMEM);
3976 
3977 	rec->spin_lock_off = -EINVAL;
3978 	rec->res_spin_lock_off = -EINVAL;
3979 	rec->timer_off = -EINVAL;
3980 	rec->wq_off = -EINVAL;
3981 	rec->refcount_off = -EINVAL;
3982 	for (i = 0; i < cnt; i++) {
3983 		field_type_size = btf_field_type_size(info_arr[i].type);
3984 		if (info_arr[i].off + field_type_size > value_size) {
3985 			WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
3986 			ret = -EFAULT;
3987 			goto end;
3988 		}
3989 		if (info_arr[i].off < next_off) {
3990 			ret = -EEXIST;
3991 			goto end;
3992 		}
3993 		next_off = info_arr[i].off + field_type_size;
3994 
3995 		rec->field_mask |= info_arr[i].type;
3996 		rec->fields[i].offset = info_arr[i].off;
3997 		rec->fields[i].type = info_arr[i].type;
3998 		rec->fields[i].size = field_type_size;
3999 
4000 		switch (info_arr[i].type) {
4001 		case BPF_SPIN_LOCK:
4002 			WARN_ON_ONCE(rec->spin_lock_off >= 0);
4003 			/* Cache offset for faster lookup at runtime */
4004 			rec->spin_lock_off = rec->fields[i].offset;
4005 			break;
4006 		case BPF_RES_SPIN_LOCK:
4007 			WARN_ON_ONCE(rec->res_spin_lock_off >= 0);
4008 			/* Cache offset for faster lookup at runtime */
4009 			rec->res_spin_lock_off = rec->fields[i].offset;
4010 			break;
4011 		case BPF_TIMER:
4012 			WARN_ON_ONCE(rec->timer_off >= 0);
4013 			/* Cache offset for faster lookup at runtime */
4014 			rec->timer_off = rec->fields[i].offset;
4015 			break;
4016 		case BPF_WORKQUEUE:
4017 			WARN_ON_ONCE(rec->wq_off >= 0);
4018 			/* Cache offset for faster lookup at runtime */
4019 			rec->wq_off = rec->fields[i].offset;
4020 			break;
4021 		case BPF_REFCOUNT:
4022 			WARN_ON_ONCE(rec->refcount_off >= 0);
4023 			/* Cache offset for faster lookup at runtime */
4024 			rec->refcount_off = rec->fields[i].offset;
4025 			break;
4026 		case BPF_KPTR_UNREF:
4027 		case BPF_KPTR_REF:
4028 		case BPF_KPTR_PERCPU:
4029 		case BPF_UPTR:
4030 			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
4031 			if (ret < 0)
4032 				goto end;
4033 			break;
4034 		case BPF_LIST_HEAD:
4035 			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4036 			if (ret < 0)
4037 				goto end;
4038 			break;
4039 		case BPF_RB_ROOT:
4040 			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4041 			if (ret < 0)
4042 				goto end;
4043 			break;
4044 		case BPF_LIST_NODE:
4045 		case BPF_RB_NODE:
4046 			break;
4047 		default:
4048 			ret = -EFAULT;
4049 			goto end;
4050 		}
4051 		rec->cnt++;
4052 	}
4053 
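	/* A map value may use either bpf_spin_lock or bpf_res_spin_lock,
	 * but not both.
	 */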
4054 	if (rec->spin_lock_off >= 0 && rec->res_spin_lock_off >= 0) {
4055 		ret = -EINVAL;
4056 		goto end;
4057 	}
4058 
4059 	/* bpf_{list_head, rb_root} require bpf_spin_lock */
4060 	if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
4061 	     btf_record_has_field(rec, BPF_RB_ROOT)) &&
4062 		 (rec->spin_lock_off < 0 && rec->res_spin_lock_off < 0)) {
4063 		ret = -EINVAL;
4064 		goto end;
4065 	}
4066 
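	/* Without a bpf_refcount an object has a single owner, so it cannot
	 * be linkable into both a list and an rbtree.
	 */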
4067 	if (rec->refcount_off < 0 &&
4068 	    btf_record_has_field(rec, BPF_LIST_NODE) &&
4069 	    btf_record_has_field(rec, BPF_RB_NODE)) {
4070 		ret = -EINVAL;
4071 		goto end;
4072 	}
4073 
4074 	sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
4075 	       NULL, rec);
4076 
4077 	return rec;
4078 end:
4079 	btf_record_free(rec);
4080 	return ERR_PTR(ret);
4081 }
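
/* Illustrative sketch (map_value and foo are hypothetical): for
 *
 *	struct map_value {
 *		struct bpf_spin_lock lock;
 *		struct foo __kptr *p;
 *	};
 *
 * the returned record has cnt = 2, field_mask = BPF_SPIN_LOCK |
 * BPF_KPTR_REF and spin_lock_off = 0, with fields[] sorted by offset
 * so that runtime lookups can binary-search a field by offset.
 */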
4082 
4083 int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
4084 {
4085 	int i;
4086 
4087 	/* There are three types that signify ownership of some other type:
4088 	 *  kptr_ref, bpf_list_head, bpf_rb_root.
4089 	 * kptr_ref only supports storing kernel types, and kernel types
4090 	 * cannot store references to program allocated local types.
4091 	 *
4092 	 * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
4093 	 * does not form cycles.
4094 	 */
4095 	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4096 		return 0;
4097 	for (i = 0; i < rec->cnt; i++) {
4098 		struct btf_struct_meta *meta;
4099 		const struct btf_type *t;
4100 		u32 btf_id;
4101 
4102 		if (rec->fields[i].type == BPF_UPTR) {
4103 			/* An uptr may pin at most one page and cannot
4104 			 * point to a kernel struct.
4105 			 */
4106 			if (btf_is_kernel(rec->fields[i].kptr.btf))
4107 				return -EINVAL;
4108 			t = btf_type_by_id(rec->fields[i].kptr.btf,
4109 					   rec->fields[i].kptr.btf_id);
4110 			if (!t->size)
4111 				return -EINVAL;
4112 			if (t->size > PAGE_SIZE)
4113 				return -E2BIG;
4114 			continue;
4115 		}
4116 
4117 		if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
4118 			continue;
4119 		btf_id = rec->fields[i].graph_root.value_btf_id;
4120 		meta = btf_find_struct_meta(btf, btf_id);
4121 		if (!meta)
4122 			return -EFAULT;
4123 		rec->fields[i].graph_root.value_rec = meta->record;
4124 
4125 		/* We need to set value_rec for all root types, but no need
4126 		 * to check ownership cycle for a type unless it's also a
4127 		 * node type.
4128 		 */
4129 		if (!(rec->field_mask & BPF_GRAPH_NODE))
4130 			continue;
4131 
4132 		/* We need to ensure ownership acyclicity among all types. The
4133 		 * proper way to do it would be to topologically sort all BTF
4134 		 * IDs based on the ownership edges, since there can be multiple
4135 		 * bpf_{list_head,rb_node} in a type. Instead, we use the
4136 		 * following reasoning:
4137 		 *
4138 		 * - A type can only be owned by another type in user BTF if it
4139 		 *   has a bpf_{list,rb}_node. Let's call these node types.
4140 		 * - A type can only _own_ another type in user BTF if it has a
4141 		 *   bpf_{list_head,rb_root}. Let's call these root types.
4142 		 *
4143 		 * We ensure that if a type is both a root and a node, its
4144 		 * element types cannot be root types.
4145 		 *
4146 		 * To ensure acyclicity:
4147 		 *
4148 		 * When A is a root type but not a node, its ownership
4149 		 * chain can be:
4150 		 *	A -> B -> C
4151 		 * Where:
4152 		 * - A is only a root, e.g. has bpf_rb_root.
4153 		 * - B is both a root and a node, e.g. has bpf_rb_node and
4154 		 *   bpf_list_head.
4155 		 * - C is only a node, e.g. has bpf_list_node.
4156 		 *
4157 		 * When A is both a root and a node, some other type already
4158 		 * owns it in the BTF domain, hence it cannot own
4159 		 * another root type through any of the ownership edges.
4160 		 *	A -> B
4161 		 * Where:
4162 		 * - A is both a root and a node.
4163 		 * - B is only a node.
4164 		 */
4165 		if (meta->record->field_mask & BPF_GRAPH_ROOT)
4166 			return -ELOOP;
4167 	}
4168 	return 0;
4169 }
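
/* Illustrative sketch of the -ELOOP case above: suppose A has a
 * bpf_list_head containing B plus its own bpf_list_node, and B in turn
 * has a bpf_list_head.  A's value type B is then both ownable and a
 * root, so meta->record->field_mask & BPF_GRAPH_ROOT is set and the
 * ownership chain could loop back to A.
 */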
4170 
4171 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
4172 			      u32 type_id, void *data, u8 bits_offset,
4173 			      struct btf_show *show)
4174 {
4175 	const struct btf_member *member;
4176 	void *safe_data;
4177 	u32 i;
4178 
4179 	safe_data = btf_show_start_struct_type(show, t, type_id, data);
4180 	if (!safe_data)
4181 		return;
4182 
4183 	for_each_member(i, t, member) {
4184 		const struct btf_type *member_type = btf_type_by_id(btf,
4185 								member->type);
4186 		const struct btf_kind_operations *ops;
4187 		u32 member_offset, bitfield_size;
4188 		u32 bytes_offset;
4189 		u8 bits8_offset;
4190 
4191 		btf_show_start_member(show, member);
4192 
4193 		member_offset = __btf_member_bit_offset(t, member);
4194 		bitfield_size = __btf_member_bitfield_size(t, member);
4195 		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
4196 		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
4197 		if (bitfield_size) {
4198 			safe_data = btf_show_start_type(show, member_type,
4199 							member->type,
4200 							data + bytes_offset);
4201 			if (safe_data)
4202 				btf_bitfield_show(safe_data,
4203 						  bits8_offset,
4204 						  bitfield_size, show);
4205 			btf_show_end_type(show);
4206 		} else {
4207 			ops = btf_type_ops(member_type);
4208 			ops->show(btf, member_type, member->type,
4209 				  data + bytes_offset, bits8_offset, show);
4210 		}
4211 
4212 		btf_show_end_member(show);
4213 	}
4214 
4215 	btf_show_end_struct_type(show);
4216 }
4217 
4218 static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
4219 			    u32 type_id, void *data, u8 bits_offset,
4220 			    struct btf_show *show)
4221 {
4222 	const struct btf_member *m = show->state.member;
4223 
4224 	/*
4225 	 * First check if any members would be shown (are non-zero).
4226 	 * See comments above "struct btf_show" definition for more
4227 	 * details on how this works at a high-level.
4228 	 */
4229 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
4230 		if (!show->state.depth_check) {
4231 			show->state.depth_check = show->state.depth + 1;
4232 			show->state.depth_to_show = 0;
4233 		}
4234 		__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4235 		/* Restore saved member data here */
4236 		show->state.member = m;
4237 		if (show->state.depth_check != show->state.depth + 1)
4238 			return;
4239 		show->state.depth_check = 0;
4240 
4241 		if (show->state.depth_to_show <= show->state.depth)
4242 			return;
4243 		/*
4244 		 * Reaching here indicates we have recursed and found
4245 		 * non-zero child values.
4246 		 */
4247 	}
4248 
4249 	__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4250 }
4251 
4252 static const struct btf_kind_operations struct_ops = {
4253 	.check_meta = btf_struct_check_meta,
4254 	.resolve = btf_struct_resolve,
4255 	.check_member = btf_struct_check_member,
4256 	.check_kflag_member = btf_generic_check_kflag_member,
4257 	.log_details = btf_struct_log,
4258 	.show = btf_struct_show,
4259 };
4260 
4261 static int btf_enum_check_member(struct btf_verifier_env *env,
4262 				 const struct btf_type *struct_type,
4263 				 const struct btf_member *member,
4264 				 const struct btf_type *member_type)
4265 {
4266 	u32 struct_bits_off = member->offset;
4267 	u32 struct_size, bytes_offset;
4268 
4269 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4270 		btf_verifier_log_member(env, struct_type, member,
4271 					"Member is not byte aligned");
4272 		return -EINVAL;
4273 	}
4274 
4275 	struct_size = struct_type->size;
4276 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
4277 	if (struct_size - bytes_offset < member_type->size) {
4278 		btf_verifier_log_member(env, struct_type, member,
4279 					"Member exceeds struct_size");
4280 		return -EINVAL;
4281 	}
4282 
4283 	return 0;
4284 }
4285 
4286 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
4287 				       const struct btf_type *struct_type,
4288 				       const struct btf_member *member,
4289 				       const struct btf_type *member_type)
4290 {
4291 	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
4292 	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
4293 
4294 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
4295 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
4296 	if (!nr_bits) {
4297 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4298 			btf_verifier_log_member(env, struct_type, member,
4299 						"Member is not byte aligned");
4300 			return -EINVAL;
4301 		}
4302 
4303 		nr_bits = int_bitsize;
4304 	} else if (nr_bits > int_bitsize) {
4305 		btf_verifier_log_member(env, struct_type, member,
4306 					"Invalid member bitfield_size");
4307 		return -EINVAL;
4308 	}
4309 
4310 	struct_size = struct_type->size;
4311 	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
4312 	if (struct_size < bytes_end) {
4313 		btf_verifier_log_member(env, struct_type, member,
4314 					"Member exceeds struct_size");
4315 		return -EINVAL;
4316 	}
4317 
4318 	return 0;
4319 }
4320 
4321 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
4322 			       const struct btf_type *t,
4323 			       u32 meta_left)
4324 {
4325 	const struct btf_enum *enums = btf_type_enum(t);
4326 	struct btf *btf = env->btf;
4327 	const char *fmt_str;
4328 	u16 i, nr_enums;
4329 	u32 meta_needed;
4330 
4331 	nr_enums = btf_type_vlen(t);
4332 	meta_needed = nr_enums * sizeof(*enums);
4333 
4334 	if (meta_left < meta_needed) {
4335 		btf_verifier_log_basic(env, t,
4336 				       "meta_left:%u meta_needed:%u",
4337 				       meta_left, meta_needed);
4338 		return -EINVAL;
4339 	}
4340 
4341 	if (t->size > 8 || !is_power_of_2(t->size)) {
4342 		btf_verifier_log_type(env, t, "Unexpected size");
4343 		return -EINVAL;
4344 	}
4345 
4346 	/* enum type has either no name or a valid one */
4347 	if (t->name_off &&
4348 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4349 		btf_verifier_log_type(env, t, "Invalid name");
4350 		return -EINVAL;
4351 	}
4352 
4353 	btf_verifier_log_type(env, t, NULL);
4354 
4355 	for (i = 0; i < nr_enums; i++) {
4356 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4357 			btf_verifier_log(env, "\tInvalid name_offset:%u",
4358 					 enums[i].name_off);
4359 			return -EINVAL;
4360 		}
4361 
4362 		/* enum member must have a valid name */
4363 		if (!enums[i].name_off ||
4364 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4365 			btf_verifier_log_type(env, t, "Invalid name");
4366 			return -EINVAL;
4367 		}
4368 
4369 		if (env->log.level == BPF_LOG_KERNEL)
4370 			continue;
4371 		fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
4372 		btf_verifier_log(env, fmt_str,
4373 				 __btf_name_by_offset(btf, enums[i].name_off),
4374 				 enums[i].val);
4375 	}
4376 
4377 	return meta_needed;
4378 }
4379 
4380 static void btf_enum_log(struct btf_verifier_env *env,
4381 			 const struct btf_type *t)
4382 {
4383 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4384 }
4385 
4386 static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
4387 			  u32 type_id, void *data, u8 bits_offset,
4388 			  struct btf_show *show)
4389 {
4390 	const struct btf_enum *enums = btf_type_enum(t);
4391 	u32 i, nr_enums = btf_type_vlen(t);
4392 	void *safe_data;
4393 	int v;
4394 
4395 	safe_data = btf_show_start_type(show, t, type_id, data);
4396 	if (!safe_data)
4397 		return;
4398 
4399 	v = *(int *)safe_data;
4400 
4401 	for (i = 0; i < nr_enums; i++) {
4402 		if (v != enums[i].val)
4403 			continue;
4404 
4405 		btf_show_type_value(show, "%s",
4406 				    __btf_name_by_offset(btf,
4407 							 enums[i].name_off));
4408 
4409 		btf_show_end_type(show);
4410 		return;
4411 	}
4412 
4413 	if (btf_type_kflag(t))
4414 		btf_show_type_value(show, "%d", v);
4415 	else
4416 		btf_show_type_value(show, "%u", v);
4417 	btf_show_end_type(show);
4418 }
4419 
4420 static const struct btf_kind_operations enum_ops = {
4421 	.check_meta = btf_enum_check_meta,
4422 	.resolve = btf_df_resolve,
4423 	.check_member = btf_enum_check_member,
4424 	.check_kflag_member = btf_enum_check_kflag_member,
4425 	.log_details = btf_enum_log,
4426 	.show = btf_enum_show,
4427 };
4428 
4429 static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
4430 				 const struct btf_type *t,
4431 				 u32 meta_left)
4432 {
4433 	const struct btf_enum64 *enums = btf_type_enum64(t);
4434 	struct btf *btf = env->btf;
4435 	const char *fmt_str;
4436 	u16 i, nr_enums;
4437 	u32 meta_needed;
4438 
4439 	nr_enums = btf_type_vlen(t);
4440 	meta_needed = nr_enums * sizeof(*enums);
4441 
4442 	if (meta_left < meta_needed) {
4443 		btf_verifier_log_basic(env, t,
4444 				       "meta_left:%u meta_needed:%u",
4445 				       meta_left, meta_needed);
4446 		return -EINVAL;
4447 	}
4448 
4449 	if (t->size > 8 || !is_power_of_2(t->size)) {
4450 		btf_verifier_log_type(env, t, "Unexpected size");
4451 		return -EINVAL;
4452 	}
4453 
4454 	/* enum type has either no name or a valid one */
4455 	if (t->name_off &&
4456 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4457 		btf_verifier_log_type(env, t, "Invalid name");
4458 		return -EINVAL;
4459 	}
4460 
4461 	btf_verifier_log_type(env, t, NULL);
4462 
4463 	for (i = 0; i < nr_enums; i++) {
4464 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4465 			btf_verifier_log(env, "\tInvalid name_offset:%u",
4466 					 enums[i].name_off);
4467 			return -EINVAL;
4468 		}
4469 
4470 		/* enum member must have a valid name */
4471 		if (!enums[i].name_off ||
4472 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4473 			btf_verifier_log_type(env, t, "Invalid name");
4474 			return -EINVAL;
4475 		}
4476 
4477 		if (env->log.level == BPF_LOG_KERNEL)
4478 			continue;
4479 
4480 		fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
4481 		btf_verifier_log(env, fmt_str,
4482 				 __btf_name_by_offset(btf, enums[i].name_off),
4483 				 btf_enum64_value(enums + i));
4484 	}
4485 
4486 	return meta_needed;
4487 }
4488 
4489 static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
4490 			    u32 type_id, void *data, u8 bits_offset,
4491 			    struct btf_show *show)
4492 {
4493 	const struct btf_enum64 *enums = btf_type_enum64(t);
4494 	u32 i, nr_enums = btf_type_vlen(t);
4495 	void *safe_data;
4496 	s64 v;
4497 
4498 	safe_data = btf_show_start_type(show, t, type_id, data);
4499 	if (!safe_data)
4500 		return;
4501 
4502 	v = *(u64 *)safe_data;
4503 
4504 	for (i = 0; i < nr_enums; i++) {
4505 		if (v != btf_enum64_value(enums + i))
4506 			continue;
4507 
4508 		btf_show_type_value(show, "%s",
4509 				    __btf_name_by_offset(btf,
4510 							 enums[i].name_off));
4511 
4512 		btf_show_end_type(show);
4513 		return;
4514 	}
4515 
4516 	if (btf_type_kflag(t))
4517 		btf_show_type_value(show, "%lld", v);
4518 	else
4519 		btf_show_type_value(show, "%llu", v);
4520 	btf_show_end_type(show);
4521 }
4522 
4523 static const struct btf_kind_operations enum64_ops = {
4524 	.check_meta = btf_enum64_check_meta,
4525 	.resolve = btf_df_resolve,
4526 	.check_member = btf_enum_check_member,
4527 	.check_kflag_member = btf_enum_check_kflag_member,
4528 	.log_details = btf_enum_log,
4529 	.show = btf_enum64_show,
4530 };
4531 
4532 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
4533 				     const struct btf_type *t,
4534 				     u32 meta_left)
4535 {
4536 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
4537 
4538 	if (meta_left < meta_needed) {
4539 		btf_verifier_log_basic(env, t,
4540 				       "meta_left:%u meta_needed:%u",
4541 				       meta_left, meta_needed);
4542 		return -EINVAL;
4543 	}
4544 
4545 	if (t->name_off) {
4546 		btf_verifier_log_type(env, t, "Invalid name");
4547 		return -EINVAL;
4548 	}
4549 
4550 	if (btf_type_kflag(t)) {
4551 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4552 		return -EINVAL;
4553 	}
4554 
4555 	btf_verifier_log_type(env, t, NULL);
4556 
4557 	return meta_needed;
4558 }
4559 
4560 static void btf_func_proto_log(struct btf_verifier_env *env,
4561 			       const struct btf_type *t)
4562 {
4563 	const struct btf_param *args = (const struct btf_param *)(t + 1);
4564 	u16 nr_args = btf_type_vlen(t), i;
4565 
4566 	btf_verifier_log(env, "return=%u args=(", t->type);
4567 	if (!nr_args) {
4568 		btf_verifier_log(env, "void");
4569 		goto done;
4570 	}
4571 
4572 	if (nr_args == 1 && !args[0].type) {
4573 		/* Only one vararg */
4574 		btf_verifier_log(env, "vararg");
4575 		goto done;
4576 	}
4577 
4578 	btf_verifier_log(env, "%u %s", args[0].type,
4579 			 __btf_name_by_offset(env->btf,
4580 					      args[0].name_off));
4581 	for (i = 1; i < nr_args - 1; i++)
4582 		btf_verifier_log(env, ", %u %s", args[i].type,
4583 				 __btf_name_by_offset(env->btf,
4584 						      args[i].name_off));
4585 
4586 	if (nr_args > 1) {
4587 		const struct btf_param *last_arg = &args[nr_args - 1];
4588 
4589 		if (last_arg->type)
4590 			btf_verifier_log(env, ", %u %s", last_arg->type,
4591 					 __btf_name_by_offset(env->btf,
4592 							      last_arg->name_off));
4593 		else
4594 			btf_verifier_log(env, ", vararg");
4595 	}
4596 
4597 done:
4598 	btf_verifier_log(env, ")");
4599 }
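
/* Illustrative sketch: assuming type_id 1 is "int", the proto
 * "int (*)(int a, ...)" is logged by the function above as
 *
 *	return=1 args=(1 a, vararg)
 */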
4600 
4601 static const struct btf_kind_operations func_proto_ops = {
4602 	.check_meta = btf_func_proto_check_meta,
4603 	.resolve = btf_df_resolve,
4604 	/*
4605 	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
4606 	 * a struct's member.
4607 	 *
4608 	 * It should be a function pointer instead.
4609 	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4610 	 *
4611 	 * Hence, there is no btf_func_check_member().
4612 	 */
4613 	.check_member = btf_df_check_member,
4614 	.check_kflag_member = btf_df_check_kflag_member,
4615 	.log_details = btf_func_proto_log,
4616 	.show = btf_df_show,
4617 };
4618 
4619 static s32 btf_func_check_meta(struct btf_verifier_env *env,
4620 			       const struct btf_type *t,
4621 			       u32 meta_left)
4622 {
4623 	if (!t->name_off ||
4624 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4625 		btf_verifier_log_type(env, t, "Invalid name");
4626 		return -EINVAL;
4627 	}
4628 
4629 	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
4630 		btf_verifier_log_type(env, t, "Invalid func linkage");
4631 		return -EINVAL;
4632 	}
4633 
4634 	if (btf_type_kflag(t)) {
4635 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4636 		return -EINVAL;
4637 	}
4638 
4639 	btf_verifier_log_type(env, t, NULL);
4640 
4641 	return 0;
4642 }
4643 
4644 static int btf_func_resolve(struct btf_verifier_env *env,
4645 			    const struct resolve_vertex *v)
4646 {
4647 	const struct btf_type *t = v->t;
4648 	u32 next_type_id = t->type;
4649 	int err;
4650 
4651 	err = btf_func_check(env, t);
4652 	if (err)
4653 		return err;
4654 
4655 	env_stack_pop_resolved(env, next_type_id, 0);
4656 	return 0;
4657 }
4658 
4659 static const struct btf_kind_operations func_ops = {
4660 	.check_meta = btf_func_check_meta,
4661 	.resolve = btf_func_resolve,
4662 	.check_member = btf_df_check_member,
4663 	.check_kflag_member = btf_df_check_kflag_member,
4664 	.log_details = btf_ref_type_log,
4665 	.show = btf_df_show,
4666 };
4667 
4668 static s32 btf_var_check_meta(struct btf_verifier_env *env,
4669 			      const struct btf_type *t,
4670 			      u32 meta_left)
4671 {
4672 	const struct btf_var *var;
4673 	u32 meta_needed = sizeof(*var);
4674 
4675 	if (meta_left < meta_needed) {
4676 		btf_verifier_log_basic(env, t,
4677 				       "meta_left:%u meta_needed:%u",
4678 				       meta_left, meta_needed);
4679 		return -EINVAL;
4680 	}
4681 
4682 	if (btf_type_vlen(t)) {
4683 		btf_verifier_log_type(env, t, "vlen != 0");
4684 		return -EINVAL;
4685 	}
4686 
4687 	if (btf_type_kflag(t)) {
4688 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4689 		return -EINVAL;
4690 	}
4691 
4692 	if (!t->name_off ||
4693 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4694 		btf_verifier_log_type(env, t, "Invalid name");
4695 		return -EINVAL;
4696 	}
4697 
4698 	/* A var cannot have type void */
4699 	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4700 		btf_verifier_log_type(env, t, "Invalid type_id");
4701 		return -EINVAL;
4702 	}
4703 
4704 	var = btf_type_var(t);
4705 	if (var->linkage != BTF_VAR_STATIC &&
4706 	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4707 		btf_verifier_log_type(env, t, "Linkage not supported");
4708 		return -EINVAL;
4709 	}
4710 
4711 	btf_verifier_log_type(env, t, NULL);
4712 
4713 	return meta_needed;
4714 }
4715 
4716 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4717 {
4718 	const struct btf_var *var = btf_type_var(t);
4719 
4720 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4721 }
4722 
4723 static const struct btf_kind_operations var_ops = {
4724 	.check_meta		= btf_var_check_meta,
4725 	.resolve		= btf_var_resolve,
4726 	.check_member		= btf_df_check_member,
4727 	.check_kflag_member	= btf_df_check_kflag_member,
4728 	.log_details		= btf_var_log,
4729 	.show			= btf_var_show,
4730 };
4731 
4732 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
4733 				  const struct btf_type *t,
4734 				  u32 meta_left)
4735 {
4736 	const struct btf_var_secinfo *vsi;
4737 	u64 last_vsi_end_off = 0, sum = 0;
4738 	u32 i, meta_needed;
4739 
4740 	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
4741 	if (meta_left < meta_needed) {
4742 		btf_verifier_log_basic(env, t,
4743 				       "meta_left:%u meta_needed:%u",
4744 				       meta_left, meta_needed);
4745 		return -EINVAL;
4746 	}
4747 
4748 	if (!t->size) {
4749 		btf_verifier_log_type(env, t, "size == 0");
4750 		return -EINVAL;
4751 	}
4752 
4753 	if (btf_type_kflag(t)) {
4754 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4755 		return -EINVAL;
4756 	}
4757 
4758 	if (!t->name_off ||
4759 	    !btf_name_valid_section(env->btf, t->name_off)) {
4760 		btf_verifier_log_type(env, t, "Invalid name");
4761 		return -EINVAL;
4762 	}
4763 
4764 	btf_verifier_log_type(env, t, NULL);
4765 
4766 	for_each_vsi(i, t, vsi) {
4767 		/* A var cannot have type void */
4768 		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4769 			btf_verifier_log_vsi(env, t, vsi,
4770 					     "Invalid type_id");
4771 			return -EINVAL;
4772 		}
4773 
4774 		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4775 			btf_verifier_log_vsi(env, t, vsi,
4776 					     "Invalid offset");
4777 			return -EINVAL;
4778 		}
4779 
4780 		if (!vsi->size || vsi->size > t->size) {
4781 			btf_verifier_log_vsi(env, t, vsi,
4782 					     "Invalid size");
4783 			return -EINVAL;
4784 		}
4785 
4786 		last_vsi_end_off = vsi->offset + vsi->size;
4787 		if (last_vsi_end_off > t->size) {
4788 			btf_verifier_log_vsi(env, t, vsi,
4789 					     "Invalid offset+size");
4790 			return -EINVAL;
4791 		}
4792 
4793 		btf_verifier_log_vsi(env, t, vsi, NULL);
4794 		sum += vsi->size;
4795 	}
4796 
4797 	if (t->size < sum) {
4798 		btf_verifier_log_type(env, t, "Invalid btf_info size");
4799 		return -EINVAL;
4800 	}
4801 
4802 	return meta_needed;
4803 }
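
/* Illustrative sketch, in the btf_verifier debug log style: a DATASEC
 * such as
 *
 *	[5] DATASEC ".bss" size=16 vlen=2
 *		type_id=3 offset=0 size=4
 *		type_id=4 offset=8 size=8
 *
 * passes the checks above: offsets are non-decreasing, each vsi fits
 * inside t->size and the summed sizes do not exceed it.
 */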
4804 
4805 static int btf_datasec_resolve(struct btf_verifier_env *env,
4806 			       const struct resolve_vertex *v)
4807 {
4808 	const struct btf_var_secinfo *vsi;
4809 	struct btf *btf = env->btf;
4810 	u16 i;
4811 
4812 	env->resolve_mode = RESOLVE_TBD;
4813 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
4814 		u32 var_type_id = vsi->type, type_id, type_size = 0;
4815 		const struct btf_type *var_type = btf_type_by_id(env->btf,
4816 								 var_type_id);
4817 		if (!var_type || !btf_type_is_var(var_type)) {
4818 			btf_verifier_log_vsi(env, v->t, vsi,
4819 					     "Not a VAR kind member");
4820 			return -EINVAL;
4821 		}
4822 
4823 		if (!env_type_is_resolve_sink(env, var_type) &&
4824 		    !env_type_is_resolved(env, var_type_id)) {
4825 			env_stack_set_next_member(env, i + 1);
4826 			return env_stack_push(env, var_type, var_type_id);
4827 		}
4828 
4829 		type_id = var_type->type;
4830 		if (!btf_type_id_size(btf, &type_id, &type_size)) {
4831 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4832 			return -EINVAL;
4833 		}
4834 
4835 		if (vsi->size < type_size) {
4836 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4837 			return -EINVAL;
4838 		}
4839 	}
4840 
4841 	env_stack_pop_resolved(env, 0, 0);
4842 	return 0;
4843 }
4844 
4845 static void btf_datasec_log(struct btf_verifier_env *env,
4846 			    const struct btf_type *t)
4847 {
4848 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4849 }
4850 
4851 static void btf_datasec_show(const struct btf *btf,
4852 			     const struct btf_type *t, u32 type_id,
4853 			     void *data, u8 bits_offset,
4854 			     struct btf_show *show)
4855 {
4856 	const struct btf_var_secinfo *vsi;
4857 	const struct btf_type *var;
4858 	u32 i;
4859 
4860 	if (!btf_show_start_type(show, t, type_id, data))
4861 		return;
4862 
4863 	btf_show_type_value(show, "section (\"%s\") = {",
4864 			    __btf_name_by_offset(btf, t->name_off));
4865 	for_each_vsi(i, t, vsi) {
4866 		var = btf_type_by_id(btf, vsi->type);
4867 		if (i)
4868 			btf_show(show, ",");
4869 		btf_type_ops(var)->show(btf, var, vsi->type,
4870 					data + vsi->offset, bits_offset, show);
4871 	}
4872 	btf_show_end_type(show);
4873 }
4874 
4875 static const struct btf_kind_operations datasec_ops = {
4876 	.check_meta		= btf_datasec_check_meta,
4877 	.resolve		= btf_datasec_resolve,
4878 	.check_member		= btf_df_check_member,
4879 	.check_kflag_member	= btf_df_check_kflag_member,
4880 	.log_details		= btf_datasec_log,
4881 	.show			= btf_datasec_show,
4882 };
4883 
4884 static s32 btf_float_check_meta(struct btf_verifier_env *env,
4885 				const struct btf_type *t,
4886 				u32 meta_left)
4887 {
4888 	if (btf_type_vlen(t)) {
4889 		btf_verifier_log_type(env, t, "vlen != 0");
4890 		return -EINVAL;
4891 	}
4892 
4893 	if (btf_type_kflag(t)) {
4894 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4895 		return -EINVAL;
4896 	}
4897 
4898 	if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4899 	    t->size != 16) {
4900 		btf_verifier_log_type(env, t, "Invalid type_size");
4901 		return -EINVAL;
4902 	}
4903 
4904 	btf_verifier_log_type(env, t, NULL);
4905 
4906 	return 0;
4907 }
4908 
4909 static int btf_float_check_member(struct btf_verifier_env *env,
4910 				  const struct btf_type *struct_type,
4911 				  const struct btf_member *member,
4912 				  const struct btf_type *member_type)
4913 {
4914 	u64 start_offset_bytes;
4915 	u64 end_offset_bytes;
4916 	u64 misalign_bits;
4917 	u64 align_bytes;
4918 	u64 align_bits;
4919 
4920 	/* Different architectures have different alignment requirements, so
4921 	 * here we check only for the reasonable minimum. This way we ensure
4922 	 * that types produced by CO-RE relocation pass the kernel BTF verifier.
4923 	 */
4924 	align_bytes = min_t(u64, sizeof(void *), member_type->size);
4925 	align_bits = align_bytes * BITS_PER_BYTE;
4926 	div64_u64_rem(member->offset, align_bits, &misalign_bits);
4927 	if (misalign_bits) {
4928 		btf_verifier_log_member(env, struct_type, member,
4929 					"Member is not properly aligned");
4930 		return -EINVAL;
4931 	}
4932 
4933 	start_offset_bytes = member->offset / BITS_PER_BYTE;
4934 	end_offset_bytes = start_offset_bytes + member_type->size;
4935 	if (end_offset_bytes > struct_type->size) {
4936 		btf_verifier_log_member(env, struct_type, member,
4937 					"Member exceeds struct_size");
4938 		return -EINVAL;
4939 	}
4940 
4941 	return 0;
4942 }
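
/* Illustrative sketch: a 12-byte float type (e.g. an i386 long double)
 * at bit offset 64 passes the check above when sizeof(void *) == 8:
 * align_bytes = min(8, 12) = 8, so only 8-byte alignment is demanded
 * of the member.
 */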
4943 
4944 static void btf_float_log(struct btf_verifier_env *env,
4945 			  const struct btf_type *t)
4946 {
4947 	btf_verifier_log(env, "size=%u", t->size);
4948 }
4949 
4950 static const struct btf_kind_operations float_ops = {
4951 	.check_meta = btf_float_check_meta,
4952 	.resolve = btf_df_resolve,
4953 	.check_member = btf_float_check_member,
4954 	.check_kflag_member = btf_generic_check_kflag_member,
4955 	.log_details = btf_float_log,
4956 	.show = btf_df_show,
4957 };
4958 
4959 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
4960 			      const struct btf_type *t,
4961 			      u32 meta_left)
4962 {
4963 	const struct btf_decl_tag *tag;
4964 	u32 meta_needed = sizeof(*tag);
4965 	s32 component_idx;
4966 	const char *value;
4967 
4968 	if (meta_left < meta_needed) {
4969 		btf_verifier_log_basic(env, t,
4970 				       "meta_left:%u meta_needed:%u",
4971 				       meta_left, meta_needed);
4972 		return -EINVAL;
4973 	}
4974 
4975 	value = btf_name_by_offset(env->btf, t->name_off);
4976 	if (!value || !value[0]) {
4977 		btf_verifier_log_type(env, t, "Invalid value");
4978 		return -EINVAL;
4979 	}
4980 
4981 	if (btf_type_vlen(t)) {
4982 		btf_verifier_log_type(env, t, "vlen != 0");
4983 		return -EINVAL;
4984 	}
4985 
4986 	component_idx = btf_type_decl_tag(t)->component_idx;
4987 	if (component_idx < -1) {
4988 		btf_verifier_log_type(env, t, "Invalid component_idx");
4989 		return -EINVAL;
4990 	}
4991 
4992 	btf_verifier_log_type(env, t, NULL);
4993 
4994 	return meta_needed;
4995 }
4996 
4997 static int btf_decl_tag_resolve(struct btf_verifier_env *env,
4998 			   const struct resolve_vertex *v)
4999 {
5000 	const struct btf_type *next_type;
5001 	const struct btf_type *t = v->t;
5002 	u32 next_type_id = t->type;
5003 	struct btf *btf = env->btf;
5004 	s32 component_idx;
5005 	u32 vlen;
5006 
5007 	next_type = btf_type_by_id(btf, next_type_id);
5008 	if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
5009 		btf_verifier_log_type(env, v->t, "Invalid type_id");
5010 		return -EINVAL;
5011 	}
5012 
5013 	if (!env_type_is_resolve_sink(env, next_type) &&
5014 	    !env_type_is_resolved(env, next_type_id))
5015 		return env_stack_push(env, next_type, next_type_id);
5016 
5017 	component_idx = btf_type_decl_tag(t)->component_idx;
5018 	if (component_idx != -1) {
5019 		if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
5020 			btf_verifier_log_type(env, v->t, "Invalid component_idx");
5021 			return -EINVAL;
5022 		}
5023 
5024 		if (btf_type_is_struct(next_type)) {
5025 			vlen = btf_type_vlen(next_type);
5026 		} else {
5027 			/* next_type should be a function */
5028 			next_type = btf_type_by_id(btf, next_type->type);
5029 			vlen = btf_type_vlen(next_type);
5030 		}
5031 
5032 		if ((u32)component_idx >= vlen) {
5033 			btf_verifier_log_type(env, v->t, "Invalid component_idx");
5034 			return -EINVAL;
5035 		}
5036 	}
5037 
5038 	env_stack_pop_resolved(env, next_type_id, 0);
5039 
5040 	return 0;
5041 }
5042 
5043 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
5044 {
5045 	btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5046 			 btf_type_decl_tag(t)->component_idx);
5047 }
5048 
5049 static const struct btf_kind_operations decl_tag_ops = {
5050 	.check_meta = btf_decl_tag_check_meta,
5051 	.resolve = btf_decl_tag_resolve,
5052 	.check_member = btf_df_check_member,
5053 	.check_kflag_member = btf_df_check_kflag_member,
5054 	.log_details = btf_decl_tag_log,
5055 	.show = btf_df_show,
5056 };
5057 
5058 static int btf_func_proto_check(struct btf_verifier_env *env,
5059 				const struct btf_type *t)
5060 {
5061 	const struct btf_type *ret_type;
5062 	const struct btf_param *args;
5063 	const struct btf *btf;
5064 	u16 nr_args, i;
5065 	int err;
5066 
5067 	btf = env->btf;
5068 	args = (const struct btf_param *)(t + 1);
5069 	nr_args = btf_type_vlen(t);
5070 
5071 	/* Check func return type which could be "void" (t->type == 0) */
5072 	if (t->type) {
5073 		u32 ret_type_id = t->type;
5074 
5075 		ret_type = btf_type_by_id(btf, ret_type_id);
5076 		if (!ret_type) {
5077 			btf_verifier_log_type(env, t, "Invalid return type");
5078 			return -EINVAL;
5079 		}
5080 
5081 		if (btf_type_is_resolve_source_only(ret_type)) {
5082 			btf_verifier_log_type(env, t, "Invalid return type");
5083 			return -EINVAL;
5084 		}
5085 
5086 		if (btf_type_needs_resolve(ret_type) &&
5087 		    !env_type_is_resolved(env, ret_type_id)) {
5088 			err = btf_resolve(env, ret_type, ret_type_id);
5089 			if (err)
5090 				return err;
5091 		}
5092 
5093 		/* Ensure the return type is a type that has a size */
5094 		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
5095 			btf_verifier_log_type(env, t, "Invalid return type");
5096 			return -EINVAL;
5097 		}
5098 	}
5099 
5100 	if (!nr_args)
5101 		return 0;
5102 
5103 	/* Last func arg type_id could be 0 if it is a vararg */
5104 	if (!args[nr_args - 1].type) {
5105 		if (args[nr_args - 1].name_off) {
5106 			btf_verifier_log_type(env, t, "Invalid arg#%u",
5107 					      nr_args);
5108 			return -EINVAL;
5109 		}
5110 		nr_args--;
5111 	}
5112 
5113 	for (i = 0; i < nr_args; i++) {
5114 		const struct btf_type *arg_type;
5115 		u32 arg_type_id;
5116 
5117 		arg_type_id = args[i].type;
5118 		arg_type = btf_type_by_id(btf, arg_type_id);
5119 		if (!arg_type) {
5120 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5121 			return -EINVAL;
5122 		}
5123 
5124 		if (btf_type_is_resolve_source_only(arg_type)) {
5125 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5126 			return -EINVAL;
5127 		}
5128 
5129 		if (args[i].name_off &&
5130 		    (!btf_name_offset_valid(btf, args[i].name_off) ||
5131 		     !btf_name_valid_identifier(btf, args[i].name_off))) {
5132 			btf_verifier_log_type(env, t,
5133 					      "Invalid arg#%u", i + 1);
5134 			return -EINVAL;
5135 		}
5136 
5137 		if (btf_type_needs_resolve(arg_type) &&
5138 		    !env_type_is_resolved(env, arg_type_id)) {
5139 			err = btf_resolve(env, arg_type, arg_type_id);
5140 			if (err)
5141 				return err;
5142 		}
5143 
5144 		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
5145 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5146 			return -EINVAL;
5147 		}
5148 	}
5149 
5150 	return 0;
5151 }
5152 
5153 static int btf_func_check(struct btf_verifier_env *env,
5154 			  const struct btf_type *t)
5155 {
5156 	const struct btf_type *proto_type;
5157 	const struct btf_param *args;
5158 	const struct btf *btf;
5159 	u16 nr_args, i;
5160 
5161 	btf = env->btf;
5162 	proto_type = btf_type_by_id(btf, t->type);
5163 
5164 	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
5165 		btf_verifier_log_type(env, t, "Invalid type_id");
5166 		return -EINVAL;
5167 	}
5168 
5169 	args = (const struct btf_param *)(proto_type + 1);
5170 	nr_args = btf_type_vlen(proto_type);
5171 	for (i = 0; i < nr_args; i++) {
5172 		if (!args[i].name_off && args[i].type) {
5173 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5174 			return -EINVAL;
5175 		}
5176 	}
5177 
5178 	return 0;
5179 }
5180 
5181 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
5182 	[BTF_KIND_INT] = &int_ops,
5183 	[BTF_KIND_PTR] = &ptr_ops,
5184 	[BTF_KIND_ARRAY] = &array_ops,
5185 	[BTF_KIND_STRUCT] = &struct_ops,
5186 	[BTF_KIND_UNION] = &struct_ops,
5187 	[BTF_KIND_ENUM] = &enum_ops,
5188 	[BTF_KIND_FWD] = &fwd_ops,
5189 	[BTF_KIND_TYPEDEF] = &modifier_ops,
5190 	[BTF_KIND_VOLATILE] = &modifier_ops,
5191 	[BTF_KIND_CONST] = &modifier_ops,
5192 	[BTF_KIND_RESTRICT] = &modifier_ops,
5193 	[BTF_KIND_FUNC] = &func_ops,
5194 	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
5195 	[BTF_KIND_VAR] = &var_ops,
5196 	[BTF_KIND_DATASEC] = &datasec_ops,
5197 	[BTF_KIND_FLOAT] = &float_ops,
5198 	[BTF_KIND_DECL_TAG] = &decl_tag_ops,
5199 	[BTF_KIND_TYPE_TAG] = &modifier_ops,
5200 	[BTF_KIND_ENUM64] = &enum64_ops,
5201 };
5202 
5203 static s32 btf_check_meta(struct btf_verifier_env *env,
5204 			  const struct btf_type *t,
5205 			  u32 meta_left)
5206 {
5207 	u32 saved_meta_left = meta_left;
5208 	s32 var_meta_size;
5209 
5210 	if (meta_left < sizeof(*t)) {
5211 		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
5212 				 env->log_type_id, meta_left, sizeof(*t));
5213 		return -EINVAL;
5214 	}
5215 	meta_left -= sizeof(*t);
5216 
5217 	if (t->info & ~BTF_INFO_MASK) {
5218 		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
5219 				 env->log_type_id, t->info);
5220 		return -EINVAL;
5221 	}
5222 
5223 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
5224 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
5225 		btf_verifier_log(env, "[%u] Invalid kind:%u",
5226 				 env->log_type_id, BTF_INFO_KIND(t->info));
5227 		return -EINVAL;
5228 	}
5229 
5230 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
5231 		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
5232 				 env->log_type_id, t->name_off);
5233 		return -EINVAL;
5234 	}
5235 
5236 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
5237 	if (var_meta_size < 0)
5238 		return var_meta_size;
5239 
5240 	meta_left -= var_meta_size;
5241 
5242 	return saved_meta_left - meta_left;
5243 }
5244 
5245 static int btf_check_all_metas(struct btf_verifier_env *env)
5246 {
5247 	struct btf *btf = env->btf;
5248 	struct btf_header *hdr;
5249 	void *cur, *end;
5250 
5251 	hdr = &btf->hdr;
5252 	cur = btf->nohdr_data + hdr->type_off;
5253 	end = cur + hdr->type_len;
5254 
5255 	env->log_type_id = btf->base_btf ? btf->start_id : 1;
5256 	while (cur < end) {
5257 		struct btf_type *t = cur;
5258 		s32 meta_size;
5259 
5260 		meta_size = btf_check_meta(env, t, end - cur);
5261 		if (meta_size < 0)
5262 			return meta_size;
5263 
5264 		btf_add_type(env, t);
5265 		cur += meta_size;
5266 		env->log_type_id++;
5267 	}
5268 
5269 	return 0;
5270 }
5271 
5272 static bool btf_resolve_valid(struct btf_verifier_env *env,
5273 			      const struct btf_type *t,
5274 			      u32 type_id)
5275 {
5276 	struct btf *btf = env->btf;
5277 
5278 	if (!env_type_is_resolved(env, type_id))
5279 		return false;
5280 
5281 	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
5282 		return !btf_resolved_type_id(btf, type_id) &&
5283 		       !btf_resolved_type_size(btf, type_id);
5284 
5285 	if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
5286 		return btf_resolved_type_id(btf, type_id) &&
5287 		       !btf_resolved_type_size(btf, type_id);
5288 
5289 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
5290 	    btf_type_is_var(t)) {
5291 		t = btf_type_id_resolve(btf, &type_id);
5292 		return t &&
5293 		       !btf_type_is_modifier(t) &&
5294 		       !btf_type_is_var(t) &&
5295 		       !btf_type_is_datasec(t);
5296 	}
5297 
5298 	if (btf_type_is_array(t)) {
5299 		const struct btf_array *array = btf_type_array(t);
5300 		const struct btf_type *elem_type;
5301 		u32 elem_type_id = array->type;
5302 		u32 elem_size;
5303 
5304 		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
5305 		return elem_type && !btf_type_is_modifier(elem_type) &&
5306 			(array->nelems * elem_size ==
5307 			 btf_resolved_type_size(btf, type_id));
5308 	}
5309 
5310 	return false;
5311 }
5312 
5313 static int btf_resolve(struct btf_verifier_env *env,
5314 		       const struct btf_type *t, u32 type_id)
5315 {
5316 	u32 save_log_type_id = env->log_type_id;
5317 	const struct resolve_vertex *v;
5318 	int err = 0;
5319 
5320 	env->resolve_mode = RESOLVE_TBD;
5321 	env_stack_push(env, t, type_id);
5322 	while (!err && (v = env_stack_peak(env))) {
5323 		env->log_type_id = v->type_id;
5324 		err = btf_type_ops(v->t)->resolve(env, v);
5325 	}
5326 
5327 	env->log_type_id = type_id;
5328 	if (err == -E2BIG) {
5329 		btf_verifier_log_type(env, t,
5330 				      "Exceeded max resolving depth:%u",
5331 				      MAX_RESOLVE_DEPTH);
5332 	} else if (err == -EEXIST) {
5333 		btf_verifier_log_type(env, t, "Loop detected");
5334 	}
5335 
5336 	/* Final sanity check */
5337 	if (!err && !btf_resolve_valid(env, t, type_id)) {
5338 		btf_verifier_log_type(env, t, "Invalid resolve state");
5339 		err = -EINVAL;
5340 	}
5341 
5342 	env->log_type_id = save_log_type_id;
5343 	return err;
5344 }
5345 
5346 static int btf_check_all_types(struct btf_verifier_env *env)
5347 {
5348 	struct btf *btf = env->btf;
5349 	const struct btf_type *t;
5350 	u32 type_id, i;
5351 	int err;
5352 
5353 	err = env_resolve_init(env);
5354 	if (err)
5355 		return err;
5356 
5357 	env->phase++;
5358 	for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5359 		type_id = btf->start_id + i;
5360 		t = btf_type_by_id(btf, type_id);
5361 
5362 		env->log_type_id = type_id;
5363 		if (btf_type_needs_resolve(t) &&
5364 		    !env_type_is_resolved(env, type_id)) {
5365 			err = btf_resolve(env, t, type_id);
5366 			if (err)
5367 				return err;
5368 		}
5369 
5370 		if (btf_type_is_func_proto(t)) {
5371 			err = btf_func_proto_check(env, t);
5372 			if (err)
5373 				return err;
5374 		}
5375 	}
5376 
5377 	return 0;
5378 }
5379 
5380 static int btf_parse_type_sec(struct btf_verifier_env *env)
5381 {
5382 	const struct btf_header *hdr = &env->btf->hdr;
5383 	int err;
5384 
5385 	/* Type section must be aligned to 4 bytes */
5386 	if (hdr->type_off & (sizeof(u32) - 1)) {
5387 		btf_verifier_log(env, "Unaligned type_off");
5388 		return -EINVAL;
5389 	}
5390 
5391 	if (!env->btf->base_btf && !hdr->type_len) {
5392 		btf_verifier_log(env, "No type found");
5393 		return -EINVAL;
5394 	}
5395 
5396 	err = btf_check_all_metas(env);
5397 	if (err)
5398 		return err;
5399 
5400 	return btf_check_all_types(env);
5401 }
5402 
5403 static int btf_parse_str_sec(struct btf_verifier_env *env)
5404 {
5405 	const struct btf_header *hdr;
5406 	struct btf *btf = env->btf;
5407 	const char *start, *end;
5408 
5409 	hdr = &btf->hdr;
5410 	start = btf->nohdr_data + hdr->str_off;
5411 	end = start + hdr->str_len;
5412 
5413 	if (end != btf->data + btf->data_size) {
5414 		btf_verifier_log(env, "String section is not at the end");
5415 		return -EINVAL;
5416 	}
5417 
5418 	btf->strings = start;
5419 
5420 	if (btf->base_btf && !hdr->str_len)
5421 		return 0;
5422 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5423 		btf_verifier_log(env, "Invalid string section");
5424 		return -EINVAL;
5425 	}
5426 	if (!btf->base_btf && start[0]) {
5427 		btf_verifier_log(env, "Invalid string section");
5428 		return -EINVAL;
5429 	}
5430 
5431 	return 0;
5432 }
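
/* Illustrative sketch: a minimal valid string section for base BTF is
 *
 *	"\0foo\0bar\0"
 *
 * It must sit at the very end of the BTF blob, begin with '\0' (name
 * offset 0 is the empty name) and end with '\0'; split BTF with a base
 * may have an empty string section.
 */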
5433 
5434 static const size_t btf_sec_info_offset[] = {
5435 	offsetof(struct btf_header, type_off),
5436 	offsetof(struct btf_header, str_off),
5437 };
5438 
5439 static int btf_sec_info_cmp(const void *a, const void *b)
5440 {
5441 	const struct btf_sec_info *x = a;
5442 	const struct btf_sec_info *y = b;
5443 
5444 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5445 }
5446 
5447 static int btf_check_sec_info(struct btf_verifier_env *env,
5448 			      u32 btf_data_size)
5449 {
5450 	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
5451 	u32 total, expected_total, i;
5452 	const struct btf_header *hdr;
5453 	const struct btf *btf;
5454 
5455 	btf = env->btf;
5456 	hdr = &btf->hdr;
5457 
5458 	/* Populate the secs from hdr */
5459 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
5460 		secs[i] = *(struct btf_sec_info *)((void *)hdr +
5461 						   btf_sec_info_offset[i]);
5462 
5463 	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
5464 	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5465 
5466 	/* Check for gaps and overlap among sections */
5467 	total = 0;
5468 	expected_total = btf_data_size - hdr->hdr_len;
5469 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
5470 		if (expected_total < secs[i].off) {
5471 			btf_verifier_log(env, "Invalid section offset");
5472 			return -EINVAL;
5473 		}
5474 		if (total < secs[i].off) {
5475 			/* gap */
5476 			btf_verifier_log(env, "Unsupported section found");
5477 			return -EINVAL;
5478 		}
5479 		if (total > secs[i].off) {
5480 			btf_verifier_log(env, "Section overlap found");
5481 			return -EINVAL;
5482 		}
5483 		if (expected_total - total < secs[i].len) {
5484 			btf_verifier_log(env,
5485 					 "Total section length too long");
5486 			return -EINVAL;
5487 		}
5488 		total += secs[i].len;
5489 	}
5490 
5491 	/* There is data other than hdr and known sections */
5492 	if (expected_total != total) {
5493 		btf_verifier_log(env, "Unsupported section found");
5494 		return -EINVAL;
5495 	}
5496 
5497 	return 0;
5498 }
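
/* Worked example with hypothetical sizes: for hdr_len = 24 and
 * btf_data_size = 124, expected_total is 100 and the only layout the loop
 * above accepts is two adjacent sections with no gap, no overlap and no
 * trailing bytes:
 *
 *   type_off = 0,  type_len = 60
 *   str_off  = 60, str_len  = 40
 */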
5499 
5500 static int btf_parse_hdr(struct btf_verifier_env *env)
5501 {
5502 	u32 hdr_len, hdr_copy, btf_data_size;
5503 	const struct btf_header *hdr;
5504 	struct btf *btf;
5505 
5506 	btf = env->btf;
5507 	btf_data_size = btf->data_size;
5508 
5509 	if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
5510 		btf_verifier_log(env, "hdr_len not found");
5511 		return -EINVAL;
5512 	}
5513 
5514 	hdr = btf->data;
5515 	hdr_len = hdr->hdr_len;
5516 	if (btf_data_size < hdr_len) {
5517 		btf_verifier_log(env, "btf_header not found");
5518 		return -EINVAL;
5519 	}
5520 
5521 	/* Ensure the unsupported header fields are zero */
5522 	if (hdr_len > sizeof(btf->hdr)) {
5523 		u8 *expected_zero = btf->data + sizeof(btf->hdr);
5524 		u8 *end = btf->data + hdr_len;
5525 
5526 		for (; expected_zero < end; expected_zero++) {
5527 			if (*expected_zero) {
5528 				btf_verifier_log(env, "Unsupported btf_header");
5529 				return -E2BIG;
5530 			}
5531 		}
5532 	}
5533 
5534 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5535 	memcpy(&btf->hdr, btf->data, hdr_copy);
5536 
5537 	hdr = &btf->hdr;
5538 
5539 	btf_verifier_log_hdr(env, btf_data_size);
5540 
5541 	if (hdr->magic != BTF_MAGIC) {
5542 		btf_verifier_log(env, "Invalid magic");
5543 		return -EINVAL;
5544 	}
5545 
5546 	if (hdr->version != BTF_VERSION) {
5547 		btf_verifier_log(env, "Unsupported version");
5548 		return -ENOTSUPP;
5549 	}
5550 
5551 	if (hdr->flags) {
5552 		btf_verifier_log(env, "Unsupported flags");
5553 		return -ENOTSUPP;
5554 	}
5555 
5556 	if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5557 		btf_verifier_log(env, "No data");
5558 		return -EINVAL;
5559 	}
5560 
5561 	return btf_check_sec_info(env, btf_data_size);
5562 }
5563 
5564 static const char *alloc_obj_fields[] = {
5565 	"bpf_spin_lock",
5566 	"bpf_list_head",
5567 	"bpf_list_node",
5568 	"bpf_rb_root",
5569 	"bpf_rb_node",
5570 	"bpf_refcount",
5571 };
5572 
5573 static struct btf_struct_metas *
5574 btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
5575 {
5576 	struct btf_struct_metas *tab = NULL;
5577 	struct btf_id_set *aof;
5578 	int i, n, id, ret;
5579 
5580 	BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
5581 	BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
5582 
5583 	aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
5584 	if (!aof)
5585 		return ERR_PTR(-ENOMEM);
5586 	aof->cnt = 0;
5587 
5588 	for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
5589 		/* Try to find whether this special type exists in user BTF, and
5590 		 * if so remember its ID so we can easily find it among members
5591 		 * of structs that we iterate in the next loop.
5592 		 */
5593 		struct btf_id_set *new_aof;
5594 
5595 		id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5596 		if (id < 0)
5597 			continue;
5598 
5599 		new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5600 				   GFP_KERNEL | __GFP_NOWARN);
5601 		if (!new_aof) {
5602 			ret = -ENOMEM;
5603 			goto free_aof;
5604 		}
5605 		aof = new_aof;
5606 		aof->ids[aof->cnt++] = id;
5607 	}
5608 
5609 	n = btf_nr_types(btf);
5610 	for (i = 1; i < n; i++) {
5611 		/* Try to find if there are kptrs in user BTF and remember their ID */
5612 		struct btf_id_set *new_aof;
5613 		struct btf_field_info tmp;
5614 		const struct btf_type *t;
5615 
5616 		t = btf_type_by_id(btf, i);
5617 		if (!t) {
5618 			ret = -EINVAL;
5619 			goto free_aof;
5620 		}
5621 
5622 		ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR);
5623 		if (ret != BTF_FIELD_FOUND)
5624 			continue;
5625 
5626 		new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5627 				   GFP_KERNEL | __GFP_NOWARN);
5628 		if (!new_aof) {
5629 			ret = -ENOMEM;
5630 			goto free_aof;
5631 		}
5632 		aof = new_aof;
5633 		aof->ids[aof->cnt++] = i;
5634 	}
5635 
5636 	if (!aof->cnt) {
5637 		kfree(aof);
5638 		return NULL;
5639 	}
5640 	sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5641 
5642 	for (i = 1; i < n; i++) {
5643 		struct btf_struct_metas *new_tab;
5644 		const struct btf_member *member;
5645 		struct btf_struct_meta *type;
5646 		struct btf_record *record;
5647 		const struct btf_type *t;
5648 		int j, tab_cnt;
5649 
5650 		t = btf_type_by_id(btf, i);
5651 		if (!__btf_type_is_struct(t))
5652 			continue;
5653 
5654 		cond_resched();
5655 
5656 		for_each_member(j, t, member) {
5657 			if (btf_id_set_contains(aof, member->type))
5658 				goto parse;
5659 		}
5660 		continue;
5661 	parse:
5662 		tab_cnt = tab ? tab->cnt : 0;
5663 		new_tab = krealloc(tab, struct_size(new_tab, types, tab_cnt + 1),
5664 				   GFP_KERNEL | __GFP_NOWARN);
5665 		if (!new_tab) {
5666 			ret = -ENOMEM;
5667 			goto free;
5668 		}
5669 		if (!tab)
5670 			new_tab->cnt = 0;
5671 		tab = new_tab;
5672 
5673 		type = &tab->types[tab->cnt];
5674 		type->btf_id = i;
5675 		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
5676 						  BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
5677 						  BPF_KPTR, t->size);
5678 		/* The record should never be unset here; treat NULL or an error as failure */
5679 		if (IS_ERR_OR_NULL(record)) {
5680 			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5681 			goto free;
5682 		}
5683 		type->record = record;
5684 		tab->cnt++;
5685 	}
5686 	kfree(aof);
5687 	return tab;
5688 free:
5689 	btf_struct_metas_free(tab);
5690 free_aof:
5691 	kfree(aof);
5692 	return ERR_PTR(ret);
5693 }
5694 
5695 struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
5696 {
5697 	struct btf_struct_metas *tab;
5698 
5699 	BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0);
5700 	tab = btf->struct_meta_tab;
5701 	if (!tab)
5702 		return NULL;
5703 	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5704 }
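
/* The bsearch() above works because of two invariants: 'btf_id' is the first
 * member of struct btf_struct_meta (asserted with BUILD_BUG_ON() above), so
 * btf_id_cmp_func() can treat each slot as a bare u32 key, and
 * btf_parse_struct_metas() appended entries in increasing btf_id order,
 * keeping tab->types sorted.
 */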
5705 
5706 static int btf_check_type_tags(struct btf_verifier_env *env,
5707 			       struct btf *btf, int start_id)
5708 {
5709 	int i, n, good_id = start_id - 1;
5710 	bool in_tags;
5711 
5712 	n = btf_nr_types(btf);
5713 	for (i = start_id; i < n; i++) {
5714 		const struct btf_type *t;
5715 		int chain_limit = 32;
5716 		u32 cur_id = i;
5717 
5718 		t = btf_type_by_id(btf, i);
5719 		if (!t)
5720 			return -EINVAL;
5721 		if (!btf_type_is_modifier(t))
5722 			continue;
5723 
5724 		cond_resched();
5725 
5726 		in_tags = btf_type_is_type_tag(t);
5727 		while (btf_type_is_modifier(t)) {
5728 			if (!chain_limit--) {
5729 				btf_verifier_log(env, "Max chain length or cycle detected");
5730 				return -ELOOP;
5731 			}
5732 			if (btf_type_is_type_tag(t)) {
5733 				if (!in_tags) {
5734 					btf_verifier_log(env, "Type tags don't precede modifiers");
5735 					return -EINVAL;
5736 				}
5737 			} else if (in_tags) {
5738 				in_tags = false;
5739 			}
5740 			if (cur_id <= good_id)
5741 				break;
5742 			/* Move to next type */
5743 			cur_id = t->type;
5744 			t = btf_type_by_id(btf, cur_id);
5745 			if (!t)
5746 				return -EINVAL;
5747 		}
5748 		good_id = i;
5749 	}
5750 	return 0;
5751 }
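
/* The rule enforced above is that within a modifier chain all type tags
 * must come first.  An illustrative pair of chains (IDs hypothetical):
 *
 *   [1] TYPE_TAG 'rcu' -> [2] CONST -> [3] INT     accepted
 *   [4] CONST -> [5] TYPE_TAG 'rcu' -> [6] INT     rejected with -EINVAL,
 *                                                  a tag follows a modifier
 */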
5752 
5753 static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
5754 {
5755 	u32 log_true_size;
5756 	int err;
5757 
5758 	err = bpf_vlog_finalize(log, &log_true_size);
5759 
5760 	if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
5761 	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
5762 				  &log_true_size, sizeof(log_true_size)))
5763 		err = -EFAULT;
5764 
5765 	return err;
5766 }
5767 
5768 static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
5769 {
5770 	bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5771 	char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
5772 	struct btf_struct_metas *struct_meta_tab;
5773 	struct btf_verifier_env *env = NULL;
5774 	struct btf *btf = NULL;
5775 	u8 *data;
5776 	int err, ret;
5777 
5778 	if (attr->btf_size > BTF_MAX_SIZE)
5779 		return ERR_PTR(-E2BIG);
5780 
5781 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5782 	if (!env)
5783 		return ERR_PTR(-ENOMEM);
5784 
5785 	/* user could have requested verbose verifier output
5786 	 * and supplied a buffer to store the verification trace
5787 	 */
5788 	err = bpf_vlog_init(&env->log, attr->btf_log_level,
5789 			    log_ubuf, attr->btf_log_size);
5790 	if (err)
5791 		goto errout_free;
5792 
5793 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5794 	if (!btf) {
5795 		err = -ENOMEM;
5796 		goto errout;
5797 	}
5798 	env->btf = btf;
5799 
5800 	data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5801 	if (!data) {
5802 		err = -ENOMEM;
5803 		goto errout;
5804 	}
5805 
5806 	btf->data = data;
5807 	btf->data_size = attr->btf_size;
5808 
5809 	if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5810 		err = -EFAULT;
5811 		goto errout;
5812 	}
5813 
5814 	err = btf_parse_hdr(env);
5815 	if (err)
5816 		goto errout;
5817 
5818 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5819 
5820 	err = btf_parse_str_sec(env);
5821 	if (err)
5822 		goto errout;
5823 
5824 	err = btf_parse_type_sec(env);
5825 	if (err)
5826 		goto errout;
5827 
5828 	err = btf_check_type_tags(env, btf, 1);
5829 	if (err)
5830 		goto errout;
5831 
5832 	struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
5833 	if (IS_ERR(struct_meta_tab)) {
5834 		err = PTR_ERR(struct_meta_tab);
5835 		goto errout;
5836 	}
5837 	btf->struct_meta_tab = struct_meta_tab;
5838 
5839 	if (struct_meta_tab) {
5840 		int i;
5841 
5842 		for (i = 0; i < struct_meta_tab->cnt; i++) {
5843 			err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5844 			if (err < 0)
5845 				goto errout_meta;
5846 		}
5847 	}
5848 
5849 	err = finalize_log(&env->log, uattr, uattr_size);
5850 	if (err)
5851 		goto errout_free;
5852 
5853 	btf_verifier_env_free(env);
5854 	refcount_set(&btf->refcnt, 1);
5855 	return btf;
5856 
5857 errout_meta:
5858 	btf_free_struct_meta_tab(btf);
5859 errout:
5860 	/* overwrite err with -ENOSPC or -EFAULT */
5861 	ret = finalize_log(&env->log, uattr, uattr_size);
5862 	if (ret)
5863 		err = ret;
5864 errout_free:
5865 	btf_verifier_env_free(env);
5866 	if (btf)
5867 		btf_free(btf);
5868 	return ERR_PTR(err);
5869 }
5870 
5871 extern char __start_BTF[];
5872 extern char __stop_BTF[];
5873 extern struct btf *btf_vmlinux;
5874 
5875 #define BPF_MAP_TYPE(_id, _ops)
5876 #define BPF_LINK_TYPE(_id, _name)
5877 static union {
5878 	struct bpf_ctx_convert {
5879 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5880 	prog_ctx_type _id##_prog; \
5881 	kern_ctx_type _id##_kern;
5882 #include <linux/bpf_types.h>
5883 #undef BPF_PROG_TYPE
5884 	} *__t;
5885 	/* 't' is written once under lock. Read many times. */
5886 	const struct btf_type *t;
5887 } bpf_ctx_convert;
5888 enum {
5889 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5890 	__ctx_convert##_id,
5891 #include <linux/bpf_types.h>
5892 #undef BPF_PROG_TYPE
5893 	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
5894 };
5895 static u8 bpf_ctx_convert_map[] = {
5896 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5897 	[_id] = __ctx_convert##_id,
5898 #include <linux/bpf_types.h>
5899 #undef BPF_PROG_TYPE
5900 	0, /* avoid empty array */
5901 };
5902 #undef BPF_MAP_TYPE
5903 #undef BPF_LINK_TYPE
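
/* A sketch of what the macro machinery above expands to for a single
 * (assumed) entry like
 * BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act, struct __sk_buff,
 * struct sk_buff) in bpf_types.h:
 *
 *   struct bpf_ctx_convert {
 *           struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog;
 *           struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern;
 *           ...
 *   };
 *
 * so for program type N, member 2 * bpf_ctx_convert_map[N] is the
 * program-facing ctx type and member 2 * bpf_ctx_convert_map[N] + 1 is the
 * in-kernel ctx type, which is exactly how the two helpers below index it.
 */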
5904 
5905 static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
5906 {
5907 	const struct btf_type *conv_struct;
5908 	const struct btf_member *ctx_type;
5909 
5910 	conv_struct = bpf_ctx_convert.t;
5911 	if (!conv_struct)
5912 		return NULL;
5913 	/* prog_type is a valid bpf program type. No need for a bounds check. */
5914 	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5915 	/* ctx_type is a pointer to prog_ctx_type in vmlinux.
5916 	 * Like 'struct __sk_buff'
5917 	 */
5918 	return btf_type_by_id(btf_vmlinux, ctx_type->type);
5919 }
5920 
5921 static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
5922 {
5923 	const struct btf_type *conv_struct;
5924 	const struct btf_member *ctx_type;
5925 
5926 	conv_struct = bpf_ctx_convert.t;
5927 	if (!conv_struct)
5928 		return -EFAULT;
5929 	/* prog_type is a valid bpf program type. No need for a bounds check. */
5930 	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
5931 	/* ctx_type is a pointer to kern_ctx_type in vmlinux.
5932 	 * Like 'struct sk_buff'
5933 	 */
5934 	return ctx_type->type;
5935 }
5936 
5937 bool btf_is_projection_of(const char *pname, const char *tname)
5938 {
5939 	if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
5940 		return true;
5941 	if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
5942 		return true;
5943 	return false;
5944 }
5945 
5946 bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
5947 			  const struct btf_type *t, enum bpf_prog_type prog_type,
5948 			  int arg)
5949 {
5950 	const struct btf_type *ctx_type;
5951 	const char *tname, *ctx_tname;
5952 
5953 	t = btf_type_by_id(btf, t->type);
5954 
5955 	/* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to
5956 	 * check before we skip all the typedefs below.
5957 	 */
5958 	if (prog_type == BPF_PROG_TYPE_KPROBE) {
5959 		while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
5960 			t = btf_type_by_id(btf, t->type);
5961 
5962 		if (btf_type_is_typedef(t)) {
5963 			tname = btf_name_by_offset(btf, t->name_off);
5964 			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
5965 				return true;
5966 		}
5967 	}
5968 
5969 	while (btf_type_is_modifier(t))
5970 		t = btf_type_by_id(btf, t->type);
5971 	if (!btf_type_is_struct(t)) {
5972 		/* Only pointer to struct is supported for now.
5973 		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
5974 		 * is not supported yet.
5975 		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
5976 		 */
5977 		return false;
5978 	}
5979 	tname = btf_name_by_offset(btf, t->name_off);
5980 	if (!tname) {
5981 		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5982 		return false;
5983 	}
5984 
5985 	ctx_type = find_canonical_prog_ctx_type(prog_type);
5986 	if (!ctx_type) {
5987 		bpf_log(log, "btf_vmlinux is malformed\n");
5988 		/* should not happen */
5989 		return false;
5990 	}
5991 again:
5992 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
5993 	if (!ctx_tname) {
5994 		/* should not happen */
5995 		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5996 		return false;
5997 	}
5998 	/* program types without named context types work only with arg:ctx tag */
5999 	if (ctx_tname[0] == '\0')
6000 		return false;
6001 	/* only check that the prog's ctx type name is the same as what the
6002 	 * kernel expects. No need to compare field by field.
6003 	 * It's ok for bpf prog to do:
6004 	 * struct __sk_buff {};
6005 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
6006 	 * { // no fields of skb are ever used }
6007 	 */
6008 	if (btf_is_projection_of(ctx_tname, tname))
6009 		return true;
6010 	if (strcmp(ctx_tname, tname)) {
6011 		/* bpf_user_pt_regs_t is a typedef, so resolve it to
6012 		 * the underlying struct and check the name again
6013 		 */
6014 		if (!btf_type_is_modifier(ctx_type))
6015 			return false;
6016 		while (btf_type_is_modifier(ctx_type))
6017 			ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6018 		goto again;
6019 	}
6020 	return true;
6021 }
6022 
6023 /* forward declarations for arch-specific underlying types of
6024  * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
6025  * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still
6026  * works correctly with __builtin_types_compatible_p() on respective
6027  * architectures
6028  */
6029 struct user_regs_struct;
6030 struct user_pt_regs;
6031 
6032 static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
6033 				      const struct btf_type *t, int arg,
6034 				      enum bpf_prog_type prog_type,
6035 				      enum bpf_attach_type attach_type)
6036 {
6037 	const struct btf_type *ctx_type;
6038 	const char *tname, *ctx_tname;
6039 
6040 	if (!btf_is_ptr(t)) {
6041 		bpf_log(log, "arg#%d type isn't a pointer\n", arg);
6042 		return -EINVAL;
6043 	}
6044 	t = btf_type_by_id(btf, t->type);
6045 
6046 	/* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */
6047 	if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) {
6048 		while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
6049 			t = btf_type_by_id(btf, t->type);
6050 
6051 		if (btf_type_is_typedef(t)) {
6052 			tname = btf_name_by_offset(btf, t->name_off);
6053 			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
6054 				return 0;
6055 		}
6056 	}
6057 
6058 	/* all other program types don't use typedefs for context type */
6059 	while (btf_type_is_modifier(t))
6060 		t = btf_type_by_id(btf, t->type);
6061 
6062 	/* `void *ctx __arg_ctx` is always valid */
6063 	if (btf_type_is_void(t))
6064 		return 0;
6065 
6066 	tname = btf_name_by_offset(btf, t->name_off);
6067 	if (str_is_empty(tname)) {
6068 		bpf_log(log, "arg#%d type doesn't have a name\n", arg);
6069 		return -EINVAL;
6070 	}
6071 
6072 	/* special cases */
6073 	switch (prog_type) {
6074 	case BPF_PROG_TYPE_KPROBE:
6075 		if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6076 			return 0;
6077 		break;
6078 	case BPF_PROG_TYPE_PERF_EVENT:
6079 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6080 		    __btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6081 			return 0;
6082 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6083 		    __btf_type_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6084 			return 0;
6085 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6086 		    __btf_type_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6087 			return 0;
6088 		break;
6089 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
6090 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6091 		/* allow u64* as ctx */
6092 		if (btf_is_int(t) && t->size == 8)
6093 			return 0;
6094 		break;
6095 	case BPF_PROG_TYPE_TRACING:
6096 		switch (attach_type) {
6097 		case BPF_TRACE_RAW_TP:
6098 			/* tp_btf programs are TRACING, so they need a special case here */
6099 			if (__btf_type_is_struct(t) &&
6100 			    strcmp(tname, "bpf_raw_tracepoint_args") == 0)
6101 				return 0;
6102 			/* allow u64* as ctx */
6103 			if (btf_is_int(t) && t->size == 8)
6104 				return 0;
6105 			break;
6106 		case BPF_TRACE_ITER:
6107 			/* allow struct bpf_iter__xxx types only */
6108 			if (__btf_type_is_struct(t) &&
6109 			    strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6110 				return 0;
6111 			break;
6112 		case BPF_TRACE_FENTRY:
6113 		case BPF_TRACE_FEXIT:
6114 		case BPF_MODIFY_RETURN:
6115 			/* allow u64* as ctx */
6116 			if (btf_is_int(t) && t->size == 8)
6117 				return 0;
6118 			break;
6119 		default:
6120 			break;
6121 		}
6122 		break;
6123 	case BPF_PROG_TYPE_LSM:
6124 	case BPF_PROG_TYPE_STRUCT_OPS:
6125 		/* allow u64* as ctx */
6126 		if (btf_is_int(t) && t->size == 8)
6127 			return 0;
6128 		break;
6129 	case BPF_PROG_TYPE_TRACEPOINT:
6130 	case BPF_PROG_TYPE_SYSCALL:
6131 	case BPF_PROG_TYPE_EXT:
6132 		return 0; /* anything goes */
6133 	default:
6134 		break;
6135 	}
6136 
6137 	ctx_type = find_canonical_prog_ctx_type(prog_type);
6138 	if (!ctx_type) {
6139 		/* should not happen */
6140 		bpf_log(log, "btf_vmlinux is malformed\n");
6141 		return -EINVAL;
6142 	}
6143 	/* resolve typedefs and check that the underlying structs match as well */
6144 	/* resolve typedefs and check that underlying structs are matching as well */
6145 	while (btf_type_is_modifier(ctx_type))
6146 		ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6147 
6148 	/* if program type doesn't have distinctly named struct type for
6149 	 * context, then __arg_ctx argument can only be `void *`, which we
6150 	 * already checked above
6151 	 */
6152 	if (!__btf_type_is_struct(ctx_type)) {
6153 		bpf_log(log, "arg#%d should be void pointer\n", arg);
6154 		return -EINVAL;
6155 	}
6156 
6157 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6158 	if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
6159 		bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
6160 		return -EINVAL;
6161 	}
6162 
6163 	return 0;
6164 }
6165 
6166 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
6167 				     struct btf *btf,
6168 				     const struct btf_type *t,
6169 				     enum bpf_prog_type prog_type,
6170 				     int arg)
6171 {
6172 	if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg))
6173 		return -ENOENT;
6174 	return find_kern_ctx_type_id(prog_type);
6175 }
6176 
6177 int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
6178 {
6179 	const struct btf_member *kctx_member;
6180 	const struct btf_type *conv_struct;
6181 	const struct btf_type *kctx_type;
6182 	u32 kctx_type_id;
6183 
6184 	conv_struct = bpf_ctx_convert.t;
6185 	/* get member for kernel ctx type */
6186 	kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
6187 	kctx_type_id = kctx_member->type;
6188 	kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id);
6189 	if (!btf_type_is_struct(kctx_type)) {
6190 		bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
6191 		return -EINVAL;
6192 	}
6193 
6194 	return kctx_type_id;
6195 }
6196 
6197 BTF_ID_LIST(bpf_ctx_convert_btf_id)
6198 BTF_ID(struct, bpf_ctx_convert)
6199 
6200 static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
6201 				  void *data, unsigned int data_size)
6202 {
6203 	struct btf *btf = NULL;
6204 	int err;
6205 
6206 	if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF))
6207 		return ERR_PTR(-ENOENT);
6208 
6209 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6210 	if (!btf) {
6211 		err = -ENOMEM;
6212 		goto errout;
6213 	}
6214 	env->btf = btf;
6215 
6216 	btf->data = data;
6217 	btf->data_size = data_size;
6218 	btf->kernel_btf = true;
6219 	snprintf(btf->name, sizeof(btf->name), "%s", name);
6220 
6221 	err = btf_parse_hdr(env);
6222 	if (err)
6223 		goto errout;
6224 
6225 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6226 
6227 	err = btf_parse_str_sec(env);
6228 	if (err)
6229 		goto errout;
6230 
6231 	err = btf_check_all_metas(env);
6232 	if (err)
6233 		goto errout;
6234 
6235 	err = btf_check_type_tags(env, btf, 1);
6236 	if (err)
6237 		goto errout;
6238 
6239 	refcount_set(&btf->refcnt, 1);
6240 
6241 	return btf;
6242 
6243 errout:
6244 	if (btf) {
6245 		kvfree(btf->types);
6246 		kfree(btf);
6247 	}
6248 	return ERR_PTR(err);
6249 }
6250 
6251 struct btf *btf_parse_vmlinux(void)
6252 {
6253 	struct btf_verifier_env *env = NULL;
6254 	struct bpf_verifier_log *log;
6255 	struct btf *btf;
6256 	int err;
6257 
6258 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6259 	if (!env)
6260 		return ERR_PTR(-ENOMEM);
6261 
6262 	log = &env->log;
6263 	log->level = BPF_LOG_KERNEL;
6264 	btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
6265 	if (IS_ERR(btf))
6266 		goto err_out;
6267 
6268 	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
6269 	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
6270 	err = btf_alloc_id(btf);
6271 	if (err) {
6272 		btf_free(btf);
6273 		btf = ERR_PTR(err);
6274 	}
6275 err_out:
6276 	btf_verifier_env_free(env);
6277 	return btf;
6278 }
6279 
6280 /* If .BTF_ids section was created with distilled base BTF, both base and
6281  * split BTF ids will need to be mapped to the actual base/split ids of
6282  * this BTF now that it has been relocated.
6283  */
6284 static __u32 btf_relocate_id(const struct btf *btf, __u32 id)
6285 {
6286 	if (!btf->base_btf || !btf->base_id_map)
6287 		return id;
6288 	return btf->base_id_map[id];
6289 }
6290 
6291 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6292 
6293 static struct btf *btf_parse_module(const char *module_name, const void *data,
6294 				    unsigned int data_size, void *base_data,
6295 				    unsigned int base_data_size)
6296 {
6297 	struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL;
6298 	struct btf_verifier_env *env = NULL;
6299 	struct bpf_verifier_log *log;
6300 	int err = 0;
6301 
6302 	vmlinux_btf = bpf_get_btf_vmlinux();
6303 	if (IS_ERR(vmlinux_btf))
6304 		return vmlinux_btf;
6305 	if (!vmlinux_btf)
6306 		return ERR_PTR(-EINVAL);
6307 
6308 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6309 	if (!env)
6310 		return ERR_PTR(-ENOMEM);
6311 
6312 	log = &env->log;
6313 	log->level = BPF_LOG_KERNEL;
6314 
6315 	if (base_data) {
6316 		base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size);
6317 		if (IS_ERR(base_btf)) {
6318 			err = PTR_ERR(base_btf);
6319 			goto errout;
6320 		}
6321 	} else {
6322 		base_btf = vmlinux_btf;
6323 	}
6324 
6325 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6326 	if (!btf) {
6327 		err = -ENOMEM;
6328 		goto errout;
6329 	}
6330 	env->btf = btf;
6331 
6332 	btf->base_btf = base_btf;
6333 	btf->start_id = base_btf->nr_types;
6334 	btf->start_str_off = base_btf->hdr.str_len;
6335 	btf->kernel_btf = true;
6336 	snprintf(btf->name, sizeof(btf->name), "%s", module_name);
6337 
6338 	btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
6339 	if (!btf->data) {
6340 		err = -ENOMEM;
6341 		goto errout;
6342 	}
6343 	btf->data_size = data_size;
6344 
6345 	err = btf_parse_hdr(env);
6346 	if (err)
6347 		goto errout;
6348 
6349 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6350 
6351 	err = btf_parse_str_sec(env);
6352 	if (err)
6353 		goto errout;
6354 
6355 	err = btf_check_all_metas(env);
6356 	if (err)
6357 		goto errout;
6358 
6359 	err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
6360 	if (err)
6361 		goto errout;
6362 
6363 	if (base_btf != vmlinux_btf) {
6364 		err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
6365 		if (err)
6366 			goto errout;
6367 		btf_free(base_btf);
6368 		base_btf = vmlinux_btf;
6369 	}
6370 
6371 	btf_verifier_env_free(env);
6372 	refcount_set(&btf->refcnt, 1);
6373 	return btf;
6374 
6375 errout:
6376 	btf_verifier_env_free(env);
6377 	if (!IS_ERR(base_btf) && base_btf != vmlinux_btf)
6378 		btf_free(base_btf);
6379 	if (btf) {
6380 		kvfree(btf->data);
6381 		kvfree(btf->types);
6382 		kfree(btf);
6383 	}
6384 	return ERR_PTR(err);
6385 }
6386 
6387 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
6388 
6389 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
6390 {
6391 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6392 
6393 	if (tgt_prog)
6394 		return tgt_prog->aux->btf;
6395 	else
6396 		return prog->aux->attach_btf;
6397 }
6398 
6399 static bool is_void_or_int_ptr(struct btf *btf, const struct btf_type *t)
6400 {
6401 	/* skip modifiers */
6402 	t = btf_type_skip_modifiers(btf, t->type, NULL);
6403 	return btf_type_is_void(t) || btf_type_is_int(t);
6404 }
6405 
6406 u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
6407 		    int off)
6408 {
6409 	const struct btf_param *args;
6410 	const struct btf_type *t;
6411 	u32 offset = 0, nr_args;
6412 	int i;
6413 
6414 	if (!func_proto)
6415 		return off / 8;
6416 
6417 	nr_args = btf_type_vlen(func_proto);
6418 	args = (const struct btf_param *)(func_proto + 1);
6419 	for (i = 0; i < nr_args; i++) {
6420 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6421 		offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6422 		if (off < offset)
6423 			return i;
6424 	}
6425 
6426 	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6427 	offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6428 	if (off < offset)
6429 		return nr_args;
6430 
6431 	return nr_args + 1;
6432 }
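
/* Worked example (hypothetical prototype): for
 *
 *   int f(int a, struct sk_buff *skb, u64 b);
 *
 * each slot rounds up to 8 bytes, so off 0..7 maps to arg 0, off 8..15 to
 * arg 1, off 16..23 to arg 2, off 24..31 to the return value (nr_args) and
 * anything beyond that to nr_args + 1, i.e. an invalid access.
 */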
6433 
6434 static bool prog_args_trusted(const struct bpf_prog *prog)
6435 {
6436 	enum bpf_attach_type atype = prog->expected_attach_type;
6437 
6438 	switch (prog->type) {
6439 	case BPF_PROG_TYPE_TRACING:
6440 		return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER;
6441 	case BPF_PROG_TYPE_LSM:
6442 		return bpf_lsm_is_trusted(prog);
6443 	case BPF_PROG_TYPE_STRUCT_OPS:
6444 		return true;
6445 	default:
6446 		return false;
6447 	}
6448 }
6449 
6450 int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
6451 		       u32 arg_no)
6452 {
6453 	const struct btf_param *args;
6454 	const struct btf_type *t;
6455 	int off = 0, i;
6456 	u32 sz;
6457 
6458 	args = btf_params(func_proto);
6459 	for (i = 0; i < arg_no; i++) {
6460 		t = btf_type_by_id(btf, args[i].type);
6461 		t = btf_resolve_size(btf, t, &sz);
6462 		if (IS_ERR(t))
6463 			return PTR_ERR(t);
6464 		off += roundup(sz, 8);
6465 	}
6466 
6467 	return off;
6468 }
6469 
6470 struct bpf_raw_tp_null_args {
6471 	const char *func;
6472 	u64 mask;
6473 };
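
/* Each tracepoint argument gets a nibble in 'mask': bit 0x1 in nibble N
 * means arg N may be NULL and bit 0x2 means it may carry an IS_ERR() value,
 * as decoded in btf_ctx_access() below.  E.g. 0x10 flags arg 1 as
 * maybe-NULL, and 0x11 flags both arg 0 and arg 1.
 */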
6474 
6475 static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
6476 	/* sched */
6477 	{ "sched_pi_setprio", 0x10 },
6478 	/* ... from sched_numa_pair_template event class */
6479 	{ "sched_stick_numa", 0x100 },
6480 	{ "sched_swap_numa", 0x100 },
6481 	/* afs */
6482 	{ "afs_make_fs_call", 0x10 },
6483 	{ "afs_make_fs_calli", 0x10 },
6484 	{ "afs_make_fs_call1", 0x10 },
6485 	{ "afs_make_fs_call2", 0x10 },
6486 	{ "afs_protocol_error", 0x1 },
6487 	{ "afs_flock_ev", 0x10 },
6488 	/* cachefiles */
6489 	{ "cachefiles_lookup", 0x1 | 0x200 },
6490 	{ "cachefiles_unlink", 0x1 },
6491 	{ "cachefiles_rename", 0x1 },
6492 	{ "cachefiles_prep_read", 0x1 },
6493 	{ "cachefiles_mark_active", 0x1 },
6494 	{ "cachefiles_mark_failed", 0x1 },
6495 	{ "cachefiles_mark_inactive", 0x1 },
6496 	{ "cachefiles_vfs_error", 0x1 },
6497 	{ "cachefiles_io_error", 0x1 },
6498 	{ "cachefiles_ondemand_open", 0x1 },
6499 	{ "cachefiles_ondemand_copen", 0x1 },
6500 	{ "cachefiles_ondemand_close", 0x1 },
6501 	{ "cachefiles_ondemand_read", 0x1 },
6502 	{ "cachefiles_ondemand_cread", 0x1 },
6503 	{ "cachefiles_ondemand_fd_write", 0x1 },
6504 	{ "cachefiles_ondemand_fd_release", 0x1 },
6505 	/* ext4, from ext4__mballoc event class */
6506 	{ "ext4_mballoc_discard", 0x10 },
6507 	{ "ext4_mballoc_free", 0x10 },
6508 	/* fib */
6509 	{ "fib_table_lookup", 0x100 },
6510 	/* filelock */
6511 	/* ... from filelock_lock event class */
6512 	{ "posix_lock_inode", 0x10 },
6513 	{ "fcntl_setlk", 0x10 },
6514 	{ "locks_remove_posix", 0x10 },
6515 	{ "flock_lock_inode", 0x10 },
6516 	/* ... from filelock_lease event class */
6517 	{ "break_lease_noblock", 0x10 },
6518 	{ "break_lease_block", 0x10 },
6519 	{ "break_lease_unblock", 0x10 },
6520 	{ "generic_delete_lease", 0x10 },
6521 	{ "time_out_leases", 0x10 },
6522 	/* host1x */
6523 	{ "host1x_cdma_push_gather", 0x10000 },
6524 	/* huge_memory */
6525 	{ "mm_khugepaged_scan_pmd", 0x10 },
6526 	{ "mm_collapse_huge_page_isolate", 0x1 },
6527 	{ "mm_khugepaged_scan_file", 0x10 },
6528 	{ "mm_khugepaged_collapse_file", 0x10 },
6529 	/* kmem */
6530 	{ "mm_page_alloc", 0x1 },
6531 	{ "mm_page_pcpu_drain", 0x1 },
6532 	/* .. from mm_page event class */
6533 	{ "mm_page_alloc_zone_locked", 0x1 },
6534 	/* netfs */
6535 	{ "netfs_failure", 0x10 },
6536 	/* power */
6537 	{ "device_pm_callback_start", 0x10 },
6538 	/* qdisc */
6539 	{ "qdisc_dequeue", 0x1000 },
6540 	/* rxrpc */
6541 	{ "rxrpc_recvdata", 0x1 },
6542 	{ "rxrpc_resend", 0x10 },
6543 	{ "rxrpc_tq", 0x10 },
6544 	{ "rxrpc_client", 0x1 },
6545 	/* skb */
6546 	{ "kfree_skb", 0x1000 },
6547 	/* sunrpc */
6548 	{ "xs_stream_read_data", 0x1 },
6549 	/* ... from xprt_cong_event event class */
6550 	{ "xprt_reserve_cong", 0x10 },
6551 	{ "xprt_release_cong", 0x10 },
6552 	{ "xprt_get_cong", 0x10 },
6553 	{ "xprt_put_cong", 0x10 },
6554 	/* tcp */
6555 	{ "tcp_send_reset", 0x11 },
6556 	{ "tcp_sendmsg_locked", 0x100 },
6557 	/* tegra_apb_dma */
6558 	{ "tegra_dma_tx_status", 0x100 },
6559 	/* timer_migration */
6560 	{ "tmigr_update_events", 0x1 },
6561 	/* writeback, from writeback_folio_template event class */
6562 	{ "writeback_dirty_folio", 0x10 },
6563 	{ "folio_wait_writeback", 0x10 },
6564 	/* rdma */
6565 	{ "mr_integ_alloc", 0x2000 },
6566 	/* bpf_testmod */
6567 	{ "bpf_testmod_test_read", 0x0 },
6568 	/* amdgpu */
6569 	{ "amdgpu_vm_bo_map", 0x1 },
6570 	{ "amdgpu_vm_bo_unmap", 0x1 },
6571 	/* netfs */
6572 	{ "netfs_folioq", 0x1 },
6573 	/* xfs from xfs_defer_pending_class */
6574 	{ "xfs_defer_create_intent", 0x1 },
6575 	{ "xfs_defer_cancel_list", 0x1 },
6576 	{ "xfs_defer_pending_finish", 0x1 },
6577 	{ "xfs_defer_pending_abort", 0x1 },
6578 	{ "xfs_defer_relog_intent", 0x1 },
6579 	{ "xfs_defer_isolate_paused", 0x1 },
6580 	{ "xfs_defer_item_pause", 0x1 },
6581 	{ "xfs_defer_item_unpause", 0x1 },
6582 	/* xfs from xfs_defer_pending_item_class */
6583 	{ "xfs_defer_add_item", 0x1 },
6584 	{ "xfs_defer_cancel_item", 0x1 },
6585 	{ "xfs_defer_finish_item", 0x1 },
6586 	/* xfs from xfs_icwalk_class */
6587 	{ "xfs_ioc_free_eofblocks", 0x10 },
6588 	{ "xfs_blockgc_free_space", 0x10 },
6589 	/* xfs from xfs_btree_cur_class */
6590 	{ "xfs_btree_updkeys", 0x100 },
6591 	{ "xfs_btree_overlapped_query_range", 0x100 },
6592 	/* xfs from xfs_imap_class */
6593 	{ "xfs_map_blocks_found", 0x10000 },
6594 	{ "xfs_map_blocks_alloc", 0x10000 },
6595 	{ "xfs_iomap_alloc", 0x1000 },
6596 	{ "xfs_iomap_found", 0x1000 },
6597 	/* xfs from xfs_fs_class */
6598 	{ "xfs_inodegc_flush", 0x1 },
6599 	{ "xfs_inodegc_push", 0x1 },
6600 	{ "xfs_inodegc_start", 0x1 },
6601 	{ "xfs_inodegc_stop", 0x1 },
6602 	{ "xfs_inodegc_queue", 0x1 },
6603 	{ "xfs_inodegc_throttle", 0x1 },
6604 	{ "xfs_fs_sync_fs", 0x1 },
6605 	{ "xfs_blockgc_start", 0x1 },
6606 	{ "xfs_blockgc_stop", 0x1 },
6607 	{ "xfs_blockgc_worker", 0x1 },
6608 	{ "xfs_blockgc_flush_all", 0x1 },
6609 	/* xfs_scrub */
6610 	{ "xchk_nlinks_live_update", 0x10 },
6611 	/* xfs_scrub from xchk_metapath_class */
6612 	{ "xchk_metapath_lookup", 0x100 },
6613 	/* nfsd */
6614 	{ "nfsd_dirent", 0x1 },
6615 	{ "nfsd_file_acquire", 0x1001 },
6616 	{ "nfsd_file_insert_err", 0x1 },
6617 	{ "nfsd_file_cons_err", 0x1 },
6618 	/* nfs4 */
6619 	{ "nfs4_setup_sequence", 0x1 },
6620 	{ "pnfs_update_layout", 0x10000 },
6621 	{ "nfs4_inode_callback_event", 0x200 },
6622 	{ "nfs4_inode_stateid_callback_event", 0x200 },
6623 	/* nfs from pnfs_layout_event */
6624 	{ "pnfs_mds_fallback_pg_init_read", 0x10000 },
6625 	{ "pnfs_mds_fallback_pg_init_write", 0x10000 },
6626 	{ "pnfs_mds_fallback_pg_get_mirror_count", 0x10000 },
6627 	{ "pnfs_mds_fallback_read_done", 0x10000 },
6628 	{ "pnfs_mds_fallback_write_done", 0x10000 },
6629 	{ "pnfs_mds_fallback_read_pagelist", 0x10000 },
6630 	{ "pnfs_mds_fallback_write_pagelist", 0x10000 },
6631 	/* coda */
6632 	{ "coda_dec_pic_run", 0x10 },
6633 	{ "coda_dec_pic_done", 0x10 },
6634 	/* cfg80211 */
6635 	{ "cfg80211_scan_done", 0x11 },
6636 	{ "rdev_set_coalesce", 0x10 },
6637 	{ "cfg80211_report_wowlan_wakeup", 0x100 },
6638 	{ "cfg80211_inform_bss_frame", 0x100 },
6639 	{ "cfg80211_michael_mic_failure", 0x10000 },
6640 	/* cfg80211 from wiphy_work_event */
6641 	{ "wiphy_work_queue", 0x10 },
6642 	{ "wiphy_work_run", 0x10 },
6643 	{ "wiphy_work_cancel", 0x10 },
6644 	{ "wiphy_work_flush", 0x10 },
6645 	/* hugetlbfs */
6646 	{ "hugetlbfs_alloc_inode", 0x10 },
6647 	/* spufs */
6648 	{ "spufs_context", 0x10 },
6649 	/* kvm_hv */
6650 	{ "kvm_page_fault_enter", 0x100 },
6651 	/* dpu */
6652 	{ "dpu_crtc_setup_mixer", 0x100 },
6653 	/* binder */
6654 	{ "binder_transaction", 0x100 },
6655 	/* bcachefs */
6656 	{ "btree_path_free", 0x100 },
6657 	/* hfi1_tx */
6658 	{ "hfi1_sdma_progress", 0x1000 },
6659 	/* iptfs */
6660 	{ "iptfs_ingress_postq_event", 0x1000 },
6661 	/* neigh */
6662 	{ "neigh_update", 0x10 },
6663 	/* snd_firewire_lib */
6664 	{ "amdtp_packet", 0x100 },
6665 };
6666 
6667 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
6668 		    const struct bpf_prog *prog,
6669 		    struct bpf_insn_access_aux *info)
6670 {
6671 	const struct btf_type *t = prog->aux->attach_func_proto;
6672 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6673 	struct btf *btf = bpf_prog_get_target_btf(prog);
6674 	const char *tname = prog->aux->attach_func_name;
6675 	struct bpf_verifier_log *log = info->log;
6676 	const struct btf_param *args;
6677 	bool ptr_err_raw_tp = false;
6678 	const char *tag_value;
6679 	u32 nr_args, arg;
6680 	int i, ret;
6681 
6682 	if (off % 8) {
6683 		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
6684 			tname, off);
6685 		return false;
6686 	}
6687 	arg = btf_ctx_arg_idx(btf, t, off);
6688 	args = (const struct btf_param *)(t + 1);
6689 	/* if t is NULL, fall back to a default BPF prog with
6690 	 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
6691 	 */
6692 	nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
6693 	if (prog->aux->attach_btf_trace) {
6694 		/* skip first 'void *__data' argument in btf_trace_##name typedef */
6695 		args++;
6696 		nr_args--;
6697 	}
6698 
6699 	if (arg > nr_args) {
6700 		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6701 			tname, arg + 1);
6702 		return false;
6703 	}
6704 
6705 	if (arg == nr_args) {
6706 		switch (prog->expected_attach_type) {
6707 		case BPF_LSM_MAC:
6708 			/* mark we are accessing the return value */
6709 			info->is_retval = true;
6710 			fallthrough;
6711 		case BPF_LSM_CGROUP:
6712 		case BPF_TRACE_FEXIT:
6713 			/* When LSM programs are attached to void LSM hooks
6714 			 * they use FEXIT trampolines and when attached to
6715 			 * int LSM hooks, they use MODIFY_RETURN trampolines.
6716 			 *
6717 			 * While the LSM programs are BPF_MODIFY_RETURN-like
6718 			 * the check:
6719 			 *
6720 			 *	if (ret_type != 'int')
6721 			 *		return -EINVAL;
6722 			 *
6723 			 * is _not_ done here. This is still safe as LSM hooks
6724 			 * have only void and int return types.
6725 			 */
6726 			if (!t)
6727 				return true;
6728 			t = btf_type_by_id(btf, t->type);
6729 			break;
6730 		case BPF_MODIFY_RETURN:
6731 			/* For now the BPF_MODIFY_RETURN can only be attached to
6732 			 * functions that return an int.
6733 			 */
6734 			if (!t)
6735 				return false;
6736 
6737 			t = btf_type_skip_modifiers(btf, t->type, NULL);
6738 			if (!btf_type_is_small_int(t)) {
6739 				bpf_log(log,
6740 					"ret type %s not allowed for fmod_ret\n",
6741 					btf_type_str(t));
6742 				return false;
6743 			}
6744 			break;
6745 		default:
6746 			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6747 				tname, arg + 1);
6748 			return false;
6749 		}
6750 	} else {
6751 		if (!t)
6752 			/* Default prog with MAX_BPF_FUNC_REG_ARGS args */
6753 			return true;
6754 		t = btf_type_by_id(btf, args[arg].type);
6755 	}
6756 
6757 	/* skip modifiers */
6758 	while (btf_type_is_modifier(t))
6759 		t = btf_type_by_id(btf, t->type);
6760 	if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
6761 		/* accessing a scalar */
6762 		return true;
6763 	if (!btf_type_is_ptr(t)) {
6764 		bpf_log(log,
6765 			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
6766 			tname, arg,
6767 			__btf_name_by_offset(btf, t->name_off),
6768 			btf_type_str(t));
6769 		return false;
6770 	}
6771 
6772 	if (size != sizeof(u64)) {
6773 		bpf_log(log, "func '%s' size %d must be 8\n",
6774 			tname, size);
6775 		return false;
6776 	}
6777 
6778 	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
6779 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6780 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6781 		u32 type, flag;
6782 
6783 		type = base_type(ctx_arg_info->reg_type);
6784 		flag = type_flag(ctx_arg_info->reg_type);
6785 		if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6786 		    (flag & PTR_MAYBE_NULL)) {
6787 			info->reg_type = ctx_arg_info->reg_type;
6788 			return true;
6789 		}
6790 	}
6791 
6792 	/*
6793 	 * If it's a pointer to void or int, it's the same as a scalar from the
6794 	 * verifier's safety POV. Either way, no further pointer walking is allowed.
6795 	 */
6796 	if (is_void_or_int_ptr(btf, t))
6797 		return true;
6798 
6799 	/* this is a pointer to another type */
6800 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6801 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6802 
6803 		if (ctx_arg_info->offset == off) {
6804 			if (!ctx_arg_info->btf_id) {
6805 				bpf_log(log, "invalid btf_id for context argument offset %u\n", off);
6806 				return false;
6807 			}
6808 
6809 			info->reg_type = ctx_arg_info->reg_type;
6810 			info->btf = ctx_arg_info->btf ? : btf_vmlinux;
6811 			info->btf_id = ctx_arg_info->btf_id;
6812 			info->ref_obj_id = ctx_arg_info->ref_obj_id;
6813 			return true;
6814 		}
6815 	}
6816 
6817 	info->reg_type = PTR_TO_BTF_ID;
6818 	if (prog_args_trusted(prog))
6819 		info->reg_type |= PTR_TRUSTED;
6820 
6821 	if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
6822 		info->reg_type |= PTR_MAYBE_NULL;
6823 
6824 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6825 		struct btf *btf = prog->aux->attach_btf;
6826 		const struct btf_type *t;
6827 		const char *tname;
6828 
6829 		/* These BTF lookups should not fail; return false if one somehow does */
6830 		t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6831 		if (!t)
6832 			return false;
6833 		tname = btf_name_by_offset(btf, t->name_off);
6834 		if (!tname)
6835 			return false;
6836 		/* Checked by bpf_check_attach_target */
6837 		tname += sizeof("btf_trace_") - 1;
6838 		for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) {
6839 			/* Is this a func with potential NULL args? */
6840 			if (strcmp(tname, raw_tp_null_args[i].func))
6841 				continue;
6842 			if (raw_tp_null_args[i].mask & (0x1ULL << (arg * 4)))
6843 				info->reg_type |= PTR_MAYBE_NULL;
6844 			/* Is the current arg IS_ERR? */
6845 			if (raw_tp_null_args[i].mask & (0x2ULL << (arg * 4)))
6846 				ptr_err_raw_tp = true;
6847 			break;
6848 		}
6849 		/* If we don't know the NULL-ness specification and the tracepoint
6850 		 * comes from a loadable module, be conservative and mark the
6851 		 * argument as PTR_MAYBE_NULL.
6852 		 */
6853 		if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf))
6854 			info->reg_type |= PTR_MAYBE_NULL;
6855 	}
6856 
6857 	if (tgt_prog) {
6858 		enum bpf_prog_type tgt_type;
6859 
6860 		if (tgt_prog->type == BPF_PROG_TYPE_EXT)
6861 			tgt_type = tgt_prog->aux->saved_dst_prog_type;
6862 		else
6863 			tgt_type = tgt_prog->type;
6864 
6865 		ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
6866 		if (ret > 0) {
6867 			info->btf = btf_vmlinux;
6868 			info->btf_id = ret;
6869 			return true;
6870 		} else {
6871 			return false;
6872 		}
6873 	}
6874 
6875 	info->btf = btf;
6876 	info->btf_id = t->type;
6877 	t = btf_type_by_id(btf, t->type);
6878 
6879 	if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
6880 		tag_value = __btf_name_by_offset(btf, t->name_off);
6881 		if (strcmp(tag_value, "user") == 0)
6882 			info->reg_type |= MEM_USER;
6883 		if (strcmp(tag_value, "percpu") == 0)
6884 			info->reg_type |= MEM_PERCPU;
6885 	}
6886 
6887 	/* skip modifiers */
6888 	while (btf_type_is_modifier(t)) {
6889 		info->btf_id = t->type;
6890 		t = btf_type_by_id(btf, t->type);
6891 	}
6892 	if (!btf_type_is_struct(t)) {
6893 		bpf_log(log,
6894 			"func '%s' arg%d type %s is not a struct\n",
6895 			tname, arg, btf_type_str(t));
6896 		return false;
6897 	}
6898 	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
6899 		tname, arg, info->btf_id, btf_type_str(t),
6900 		__btf_name_by_offset(btf, t->name_off));
6901 
6902 	/* Perform all checks on the validity of type for this argument, but if
6903 	 * we know it can be IS_ERR at runtime, scrub pointer type and mark as
6904 	 * scalar.
6905 	 */
6906 	if (ptr_err_raw_tp) {
6907 		bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg);
6908 		info->reg_type = SCALAR_VALUE;
6909 	}
6910 	return true;
6911 }
6912 EXPORT_SYMBOL_GPL(btf_ctx_access);
6913 
6914 enum bpf_struct_walk_result {
6915 	/* < 0 error */
6916 	WALK_SCALAR = 0,
6917 	WALK_PTR,
6918 	WALK_STRUCT,
6919 };
6920 
6921 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
6922 			   const struct btf_type *t, int off, int size,
6923 			   u32 *next_btf_id, enum bpf_type_flag *flag,
6924 			   const char **field_name)
6925 {
6926 	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
6927 	const struct btf_type *mtype, *elem_type = NULL;
6928 	const struct btf_member *member;
6929 	const char *tname, *mname, *tag_value;
6930 	u32 vlen, elem_id, mid;
6931 
6932 again:
6933 	if (btf_type_is_modifier(t))
6934 		t = btf_type_skip_modifiers(btf, t->type, NULL);
6935 	tname = __btf_name_by_offset(btf, t->name_off);
6936 	if (!btf_type_is_struct(t)) {
6937 		bpf_log(log, "Type '%s' is not a struct\n", tname);
6938 		return -EINVAL;
6939 	}
6940 
6941 	vlen = btf_type_vlen(t);
6942 	if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
6943 		/*
6944 		 * walking unions yields untrusted pointers
6945 		 * with the exception of __bpf_md_ptr and other
6946 		 * unions with a single member
6947 		 */
6948 		*flag |= PTR_UNTRUSTED;
6949 
6950 	if (off + size > t->size) {
6951 		/* If the last element is a variable size array, we may
6952 		 * need to relax the rule.
6953 		 */
6954 		struct btf_array *array_elem;
6955 
6956 		if (vlen == 0)
6957 			goto error;
6958 
6959 		member = btf_type_member(t) + vlen - 1;
6960 		mtype = btf_type_skip_modifiers(btf, member->type,
6961 						NULL);
6962 		if (!btf_type_is_array(mtype))
6963 			goto error;
6964 
6965 		array_elem = (struct btf_array *)(mtype + 1);
6966 		if (array_elem->nelems != 0)
6967 			goto error;
6968 
6969 		moff = __btf_member_bit_offset(t, member) / 8;
6970 		if (off < moff)
6971 			goto error;
6972 
6973 		/* allow structure and integer */
6974 		t = btf_type_skip_modifiers(btf, array_elem->type,
6975 					    NULL);
6976 
6977 		if (btf_type_is_int(t))
6978 			return WALK_SCALAR;
6979 
6980 		if (!btf_type_is_struct(t))
6981 			goto error;
6982 
6983 		off = (off - moff) % t->size;
6984 		goto again;
6985 
6986 error:
6987 		bpf_log(log, "access beyond struct %s at off %u size %u\n",
6988 			tname, off, size);
6989 		return -EACCES;
6990 	}
6991 
6992 	for_each_member(i, t, member) {
6993 		/* offset of the field in bytes */
6994 		moff = __btf_member_bit_offset(t, member) / 8;
6995 		if (off + size <= moff)
6996 			/* won't find anything, field is already too far */
6997 			break;
6998 
6999 		if (__btf_member_bitfield_size(t, member)) {
7000 			u32 end_bit = __btf_member_bit_offset(t, member) +
7001 				__btf_member_bitfield_size(t, member);
7002 
7003 			/* off <= moff instead of off == moff because clang
7004 			 * does not generate a BTF member for an anonymous
7005 			 * bitfield like the ":16" here:
7006 			 * struct {
7007 			 *	int :16;
7008 			 *	int x:8;
7009 			 * };
7010 			 */
7011 			if (off <= moff &&
7012 			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
7013 				return WALK_SCALAR;
7014 
7015 			/* off may be accessing a following member,
7016 			 *
7017 			 * or
7018 			 *
7019 			 * doing a partial access at either end of this
7020 			 * bitfield.  Continue in this case as well to
7021 			 * treat it as not accessing this bitfield
7022 			 * and eventually error out with "field not
7023 			 * found" to keep it simple.
7024 			 * This could be relaxed if a legitimate
7025 			 * partial access case comes up later.
7026 			 */
7027 			continue;
7028 		}
7029 
7030 		/* In case "off" is pointing at a hole in the struct */
7031 		if (off < moff)
7032 			break;
7033 
7034 		/* type of the field */
7035 		mid = member->type;
7036 		mtype = btf_type_by_id(btf, member->type);
7037 		mname = __btf_name_by_offset(btf, member->name_off);
7038 
7039 		mtype = __btf_resolve_size(btf, mtype, &msize,
7040 					   &elem_type, &elem_id, &total_nelems,
7041 					   &mid);
7042 		if (IS_ERR(mtype)) {
7043 			bpf_log(log, "field %s doesn't have size\n", mname);
7044 			return -EFAULT;
7045 		}
7046 
7047 		mtrue_end = moff + msize;
7048 		if (off >= mtrue_end)
7049 			/* no overlap with member, keep iterating */
7050 			continue;
7051 
7052 		if (btf_type_is_array(mtype)) {
7053 			u32 elem_idx;
7054 
7055 			/* __btf_resolve_size() above helps to
7056 			 * linearize a multi-dimensional array.
7057 			 *
7058 			 * The logic here treats an array
7059 			 * in a struct in the following way:
7060 			 *
7061 			 * struct outer {
7062 			 *	struct inner array[2][2];
7063 			 * };
7064 			 *
7065 			 * looks like:
7066 			 *
7067 			 * struct outer {
7068 			 *	struct inner array_elem0;
7069 			 *	struct inner array_elem1;
7070 			 *	struct inner array_elem2;
7071 			 *	struct inner array_elem3;
7072 			 * };
7073 			 *
7074 			 * When accessing outer->array[1][0], it moves
7075 			 * moff to "array_elem2", set mtype to
7076 			 * "struct inner", and msize also becomes
7077 			 * sizeof(struct inner).  Then most of the
7078 			 * remaining logic will fall through without
7079 			 * caring the current member is an array or
7080 			 * caring whether the current member is an
7081 			 * array or not.
7082 			 * Unlike mtype/msize/moff, mtrue_end does not
7083 			 * change.  The naming difference ("_true") tells
7084 			 * that it is not always corresponding to
7085 			 * us that it does not always correspond to
7086 			 * It is the true end of the current
7087 			 * member (i.e. array in this case).  That
7088 			 * will allow an int array to be accessed like
7089 			 * a scratch space,
7090 			 * i.e. allow access beyond the size of
7091 			 *      the array's element as long as it is
7092 			 *      within the mtrue_end boundary.
7093 			 */
7094 
7095 			/* skip empty array */
7096 			if (moff == mtrue_end)
7097 				continue;
7098 
7099 			msize /= total_nelems;
7100 			elem_idx = (off - moff) / msize;
7101 			moff += elem_idx * msize;
7102 			mtype = elem_type;
7103 			mid = elem_id;
7104 		}
7105 
7106 		/* the 'off' we're looking for is either equal to the start
7107 		 * of this field or inside of this struct
7108 		 */
7109 		if (btf_type_is_struct(mtype)) {
7110 			/* our field must be inside that union or struct */
7111 			t = mtype;
7112 
7113 			/* return if the offset matches the member offset */
7114 			if (off == moff) {
7115 				*next_btf_id = mid;
7116 				return WALK_STRUCT;
7117 			}
7118 
7119 			/* adjust offset we're looking for */
7120 			off -= moff;
7121 			goto again;
7122 		}
7123 
7124 		if (btf_type_is_ptr(mtype)) {
7125 			const struct btf_type *stype, *t;
7126 			enum bpf_type_flag tmp_flag = 0;
7127 			u32 id;
7128 
7129 			if (msize != size || off != moff) {
7130 				bpf_log(log,
7131 					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
7132 					mname, moff, tname, off, size);
7133 				return -EACCES;
7134 			}
7135 
7136 			/* check type tag */
7137 			t = btf_type_by_id(btf, mtype->type);
7138 			if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
7139 				tag_value = __btf_name_by_offset(btf, t->name_off);
7140 				/* check __user tag */
7141 				if (strcmp(tag_value, "user") == 0)
7142 					tmp_flag = MEM_USER;
7143 				/* check __percpu tag */
7144 				if (strcmp(tag_value, "percpu") == 0)
7145 					tmp_flag = MEM_PERCPU;
7146 				/* check __rcu tag */
7147 				if (strcmp(tag_value, "rcu") == 0)
7148 					tmp_flag = MEM_RCU;
7149 			}
7150 
7151 			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
7152 			if (btf_type_is_struct(stype)) {
7153 				*next_btf_id = id;
7154 				*flag |= tmp_flag;
7155 				if (field_name)
7156 					*field_name = mname;
7157 				return WALK_PTR;
7158 			}
7159 		}
7160 
7161 		/* Allow more flexible access within an int as long as
7162 		 * it is within mtrue_end.
7163 		 * Since mtrue_end could be the end of an array,
7164 		 * that also allows using an array of int as a scratch
7165 		 * space. e.g. skb->cb[].
7166 		 */
7167 		if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
7168 			bpf_log(log,
7169 				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
7170 				mname, mtrue_end, tname, off, size);
7171 			return -EACCES;
7172 		}
7173 
7174 		return WALK_SCALAR;
7175 	}
7176 	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
7177 	return -EINVAL;
7178 }
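
/* Illustrative outcomes of a single btf_struct_walk() step on a hypothetical
 *
 *   struct foo {
 *           u64 cnt;                  (8 bytes)
 *           struct bar b;             (8 bytes)
 *           struct task_struct *t;
 *   };
 *
 * off 0,  size 8  ->  WALK_SCALAR (the u64)
 * off 8,  size 8  ->  WALK_STRUCT, *next_btf_id = id of struct bar
 * off 16, size 8  ->  WALK_PTR, *next_btf_id = id of struct task_struct
 */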
7179 
7180 int btf_struct_access(struct bpf_verifier_log *log,
7181 		      const struct bpf_reg_state *reg,
7182 		      int off, int size, enum bpf_access_type atype __maybe_unused,
7183 		      u32 *next_btf_id, enum bpf_type_flag *flag,
7184 		      const char **field_name)
7185 {
7186 	const struct btf *btf = reg->btf;
7187 	enum bpf_type_flag tmp_flag = 0;
7188 	const struct btf_type *t;
7189 	u32 id = reg->btf_id;
7190 	int err;
7191 
7192 	while (type_is_alloc(reg->type)) {
7193 		struct btf_struct_meta *meta;
7194 		struct btf_record *rec;
7195 		int i;
7196 
7197 		meta = btf_find_struct_meta(btf, id);
7198 		if (!meta)
7199 			break;
7200 		rec = meta->record;
7201 		for (i = 0; i < rec->cnt; i++) {
7202 			struct btf_field *field = &rec->fields[i];
7203 			u32 offset = field->offset;

7204 			if (off < offset + field->size && offset < off + size) {
7205 				bpf_log(log,
7206 					"direct access to %s is disallowed\n",
7207 					btf_field_type_name(field->type));
7208 				return -EACCES;
7209 			}
7210 		}
7211 		break;
7212 	}
7213 
7214 	t = btf_type_by_id(btf, id);
7215 	do {
7216 		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
7217 
7218 		switch (err) {
7219 		case WALK_PTR:
7220 			/* For local types, the destination register cannot
7221 			 * become a pointer again.
7222 			 */
7223 			if (type_is_alloc(reg->type))
7224 				return SCALAR_VALUE;
7225 			/* If we found the pointer or scalar on t+off,
7226 			 * we're done.
7227 			 */
7228 			*next_btf_id = id;
7229 			*flag = tmp_flag;
7230 			return PTR_TO_BTF_ID;
7231 		case WALK_SCALAR:
7232 			return SCALAR_VALUE;
7233 		case WALK_STRUCT:
7234 			/* We found a nested struct, so continue the search
7235 			 * by diving into it. At this point the offset is
7236 			 * aligned with the new type, so set it to 0.
7237 			 */
7238 			t = btf_type_by_id(btf, id);
7239 			off = 0;
7240 			break;
7241 		default:
7242 			/* It's either an error or an unknown return value,
7243 			 * so scream and leave.
7244 			 */
7245 			if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
7246 				return -EINVAL;
7247 			return err;
7248 		}
7249 	} while (t);
7250 
7251 	return -EINVAL;
7252 }
7253 
7254 /* Check that two BTF types, each specified as a BTF object + id, are exactly
7255  * the same. Trivial ID check is not enough due to module BTFs, because we can
7256  * end up with two different module BTFs, but IDs point to the common type in
7257  * vmlinux BTF.
7258  */
7259 bool btf_types_are_same(const struct btf *btf1, u32 id1,
7260 			const struct btf *btf2, u32 id2)
7261 {
7262 	if (id1 != id2)
7263 		return false;
7264 	if (btf1 == btf2)
7265 		return true;
7266 	return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
7267 }
7268 
7269 bool btf_struct_ids_match(struct bpf_verifier_log *log,
7270 			  const struct btf *btf, u32 id, int off,
7271 			  const struct btf *need_btf, u32 need_type_id,
7272 			  bool strict)
7273 {
7274 	const struct btf_type *type;
7275 	enum bpf_type_flag flag = 0;
7276 	int err;
7277 
7278 	/* Are we already done? */
7279 	if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
7280 		return true;
7281 	/* In case of strict type match, we do not walk struct, the top level
7282 	 * type match must succeed. When strict is true, off should have already
7283 	 * been 0.
7284 	 */
7285 	if (strict)
7286 		return false;
7287 again:
7288 	type = btf_type_by_id(btf, id);
7289 	if (!type)
7290 		return false;
7291 	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
7292 	if (err != WALK_STRUCT)
7293 		return false;
7294 
7295 	/* We found nested struct object. If it matches
7296 	 * the requested ID, we're done. Otherwise let's
7297 	 * continue the search with offset 0 in the new
7298 	 * type.
7299 	 */
7300 	if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
7301 		off = 0;
7302 		goto again;
7303 	}
7304 
7305 	return true;
7306 }
7307 
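/* Example (illustrative): given
 *
 *	struct inner { int x; };
 *	struct outer { struct inner in; };
 *
 * a non-strict btf_struct_ids_match() on 'struct outer' at off == 0
 * against a needed type of 'struct inner' succeeds, because the walk
 * descends into the embedded struct; with strict == true it fails,
 * since the top-level types differ.
 */
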
7308 static int __get_type_size(struct btf *btf, u32 btf_id,
7309 			   const struct btf_type **ret_type)
7310 {
7311 	const struct btf_type *t;
7312 
7313 	*ret_type = btf_type_by_id(btf, 0);
7314 	if (!btf_id)
7315 		/* void */
7316 		return 0;
7317 	t = btf_type_by_id(btf, btf_id);
7318 	while (t && btf_type_is_modifier(t))
7319 		t = btf_type_by_id(btf, t->type);
7320 	if (!t)
7321 		return -EINVAL;
7322 	*ret_type = t;
7323 	if (btf_type_is_ptr(t))
7324 		/* kernel size of a pointer, not BPF's size of a pointer */
7325 		return sizeof(void *);
7326 	if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
7327 		return t->size;
7328 	return -EINVAL;
7329 }
7330 
7331 static u8 __get_type_fmodel_flags(const struct btf_type *t)
7332 {
7333 	u8 flags = 0;
7334 
7335 	if (__btf_type_is_struct(t))
7336 		flags |= BTF_FMODEL_STRUCT_ARG;
7337 	if (btf_type_is_signed_int(t))
7338 		flags |= BTF_FMODEL_SIGNED_ARG;
7339 
7340 	return flags;
7341 }
7342 
7343 int btf_distill_func_proto(struct bpf_verifier_log *log,
7344 			   struct btf *btf,
7345 			   const struct btf_type *func,
7346 			   const char *tname,
7347 			   struct btf_func_model *m)
7348 {
7349 	const struct btf_param *args;
7350 	const struct btf_type *t;
7351 	u32 i, nargs;
7352 	int ret;
7353 
7354 	if (!func) {
7355 		/* BTF function prototype doesn't match the verifier types.
7356 		 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
7357 		 */
7358 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7359 			m->arg_size[i] = 8;
7360 			m->arg_flags[i] = 0;
7361 		}
7362 		m->ret_size = 8;
7363 		m->ret_flags = 0;
7364 		m->nr_args = MAX_BPF_FUNC_REG_ARGS;
7365 		return 0;
7366 	}
7367 	args = (const struct btf_param *)(func + 1);
7368 	nargs = btf_type_vlen(func);
7369 	if (nargs > MAX_BPF_FUNC_ARGS) {
7370 		bpf_log(log,
7371 			"The function %s has %d arguments. Too many.\n",
7372 			tname, nargs);
7373 		return -EINVAL;
7374 	}
7375 	ret = __get_type_size(btf, func->type, &t);
7376 	if (ret < 0 || __btf_type_is_struct(t)) {
7377 		bpf_log(log,
7378 			"The function %s return type %s is unsupported.\n",
7379 			tname, btf_type_str(t));
7380 		return -EINVAL;
7381 	}
7382 	m->ret_size = ret;
7383 	m->ret_flags = __get_type_fmodel_flags(t);
7384 
7385 	for (i = 0; i < nargs; i++) {
7386 		if (i == nargs - 1 && args[i].type == 0) {
7387 			bpf_log(log,
7388 				"The function %s with variable args is unsupported.\n",
7389 				tname);
7390 			return -EINVAL;
7391 		}
7392 		ret = __get_type_size(btf, args[i].type, &t);
7393 
7394 		/* No support for struct arguments larger than 16 bytes */
7395 		if (ret < 0 || ret > 16) {
7396 			bpf_log(log,
7397 				"The function %s arg%d type %s is unsupported.\n",
7398 				tname, i, btf_type_str(t));
7399 			return -EINVAL;
7400 		}
7401 		if (ret == 0) {
7402 			bpf_log(log,
7403 				"The function %s has malformed void argument.\n",
7404 				tname);
7405 			return -EINVAL;
7406 		}
7407 		m->arg_size[i] = ret;
7408 		m->arg_flags[i] = __get_type_fmodel_flags(t);
7409 	}
7410 	m->nr_args = nargs;
7411 	return 0;
7412 }
7413 
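/* Example (illustrative): for a kernel function like
 *
 *	int bpf_fentry_test2(int a, u64 b);
 *
 * the distilled model is roughly:
 *
 *	m->ret_size = 4;			// sizeof(int)
 *	m->ret_flags = BTF_FMODEL_SIGNED_ARG;	// 'int' is signed
 *	m->nr_args = 2;
 *	m->arg_size[0] = 4; m->arg_size[1] = 8;
 *	m->arg_flags[0] = BTF_FMODEL_SIGNED_ARG;
 *
 * which arch-specific trampoline code consumes to marshal arguments.
 */
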
7414 /* Compare BTFs of two functions assuming only scalars and pointers to context.
7415  * t1 points to BTF_KIND_FUNC in btf1
7416  * t2 points to BTF_KIND_FUNC in btf2
7417  * Returns:
7418  * EINVAL - function prototype mismatch
7419  * EFAULT - verifier bug
7420  * 0 - 99% match. The last 1% is validated by the verifier.
7421  */
7422 static int btf_check_func_type_match(struct bpf_verifier_log *log,
7423 				     struct btf *btf1, const struct btf_type *t1,
7424 				     struct btf *btf2, const struct btf_type *t2)
7425 {
7426 	const struct btf_param *args1, *args2;
7427 	const char *fn1, *fn2, *s1, *s2;
7428 	u32 nargs1, nargs2, i;
7429 
7430 	fn1 = btf_name_by_offset(btf1, t1->name_off);
7431 	fn2 = btf_name_by_offset(btf2, t2->name_off);
7432 
7433 	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
7434 		bpf_log(log, "%s() is not a global function\n", fn1);
7435 		return -EINVAL;
7436 	}
7437 	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
7438 		bpf_log(log, "%s() is not a global function\n", fn2);
7439 		return -EINVAL;
7440 	}
7441 
7442 	t1 = btf_type_by_id(btf1, t1->type);
7443 	if (!t1 || !btf_type_is_func_proto(t1))
7444 		return -EFAULT;
7445 	t2 = btf_type_by_id(btf2, t2->type);
7446 	if (!t2 || !btf_type_is_func_proto(t2))
7447 		return -EFAULT;
7448 
7449 	args1 = (const struct btf_param *)(t1 + 1);
7450 	nargs1 = btf_type_vlen(t1);
7451 	args2 = (const struct btf_param *)(t2 + 1);
7452 	nargs2 = btf_type_vlen(t2);
7453 
7454 	if (nargs1 != nargs2) {
7455 		bpf_log(log, "%s() has %d args while %s() has %d args\n",
7456 			fn1, nargs1, fn2, nargs2);
7457 		return -EINVAL;
7458 	}
7459 
7460 	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7461 	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7462 	if (t1->info != t2->info) {
7463 		bpf_log(log,
7464 			"Return type %s of %s() doesn't match type %s of %s()\n",
7465 			btf_type_str(t1), fn1,
7466 			btf_type_str(t2), fn2);
7467 		return -EINVAL;
7468 	}
7469 
7470 	for (i = 0; i < nargs1; i++) {
7471 		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
7472 		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
7473 
7474 		if (t1->info != t2->info) {
7475 			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
7476 				i, fn1, btf_type_str(t1),
7477 				fn2, btf_type_str(t2));
7478 			return -EINVAL;
7479 		}
7480 		if (btf_type_has_size(t1) && t1->size != t2->size) {
7481 			bpf_log(log,
7482 				"arg%d in %s() has size %d while %s() has %d\n",
7483 				i, fn1, t1->size,
7484 				fn2, t2->size);
7485 			return -EINVAL;
7486 		}
7487 
7488 		/* Global functions are validated with scalars and pointers
7489 		 * to context only, and only global functions can be replaced.
7490 		 * Hence type-check only those types.
7491 		 */
7492 		if (btf_type_is_int(t1) || btf_is_any_enum(t1))
7493 			continue;
7494 		if (!btf_type_is_ptr(t1)) {
7495 			bpf_log(log,
7496 				"arg%d in %s() has unrecognized type\n",
7497 				i, fn1);
7498 			return -EINVAL;
7499 		}
7500 		t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7501 		t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7502 		if (!btf_type_is_struct(t1)) {
7503 			bpf_log(log,
7504 				"arg%d in %s() is not a pointer to context\n",
7505 				i, fn1);
7506 			return -EINVAL;
7507 		}
7508 		if (!btf_type_is_struct(t2)) {
7509 			bpf_log(log,
7510 				"arg%d in %s() is not a pointer to context\n",
7511 				i, fn2);
7512 			return -EINVAL;
7513 		}
7514 		/* This is an optional check to make program writing easier.
7515 		 * Compare names of structs and report an error to the user.
7516 		 * btf_prepare_func_args() already checked that t2 struct
7517 		 * is a context type. btf_prepare_func_args() will check
7518 		 * later that t1 struct is a context type as well.
7519 		 */
7520 		s1 = btf_name_by_offset(btf1, t1->name_off);
7521 		s2 = btf_name_by_offset(btf2, t2->name_off);
7522 		if (strcmp(s1, s2)) {
7523 			bpf_log(log,
7524 				"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
7525 				i, fn1, s1, fn2, s2);
7526 			return -EINVAL;
7527 		}
7528 	}
7529 	return 0;
7530 }
7531 
7532 /* Compare BTF of the given program with BTF of the target program */
7533 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
7534 			 struct btf *btf2, const struct btf_type *t2)
7535 {
7536 	struct btf *btf1 = prog->aux->btf;
7537 	const struct btf_type *t1;
7538 	u32 btf_id = 0;
7539 
7540 	if (!prog->aux->func_info) {
7541 		bpf_log(log, "Program extension requires BTF\n");
7542 		return -EINVAL;
7543 	}
7544 
7545 	btf_id = prog->aux->func_info[0].type_id;
7546 	if (!btf_id)
7547 		return -EFAULT;
7548 
7549 	t1 = btf_type_by_id(btf1, btf_id);
7550 	if (!t1 || !btf_type_is_func(t1))
7551 		return -EFAULT;
7552 
7553 	return btf_check_func_type_match(log, btf1, t1, btf2, t2);
7554 }
7555 
7556 static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
7557 {
7558 	const char *name;
7559 
7560 	t = btf_type_by_id(btf, t->type); /* skip PTR */
7561 
7562 	while (btf_type_is_modifier(t))
7563 		t = btf_type_by_id(btf, t->type);
7564 
7565 	/* allow either struct or struct forward declaration */
7566 	if (btf_type_is_struct(t) ||
7567 	    (btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) {
7568 		name = btf_str_by_offset(btf, t->name_off);
7569 		return name && strcmp(name, "bpf_dynptr") == 0;
7570 	}
7571 
7572 	return false;
7573 }
7574 
7575 struct bpf_cand_cache {
7576 	const char *name;
7577 	u32 name_len;
7578 	u16 kind;
7579 	u16 cnt;
7580 	struct {
7581 		const struct btf *btf;
7582 		u32 id;
7583 	} cands[];
7584 };
7585 
7586 static DEFINE_MUTEX(cand_cache_mutex);
7587 
7588 static struct bpf_cand_cache *
7589 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);
7590 
7591 static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
7592 				 const struct btf *btf, const struct btf_type *t)
7593 {
7594 	struct bpf_cand_cache *cc;
7595 	struct bpf_core_ctx ctx = {
7596 		.btf = btf,
7597 		.log = log,
7598 	};
7599 	u32 kern_type_id, type_id;
7600 	int err = 0;
7601 
7602 	/* skip PTR and modifiers */
7603 	type_id = t->type;
7604 	t = btf_type_by_id(btf, t->type);
7605 	while (btf_type_is_modifier(t)) {
7606 		type_id = t->type;
7607 		t = btf_type_by_id(btf, t->type);
7608 	}
7609 
7610 	mutex_lock(&cand_cache_mutex);
7611 	cc = bpf_core_find_cands(&ctx, type_id);
7612 	if (IS_ERR(cc)) {
7613 		err = PTR_ERR(cc);
7614 		bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
7615 			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7616 			err);
7617 		goto cand_cache_unlock;
7618 	}
7619 	if (cc->cnt != 1) {
7620 		bpf_log(log, "arg#%d reference type('%s %s') %s\n",
7621 			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7622 			cc->cnt == 0 ? "has no matches" : "is ambiguous");
7623 		err = cc->cnt == 0 ? -ENOENT : -ESRCH;
7624 		goto cand_cache_unlock;
7625 	}
7626 	if (btf_is_module(cc->cands[0].btf)) {
7627 		bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
7628 			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7629 		err = -EOPNOTSUPP;
7630 		goto cand_cache_unlock;
7631 	}
7632 	kern_type_id = cc->cands[0].id;
7633 
7634 cand_cache_unlock:
7635 	mutex_unlock(&cand_cache_mutex);
7636 	if (err)
7637 		return err;
7638 
7639 	return kern_type_id;
7640 }
7641 
7642 enum btf_arg_tag {
7643 	ARG_TAG_CTX	 = BIT_ULL(0),
7644 	ARG_TAG_NONNULL  = BIT_ULL(1),
7645 	ARG_TAG_TRUSTED  = BIT_ULL(2),
7646 	ARG_TAG_NULLABLE = BIT_ULL(3),
7647 	ARG_TAG_ARENA	 = BIT_ULL(4),
7648 };
7649 
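/* These tags come from 'arg:<tag>' BTF decl_tags placed on the arguments
 * of a global subprog.  In BPF C they are typically attached via macros
 * such as libbpf's __arg_ctx from bpf_helpers.h, e.g. (illustrative):
 *
 *	__noinline int subprog(struct __sk_buff *skb __arg_ctx)
 *	{
 *		return skb->len;
 *	}
 *
 * which expands to __attribute__((btf_decl_tag("arg:ctx"))) on 'skb'.
 */
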
7650 /* Process BTF of a function to produce high-level expectations of function
7651  * arguments (like ARG_PTR_TO_CTX or ARG_PTR_TO_MEM, etc). This information
7652  * is cached in subprog info for reuse.
7653  * Returns:
7654  * EFAULT - there is a verifier bug. Abort verification.
7655  * EINVAL - cannot convert BTF.
7656  * 0 - Successfully processed BTF and constructed argument expectations.
7657  */
7658 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
7659 {
7660 	bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
7661 	struct bpf_subprog_info *sub = subprog_info(env, subprog);
7662 	struct bpf_verifier_log *log = &env->log;
7663 	struct bpf_prog *prog = env->prog;
7664 	enum bpf_prog_type prog_type = prog->type;
7665 	struct btf *btf = prog->aux->btf;
7666 	const struct btf_param *args;
7667 	const struct btf_type *t, *ref_t, *fn_t;
7668 	u32 i, nargs, btf_id;
7669 	const char *tname;
7670 
7671 	if (sub->args_cached)
7672 		return 0;
7673 
7674 	if (!prog->aux->func_info) {
7675 		verifier_bug(env, "func_info undefined");
7676 		return -EFAULT;
7677 	}
7678 
7679 	btf_id = prog->aux->func_info[subprog].type_id;
7680 	if (!btf_id) {
7681 		if (!is_global) /* not fatal for static funcs */
7682 			return -EINVAL;
7683 		bpf_log(log, "Global functions need valid BTF\n");
7684 		return -EFAULT;
7685 	}
7686 
7687 	fn_t = btf_type_by_id(btf, btf_id);
7688 	if (!fn_t || !btf_type_is_func(fn_t)) {
7689 		/* These checks were already done by the verifier while loading
7690 		 * struct bpf_func_info
7691 		 */
7692 		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
7693 			subprog);
7694 		return -EFAULT;
7695 	}
7696 	tname = btf_name_by_offset(btf, fn_t->name_off);
7697 
7698 	if (prog->aux->func_info_aux[subprog].unreliable) {
7699 		verifier_bug(env, "unreliable BTF for function %s()", tname);
7700 		return -EFAULT;
7701 	}
7702 	if (prog_type == BPF_PROG_TYPE_EXT)
7703 		prog_type = prog->aux->dst_prog->type;
7704 
7705 	t = btf_type_by_id(btf, fn_t->type);
7706 	if (!t || !btf_type_is_func_proto(t)) {
7707 		bpf_log(log, "Invalid type of function %s()\n", tname);
7708 		return -EFAULT;
7709 	}
7710 	args = (const struct btf_param *)(t + 1);
7711 	nargs = btf_type_vlen(t);
7712 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
7713 		if (!is_global)
7714 			return -EINVAL;
7715 		bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
7716 			tname, nargs, MAX_BPF_FUNC_REG_ARGS);
7717 		return -EINVAL;
7718 	}
7719 	/* check that function returns int, exception cb also requires this */
7720 	t = btf_type_by_id(btf, t->type);
7721 	while (btf_type_is_modifier(t))
7722 		t = btf_type_by_id(btf, t->type);
7723 	if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
7724 		if (!is_global)
7725 			return -EINVAL;
7726 		bpf_log(log,
7727 			"Global function %s() doesn't return scalar. Only those are supported.\n",
7728 			tname);
7729 		return -EINVAL;
7730 	}
7731 	/* Convert BTF function arguments into verifier types.
7732 	 * Only PTR_TO_CTX and SCALAR are supported atm.
7733 	 */
7734 	for (i = 0; i < nargs; i++) {
7735 		u32 tags = 0;
7736 		int id = 0;
7737 
7738 		/* 'arg:<tag>' decl_tag takes precedence over derivation of
7739 		 * register type from BTF type itself
7740 		 */
7741 		while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
7742 			const struct btf_type *tag_t = btf_type_by_id(btf, id);
7743 			const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7744 
7745 			/* disallow arg tags in static subprogs */
7746 			if (!is_global) {
7747 				bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
7748 				return -EOPNOTSUPP;
7749 			}
7750 
7751 			if (strcmp(tag, "ctx") == 0) {
7752 				tags |= ARG_TAG_CTX;
7753 			} else if (strcmp(tag, "trusted") == 0) {
7754 				tags |= ARG_TAG_TRUSTED;
7755 			} else if (strcmp(tag, "nonnull") == 0) {
7756 				tags |= ARG_TAG_NONNULL;
7757 			} else if (strcmp(tag, "nullable") == 0) {
7758 				tags |= ARG_TAG_NULLABLE;
7759 			} else if (strcmp(tag, "arena") == 0) {
7760 				tags |= ARG_TAG_ARENA;
7761 			} else {
7762 				bpf_log(log, "arg#%d has unsupported set of tags\n", i);
7763 				return -EOPNOTSUPP;
7764 			}
7765 		}
7766 		if (id != -ENOENT) {
7767 			bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
7768 			return id;
7769 		}
7770 
7771 		t = btf_type_by_id(btf, args[i].type);
7772 		while (btf_type_is_modifier(t))
7773 			t = btf_type_by_id(btf, t->type);
7774 		if (!btf_type_is_ptr(t))
7775 			goto skip_pointer;
7776 
7777 		if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
7778 			if (tags & ~ARG_TAG_CTX) {
7779 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7780 				return -EINVAL;
7781 			}
7782 			if ((tags & ARG_TAG_CTX) &&
7783 			    btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
7784 						       prog->expected_attach_type))
7785 				return -EINVAL;
7786 			sub->args[i].arg_type = ARG_PTR_TO_CTX;
7787 			continue;
7788 		}
7789 		if (btf_is_dynptr_ptr(btf, t)) {
7790 			if (tags) {
7791 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7792 				return -EINVAL;
7793 			}
7794 			sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
7795 			continue;
7796 		}
7797 		if (tags & ARG_TAG_TRUSTED) {
7798 			int kern_type_id;
7799 
7800 			if (tags & ARG_TAG_NONNULL) {
7801 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7802 				return -EINVAL;
7803 			}
7804 
7805 			kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
7806 			if (kern_type_id < 0)
7807 				return kern_type_id;
7808 
7809 			sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
7810 			if (tags & ARG_TAG_NULLABLE)
7811 				sub->args[i].arg_type |= PTR_MAYBE_NULL;
7812 			sub->args[i].btf_id = kern_type_id;
7813 			continue;
7814 		}
7815 		if (tags & ARG_TAG_ARENA) {
7816 			if (tags & ~ARG_TAG_ARENA) {
7817 				bpf_log(log, "arg#%d arena cannot be combined with any other tags\n", i);
7818 				return -EINVAL;
7819 			}
7820 			sub->args[i].arg_type = ARG_PTR_TO_ARENA;
7821 			continue;
7822 		}
7823 		if (is_global) { /* generic user data pointer */
7824 			u32 mem_size;
7825 
7826 			if (tags & ARG_TAG_NULLABLE) {
7827 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7828 				return -EINVAL;
7829 			}
7830 
7831 			t = btf_type_skip_modifiers(btf, t->type, NULL);
7832 			ref_t = btf_resolve_size(btf, t, &mem_size);
7833 			if (IS_ERR(ref_t)) {
7834 				bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
7835 					i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7836 					PTR_ERR(ref_t));
7837 				return -EINVAL;
7838 			}
7839 
7840 			sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
7841 			if (tags & ARG_TAG_NONNULL)
7842 				sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
7843 			sub->args[i].mem_size = mem_size;
7844 			continue;
7845 		}
7846 
7847 skip_pointer:
7848 		if (tags) {
7849 			bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
7850 			return -EINVAL;
7851 		}
7852 		if (btf_type_is_int(t) || btf_is_any_enum(t)) {
7853 			sub->args[i].arg_type = ARG_ANYTHING;
7854 			continue;
7855 		}
7856 		if (!is_global)
7857 			return -EINVAL;
7858 		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
7859 			i, btf_type_str(t), tname);
7860 		return -EINVAL;
7861 	}
7862 
7863 	sub->arg_cnt = nargs;
7864 	sub->args_cached = true;
7865 
7866 	return 0;
7867 }
7868 
7869 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
7870 			  struct btf_show *show)
7871 {
7872 	const struct btf_type *t = btf_type_by_id(btf, type_id);
7873 
7874 	show->btf = btf;
7875 	memset(&show->state, 0, sizeof(show->state));
7876 	memset(&show->obj, 0, sizeof(show->obj));
7877 
7878 	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
7879 }
7880 
7881 __printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
7882 					va_list args)
7883 {
7884 	seq_vprintf((struct seq_file *)show->target, fmt, args);
7885 }
7886 
7887 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
7888 			    void *obj, struct seq_file *m, u64 flags)
7889 {
7890 	struct btf_show sseq;
7891 
7892 	sseq.target = m;
7893 	sseq.showfn = btf_seq_show;
7894 	sseq.flags = flags;
7895 
7896 	btf_type_show(btf, type_id, obj, &sseq);
7897 
7898 	return sseq.state.status;
7899 }
7900 
7901 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
7902 		       struct seq_file *m)
7903 {
7904 	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
7905 				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
7906 				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
7907 }
7908 
7909 struct btf_show_snprintf {
7910 	struct btf_show show;
7911 	int len_left;		/* space left in string */
7912 	int len;		/* length we would have written */
7913 };
7914 
7915 __printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
7916 					     va_list args)
7917 {
7918 	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
7919 	int len;
7920 
7921 	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
7922 
7923 	if (len < 0) {
7924 		ssnprintf->len_left = 0;
7925 		ssnprintf->len = len;
7926 	} else if (len >= ssnprintf->len_left) {
7927 		/* no space, drive on to get length we would have written */
7928 		ssnprintf->len_left = 0;
7929 		ssnprintf->len += len;
7930 	} else {
7931 		ssnprintf->len_left -= len;
7932 		ssnprintf->len += len;
7933 		show->target += len;
7934 	}
7935 }
7936 
7937 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
7938 			   char *buf, int len, u64 flags)
7939 {
7940 	struct btf_show_snprintf ssnprintf;
7941 
7942 	ssnprintf.show.target = buf;
7943 	ssnprintf.show.flags = flags;
7944 	ssnprintf.show.showfn = btf_snprintf_show;
7945 	ssnprintf.len_left = len;
7946 	ssnprintf.len = 0;
7947 
7948 	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
7949 
7950 	/* If we encountered an error, return it. */
7951 	if (ssnprintf.show.state.status)
7952 		return ssnprintf.show.state.status;
7953 
7954 	/* Otherwise return length we would have written */
7955 	return ssnprintf.len;
7956 }
7957 
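/* Example (illustrative): like snprintf(), truncation reports the length
 * that would have been written, so a caller can size the buffer and retry:
 *
 *	len = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf), 0);
 *	if (len < 0)
 *		return len;	// show error
 *	if (len >= sizeof(buf))
 *		// truncated: a buffer of len + 1 bytes was required
 */
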
7958 #ifdef CONFIG_PROC_FS
7959 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
7960 {
7961 	const struct btf *btf = filp->private_data;
7962 
7963 	seq_printf(m, "btf_id:\t%u\n", btf->id);
7964 }
7965 #endif
7966 
7967 static int btf_release(struct inode *inode, struct file *filp)
7968 {
7969 	btf_put(filp->private_data);
7970 	return 0;
7971 }
7972 
7973 const struct file_operations btf_fops = {
7974 #ifdef CONFIG_PROC_FS
7975 	.show_fdinfo	= bpf_btf_show_fdinfo,
7976 #endif
7977 	.release	= btf_release,
7978 };
7979 
7980 static int __btf_new_fd(struct btf *btf)
7981 {
7982 	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
7983 }
7984 
7985 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
7986 {
7987 	struct btf *btf;
7988 	int ret;
7989 
7990 	btf = btf_parse(attr, uattr, uattr_size);
7991 	if (IS_ERR(btf))
7992 		return PTR_ERR(btf);
7993 
7994 	ret = btf_alloc_id(btf);
7995 	if (ret) {
7996 		btf_free(btf);
7997 		return ret;
7998 	}
7999 
8000 	/*
8001 	 * The BTF ID is published to userspace.
8002 	 * All BTF frees must go through call_rcu() from
8003 	 * now on (i.e. free by calling btf_put()).
8004 	 */
8005 
8006 	ret = __btf_new_fd(btf);
8007 	if (ret < 0)
8008 		btf_put(btf);
8009 
8010 	return ret;
8011 }
8012 
8013 struct btf *btf_get_by_fd(int fd)
8014 {
8015 	struct btf *btf;
8016 	CLASS(fd, f)(fd);
8017 
8018 	btf = __btf_get_by_fd(f);
8019 	if (!IS_ERR(btf))
8020 		refcount_inc(&btf->refcnt);
8021 
8022 	return btf;
8023 }
8024 
8025 int btf_get_info_by_fd(const struct btf *btf,
8026 		       const union bpf_attr *attr,
8027 		       union bpf_attr __user *uattr)
8028 {
8029 	struct bpf_btf_info __user *uinfo;
8030 	struct bpf_btf_info info;
8031 	u32 info_copy, btf_copy;
8032 	void __user *ubtf;
8033 	char __user *uname;
8034 	u32 uinfo_len, uname_len, name_len;
8035 	int ret = 0;
8036 
8037 	uinfo = u64_to_user_ptr(attr->info.info);
8038 	uinfo_len = attr->info.info_len;
8039 
8040 	info_copy = min_t(u32, uinfo_len, sizeof(info));
8041 	memset(&info, 0, sizeof(info));
8042 	if (copy_from_user(&info, uinfo, info_copy))
8043 		return -EFAULT;
8044 
8045 	info.id = btf->id;
8046 	ubtf = u64_to_user_ptr(info.btf);
8047 	btf_copy = min_t(u32, btf->data_size, info.btf_size);
8048 	if (copy_to_user(ubtf, btf->data, btf_copy))
8049 		return -EFAULT;
8050 	info.btf_size = btf->data_size;
8051 
8052 	info.kernel_btf = btf->kernel_btf;
8053 
8054 	uname = u64_to_user_ptr(info.name);
8055 	uname_len = info.name_len;
8056 	if (!uname ^ !uname_len)
8057 		return -EINVAL;
8058 
8059 	name_len = strlen(btf->name);
8060 	info.name_len = name_len;
8061 
8062 	if (uname) {
8063 		if (uname_len >= name_len + 1) {
8064 			if (copy_to_user(uname, btf->name, name_len + 1))
8065 				return -EFAULT;
8066 		} else {
8067 			char zero = '\0';
8068 
8069 			if (copy_to_user(uname, btf->name, uname_len - 1))
8070 				return -EFAULT;
8071 			if (put_user(zero, uname + uname_len - 1))
8072 				return -EFAULT;
8073 			/* let user-space know the buffer was too short */
8074 			ret = -ENOSPC;
8075 		}
8076 	}
8077 
8078 	if (copy_to_user(uinfo, &info, info_copy) ||
8079 	    put_user(info_copy, &uattr->info.info_len))
8080 		return -EFAULT;
8081 
8082 	return ret;
8083 }
8084 
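/* Example (illustrative): userspace can probe the BTF name length with a
 * deliberately small buffer; on -ENOSPC the kernel has still filled in
 * info.name_len, so the caller can allocate name_len + 1 bytes and retry
 * (ptr_to_u64() is a hypothetical helper):
 *
 *	struct bpf_btf_info info = {};
 *	__u32 len = sizeof(info);
 *	char name[1];
 *
 *	info.name = ptr_to_u64(name);
 *	info.name_len = sizeof(name);
 *	err = bpf_obj_get_info_by_fd(btf_fd, &info, &len);  // libbpf wrapper
 */
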
8085 int btf_get_fd_by_id(u32 id)
8086 {
8087 	struct btf *btf;
8088 	int fd;
8089 
8090 	rcu_read_lock();
8091 	btf = idr_find(&btf_idr, id);
8092 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
8093 		btf = ERR_PTR(-ENOENT);
8094 	rcu_read_unlock();
8095 
8096 	if (IS_ERR(btf))
8097 		return PTR_ERR(btf);
8098 
8099 	fd = __btf_new_fd(btf);
8100 	if (fd < 0)
8101 		btf_put(btf);
8102 
8103 	return fd;
8104 }
8105 
8106 u32 btf_obj_id(const struct btf *btf)
8107 {
8108 	return btf->id;
8109 }
8110 
8111 bool btf_is_kernel(const struct btf *btf)
8112 {
8113 	return btf->kernel_btf;
8114 }
8115 
8116 bool btf_is_module(const struct btf *btf)
8117 {
8118 	return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
8119 }
8120 
8121 enum {
8122 	BTF_MODULE_F_LIVE = (1 << 0),
8123 };
8124 
8125 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8126 struct btf_module {
8127 	struct list_head list;
8128 	struct module *module;
8129 	struct btf *btf;
8130 	struct bin_attribute *sysfs_attr;
8131 	int flags;
8132 };
8133 
8134 static LIST_HEAD(btf_modules);
8135 static DEFINE_MUTEX(btf_module_mutex);
8136 
8137 static void purge_cand_cache(struct btf *btf);
8138 
8139 static int btf_module_notify(struct notifier_block *nb, unsigned long op,
8140 			     void *module)
8141 {
8142 	struct btf_module *btf_mod, *tmp;
8143 	struct module *mod = module;
8144 	struct btf *btf;
8145 	int err = 0;
8146 
8147 	if (mod->btf_data_size == 0 ||
8148 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
8149 	     op != MODULE_STATE_GOING))
8150 		goto out;
8151 
8152 	switch (op) {
8153 	case MODULE_STATE_COMING:
8154 		btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
8155 		if (!btf_mod) {
8156 			err = -ENOMEM;
8157 			goto out;
8158 		}
8159 		btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
8160 				       mod->btf_base_data, mod->btf_base_data_size);
8161 		if (IS_ERR(btf)) {
8162 			kfree(btf_mod);
8163 			if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
8164 				pr_warn("failed to validate module [%s] BTF: %ld\n",
8165 					mod->name, PTR_ERR(btf));
8166 				err = PTR_ERR(btf);
8167 			} else {
8168 				pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
8169 			}
8170 			goto out;
8171 		}
8172 		err = btf_alloc_id(btf);
8173 		if (err) {
8174 			btf_free(btf);
8175 			kfree(btf_mod);
8176 			goto out;
8177 		}
8178 
8179 		purge_cand_cache(NULL);
8180 		mutex_lock(&btf_module_mutex);
8181 		btf_mod->module = module;
8182 		btf_mod->btf = btf;
8183 		list_add(&btf_mod->list, &btf_modules);
8184 		mutex_unlock(&btf_module_mutex);
8185 
8186 		if (IS_ENABLED(CONFIG_SYSFS)) {
8187 			struct bin_attribute *attr;
8188 
8189 			attr = kzalloc(sizeof(*attr), GFP_KERNEL);
8190 			if (!attr)
8191 				goto out;
8192 
8193 			sysfs_bin_attr_init(attr);
8194 			attr->attr.name = btf->name;
8195 			attr->attr.mode = 0444;
8196 			attr->size = btf->data_size;
8197 			attr->private = btf->data;
8198 			attr->read_new = sysfs_bin_attr_simple_read;
8199 
8200 			err = sysfs_create_bin_file(btf_kobj, attr);
8201 			if (err) {
8202 				pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
8203 					mod->name, err);
8204 				kfree(attr);
8205 				err = 0;
8206 				goto out;
8207 			}
8208 
8209 			btf_mod->sysfs_attr = attr;
8210 		}
8211 
8212 		break;
8213 	case MODULE_STATE_LIVE:
8214 		mutex_lock(&btf_module_mutex);
8215 		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8216 			if (btf_mod->module != module)
8217 				continue;
8218 
8219 			btf_mod->flags |= BTF_MODULE_F_LIVE;
8220 			break;
8221 		}
8222 		mutex_unlock(&btf_module_mutex);
8223 		break;
8224 	case MODULE_STATE_GOING:
8225 		mutex_lock(&btf_module_mutex);
8226 		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8227 			if (btf_mod->module != module)
8228 				continue;
8229 
8230 			list_del(&btf_mod->list);
8231 			if (btf_mod->sysfs_attr)
8232 				sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
8233 			purge_cand_cache(btf_mod->btf);
8234 			btf_put(btf_mod->btf);
8235 			kfree(btf_mod->sysfs_attr);
8236 			kfree(btf_mod);
8237 			break;
8238 		}
8239 		mutex_unlock(&btf_module_mutex);
8240 		break;
8241 	}
8242 out:
8243 	return notifier_from_errno(err);
8244 }
8245 
8246 static struct notifier_block btf_module_nb = {
8247 	.notifier_call = btf_module_notify,
8248 };
8249 
8250 static int __init btf_module_init(void)
8251 {
8252 	register_module_notifier(&btf_module_nb);
8253 	return 0;
8254 }
8255 
8256 fs_initcall(btf_module_init);
8257 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
8258 
8259 struct module *btf_try_get_module(const struct btf *btf)
8260 {
8261 	struct module *res = NULL;
8262 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8263 	struct btf_module *btf_mod, *tmp;
8264 
8265 	mutex_lock(&btf_module_mutex);
8266 	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8267 		if (btf_mod->btf != btf)
8268 			continue;
8269 
8270 		/* We must only consider a module whose __init routine has
8271 		 * finished, hence we must check for the BTF_MODULE_F_LIVE flag,
8272 		 * which is set from the notifier callback for
8273 		 * MODULE_STATE_LIVE.
8274 		 */
8275 		if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
8276 			res = btf_mod->module;
8277 
8278 		break;
8279 	}
8280 	mutex_unlock(&btf_module_mutex);
8281 #endif
8282 
8283 	return res;
8284 }
8285 
8286 /* Returns the struct btf corresponding to the given struct module.
8287  * This function can return NULL or ERR_PTR.
8288  */
8289 static struct btf *btf_get_module_btf(const struct module *module)
8290 {
8291 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8292 	struct btf_module *btf_mod, *tmp;
8293 #endif
8294 	struct btf *btf = NULL;
8295 
8296 	if (!module) {
8297 		btf = bpf_get_btf_vmlinux();
8298 		if (!IS_ERR_OR_NULL(btf))
8299 			btf_get(btf);
8300 		return btf;
8301 	}
8302 
8303 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8304 	mutex_lock(&btf_module_mutex);
8305 	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8306 		if (btf_mod->module != module)
8307 			continue;
8308 
8309 		btf_get(btf_mod->btf);
8310 		btf = btf_mod->btf;
8311 		break;
8312 	}
8313 	mutex_unlock(&btf_module_mutex);
8314 #endif
8315 
8316 	return btf;
8317 }
8318 
8319 static int check_btf_kconfigs(const struct module *module, const char *feature)
8320 {
8321 	if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
8322 		pr_err("missing vmlinux BTF, cannot register %s\n", feature);
8323 		return -ENOENT;
8324 	}
8325 	if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
8326 		pr_warn("missing module BTF, cannot register %s\n", feature);
8327 	return 0;
8328 }
8329 
8330 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
8331 {
8332 	struct btf *btf = NULL;
8333 	int btf_obj_fd = 0;
8334 	long ret;
8335 
8336 	if (flags)
8337 		return -EINVAL;
8338 
8339 	if (name_sz <= 1 || name[name_sz - 1])
8340 		return -EINVAL;
8341 
8342 	ret = bpf_find_btf_id(name, kind, &btf);
8343 	if (ret > 0 && btf_is_module(btf)) {
8344 		btf_obj_fd = __btf_new_fd(btf);
8345 		if (btf_obj_fd < 0) {
8346 			btf_put(btf);
8347 			return btf_obj_fd;
8348 		}
8349 		return ret | (((u64)btf_obj_fd) << 32);
8350 	}
8351 	if (ret > 0)
8352 		btf_put(btf);
8353 	return ret;
8354 }
8355 
8356 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
8357 	.func		= bpf_btf_find_by_name_kind,
8358 	.gpl_only	= false,
8359 	.ret_type	= RET_INTEGER,
8360 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
8361 	.arg2_type	= ARG_CONST_SIZE,
8362 	.arg3_type	= ARG_ANYTHING,
8363 	.arg4_type	= ARG_ANYTHING,
8364 };
8365 
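/* Example (illustrative): a BPF program splits the 64-bit result into the
 * BTF type id (lower 32 bits) and, for module BTF, an FD referring to that
 * module's BTF object (upper 32 bits):
 *
 *	char name[] = "task_struct";
 *	long ret = bpf_btf_find_by_name_kind(name, sizeof(name),
 *					     BTF_KIND_STRUCT, 0);
 *	if (ret > 0) {
 *		__u32 btf_id = (__u32)ret;
 *		int btf_obj_fd = ret >> 32;	// 0 when found in vmlinux BTF
 *	}
 */
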
8366 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
8367 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
8368 BTF_TRACING_TYPE_xxx
8369 #undef BTF_TRACING_TYPE
8370 
8371 /* Validate well-formedness of iter argument type.
8372  * On success, return the positive BTF ID of the iter state's STRUCT type.
8373  * On error, return a negative error code.
8374  */
8375 int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
8376 {
8377 	const struct btf_param *arg;
8378 	const struct btf_type *t;
8379 	const char *name;
8380 	int btf_id;
8381 
8382 	if (btf_type_vlen(func) <= arg_idx)
8383 		return -EINVAL;
8384 
8385 	arg = &btf_params(func)[arg_idx];
8386 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8387 	if (!t || !btf_type_is_ptr(t))
8388 		return -EINVAL;
8389 	t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8390 	if (!t || !__btf_type_is_struct(t))
8391 		return -EINVAL;
8392 
8393 	name = btf_name_by_offset(btf, t->name_off);
8394 	if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
8395 		return -EINVAL;
8396 
8397 	return btf_id;
8398 }
8399 
8400 static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
8401 				 const struct btf_type *func, u32 func_flags)
8402 {
8403 	u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
8404 	const char *sfx, *iter_name;
8405 	const struct btf_type *t;
8406 	char exp_name[128];
8407 	u32 nr_args;
8408 	int btf_id;
8409 
8410 	/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
8411 	if (!flags || (flags & (flags - 1)))
8412 		return -EINVAL;
8413 
8414 	/* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
8415 	nr_args = btf_type_vlen(func);
8416 	if (nr_args < 1)
8417 		return -EINVAL;
8418 
8419 	btf_id = btf_check_iter_arg(btf, func, 0);
8420 	if (btf_id < 0)
8421 		return btf_id;
8422 
8423 	/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
8424 	 * fit nicely in stack slots
8425 	 */
8426 	t = btf_type_by_id(btf, btf_id);
8427 	if (t->size == 0 || (t->size % 8))
8428 		return -EINVAL;
8429 
8430 	/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
8431 	 * naming pattern
8432 	 */
8433 	iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8434 	if (flags & KF_ITER_NEW)
8435 		sfx = "new";
8436 	else if (flags & KF_ITER_NEXT)
8437 		sfx = "next";
8438 	else /* (flags & KF_ITER_DESTROY) */
8439 		sfx = "destroy";
8440 
8441 	snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
8442 	if (strcmp(func_name, exp_name))
8443 		return -EINVAL;
8444 
8445 	/* only iter constructor should have extra arguments */
8446 	if (!(flags & KF_ITER_NEW) && nr_args != 1)
8447 		return -EINVAL;
8448 
8449 	if (flags & KF_ITER_NEXT) {
8450 		/* bpf_iter_<type>_next() should return pointer */
8451 		t = btf_type_skip_modifiers(btf, func->type, NULL);
8452 		if (!t || !btf_type_is_ptr(t))
8453 			return -EINVAL;
8454 	}
8455 
8456 	if (flags & KF_ITER_DESTROY) {
8457 		/* bpf_iter_<type>_destroy() should return void */
8458 		t = btf_type_by_id(btf, func->type);
8459 		if (!t || !btf_type_is_void(t))
8460 			return -EINVAL;
8461 	}
8462 
8463 	return 0;
8464 }
8465 
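/* Example: these constraints describe kfunc triples like the numbers
 * iterator, where only the constructor takes extra arguments:
 *
 *	int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end);
 *	int *bpf_iter_num_next(struct bpf_iter_num *it);
 *	void bpf_iter_num_destroy(struct bpf_iter_num *it);
 */
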
8466 static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
8467 {
8468 	const struct btf_type *func;
8469 	const char *func_name;
8470 	int err;
8471 
8472 	/* any kfunc should be FUNC -> FUNC_PROTO */
8473 	func = btf_type_by_id(btf, func_id);
8474 	if (!func || !btf_type_is_func(func))
8475 		return -EINVAL;
8476 
8477 	/* sanity check kfunc name */
8478 	func_name = btf_name_by_offset(btf, func->name_off);
8479 	if (!func_name || !func_name[0])
8480 		return -EINVAL;
8481 
8482 	func = btf_type_by_id(btf, func->type);
8483 	if (!func || !btf_type_is_func_proto(func))
8484 		return -EINVAL;
8485 
8486 	if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
8487 		err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
8488 		if (err)
8489 			return err;
8490 	}
8491 
8492 	return 0;
8493 }
8494 
8495 /* Kernel Function (kfunc) BTF ID set registration API */
8496 
8497 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
8498 				  const struct btf_kfunc_id_set *kset)
8499 {
8500 	struct btf_kfunc_hook_filter *hook_filter;
8501 	struct btf_id_set8 *add_set = kset->set;
8502 	bool vmlinux_set = !btf_is_module(btf);
8503 	bool add_filter = !!kset->filter;
8504 	struct btf_kfunc_set_tab *tab;
8505 	struct btf_id_set8 *set;
8506 	u32 set_cnt, i;
8507 	int ret;
8508 
8509 	if (hook >= BTF_KFUNC_HOOK_MAX) {
8510 		ret = -EINVAL;
8511 		goto end;
8512 	}
8513 
8514 	if (!add_set->cnt)
8515 		return 0;
8516 
8517 	tab = btf->kfunc_set_tab;
8518 
8519 	if (tab && add_filter) {
8520 		u32 i;
8521 
8522 		hook_filter = &tab->hook_filters[hook];
8523 		for (i = 0; i < hook_filter->nr_filters; i++) {
8524 			if (hook_filter->filters[i] == kset->filter) {
8525 				add_filter = false;
8526 				break;
8527 			}
8528 		}
8529 
8530 		if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
8531 			ret = -E2BIG;
8532 			goto end;
8533 		}
8534 	}
8535 
8536 	if (!tab) {
8537 		tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
8538 		if (!tab)
8539 			return -ENOMEM;
8540 		btf->kfunc_set_tab = tab;
8541 	}
8542 
8543 	set = tab->sets[hook];
8544 	/* Warn when register_btf_kfunc_id_set is called twice for the same hook
8545 	/* Warn when register_btf_kfunc_id_set() is called twice for the same
8546 	 * hook with a module set.
8547 	if (WARN_ON_ONCE(set && !vmlinux_set)) {
8548 		ret = -EINVAL;
8549 		goto end;
8550 	}
8551 
8552 	/* In case of vmlinux sets, there may be more than one set being
8553 	 * registered per hook. To create a unified set, we allocate a new set
8554 	 * and concatenate all individual sets being registered. While each set
8555 	 * is individually sorted, they may become unsorted when concatenated,
8556 	 * hence the final set must be re-sorted so that binary searching it
8557 	 * with the btf_id_set8_contains() function keeps working.
8558 	 *
8559 	 * For module sets, we need to allocate as we may need to relocate
8560 	 * BTF ids.
8561 	 */
8562 	set_cnt = set ? set->cnt : 0;
8563 
8564 	if (set_cnt > U32_MAX - add_set->cnt) {
8565 		ret = -EOVERFLOW;
8566 		goto end;
8567 	}
8568 
8569 	if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
8570 		ret = -E2BIG;
8571 		goto end;
8572 	}
8573 
8574 	/* Grow set */
8575 	set = krealloc(tab->sets[hook],
8576 		       struct_size(set, pairs, set_cnt + add_set->cnt),
8577 		       GFP_KERNEL | __GFP_NOWARN);
8578 	if (!set) {
8579 		ret = -ENOMEM;
8580 		goto end;
8581 	}
8582 
8583 	/* For newly allocated set, initialize set->cnt to 0 */
8584 	if (!tab->sets[hook])
8585 		set->cnt = 0;
8586 	tab->sets[hook] = set;
8587 
8588 	/* Concatenate the two sets */
8589 	memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
8590 	/* Now that the set is copied, update with relocated BTF ids */
8591 	for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
8592 		set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8593 
8594 	set->cnt += add_set->cnt;
8595 
8596 	sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8597 
8598 	if (add_filter) {
8599 		hook_filter = &tab->hook_filters[hook];
8600 		hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
8601 	}
8602 	return 0;
8603 end:
8604 	btf_free_kfunc_set_tab(btf);
8605 	return ret;
8606 }
8607 
8608 static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
8609 					enum btf_kfunc_hook hook,
8610 					u32 kfunc_btf_id,
8611 					const struct bpf_prog *prog)
8612 {
8613 	struct btf_kfunc_hook_filter *hook_filter;
8614 	struct btf_id_set8 *set;
8615 	u32 *id, i;
8616 
8617 	if (hook >= BTF_KFUNC_HOOK_MAX)
8618 		return NULL;
8619 	if (!btf->kfunc_set_tab)
8620 		return NULL;
8621 	hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
8622 	for (i = 0; i < hook_filter->nr_filters; i++) {
8623 		if (hook_filter->filters[i](prog, kfunc_btf_id))
8624 			return NULL;
8625 	}
8626 	set = btf->kfunc_set_tab->sets[hook];
8627 	if (!set)
8628 		return NULL;
8629 	id = btf_id_set8_contains(set, kfunc_btf_id);
8630 	if (!id)
8631 		return NULL;
8632 	/* The flags for BTF ID are located next to it */
8633 	return id + 1;
8634 }
8635 
8636 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
8637 {
8638 	switch (prog_type) {
8639 	case BPF_PROG_TYPE_UNSPEC:
8640 		return BTF_KFUNC_HOOK_COMMON;
8641 	case BPF_PROG_TYPE_XDP:
8642 		return BTF_KFUNC_HOOK_XDP;
8643 	case BPF_PROG_TYPE_SCHED_CLS:
8644 		return BTF_KFUNC_HOOK_TC;
8645 	case BPF_PROG_TYPE_STRUCT_OPS:
8646 		return BTF_KFUNC_HOOK_STRUCT_OPS;
8647 	case BPF_PROG_TYPE_TRACING:
8648 	case BPF_PROG_TYPE_TRACEPOINT:
8649 	case BPF_PROG_TYPE_PERF_EVENT:
8650 	case BPF_PROG_TYPE_LSM:
8651 		return BTF_KFUNC_HOOK_TRACING;
8652 	case BPF_PROG_TYPE_SYSCALL:
8653 		return BTF_KFUNC_HOOK_SYSCALL;
8654 	case BPF_PROG_TYPE_CGROUP_SKB:
8655 	case BPF_PROG_TYPE_CGROUP_SOCK:
8656 	case BPF_PROG_TYPE_CGROUP_DEVICE:
8657 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
8658 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
8659 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
8660 	case BPF_PROG_TYPE_SOCK_OPS:
8661 		return BTF_KFUNC_HOOK_CGROUP;
8662 	case BPF_PROG_TYPE_SCHED_ACT:
8663 		return BTF_KFUNC_HOOK_SCHED_ACT;
8664 	case BPF_PROG_TYPE_SK_SKB:
8665 		return BTF_KFUNC_HOOK_SK_SKB;
8666 	case BPF_PROG_TYPE_SOCKET_FILTER:
8667 		return BTF_KFUNC_HOOK_SOCKET_FILTER;
8668 	case BPF_PROG_TYPE_LWT_OUT:
8669 	case BPF_PROG_TYPE_LWT_IN:
8670 	case BPF_PROG_TYPE_LWT_XMIT:
8671 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
8672 		return BTF_KFUNC_HOOK_LWT;
8673 	case BPF_PROG_TYPE_NETFILTER:
8674 		return BTF_KFUNC_HOOK_NETFILTER;
8675 	case BPF_PROG_TYPE_KPROBE:
8676 		return BTF_KFUNC_HOOK_KPROBE;
8677 	default:
8678 		return BTF_KFUNC_HOOK_MAX;
8679 	}
8680 }
8681 
8682 /* Caution:
8683  * Reference to the module (obtained using btf_try_get_module) corresponding to
8684  * the struct btf *MUST* be held when calling this function from verifier
8685  * context. This is usually true as we stash references in prog's kfunc_btf_tab;
8686  * keeping the reference for the duration of the call provides the necessary
8687  * protection for looking up a well-formed btf->kfunc_set_tab.
8688  */
8689 u32 *btf_kfunc_id_set_contains(const struct btf *btf,
8690 			       u32 kfunc_btf_id,
8691 			       const struct bpf_prog *prog)
8692 {
8693 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
8694 	enum btf_kfunc_hook hook;
8695 	u32 *kfunc_flags;
8696 
8697 	kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
8698 	if (kfunc_flags)
8699 		return kfunc_flags;
8700 
8701 	hook = bpf_prog_type_to_kfunc_hook(prog_type);
8702 	return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog);
8703 }
8704 
8705 u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
8706 				const struct bpf_prog *prog)
8707 {
8708 	return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
8709 }
8710 
8711 static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
8712 				       const struct btf_kfunc_id_set *kset)
8713 {
8714 	struct btf *btf;
8715 	int ret, i;
8716 
8717 	btf = btf_get_module_btf(kset->owner);
8718 	if (!btf)
8719 		return check_btf_kconfigs(kset->owner, "kfunc");
8720 	if (IS_ERR(btf))
8721 		return PTR_ERR(btf);
8722 
8723 	for (i = 0; i < kset->set->cnt; i++) {
8724 		ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8725 					     kset->set->pairs[i].flags);
8726 		if (ret)
8727 			goto err_out;
8728 	}
8729 
8730 	ret = btf_populate_kfunc_set(btf, hook, kset);
8731 
8732 err_out:
8733 	btf_put(btf);
8734 	return ret;
8735 }
8736 
8737 /* This function must be invoked only from initcalls/module init functions */
8738 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
8739 			      const struct btf_kfunc_id_set *kset)
8740 {
8741 	enum btf_kfunc_hook hook;
8742 
8743 	/* All kfuncs need to be tagged as such in BTF.
8744 	 * WARN() for initcall registrations that do not check errors.
8745 	 */
8746 	if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
8747 		WARN_ON(!kset->owner);
8748 		return -EINVAL;
8749 	}
8750 
8751 	hook = bpf_prog_type_to_kfunc_hook(prog_type);
8752 	return __register_btf_kfunc_id_set(hook, kset);
8753 }
8754 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
8755 
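/* Example (illustrative sketch): a typical registration from an initcall
 * or module init, using the BTF_KFUNCS_* macros from btf_ids.h
 * ('bpf_my_kfunc' is a placeholder):
 *
 *	BTF_KFUNCS_START(my_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_my_kfunc, KF_TRUSTED_ARGS)
 *	BTF_KFUNCS_END(my_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &my_kfunc_ids,
 *	};
 *
 *	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set);
 *
 * The BTF_KFUNCS_START/END pair sets the BTF_SET8_KFUNCS flag that this
 * function checks for.
 */
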
8756 /* This function must be invoked only from initcalls/module init functions */
8757 int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
8758 {
8759 	return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
8760 }
8761 EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set);
8762 
8763 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
8764 {
8765 	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
8766 	struct btf_id_dtor_kfunc *dtor;
8767 
8768 	if (!tab)
8769 		return -ENOENT;
8770 	/* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
8771 	 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
8772 	 */
8773 	BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
8774 	dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8775 	if (!dtor)
8776 		return -ENOENT;
8777 	return dtor->kfunc_btf_id;
8778 }
8779 
8780 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
8781 {
8782 	const struct btf_type *dtor_func, *dtor_func_proto, *t;
8783 	const struct btf_param *args;
8784 	s32 dtor_btf_id;
8785 	u32 nr_args, i;
8786 
8787 	for (i = 0; i < cnt; i++) {
8788 		dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id);
8789 
8790 		dtor_func = btf_type_by_id(btf, dtor_btf_id);
8791 		if (!dtor_func || !btf_type_is_func(dtor_func))
8792 			return -EINVAL;
8793 
8794 		dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
8795 		if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
8796 			return -EINVAL;
8797 
8798 		/* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
8799 		t = btf_type_by_id(btf, dtor_func_proto->type);
8800 		if (!t || !btf_type_is_void(t))
8801 			return -EINVAL;
8802 
8803 		nr_args = btf_type_vlen(dtor_func_proto);
8804 		if (nr_args != 1)
8805 			return -EINVAL;
8806 		args = btf_params(dtor_func_proto);
8807 		t = btf_type_by_id(btf, args[0].type);
8808 		/* Allow any pointer type, as width on targets Linux supports
8809 		 * will be the same for all pointer types (i.e. sizeof(void *))
8810 		 */
8811 		if (!t || !btf_type_is_ptr(t))
8812 			return -EINVAL;
8813 	}
8814 	return 0;
8815 }
8816 
8817 /* This function must be invoked only from initcalls/module init functions */
8818 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
8819 				struct module *owner)
8820 {
8821 	struct btf_id_dtor_kfunc_tab *tab;
8822 	struct btf *btf;
8823 	u32 tab_cnt, i;
8824 	int ret;
8825 
8826 	btf = btf_get_module_btf(owner);
8827 	if (!btf)
8828 		return check_btf_kconfigs(owner, "dtor kfuncs");
8829 	if (IS_ERR(btf))
8830 		return PTR_ERR(btf);
8831 
8832 	if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8833 		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8834 		ret = -E2BIG;
8835 		goto end;
8836 	}
8837 
8838 	/* Ensure that the prototype of dtor kfuncs being registered is sane */
8839 	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
8840 	if (ret < 0)
8841 		goto end;
8842 
8843 	tab = btf->dtor_kfunc_tab;
8844 	/* Only one call allowed for modules */
8845 	if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
8846 		ret = -EINVAL;
8847 		goto end;
8848 	}
8849 
8850 	tab_cnt = tab ? tab->cnt : 0;
8851 	if (tab_cnt > U32_MAX - add_cnt) {
8852 		ret = -EOVERFLOW;
8853 		goto end;
8854 	}
8855 	if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8856 		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8857 		ret = -E2BIG;
8858 		goto end;
8859 	}
8860 
8861 	tab = krealloc(btf->dtor_kfunc_tab,
8862 		       struct_size(tab, dtors, tab_cnt + add_cnt),
8863 		       GFP_KERNEL | __GFP_NOWARN);
8864 	if (!tab) {
8865 		ret = -ENOMEM;
8866 		goto end;
8867 	}
8868 
8869 	if (!btf->dtor_kfunc_tab)
8870 		tab->cnt = 0;
8871 	btf->dtor_kfunc_tab = tab;
8872 
8873 	memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
8874 
8875 	/* remap BTF ids based on BTF relocation (if any) */
8876 	for (i = tab_cnt; i < tab_cnt + add_cnt; i++) {
8877 		tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
8878 		tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
8879 	}
8880 
8881 	tab->cnt += add_cnt;
8882 
8883 	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
8884 
8885 end:
8886 	if (ret)
8887 		btf_free_dtor_kfunc_tab(btf);
8888 	btf_put(btf);
8889 	return ret;
8890 }
8891 EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
8892 
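/* Example (illustrative sketch): registering a destructor kfunc for a
 * type that can be stored as a referenced kptr in maps ('my_obj' and
 * 'my_obj_release' are placeholders):
 *
 *	BTF_ID_LIST(my_dtor_ids)
 *	BTF_ID(struct, my_obj)
 *	BTF_ID(func, my_obj_release)
 *
 *	const struct btf_id_dtor_kfunc my_dtors[] = {
 *		{ .btf_id = my_dtor_ids[0], .kfunc_btf_id = my_dtor_ids[1] },
 *	};
 *
 *	err = register_btf_id_dtor_kfuncs(my_dtors, ARRAY_SIZE(my_dtors),
 *					  THIS_MODULE);
 *
 * btf_check_dtor_kfuncs() above enforces the 'void func(type *)'
 * prototype for 'my_obj_release'.
 */
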
8893 #define MAX_TYPES_ARE_COMPAT_DEPTH 2
8894 
8895 /* Check local and target types for compatibility. This check is used for
8896  * type-based CO-RE relocations and follow slightly different rules than
8897  * type-based CO-RE relocations and follows slightly different rules than
8898  * checked for name match. Beyond that initial root-level name check, names
8899  * are completely ignored. Compatibility rules are as follows:
8900  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
8901  *     kind should match for local and target types (i.e., STRUCT is not
8902  *     compatible with UNION);
8903  *   - for ENUMs/ENUM64s, the size is ignored;
8904  *   - for INT, size and signedness are ignored;
8905  *   - for ARRAY, dimensionality is ignored, element types are checked for
8906  *     compatibility recursively;
8907  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
8908  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
8909  *   - FUNC_PROTOs are compatible if they have compatible signature: same
8910  *     number of input args and compatible return and argument types.
8911  * These rules are not set in stone and probably will be adjusted as we get
8912  * more experience with using BPF CO-RE relocations.
8913  */
8914 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
8915 			      const struct btf *targ_btf, __u32 targ_id)
8916 {
8917 	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
8918 					   MAX_TYPES_ARE_COMPAT_DEPTH);
8919 }
8920 
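/* Example: under the rules above, 'struct foo { int a; }' is compatible
 * with 'struct foo { long a; long b; }' (members are not compared at this
 * level), while a struct is never compatible with a union of the same
 * name (kind mismatch).
 */
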
8921 #define MAX_TYPES_MATCH_DEPTH 2
8922 
8923 int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
8924 			 const struct btf *targ_btf, u32 targ_id)
8925 {
8926 	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
8927 				      MAX_TYPES_MATCH_DEPTH);
8928 }
8929 
8930 static bool bpf_core_is_flavor_sep(const char *s)
8931 {
8932 	/* check X___Y name pattern, where X and Y are not underscores */
8933 	return s[0] != '_' &&				      /* X */
8934 	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
8935 	       s[4] != '_';				      /* Y */
8936 }
8937 
8938 size_t bpf_core_essential_name_len(const char *name)
8939 {
8940 	size_t n = strlen(name);
8941 	int i;
8942 
8943 	for (i = n - 5; i >= 0; i--) {
8944 		if (bpf_core_is_flavor_sep(name + i))
8945 			return i + 1;
8946 	}
8947 	return n;
8948 }
8949 
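/* Example: the essential part of a flavored name is everything before
 * the "___" flavor separator:
 *
 *	bpf_core_essential_name_len("task_struct") == 11
 *	bpf_core_essential_name_len("task_struct___v2") == 11
 */
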
8950 static void bpf_free_cands(struct bpf_cand_cache *cands)
8951 {
8952 	if (!cands->cnt)
8953 		/* empty candidate array was allocated on stack */
8954 		return;
8955 	kfree(cands);
8956 }
8957 
8958 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
8959 {
8960 	kfree(cands->name);
8961 	kfree(cands);
8962 }
8963 
8964 #define VMLINUX_CAND_CACHE_SIZE 31
8965 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
8966 
8967 #define MODULE_CAND_CACHE_SIZE 31
8968 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
8969 
8970 static void __print_cand_cache(struct bpf_verifier_log *log,
8971 			       struct bpf_cand_cache **cache,
8972 			       int cache_size)
8973 {
8974 	struct bpf_cand_cache *cc;
8975 	int i, j;
8976 
8977 	for (i = 0; i < cache_size; i++) {
8978 		cc = cache[i];
8979 		if (!cc)
8980 			continue;
8981 		bpf_log(log, "[%d]%s(", i, cc->name);
8982 		for (j = 0; j < cc->cnt; j++) {
8983 			bpf_log(log, "%d", cc->cands[j].id);
8984 			if (j < cc->cnt - 1)
8985 				bpf_log(log, " ");
8986 		}
8987 		bpf_log(log, "), ");
8988 	}
8989 }
8990 
8991 static void print_cand_cache(struct bpf_verifier_log *log)
8992 {
8993 	mutex_lock(&cand_cache_mutex);
8994 	bpf_log(log, "vmlinux_cand_cache:");
8995 	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8996 	bpf_log(log, "\nmodule_cand_cache:");
8997 	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8998 	bpf_log(log, "\n");
8999 	mutex_unlock(&cand_cache_mutex);
9000 }
9001 
9002 static u32 hash_cands(struct bpf_cand_cache *cands)
9003 {
9004 	return jhash(cands->name, cands->name_len, 0);
9005 }
9006 
9007 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
9008 					       struct bpf_cand_cache **cache,
9009 					       int cache_size)
9010 {
9011 	struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
9012 
9013 	if (cc && cc->name_len == cands->name_len &&
9014 	    !strncmp(cc->name, cands->name, cands->name_len))
9015 		return cc;
9016 	return NULL;
9017 }
9018 
9019 static size_t sizeof_cands(int cnt)
9020 {
9021 	return offsetof(struct bpf_cand_cache, cands[cnt]);
9022 }
9023 
9024 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
9025 						  struct bpf_cand_cache **cache,
9026 						  int cache_size)
9027 {
9028 	struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
9029 
9030 	if (*cc) {
9031 		bpf_free_cands_from_cache(*cc);
9032 		*cc = NULL;
9033 	}
9034 	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL_ACCOUNT);
9035 	if (!new_cands) {
9036 		bpf_free_cands(cands);
9037 		return ERR_PTR(-ENOMEM);
9038 	}
9039 	/* strdup the name, since it will stay in cache.
9040 	 * The cands->name points to strings in the prog's BTF and the prog can be unloaded.
9041 	 */
9042 	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL_ACCOUNT);
9043 	bpf_free_cands(cands);
9044 	if (!new_cands->name) {
9045 		kfree(new_cands);
9046 		return ERR_PTR(-ENOMEM);
9047 	}
9048 	*cc = new_cands;
9049 	return new_cands;
9050 }
9051 
9052 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
9053 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
9054 			       int cache_size)
9055 {
9056 	struct bpf_cand_cache *cc;
9057 	int i, j;
9058 
9059 	for (i = 0; i < cache_size; i++) {
9060 		cc = cache[i];
9061 		if (!cc)
9062 			continue;
9063 		if (!btf) {
9064 			/* when a new module is loaded, purge all of module_cand_cache,
9065 			 * since the new module might have candidates with names
9066 			 * that match cached cands.
9067 			 */
9068 			bpf_free_cands_from_cache(cc);
9069 			cache[i] = NULL;
9070 			continue;
9071 		}
9072 		/* when a module is unloaded, purge cache entries
9073 		 * that match the module's btf
9074 		 */
9075 		for (j = 0; j < cc->cnt; j++)
9076 			if (cc->cands[j].btf == btf) {
9077 				bpf_free_cands_from_cache(cc);
9078 				cache[i] = NULL;
9079 				break;
9080 			}
9081 	}
9083 }
9084 
9085 static void purge_cand_cache(struct btf *btf)
9086 {
9087 	mutex_lock(&cand_cache_mutex);
9088 	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9089 	mutex_unlock(&cand_cache_mutex);
9090 }
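
/*
 * Illustrative sketch of the expected call sites (assumed, not shown in
 * this excerpt): the module BTF notifier purges the whole module cache on
 * load and only matching entries on unload:
 *
 *	purge_cand_cache(NULL);		// module load: drop everything
 *	purge_cand_cache(mod_btf);	// module unload: drop entries
 *					// referencing this module's BTF
 */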
#endif

static struct bpf_cand_cache *
bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
		   int targ_start_id)
{
	struct bpf_cand_cache *new_cands;
	const struct btf_type *t;
	const char *targ_name;
	size_t targ_essent_len;
	int n, i;

	n = btf_nr_types(targ_btf);
	for (i = targ_start_id; i < n; i++) {
		t = btf_type_by_id(targ_btf, i);
		if (btf_kind(t) != cands->kind)
			continue;

		targ_name = btf_name_by_offset(targ_btf, t->name_off);
		if (!targ_name)
			continue;

		/* the resched point is placed before the strncmp to make
		 * sure that a search for a non-existent name still gets a
		 * chance to schedule().
		 */
		cond_resched();

		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != cands->name_len)
			continue;

		/* most of the time there is only one candidate for a given kind+name pair */
		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL_ACCOUNT);
		if (!new_cands) {
			bpf_free_cands(cands);
			return ERR_PTR(-ENOMEM);
		}

		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
		bpf_free_cands(cands);
		cands = new_cands;
		cands->cands[cands->cnt].btf = targ_btf;
		cands->cands[cands->cnt].id = i;
		cands->cnt++;
	}
	return cands;
}
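
/*
 * Example of the essential-name matching above (the CO-RE "flavor"
 * convention: a triple underscore and anything after it is ignored when
 * comparing names): a local type task_struct___2 has essential length 11,
 * so it matches a target named task_struct, but not task_structure, whose
 * essential length (14) differs.
 */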

static struct bpf_cand_cache *
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
{
	struct bpf_cand_cache *cands, *cc, local_cand = {};
	const struct btf *local_btf = ctx->btf;
	const struct btf_type *local_type;
	const struct btf *main_btf;
	size_t local_essent_len;
	struct btf *mod_btf;
	const char *name;
	int id;

	main_btf = bpf_get_btf_vmlinux();
	if (IS_ERR(main_btf))
		return ERR_CAST(main_btf);
	if (!main_btf)
		return ERR_PTR(-EINVAL);

	local_type = btf_type_by_id(local_btf, local_type_id);
	if (!local_type)
		return ERR_PTR(-EINVAL);

	name = btf_name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(name);

	cands = &local_cand;
	cands->name = name;
	cands->kind = btf_kind(local_type);
	cands->name_len = local_essent_len;

	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	/* cands is a pointer to the stack here */
	if (cc) {
		if (cc->cnt)
			return cc;
		goto check_modules;
	}

	/* Attempt to find target candidates in vmlinux BTF first */
	cands = bpf_core_add_cands(cands, main_btf, 1);
	if (IS_ERR(cands))
		return ERR_CAST(cands);

	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */

	/* populate the cache even when cands->cnt == 0 */
	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	if (IS_ERR(cc))
		return ERR_CAST(cc);

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cc->cnt)
		return cc;

check_modules:
	/* cands is a pointer to the stack here and cands->cnt == 0 */
	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	if (cc)
		/* if the cache has it, return it even if cc->cnt == 0 */
		return cc;

	/* If the candidate is not found in vmlinux's BTF, then search in the modules' BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, mod_btf, id) {
		if (!btf_is_module(mod_btf))
			continue;
		/* the linear search could be slow, hence unlock/relock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(mod_btf);
		spin_unlock_bh(&btf_idr_lock);
		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
		btf_put(mod_btf);
		if (IS_ERR(cands))
			return ERR_CAST(cands);
		spin_lock_bh(&btf_idr_lock);
	}
	spin_unlock_bh(&btf_idr_lock);
	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
	 * or a pointer to the stack if cands->cnt == 0.
	 * Copy it into the cache even when cands->cnt == 0 and
	 * return the result.
	 */
	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
}

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn)
{
	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
	struct bpf_core_cand_list cands = {};
	struct bpf_core_relo_res targ_res;
	struct bpf_core_spec *specs;
	const struct btf_type *type;
	int err;

	/* ~4k of temp memory is necessary to convert an LLVM spec like
	 * "0:1:0:5" into arrays of btf_ids of struct fields and array
	 * indices.
	 */
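	/* Illustrative decoding of such a spec (struct layout made up for
	 * the example): given
	 *
	 *	struct s { int a; struct { int arr[8]; int b; } inner; };
	 *
	 * the access s->inner.arr[5] is encoded as "0:1:0:5": deref the
	 * base pointer at index 0, take member #1 (inner), then member #0
	 * (arr), then array index 5.
	 */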
	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL_ACCOUNT);
	if (!specs)
		return -ENOMEM;

	type = btf_type_by_id(ctx->btf, relo->type_id);
	if (!type) {
		bpf_log(ctx->log, "relo #%u: bad type id %u\n",
			relo_idx, relo->type_id);
		kfree(specs);
		return -EINVAL;
	}

	if (need_cands) {
		struct bpf_cand_cache *cc;
		int i;

		mutex_lock(&cand_cache_mutex);
		cc = bpf_core_find_cands(ctx, relo->type_id);
		if (IS_ERR(cc)) {
			bpf_log(ctx->log, "target candidate search failed for %d\n",
				relo->type_id);
			err = PTR_ERR(cc);
			goto out;
		}
		if (cc->cnt) {
			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL_ACCOUNT);
			if (!cands.cands) {
				err = -ENOMEM;
				goto out;
			}
		}
		for (i = 0; i < cc->cnt; i++) {
			bpf_log(ctx->log,
				"CO-RE relocating %s %s: found target candidate [%d]\n",
				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
			cands.cands[i].btf = cc->cands[i].btf;
			cands.cands[i].id = cc->cands[i].id;
		}
		cands.len = cc->cnt;
		/* cand_cache_mutex needs to span the cache lookup and the
		 * copy of the btf pointer into bpf_core_cand_list, since a
		 * module can be unloaded while bpf_core_calc_relo_insn()
		 * is working with the module's btf.
		 */
	}

	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
				      &targ_res);
	if (err)
		goto out;

	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
				  &targ_res);

out:
	kfree(specs);
	if (need_cands) {
		kfree(cands.cands);
		mutex_unlock(&cand_cache_mutex);
		if (ctx->log->level & BPF_LOG_LEVEL2)
			print_cand_cache(ctx->log);
	}
	return err;
}

bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
				const struct bpf_reg_state *reg,
				const char *field_name, u32 btf_id, const char *suffix)
{
	struct btf *btf = reg->btf;
	const struct btf_type *walk_type, *safe_type;
	const char *tname;
	char safe_tname[64];
	long ret, safe_id;
	const struct btf_member *member;
	u32 i;

	walk_type = btf_type_by_id(btf, reg->btf_id);
	if (!walk_type)
		return false;

	tname = btf_name_by_offset(btf, walk_type->name_off);

	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
	if (ret >= sizeof(safe_tname))
		return false;

	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
	if (safe_id < 0)
		return false;

	safe_type = btf_type_by_id(btf, safe_id);
	if (!safe_type)
		return false;

	for_each_member(i, safe_type, member) {
		const char *m_name = __btf_name_by_offset(btf, member->name_off);
		const struct btf_type *mtype = btf_type_by_id(btf, member->type);
		u32 id;

		if (!btf_type_is_ptr(mtype))
			continue;

		btf_type_skip_modifiers(btf, mtype->type, &id);
		/* If we match on both type and name, the field is considered trusted. */
		if (btf_id == id && !strcmp(field_name, m_name))
			return true;
	}

	return false;
}
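
/*
 * Illustrative sketch (all names hypothetical, suffix strings assumed):
 * with suffix "__safe_rcu" and a register pointing to struct foo, the
 * code above looks up a companion type such as
 *
 *	struct foo__safe_rcu { struct bar *ptr; };
 *
 * and foo->ptr is then considered trusted because the member matches by
 * both name ("ptr") and pointee type id (struct bar).
 */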

bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
			       const struct btf *reg_btf, u32 reg_id,
			       const struct btf *arg_btf, u32 arg_id)
{
	const char *reg_name, *arg_name, *search_needle;
	const struct btf_type *reg_type, *arg_type;
	int reg_len, arg_len, cmp_len;
	size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);

	reg_type = btf_type_by_id(reg_btf, reg_id);
	if (!reg_type)
		return false;

	arg_type = btf_type_by_id(arg_btf, arg_id);
	if (!arg_type)
		return false;

	reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
	arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);

	reg_len = strlen(reg_name);
	arg_len = strlen(arg_name);

	/* Exactly one of the two type names may be suffixed with ___init,
	 * so if the strings are the same size, they can't possibly be
	 * no-cast aliases of one another. If the two type names are
	 * identical, e.g. they're both nf_conn___init, it would be
	 * improper to return true because they are _not_ no-cast aliases,
	 * they are the same type.
	 */
	if (reg_len == arg_len)
		return false;

	/* One of the two names must be the other name suffixed with ___init. */
	if ((reg_len != arg_len + pattern_len) &&
	    (arg_len != reg_len + pattern_len))
		return false;

	if (reg_len < arg_len) {
		search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
		cmp_len = reg_len;
	} else {
		search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
		cmp_len = arg_len;
	}

	if (!search_needle)
		return false;

	/* the ___init suffix must come at the end of the name */
	if (*(search_needle + pattern_len) != '\0')
		return false;

	return !strncmp(reg_name, arg_name, cmp_len);
}
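
/*
 * Worked example of the check above: reg_name == "nf_conn___init"
 * (reg_len 14) and arg_name == "nf_conn" (arg_len 7) with pattern_len 7
 * pass the length test (14 == 7 + 7), strstr() finds "___init" exactly
 * at the end, and the first 7 characters compare equal, so the two type
 * ids are no-cast aliases. "nf_conn" vs "nf_conntrack" (len 12) is
 * rejected by the length test alone.
 */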

#ifdef CONFIG_BPF_JIT
static int
btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
		   struct bpf_verifier_log *log)
{
	struct btf_struct_ops_tab *tab, *new_tab;
	int i, err;

	tab = btf->struct_ops_tab;
	if (!tab) {
		tab = kzalloc(struct_size(tab, ops, 4), GFP_KERNEL);
		if (!tab)
			return -ENOMEM;
		tab->capacity = 4;
		btf->struct_ops_tab = tab;
	}

	for (i = 0; i < tab->cnt; i++)
		if (tab->ops[i].st_ops == st_ops)
			return -EEXIST;

	if (tab->cnt == tab->capacity) {
		new_tab = krealloc(tab,
				   struct_size(tab, ops, tab->capacity * 2),
				   GFP_KERNEL);
		if (!new_tab)
			return -ENOMEM;
		tab = new_tab;
		tab->capacity *= 2;
		btf->struct_ops_tab = tab;
	}

	/* tab aliases btf->struct_ops_tab at this point */
	tab->ops[tab->cnt].st_ops = st_ops;

	err = bpf_struct_ops_desc_init(&tab->ops[tab->cnt], btf, log);
	if (err)
		return err;

	tab->cnt++;

	return 0;
}
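
/*
 * Illustrative growth pattern (a sketch of the logic above): the table
 * starts at capacity 4 and doubles on demand, so registering a 5th
 * struct_ops against the same btf triggers a krealloc() to capacity 8,
 * a 9th to capacity 16, and so on.
 */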

const struct bpf_struct_ops_desc *
bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
{
	const struct bpf_struct_ops_desc *st_ops_list;
	unsigned int i;
	u32 cnt;

	if (!value_id)
		return NULL;
	if (!btf->struct_ops_tab)
		return NULL;

	cnt = btf->struct_ops_tab->cnt;
	st_ops_list = btf->struct_ops_tab->ops;
	for (i = 0; i < cnt; i++) {
		if (st_ops_list[i].value_id == value_id)
			return &st_ops_list[i];
	}

	return NULL;
}

const struct bpf_struct_ops_desc *
bpf_struct_ops_find(struct btf *btf, u32 type_id)
{
	const struct bpf_struct_ops_desc *st_ops_list;
	unsigned int i;
	u32 cnt;

	if (!type_id)
		return NULL;
	if (!btf->struct_ops_tab)
		return NULL;

	cnt = btf->struct_ops_tab->cnt;
	st_ops_list = btf->struct_ops_tab->ops;
	for (i = 0; i < cnt; i++) {
		if (st_ops_list[i].type_id == type_id)
			return &st_ops_list[i];
	}

	return NULL;
}

int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
{
	struct bpf_verifier_log *log;
	struct btf *btf;
	int err = 0;

	btf = btf_get_module_btf(st_ops->owner);
	if (!btf)
		return check_btf_kconfigs(st_ops->owner, "struct_ops");
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
	if (!log) {
		err = -ENOMEM;
		goto errout;
	}

	log->level = BPF_LOG_KERNEL;

	err = btf_add_struct_ops(btf, st_ops, log);

errout:
	kfree(log);
	btf_put(btf);

	return err;
}
EXPORT_SYMBOL_GPL(__register_bpf_struct_ops);
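
/*
 * Typical registration from a module (a hedged sketch; the
 * register_bpf_struct_ops() wrapper macro and the ops layout are assumed,
 * not defined in this file):
 *
 *	static struct bpf_struct_ops bpf_my_ops = {
 *		.name	= "my_ops",
 *		.owner	= THIS_MODULE,
 *		...
 *	};
 *	err = register_bpf_struct_ops(&bpf_my_ops, my_ops);
 */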
#endif

bool btf_param_match_suffix(const struct btf *btf,
			    const struct btf_param *arg,
			    const char *suffix)
{
	int suffix_len = strlen(suffix), len;
	const char *param_name;

	/* In the future, this can be ported to use BTF tagging */
	param_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(param_name))
		return false;
	len = strlen(param_name);
	if (len <= suffix_len)
		return false;
	param_name += len - suffix_len;
	return !strncmp(param_name, suffix, suffix_len);
}
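
/*
 * Example of the suffix convention (suffix strings assumed, not defined
 * here): a kfunc parameter named "data__sz" matches suffix "__sz", while
 * a parameter named just "__sz" does not, because len <= suffix_len is
 * rejected above.
 */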