xref: /linux/kernel/bpf/btf.c (revision 005c54278b3dd38f6045a2450a8c988cc7d3def2)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/skmsg.h>
23 #include <linux/perf_event.h>
24 #include <linux/bsearch.h>
25 #include <linux/kobject.h>
26 #include <linux/sysfs.h>
27 #include <net/sock.h>
28 #include "../tools/lib/bpf/relo_core.h"
29 
30 /* BTF (BPF Type Format) is the meta data format which describes
31  * the data types of BPF program/map.  Hence, it basically focus
32  * on the C programming language which the modern BPF is primary
33  * using.
34  *
35  * ELF Section:
36  * ~~~~~~~~~~~
37  * The BTF data is stored under the ".BTF" ELF section
38  *
39  * struct btf_type:
40  * ~~~~~~~~~~~~~~~
41  * Each 'struct btf_type' object describes a C data type.
42  * Depending on the type it is describing, a 'struct btf_type'
43  * object may be followed by more data.  F.e.
44  * To describe an array, 'struct btf_type' is followed by
45  * 'struct btf_array'.
46  *
47  * 'struct btf_type' and any extra data following it are
48  * 4 bytes aligned.
49  *
50  * Type section:
51  * ~~~~~~~~~~~~~
52  * The BTF type section contains a list of 'struct btf_type' objects.
53  * Each one describes a C type.  Recall from the above section
54  * that a 'struct btf_type' object could be immediately followed by extra
55  * data in order to describe some particular C types.
56  *
57  * type_id:
58  * ~~~~~~~
59  * Each btf_type object is identified by a type_id.  The type_id
60  * is implicitly implied by the location of the btf_type object in
61  * the BTF type section.  The first one has type_id 1.  The second
62  * one has type_id 2...etc.  Hence, an earlier btf_type has
63  * a smaller type_id.
64  *
65  * A btf_type object may refer to another btf_type object by using
66  * type_id (i.e. the "type" in the "struct btf_type").
67  *
68  * NOTE that we cannot assume any reference-order.
69  * A btf_type object can refer to an earlier btf_type object
70  * but it can also refer to a later btf_type object.
71  *
72  * For example, to describe "const void *".  A btf_type
73  * object describing "const" may refer to another btf_type
74  * object describing "void *".  This type-reference is done
75  * by specifying type_id:
76  *
77  * [1] CONST (anon) type_id=2
78  * [2] PTR (anon) type_id=0
79  *
80  * The above is the btf_verifier debug log:
81  *   - Each line started with "[?]" is a btf_type object
82  *   - [?] is the type_id of the btf_type object.
83  *   - CONST/PTR is the BTF_KIND_XXX
84  *   - "(anon)" is the name of the type.  It just
85  *     happens that CONST and PTR has no name.
86  *   - type_id=XXX is the 'u32 type' in btf_type
87  *
88  * NOTE: "void" has type_id 0
89  *
90  * String section:
91  * ~~~~~~~~~~~~~~
92  * The BTF string section contains the names used by the type section.
93  * Each string is referred by an "offset" from the beginning of the
94  * string section.
95  *
96  * Each string is '\0' terminated.
97  *
98  * The first character in the string section must be '\0'
99  * which is used to mean 'anonymous'. Some btf_type may not
100  * have a name.
101  */
102 
103 /* BTF verification:
104  *
105  * To verify BTF data, two passes are needed.
106  *
107  * Pass #1
108  * ~~~~~~~
109  * The first pass is to collect all btf_type objects to
110  * an array: "btf->types".
111  *
112  * Depending on the C type that a btf_type is describing,
113  * a btf_type may be followed by extra data.  We don't know
114  * how many btf_type is there, and more importantly we don't
115  * know where each btf_type is located in the type section.
116  *
117  * Without knowing the location of each type_id, most verifications
118  * cannot be done.  e.g. an earlier btf_type may refer to a later
119  * btf_type (recall the "const void *" above), so we cannot
120  * check this type-reference in the first pass.
121  *
122  * In the first pass, it still does some verifications (e.g.
123  * checking the name is a valid offset to the string section).
124  *
125  * Pass #2
126  * ~~~~~~~
127  * The main focus is to resolve a btf_type that is referring
128  * to another type.
129  *
130  * We have to ensure the referring type:
131  * 1) does exist in the BTF (i.e. in btf->types[])
132  * 2) does not cause a loop:
133  *	struct A {
134  *		struct B b;
135  *	};
136  *
137  *	struct B {
138  *		struct A a;
139  *	};
140  *
141  * btf_type_needs_resolve() decides if a btf_type needs
142  * to be resolved.
143  *
144  * The needs_resolve type implements the "resolve()" ops which
145  * essentially does a DFS and detects backedge.
146  *
147  * During resolve (or DFS), different C types have different
148  * "RESOLVED" conditions.
149  *
150  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
151  * members because a member is always referring to another
152  * type.  A struct's member can be treated as "RESOLVED" if
153  * it is referring to a BTF_KIND_PTR.  Otherwise, the
154  * following valid C struct would be rejected:
155  *
156  *	struct A {
157  *		int m;
158  *		struct A *a;
159  *	};
160  *
161  * When resolving a BTF_KIND_PTR, it needs to keep resolving if
162  * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
163  * detect a pointer loop, e.g.:
164  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
165  *                        ^                                         |
166  *                        +-----------------------------------------+
167  *
168  */
169 
170 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
171 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
172 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
173 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
174 #define BITS_ROUNDUP_BYTES(bits) \
175 	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
176 
177 #define BTF_INFO_MASK 0x9f00ffff
178 #define BTF_INT_MASK 0x0fffffff
179 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
180 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
181 
182 /* 16MB for 64k structs and each has 16 members and
183  * a few MB spaces for the string section.
184  * The hard limit is S32_MAX.
185  */
186 #define BTF_MAX_SIZE (16 * 1024 * 1024)
187 
188 #define for_each_member_from(i, from, struct_type, member)		\
189 	for (i = from, member = btf_type_member(struct_type) + from;	\
190 	     i < btf_type_vlen(struct_type);				\
191 	     i++, member++)
192 
193 #define for_each_vsi_from(i, from, struct_type, member)				\
194 	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
195 	     i < btf_type_vlen(struct_type);					\
196 	     i++, member++)
197 
198 DEFINE_IDR(btf_idr);
199 DEFINE_SPINLOCK(btf_idr_lock);
200 
201 enum btf_kfunc_hook {
202 	BTF_KFUNC_HOOK_COMMON,
203 	BTF_KFUNC_HOOK_XDP,
204 	BTF_KFUNC_HOOK_TC,
205 	BTF_KFUNC_HOOK_STRUCT_OPS,
206 	BTF_KFUNC_HOOK_TRACING,
207 	BTF_KFUNC_HOOK_SYSCALL,
208 	BTF_KFUNC_HOOK_MAX,
209 };
210 
211 enum {
212 	BTF_KFUNC_SET_MAX_CNT = 256,
213 	BTF_DTOR_KFUNC_MAX_CNT = 256,
214 };
215 
216 struct btf_kfunc_set_tab {
217 	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
218 };
219 
220 struct btf_id_dtor_kfunc_tab {
221 	u32 cnt;
222 	struct btf_id_dtor_kfunc dtors[];
223 };
224 
225 struct btf {
226 	void *data;
227 	struct btf_type **types;
228 	u32 *resolved_ids;
229 	u32 *resolved_sizes;
230 	const char *strings;
231 	void *nohdr_data;
232 	struct btf_header hdr;
233 	u32 nr_types; /* includes VOID for base BTF */
234 	u32 types_size;
235 	u32 data_size;
236 	refcount_t refcnt;
237 	u32 id;
238 	struct rcu_head rcu;
239 	struct btf_kfunc_set_tab *kfunc_set_tab;
240 	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
241 	struct btf_struct_metas *struct_meta_tab;
242 
243 	/* split BTF support */
244 	struct btf *base_btf;
245 	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
246 	u32 start_str_off; /* first string offset (0 for base BTF) */
247 	char name[MODULE_NAME_LEN];
248 	bool kernel_btf;
249 };
250 
251 enum verifier_phase {
252 	CHECK_META,
253 	CHECK_TYPE,
254 };
255 
256 struct resolve_vertex {
257 	const struct btf_type *t;
258 	u32 type_id;
259 	u16 next_member;
260 };
261 
262 enum visit_state {
263 	NOT_VISITED,
264 	VISITED,
265 	RESOLVED,
266 };
267 
268 enum resolve_mode {
269 	RESOLVE_TBD,	/* To Be Determined */
270 	RESOLVE_PTR,	/* Resolving for Pointer */
271 	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
272 					 * or array
273 					 */
274 };
275 
276 #define MAX_RESOLVE_DEPTH 32
277 
278 struct btf_sec_info {
279 	u32 off;
280 	u32 len;
281 };
282 
283 struct btf_verifier_env {
284 	struct btf *btf;
285 	u8 *visit_states;
286 	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
287 	struct bpf_verifier_log log;
288 	u32 log_type_id;
289 	u32 top_stack;
290 	enum verifier_phase phase;
291 	enum resolve_mode resolve_mode;
292 };
293 
294 static const char * const btf_kind_str[NR_BTF_KINDS] = {
295 	[BTF_KIND_UNKN]		= "UNKNOWN",
296 	[BTF_KIND_INT]		= "INT",
297 	[BTF_KIND_PTR]		= "PTR",
298 	[BTF_KIND_ARRAY]	= "ARRAY",
299 	[BTF_KIND_STRUCT]	= "STRUCT",
300 	[BTF_KIND_UNION]	= "UNION",
301 	[BTF_KIND_ENUM]		= "ENUM",
302 	[BTF_KIND_FWD]		= "FWD",
303 	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
304 	[BTF_KIND_VOLATILE]	= "VOLATILE",
305 	[BTF_KIND_CONST]	= "CONST",
306 	[BTF_KIND_RESTRICT]	= "RESTRICT",
307 	[BTF_KIND_FUNC]		= "FUNC",
308 	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
309 	[BTF_KIND_VAR]		= "VAR",
310 	[BTF_KIND_DATASEC]	= "DATASEC",
311 	[BTF_KIND_FLOAT]	= "FLOAT",
312 	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
313 	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
314 	[BTF_KIND_ENUM64]	= "ENUM64",
315 };
316 
317 const char *btf_type_str(const struct btf_type *t)
318 {
319 	return btf_kind_str[BTF_INFO_KIND(t->info)];
320 }
321 
322 /* Chunk size we use in safe copy of data to be shown. */
323 #define BTF_SHOW_OBJ_SAFE_SIZE		32
324 
325 /*
326  * This is the maximum size of a base type value (equivalent to a
327  * 128-bit int); if we are at the end of our safe buffer and have
328  * less than 16 bytes space we can't be assured of being able
329  * to copy the next type safely, so in such cases we will initiate
330  * a new copy.
331  */
332 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE	16
333 
334 /* Type name size */
335 #define BTF_SHOW_NAME_SIZE		80
336 
337 /*
338  * Common data to all BTF show operations. Private show functions can add
339  * their own data to a structure containing a struct btf_show and consult it
340  * in the show callback.  See btf_type_show() below.
341  *
342  * One challenge with showing nested data is we want to skip 0-valued
343  * data, but in order to figure out whether a nested object is all zeros
344  * we need to walk through it.  As a result, we need to make two passes
345  * when handling structs, unions and arrays; the first path simply looks
346  * for nonzero data, while the second actually does the display.  The first
347  * pass is signalled by show->state.depth_check being set, and if we
348  * encounter a non-zero value we set show->state.depth_to_show to
349  * the depth at which we encountered it.  When we have completed the
350  * first pass, we will know if anything needs to be displayed if
351  * depth_to_show > depth.  See btf_[struct,array]_show() for the
352  * implementation of this.
353  *
354  * Another problem is we want to ensure the data for display is safe to
355  * access.  To support this, the anonymous "struct {} obj" tracks the data
356  * object and our safe copy of it.  We copy portions of the data needed
357  * to the object "copy" buffer, but because its size is limited to
358  * BTF_SHOW_OBJ_COPY_LEN bytes, multiple copies may be required as we
359  * traverse larger objects for display.
360  *
361  * The various data type show functions all start with a call to
362  * btf_show_start_type() which returns a pointer to the safe copy
363  * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
364  * raw data itself).  btf_show_obj_safe() is responsible for
365  * using copy_from_kernel_nofault() to update the safe data if necessary
366  * as we traverse the object's data.  skbuff-like semantics are
367  * used:
368  *
369  * - obj.head points to the start of the toplevel object for display
370  * - obj.size is the size of the toplevel object
371  * - obj.data points to the current point in the original data at
372  *   which our safe data starts.  obj.data will advance as we copy
373  *   portions of the data.
374  *
375  * In most cases a single copy will suffice, but larger data structures
376  * such as "struct task_struct" will require many copies.  The logic in
377  * btf_show_obj_safe() handles the logic that determines if a new
378  * copy_from_kernel_nofault() is needed.
379  */
380 struct btf_show {
381 	u64 flags;
382 	void *target;	/* target of show operation (seq file, buffer) */
383 	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
384 	const struct btf *btf;
385 	/* below are used during iteration */
386 	struct {
387 		u8 depth;
388 		u8 depth_to_show;
389 		u8 depth_check;
390 		u8 array_member:1,
391 		   array_terminated:1;
392 		u16 array_encoding;
393 		u32 type_id;
394 		int status;			/* non-zero for error */
395 		const struct btf_type *type;
396 		const struct btf_member *member;
397 		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
398 	} state;
399 	struct {
400 		u32 size;
401 		void *head;
402 		void *data;
403 		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
404 	} obj;
405 };
406 
407 struct btf_kind_operations {
408 	s32 (*check_meta)(struct btf_verifier_env *env,
409 			  const struct btf_type *t,
410 			  u32 meta_left);
411 	int (*resolve)(struct btf_verifier_env *env,
412 		       const struct resolve_vertex *v);
413 	int (*check_member)(struct btf_verifier_env *env,
414 			    const struct btf_type *struct_type,
415 			    const struct btf_member *member,
416 			    const struct btf_type *member_type);
417 	int (*check_kflag_member)(struct btf_verifier_env *env,
418 				  const struct btf_type *struct_type,
419 				  const struct btf_member *member,
420 				  const struct btf_type *member_type);
421 	void (*log_details)(struct btf_verifier_env *env,
422 			    const struct btf_type *t);
423 	void (*show)(const struct btf *btf, const struct btf_type *t,
424 			 u32 type_id, void *data, u8 bits_offsets,
425 			 struct btf_show *show);
426 };
427 
428 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
429 static struct btf_type btf_void;
430 
431 static int btf_resolve(struct btf_verifier_env *env,
432 		       const struct btf_type *t, u32 type_id);
433 
434 static int btf_func_check(struct btf_verifier_env *env,
435 			  const struct btf_type *t);
436 
437 static bool btf_type_is_modifier(const struct btf_type *t)
438 {
439 	/* Some of them is not strictly a C modifier
440 	 * but they are grouped into the same bucket
441 	 * for BTF concern:
442 	 *   A type (t) that refers to another
443 	 *   type through t->type AND its size cannot
444 	 *   be determined without following the t->type.
445 	 *
446 	 * ptr does not fall into this bucket
447 	 * because its size is always sizeof(void *).
448 	 */
449 	switch (BTF_INFO_KIND(t->info)) {
450 	case BTF_KIND_TYPEDEF:
451 	case BTF_KIND_VOLATILE:
452 	case BTF_KIND_CONST:
453 	case BTF_KIND_RESTRICT:
454 	case BTF_KIND_TYPE_TAG:
455 		return true;
456 	}
457 
458 	return false;
459 }
460 
461 bool btf_type_is_void(const struct btf_type *t)
462 {
463 	return t == &btf_void;
464 }
465 
466 static bool btf_type_is_fwd(const struct btf_type *t)
467 {
468 	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
469 }
470 
471 static bool btf_type_nosize(const struct btf_type *t)
472 {
473 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
474 	       btf_type_is_func(t) || btf_type_is_func_proto(t);
475 }
476 
477 static bool btf_type_nosize_or_null(const struct btf_type *t)
478 {
479 	return !t || btf_type_nosize(t);
480 }
481 
482 static bool btf_type_is_datasec(const struct btf_type *t)
483 {
484 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
485 }
486 
487 static bool btf_type_is_decl_tag(const struct btf_type *t)
488 {
489 	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
490 }
491 
492 static bool btf_type_is_decl_tag_target(const struct btf_type *t)
493 {
494 	return btf_type_is_func(t) || btf_type_is_struct(t) ||
495 	       btf_type_is_var(t) || btf_type_is_typedef(t);
496 }
497 
498 u32 btf_nr_types(const struct btf *btf)
499 {
500 	u32 total = 0;
501 
502 	while (btf) {
503 		total += btf->nr_types;
504 		btf = btf->base_btf;
505 	}
506 
507 	return total;
508 }
509 
510 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
511 {
512 	const struct btf_type *t;
513 	const char *tname;
514 	u32 i, total;
515 
516 	total = btf_nr_types(btf);
517 	for (i = 1; i < total; i++) {
518 		t = btf_type_by_id(btf, i);
519 		if (BTF_INFO_KIND(t->info) != kind)
520 			continue;
521 
522 		tname = btf_name_by_offset(btf, t->name_off);
523 		if (!strcmp(tname, name))
524 			return i;
525 	}
526 
527 	return -ENOENT;
528 }
529 
530 static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
531 {
532 	struct btf *btf;
533 	s32 ret;
534 	int id;
535 
536 	btf = bpf_get_btf_vmlinux();
537 	if (IS_ERR(btf))
538 		return PTR_ERR(btf);
539 	if (!btf)
540 		return -EINVAL;
541 
542 	ret = btf_find_by_name_kind(btf, name, kind);
543 	/* ret is never zero, since btf_find_by_name_kind returns
544 	 * positive btf_id or negative error.
545 	 */
546 	if (ret > 0) {
547 		btf_get(btf);
548 		*btf_p = btf;
549 		return ret;
550 	}
551 
552 	/* If name is not found in vmlinux's BTF then search in module's BTFs */
553 	spin_lock_bh(&btf_idr_lock);
554 	idr_for_each_entry(&btf_idr, btf, id) {
555 		if (!btf_is_module(btf))
556 			continue;
557 		/* linear search could be slow hence unlock/lock
558 		 * the IDR to avoiding holding it for too long
559 		 */
560 		btf_get(btf);
561 		spin_unlock_bh(&btf_idr_lock);
562 		ret = btf_find_by_name_kind(btf, name, kind);
563 		if (ret > 0) {
564 			*btf_p = btf;
565 			return ret;
566 		}
567 		spin_lock_bh(&btf_idr_lock);
568 		btf_put(btf);
569 	}
570 	spin_unlock_bh(&btf_idr_lock);
571 	return ret;
572 }
573 
574 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
575 					       u32 id, u32 *res_id)
576 {
577 	const struct btf_type *t = btf_type_by_id(btf, id);
578 
579 	while (btf_type_is_modifier(t)) {
580 		id = t->type;
581 		t = btf_type_by_id(btf, t->type);
582 	}
583 
584 	if (res_id)
585 		*res_id = id;
586 
587 	return t;
588 }
589 
590 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
591 					    u32 id, u32 *res_id)
592 {
593 	const struct btf_type *t;
594 
595 	t = btf_type_skip_modifiers(btf, id, NULL);
596 	if (!btf_type_is_ptr(t))
597 		return NULL;
598 
599 	return btf_type_skip_modifiers(btf, t->type, res_id);
600 }
601 
602 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
603 						 u32 id, u32 *res_id)
604 {
605 	const struct btf_type *ptype;
606 
607 	ptype = btf_type_resolve_ptr(btf, id, res_id);
608 	if (ptype && btf_type_is_func_proto(ptype))
609 		return ptype;
610 
611 	return NULL;
612 }
613 
614 /* Types that act only as a source, not sink or intermediate
615  * type when resolving.
616  */
617 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
618 {
619 	return btf_type_is_var(t) ||
620 	       btf_type_is_decl_tag(t) ||
621 	       btf_type_is_datasec(t);
622 }
623 
624 /* What types need to be resolved?
625  *
626  * btf_type_is_modifier() is an obvious one.
627  *
628  * btf_type_is_struct() because its member refers to
629  * another type (through member->type).
630  *
631  * btf_type_is_var() because the variable refers to
632  * another type. btf_type_is_datasec() holds multiple
633  * btf_type_is_var() types that need resolving.
634  *
635  * btf_type_is_array() because its element (array->type)
636  * refers to another type.  Array can be thought of a
637  * special case of struct while array just has the same
638  * member-type repeated by array->nelems of times.
639  */
640 static bool btf_type_needs_resolve(const struct btf_type *t)
641 {
642 	return btf_type_is_modifier(t) ||
643 	       btf_type_is_ptr(t) ||
644 	       btf_type_is_struct(t) ||
645 	       btf_type_is_array(t) ||
646 	       btf_type_is_var(t) ||
647 	       btf_type_is_func(t) ||
648 	       btf_type_is_decl_tag(t) ||
649 	       btf_type_is_datasec(t);
650 }
651 
652 /* t->size can be used */
653 static bool btf_type_has_size(const struct btf_type *t)
654 {
655 	switch (BTF_INFO_KIND(t->info)) {
656 	case BTF_KIND_INT:
657 	case BTF_KIND_STRUCT:
658 	case BTF_KIND_UNION:
659 	case BTF_KIND_ENUM:
660 	case BTF_KIND_DATASEC:
661 	case BTF_KIND_FLOAT:
662 	case BTF_KIND_ENUM64:
663 		return true;
664 	}
665 
666 	return false;
667 }
668 
669 static const char *btf_int_encoding_str(u8 encoding)
670 {
671 	if (encoding == 0)
672 		return "(none)";
673 	else if (encoding == BTF_INT_SIGNED)
674 		return "SIGNED";
675 	else if (encoding == BTF_INT_CHAR)
676 		return "CHAR";
677 	else if (encoding == BTF_INT_BOOL)
678 		return "BOOL";
679 	else
680 		return "UNKN";
681 }
682 
683 static u32 btf_type_int(const struct btf_type *t)
684 {
685 	return *(u32 *)(t + 1);
686 }
687 
688 static const struct btf_array *btf_type_array(const struct btf_type *t)
689 {
690 	return (const struct btf_array *)(t + 1);
691 }
692 
693 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
694 {
695 	return (const struct btf_enum *)(t + 1);
696 }
697 
698 static const struct btf_var *btf_type_var(const struct btf_type *t)
699 {
700 	return (const struct btf_var *)(t + 1);
701 }
702 
703 static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
704 {
705 	return (const struct btf_decl_tag *)(t + 1);
706 }
707 
708 static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
709 {
710 	return (const struct btf_enum64 *)(t + 1);
711 }
712 
713 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
714 {
715 	return kind_ops[BTF_INFO_KIND(t->info)];
716 }
717 
718 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
719 {
720 	if (!BTF_STR_OFFSET_VALID(offset))
721 		return false;
722 
723 	while (offset < btf->start_str_off)
724 		btf = btf->base_btf;
725 
726 	offset -= btf->start_str_off;
727 	return offset < btf->hdr.str_len;
728 }
729 
730 static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
731 {
732 	if ((first ? !isalpha(c) :
733 		     !isalnum(c)) &&
734 	    c != '_' &&
735 	    ((c == '.' && !dot_ok) ||
736 	      c != '.'))
737 		return false;
738 	return true;
739 }
740 
741 static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
742 {
743 	while (offset < btf->start_str_off)
744 		btf = btf->base_btf;
745 
746 	offset -= btf->start_str_off;
747 	if (offset < btf->hdr.str_len)
748 		return &btf->strings[offset];
749 
750 	return NULL;
751 }
752 
753 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
754 {
755 	/* offset must be valid */
756 	const char *src = btf_str_by_offset(btf, offset);
757 	const char *src_limit;
758 
759 	if (!__btf_name_char_ok(*src, true, dot_ok))
760 		return false;
761 
762 	/* set a limit on identifier length */
763 	src_limit = src + KSYM_NAME_LEN;
764 	src++;
765 	while (*src && src < src_limit) {
766 		if (!__btf_name_char_ok(*src, false, dot_ok))
767 			return false;
768 		src++;
769 	}
770 
771 	return !*src;
772 }
773 
774 /* Only C-style identifier is permitted. This can be relaxed if
775  * necessary.
776  */
777 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
778 {
779 	return __btf_name_valid(btf, offset, false);
780 }
781 
782 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
783 {
784 	return __btf_name_valid(btf, offset, true);
785 }
786 
787 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
788 {
789 	const char *name;
790 
791 	if (!offset)
792 		return "(anon)";
793 
794 	name = btf_str_by_offset(btf, offset);
795 	return name ?: "(invalid-name-offset)";
796 }
797 
798 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
799 {
800 	return btf_str_by_offset(btf, offset);
801 }
802 
803 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
804 {
805 	while (type_id < btf->start_id)
806 		btf = btf->base_btf;
807 
808 	type_id -= btf->start_id;
809 	if (type_id >= btf->nr_types)
810 		return NULL;
811 	return btf->types[type_id];
812 }
813 EXPORT_SYMBOL_GPL(btf_type_by_id);
814 
815 /*
816  * Regular int is not a bit field and it must be either
817  * u8/u16/u32/u64 or __int128.
818  */
819 static bool btf_type_int_is_regular(const struct btf_type *t)
820 {
821 	u8 nr_bits, nr_bytes;
822 	u32 int_data;
823 
824 	int_data = btf_type_int(t);
825 	nr_bits = BTF_INT_BITS(int_data);
826 	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
827 	if (BITS_PER_BYTE_MASKED(nr_bits) ||
828 	    BTF_INT_OFFSET(int_data) ||
829 	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
830 	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
831 	     nr_bytes != (2 * sizeof(u64)))) {
832 		return false;
833 	}
834 
835 	return true;
836 }
837 
838 /*
839  * Check that given struct member is a regular int with expected
840  * offset and size.
841  */
842 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
843 			   const struct btf_member *m,
844 			   u32 expected_offset, u32 expected_size)
845 {
846 	const struct btf_type *t;
847 	u32 id, int_data;
848 	u8 nr_bits;
849 
850 	id = m->type;
851 	t = btf_type_id_size(btf, &id, NULL);
852 	if (!t || !btf_type_is_int(t))
853 		return false;
854 
855 	int_data = btf_type_int(t);
856 	nr_bits = BTF_INT_BITS(int_data);
857 	if (btf_type_kflag(s)) {
858 		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
859 		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
860 
861 		/* if kflag set, int should be a regular int and
862 		 * bit offset should be at byte boundary.
863 		 */
864 		return !bitfield_size &&
865 		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
866 		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
867 	}
868 
869 	if (BTF_INT_OFFSET(int_data) ||
870 	    BITS_PER_BYTE_MASKED(m->offset) ||
871 	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
872 	    BITS_PER_BYTE_MASKED(nr_bits) ||
873 	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
874 		return false;
875 
876 	return true;
877 }
878 
879 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
880 static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
881 						       u32 id)
882 {
883 	const struct btf_type *t = btf_type_by_id(btf, id);
884 
885 	while (btf_type_is_modifier(t) &&
886 	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
887 		t = btf_type_by_id(btf, t->type);
888 	}
889 
890 	return t;
891 }
892 
893 #define BTF_SHOW_MAX_ITER	10
894 
895 #define BTF_KIND_BIT(kind)	(1ULL << kind)
896 
897 /*
898  * Populate show->state.name with type name information.
899  * Format of type name is
900  *
901  * [.member_name = ] (type_name)
902  */
903 static const char *btf_show_name(struct btf_show *show)
904 {
905 	/* BTF_MAX_ITER array suffixes "[]" */
906 	const char *array_suffixes = "[][][][][][][][][][]";
907 	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
908 	/* BTF_MAX_ITER pointer suffixes "*" */
909 	const char *ptr_suffixes = "**********";
910 	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
911 	const char *name = NULL, *prefix = "", *parens = "";
912 	const struct btf_member *m = show->state.member;
913 	const struct btf_type *t;
914 	const struct btf_array *array;
915 	u32 id = show->state.type_id;
916 	const char *member = NULL;
917 	bool show_member = false;
918 	u64 kinds = 0;
919 	int i;
920 
921 	show->state.name[0] = '\0';
922 
923 	/*
924 	 * Don't show type name if we're showing an array member;
925 	 * in that case we show the array type so don't need to repeat
926 	 * ourselves for each member.
927 	 */
928 	if (show->state.array_member)
929 		return "";
930 
931 	/* Retrieve member name, if any. */
932 	if (m) {
933 		member = btf_name_by_offset(show->btf, m->name_off);
934 		show_member = strlen(member) > 0;
935 		id = m->type;
936 	}
937 
938 	/*
939 	 * Start with type_id, as we have resolved the struct btf_type *
940 	 * via btf_modifier_show() past the parent typedef to the child
941 	 * struct, int etc it is defined as.  In such cases, the type_id
942 	 * still represents the starting type while the struct btf_type *
943 	 * in our show->state points at the resolved type of the typedef.
944 	 */
945 	t = btf_type_by_id(show->btf, id);
946 	if (!t)
947 		return "";
948 
949 	/*
950 	 * The goal here is to build up the right number of pointer and
951 	 * array suffixes while ensuring the type name for a typedef
952 	 * is represented.  Along the way we accumulate a list of
953 	 * BTF kinds we have encountered, since these will inform later
954 	 * display; for example, pointer types will not require an
955 	 * opening "{" for struct, we will just display the pointer value.
956 	 *
957 	 * We also want to accumulate the right number of pointer or array
958 	 * indices in the format string while iterating until we get to
959 	 * the typedef/pointee/array member target type.
960 	 *
961 	 * We start by pointing at the end of pointer and array suffix
962 	 * strings; as we accumulate pointers and arrays we move the pointer
963 	 * or array string backwards so it will show the expected number of
964 	 * '*' or '[]' for the type.  BTF_SHOW_MAX_ITER of nesting of pointers
965 	 * and/or arrays and typedefs are supported as a precaution.
966 	 *
967 	 * We also want to get typedef name while proceeding to resolve
968 	 * type it points to so that we can add parentheses if it is a
969 	 * "typedef struct" etc.
970 	 */
971 	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
972 
973 		switch (BTF_INFO_KIND(t->info)) {
974 		case BTF_KIND_TYPEDEF:
975 			if (!name)
976 				name = btf_name_by_offset(show->btf,
977 							       t->name_off);
978 			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
979 			id = t->type;
980 			break;
981 		case BTF_KIND_ARRAY:
982 			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
983 			parens = "[";
984 			if (!t)
985 				return "";
986 			array = btf_type_array(t);
987 			if (array_suffix > array_suffixes)
988 				array_suffix -= 2;
989 			id = array->type;
990 			break;
991 		case BTF_KIND_PTR:
992 			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
993 			if (ptr_suffix > ptr_suffixes)
994 				ptr_suffix -= 1;
995 			id = t->type;
996 			break;
997 		default:
998 			id = 0;
999 			break;
1000 		}
1001 		if (!id)
1002 			break;
1003 		t = btf_type_skip_qualifiers(show->btf, id);
1004 	}
1005 	/* We may not be able to represent this type; bail to be safe */
1006 	if (i == BTF_SHOW_MAX_ITER)
1007 		return "";
1008 
1009 	if (!name)
1010 		name = btf_name_by_offset(show->btf, t->name_off);
1011 
1012 	switch (BTF_INFO_KIND(t->info)) {
1013 	case BTF_KIND_STRUCT:
1014 	case BTF_KIND_UNION:
1015 		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1016 			 "struct" : "union";
1017 		/* if it's an array of struct/union, parens is already set */
1018 		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
1019 			parens = "{";
1020 		break;
1021 	case BTF_KIND_ENUM:
1022 	case BTF_KIND_ENUM64:
1023 		prefix = "enum";
1024 		break;
1025 	default:
1026 		break;
1027 	}
1028 
1029 	/* pointer does not require parens */
1030 	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
1031 		parens = "";
1032 	/* typedef does not require struct/union/enum prefix */
1033 	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
1034 		prefix = "";
1035 
1036 	if (!name)
1037 		name = "";
1038 
1039 	/* Even if we don't want type name info, we want parentheses etc */
1040 	if (show->flags & BTF_SHOW_NONAME)
1041 		snprintf(show->state.name, sizeof(show->state.name), "%s",
1042 			 parens);
1043 	else
1044 		snprintf(show->state.name, sizeof(show->state.name),
1045 			 "%s%s%s(%s%s%s%s%s%s)%s",
1046 			 /* first 3 strings comprise ".member = " */
1047 			 show_member ? "." : "",
1048 			 show_member ? member : "",
1049 			 show_member ? " = " : "",
1050 			 /* ...next is our prefix (struct, enum, etc) */
1051 			 prefix,
1052 			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1053 			 /* ...this is the type name itself */
1054 			 name,
1055 			 /* ...suffixed by the appropriate '*', '[]' suffixes */
1056 			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
1057 			 array_suffix, parens);
1058 
1059 	return show->state.name;
1060 }
1061 
1062 static const char *__btf_show_indent(struct btf_show *show)
1063 {
1064 	const char *indents = "                                ";
1065 	const char *indent = &indents[strlen(indents)];
1066 
1067 	if ((indent - show->state.depth) >= indents)
1068 		return indent - show->state.depth;
1069 	return indents;
1070 }
1071 
1072 static const char *btf_show_indent(struct btf_show *show)
1073 {
1074 	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1075 }
1076 
1077 static const char *btf_show_newline(struct btf_show *show)
1078 {
1079 	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1080 }
1081 
1082 static const char *btf_show_delim(struct btf_show *show)
1083 {
1084 	if (show->state.depth == 0)
1085 		return "";
1086 
1087 	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1088 		BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1089 		return "|";
1090 
1091 	return ",";
1092 }
1093 
1094 __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1095 {
1096 	va_list args;
1097 
1098 	if (!show->state.depth_check) {
1099 		va_start(args, fmt);
1100 		show->showfn(show, fmt, args);
1101 		va_end(args);
1102 	}
1103 }
1104 
1105 /* Macros are used here as btf_show_type_value[s]() prepends and appends
1106  * format specifiers to the format specifier passed in; these do the work of
1107  * adding indentation, delimiters etc while the caller simply has to specify
1108  * the type value(s) in the format specifier + value(s).
1109  */
1110 #define btf_show_type_value(show, fmt, value)				       \
1111 	do {								       \
1112 		if ((value) != (__typeof__(value))0 ||			       \
1113 		    (show->flags & BTF_SHOW_ZERO) ||			       \
1114 		    show->state.depth == 0) {				       \
1115 			btf_show(show, "%s%s" fmt "%s%s",		       \
1116 				 btf_show_indent(show),			       \
1117 				 btf_show_name(show),			       \
1118 				 value, btf_show_delim(show),		       \
1119 				 btf_show_newline(show));		       \
1120 			if (show->state.depth > show->state.depth_to_show)     \
1121 				show->state.depth_to_show = show->state.depth; \
1122 		}							       \
1123 	} while (0)
1124 
1125 #define btf_show_type_values(show, fmt, ...)				       \
1126 	do {								       \
1127 		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
1128 			 btf_show_name(show),				       \
1129 			 __VA_ARGS__, btf_show_delim(show),		       \
1130 			 btf_show_newline(show));			       \
1131 		if (show->state.depth > show->state.depth_to_show)	       \
1132 			show->state.depth_to_show = show->state.depth;	       \
1133 	} while (0)
1134 
1135 /* How much is left to copy to safe buffer after @data? */
1136 static int btf_show_obj_size_left(struct btf_show *show, void *data)
1137 {
1138 	return show->obj.head + show->obj.size - data;
1139 }
1140 
1141 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1142 static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1143 {
1144 	return data >= show->obj.data &&
1145 	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1146 }
1147 
1148 /*
1149  * If object pointed to by @data of @size falls within our safe buffer, return
1150  * the equivalent pointer to the same safe data.  Assumes
1151  * copy_from_kernel_nofault() has already happened and our safe buffer is
1152  * populated.
1153  */
1154 static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1155 {
1156 	if (btf_show_obj_is_safe(show, data, size))
1157 		return show->obj.safe + (data - show->obj.data);
1158 	return NULL;
1159 }
1160 
1161 /*
1162  * Return a safe-to-access version of data pointed to by @data.
1163  * We do this by copying the relevant amount of information
1164  * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1165  *
1166  * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1167  * safe copy is needed.
1168  *
1169  * Otherwise we need to determine if we have the required amount
1170  * of data (determined by the @data pointer and the size of the
1171  * largest base type we can encounter (represented by
1172  * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
1173  * that we will be able to print some of the current object,
1174  * and if more is needed a copy will be triggered.
1175  * Some objects such as structs will not fit into the buffer;
1176  * in such cases additional copies when we iterate over their
1177  * members may be needed.
1178  *
1179  * btf_show_obj_safe() is used to return a safe buffer for
1180  * btf_show_start_type(); this ensures that as we recurse into
1181  * nested types we always have safe data for the given type.
1182  * This approach is somewhat wasteful; it's possible for example
1183  * that when iterating over a large union we'll end up copying the
1184  * same data repeatedly, but the goal is safety not performance.
1185  * We use stack data as opposed to per-CPU buffers because the
1186  * iteration over a type can take some time, and preemption handling
1187  * would greatly complicate use of the safe buffer.
1188  */
1189 static void *btf_show_obj_safe(struct btf_show *show,
1190 			       const struct btf_type *t,
1191 			       void *data)
1192 {
1193 	const struct btf_type *rt;
1194 	int size_left, size;
1195 	void *safe = NULL;
1196 
1197 	if (show->flags & BTF_SHOW_UNSAFE)
1198 		return data;
1199 
1200 	rt = btf_resolve_size(show->btf, t, &size);
1201 	if (IS_ERR(rt)) {
1202 		show->state.status = PTR_ERR(rt);
1203 		return NULL;
1204 	}
1205 
1206 	/*
1207 	 * Is this toplevel object? If so, set total object size and
1208 	 * initialize pointers.  Otherwise check if we still fall within
1209 	 * our safe object data.
1210 	 */
1211 	if (show->state.depth == 0) {
1212 		show->obj.size = size;
1213 		show->obj.head = data;
1214 	} else {
1215 		/*
1216 		 * If the size of the current object is > our remaining
1217 		 * safe buffer we _may_ need to do a new copy.  However
1218 		 * consider the case of a nested struct; it's size pushes
1219 		 * us over the safe buffer limit, but showing any individual
1220 		 * struct members does not.  In such cases, we don't need
1221 		 * to initiate a fresh copy yet; however we definitely need
1222 		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1223 		 * in our buffer, regardless of the current object size.
1224 		 * The logic here is that as we resolve types we will
1225 		 * hit a base type at some point, and we need to be sure
1226 		 * the next chunk of data is safely available to display
1227 		 * that type info safely.  We cannot rely on the size of
1228 		 * the current object here because it may be much larger
1229 		 * than our current buffer (e.g. task_struct is 8k).
1230 		 * All we want to do here is ensure that we can print the
1231 		 * next basic type, which we can if either
1232 		 * - the current type size is within the safe buffer; or
1233 		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1234 		 *   the safe buffer.
1235 		 */
1236 		safe = __btf_show_obj_safe(show, data,
1237 					   min(size,
1238 					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1239 	}
1240 
1241 	/*
1242 	 * We need a new copy to our safe object, either because we haven't
1243 	 * yet copied and are initializing safe data, or because the data
1244 	 * we want falls outside the boundaries of the safe object.
1245 	 */
1246 	if (!safe) {
1247 		size_left = btf_show_obj_size_left(show, data);
1248 		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1249 			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1250 		show->state.status = copy_from_kernel_nofault(show->obj.safe,
1251 							      data, size_left);
1252 		if (!show->state.status) {
1253 			show->obj.data = data;
1254 			safe = show->obj.safe;
1255 		}
1256 	}
1257 
1258 	return safe;
1259 }
1260 
1261 /*
1262  * Set the type we are starting to show and return a safe data pointer
1263  * to be used for showing the associated data.
1264  */
1265 static void *btf_show_start_type(struct btf_show *show,
1266 				 const struct btf_type *t,
1267 				 u32 type_id, void *data)
1268 {
1269 	show->state.type = t;
1270 	show->state.type_id = type_id;
1271 	show->state.name[0] = '\0';
1272 
1273 	return btf_show_obj_safe(show, t, data);
1274 }
1275 
1276 static void btf_show_end_type(struct btf_show *show)
1277 {
1278 	show->state.type = NULL;
1279 	show->state.type_id = 0;
1280 	show->state.name[0] = '\0';
1281 }
1282 
1283 static void *btf_show_start_aggr_type(struct btf_show *show,
1284 				      const struct btf_type *t,
1285 				      u32 type_id, void *data)
1286 {
1287 	void *safe_data = btf_show_start_type(show, t, type_id, data);
1288 
1289 	if (!safe_data)
1290 		return safe_data;
1291 
1292 	btf_show(show, "%s%s%s", btf_show_indent(show),
1293 		 btf_show_name(show),
1294 		 btf_show_newline(show));
1295 	show->state.depth++;
1296 	return safe_data;
1297 }
1298 
1299 static void btf_show_end_aggr_type(struct btf_show *show,
1300 				   const char *suffix)
1301 {
1302 	show->state.depth--;
1303 	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1304 		 btf_show_delim(show), btf_show_newline(show));
1305 	btf_show_end_type(show);
1306 }
1307 
1308 static void btf_show_start_member(struct btf_show *show,
1309 				  const struct btf_member *m)
1310 {
1311 	show->state.member = m;
1312 }
1313 
1314 static void btf_show_start_array_member(struct btf_show *show)
1315 {
1316 	show->state.array_member = 1;
1317 	btf_show_start_member(show, NULL);
1318 }
1319 
1320 static void btf_show_end_member(struct btf_show *show)
1321 {
1322 	show->state.member = NULL;
1323 }
1324 
1325 static void btf_show_end_array_member(struct btf_show *show)
1326 {
1327 	show->state.array_member = 0;
1328 	btf_show_end_member(show);
1329 }
1330 
1331 static void *btf_show_start_array_type(struct btf_show *show,
1332 				       const struct btf_type *t,
1333 				       u32 type_id,
1334 				       u16 array_encoding,
1335 				       void *data)
1336 {
1337 	show->state.array_encoding = array_encoding;
1338 	show->state.array_terminated = 0;
1339 	return btf_show_start_aggr_type(show, t, type_id, data);
1340 }
1341 
1342 static void btf_show_end_array_type(struct btf_show *show)
1343 {
1344 	show->state.array_encoding = 0;
1345 	show->state.array_terminated = 0;
1346 	btf_show_end_aggr_type(show, "]");
1347 }
1348 
1349 static void *btf_show_start_struct_type(struct btf_show *show,
1350 					const struct btf_type *t,
1351 					u32 type_id,
1352 					void *data)
1353 {
1354 	return btf_show_start_aggr_type(show, t, type_id, data);
1355 }
1356 
1357 static void btf_show_end_struct_type(struct btf_show *show)
1358 {
1359 	btf_show_end_aggr_type(show, "}");
1360 }
1361 
1362 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1363 					      const char *fmt, ...)
1364 {
1365 	va_list args;
1366 
1367 	va_start(args, fmt);
1368 	bpf_verifier_vlog(log, fmt, args);
1369 	va_end(args);
1370 }
1371 
1372 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1373 					    const char *fmt, ...)
1374 {
1375 	struct bpf_verifier_log *log = &env->log;
1376 	va_list args;
1377 
1378 	if (!bpf_verifier_log_needed(log))
1379 		return;
1380 
1381 	va_start(args, fmt);
1382 	bpf_verifier_vlog(log, fmt, args);
1383 	va_end(args);
1384 }
1385 
1386 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1387 						   const struct btf_type *t,
1388 						   bool log_details,
1389 						   const char *fmt, ...)
1390 {
1391 	struct bpf_verifier_log *log = &env->log;
1392 	struct btf *btf = env->btf;
1393 	va_list args;
1394 
1395 	if (!bpf_verifier_log_needed(log))
1396 		return;
1397 
1398 	/* btf verifier prints all types it is processing via
1399 	 * btf_verifier_log_type(..., fmt = NULL).
1400 	 * Skip those prints for in-kernel BTF verification.
1401 	 */
1402 	if (log->level == BPF_LOG_KERNEL && !fmt)
1403 		return;
1404 
1405 	__btf_verifier_log(log, "[%u] %s %s%s",
1406 			   env->log_type_id,
1407 			   btf_type_str(t),
1408 			   __btf_name_by_offset(btf, t->name_off),
1409 			   log_details ? " " : "");
1410 
1411 	if (log_details)
1412 		btf_type_ops(t)->log_details(env, t);
1413 
1414 	if (fmt && *fmt) {
1415 		__btf_verifier_log(log, " ");
1416 		va_start(args, fmt);
1417 		bpf_verifier_vlog(log, fmt, args);
1418 		va_end(args);
1419 	}
1420 
1421 	__btf_verifier_log(log, "\n");
1422 }
1423 
1424 #define btf_verifier_log_type(env, t, ...) \
1425 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1426 #define btf_verifier_log_basic(env, t, ...) \
1427 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1428 
1429 __printf(4, 5)
1430 static void btf_verifier_log_member(struct btf_verifier_env *env,
1431 				    const struct btf_type *struct_type,
1432 				    const struct btf_member *member,
1433 				    const char *fmt, ...)
1434 {
1435 	struct bpf_verifier_log *log = &env->log;
1436 	struct btf *btf = env->btf;
1437 	va_list args;
1438 
1439 	if (!bpf_verifier_log_needed(log))
1440 		return;
1441 
1442 	if (log->level == BPF_LOG_KERNEL && !fmt)
1443 		return;
1444 	/* The CHECK_META phase already did a btf dump.
1445 	 *
1446 	 * If member is logged again, it must hit an error in
1447 	 * parsing this member.  It is useful to print out which
1448 	 * struct this member belongs to.
1449 	 */
1450 	if (env->phase != CHECK_META)
1451 		btf_verifier_log_type(env, struct_type, NULL);
1452 
1453 	if (btf_type_kflag(struct_type))
1454 		__btf_verifier_log(log,
1455 				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1456 				   __btf_name_by_offset(btf, member->name_off),
1457 				   member->type,
1458 				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
1459 				   BTF_MEMBER_BIT_OFFSET(member->offset));
1460 	else
1461 		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1462 				   __btf_name_by_offset(btf, member->name_off),
1463 				   member->type, member->offset);
1464 
1465 	if (fmt && *fmt) {
1466 		__btf_verifier_log(log, " ");
1467 		va_start(args, fmt);
1468 		bpf_verifier_vlog(log, fmt, args);
1469 		va_end(args);
1470 	}
1471 
1472 	__btf_verifier_log(log, "\n");
1473 }
1474 
1475 __printf(4, 5)
1476 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1477 				 const struct btf_type *datasec_type,
1478 				 const struct btf_var_secinfo *vsi,
1479 				 const char *fmt, ...)
1480 {
1481 	struct bpf_verifier_log *log = &env->log;
1482 	va_list args;
1483 
1484 	if (!bpf_verifier_log_needed(log))
1485 		return;
1486 	if (log->level == BPF_LOG_KERNEL && !fmt)
1487 		return;
1488 	if (env->phase != CHECK_META)
1489 		btf_verifier_log_type(env, datasec_type, NULL);
1490 
1491 	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1492 			   vsi->type, vsi->offset, vsi->size);
1493 	if (fmt && *fmt) {
1494 		__btf_verifier_log(log, " ");
1495 		va_start(args, fmt);
1496 		bpf_verifier_vlog(log, fmt, args);
1497 		va_end(args);
1498 	}
1499 
1500 	__btf_verifier_log(log, "\n");
1501 }
1502 
1503 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1504 				 u32 btf_data_size)
1505 {
1506 	struct bpf_verifier_log *log = &env->log;
1507 	const struct btf *btf = env->btf;
1508 	const struct btf_header *hdr;
1509 
1510 	if (!bpf_verifier_log_needed(log))
1511 		return;
1512 
1513 	if (log->level == BPF_LOG_KERNEL)
1514 		return;
1515 	hdr = &btf->hdr;
1516 	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1517 	__btf_verifier_log(log, "version: %u\n", hdr->version);
1518 	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1519 	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1520 	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1521 	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1522 	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1523 	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1524 	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1525 }
1526 
1527 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1528 {
1529 	struct btf *btf = env->btf;
1530 
1531 	if (btf->types_size == btf->nr_types) {
1532 		/* Expand 'types' array */
1533 
1534 		struct btf_type **new_types;
1535 		u32 expand_by, new_size;
1536 
1537 		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1538 			btf_verifier_log(env, "Exceeded max num of types");
1539 			return -E2BIG;
1540 		}
1541 
1542 		expand_by = max_t(u32, btf->types_size >> 2, 16);
1543 		new_size = min_t(u32, BTF_MAX_TYPE,
1544 				 btf->types_size + expand_by);
1545 
1546 		new_types = kvcalloc(new_size, sizeof(*new_types),
1547 				     GFP_KERNEL | __GFP_NOWARN);
1548 		if (!new_types)
1549 			return -ENOMEM;
1550 
1551 		if (btf->nr_types == 0) {
1552 			if (!btf->base_btf) {
1553 				/* lazily init VOID type */
1554 				new_types[0] = &btf_void;
1555 				btf->nr_types++;
1556 			}
1557 		} else {
1558 			memcpy(new_types, btf->types,
1559 			       sizeof(*btf->types) * btf->nr_types);
1560 		}
1561 
1562 		kvfree(btf->types);
1563 		btf->types = new_types;
1564 		btf->types_size = new_size;
1565 	}
1566 
1567 	btf->types[btf->nr_types++] = t;
1568 
1569 	return 0;
1570 }
1571 
1572 static int btf_alloc_id(struct btf *btf)
1573 {
1574 	int id;
1575 
1576 	idr_preload(GFP_KERNEL);
1577 	spin_lock_bh(&btf_idr_lock);
1578 	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1579 	if (id > 0)
1580 		btf->id = id;
1581 	spin_unlock_bh(&btf_idr_lock);
1582 	idr_preload_end();
1583 
1584 	if (WARN_ON_ONCE(!id))
1585 		return -ENOSPC;
1586 
1587 	return id > 0 ? 0 : id;
1588 }
1589 
1590 static void btf_free_id(struct btf *btf)
1591 {
1592 	unsigned long flags;
1593 
1594 	/*
1595 	 * In map-in-map, calling map_delete_elem() on outer
1596 	 * map will call bpf_map_put on the inner map.
1597 	 * It will then eventually call btf_free_id()
1598 	 * on the inner map.  Some of the map_delete_elem()
1599 	 * implementation may have irq disabled, so
1600 	 * we need to use the _irqsave() version instead
1601 	 * of the _bh() version.
1602 	 */
1603 	spin_lock_irqsave(&btf_idr_lock, flags);
1604 	idr_remove(&btf_idr, btf->id);
1605 	spin_unlock_irqrestore(&btf_idr_lock, flags);
1606 }
1607 
1608 static void btf_free_kfunc_set_tab(struct btf *btf)
1609 {
1610 	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1611 	int hook;
1612 
1613 	if (!tab)
1614 		return;
1615 	/* For module BTF, we directly assign the sets being registered, so
1616 	 * there is nothing to free except kfunc_set_tab.
1617 	 */
1618 	if (btf_is_module(btf))
1619 		goto free_tab;
1620 	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1621 		kfree(tab->sets[hook]);
1622 free_tab:
1623 	kfree(tab);
1624 	btf->kfunc_set_tab = NULL;
1625 }
1626 
1627 static void btf_free_dtor_kfunc_tab(struct btf *btf)
1628 {
1629 	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1630 
1631 	if (!tab)
1632 		return;
1633 	kfree(tab);
1634 	btf->dtor_kfunc_tab = NULL;
1635 }
1636 
1637 static void btf_struct_metas_free(struct btf_struct_metas *tab)
1638 {
1639 	int i;
1640 
1641 	if (!tab)
1642 		return;
1643 	for (i = 0; i < tab->cnt; i++) {
1644 		btf_record_free(tab->types[i].record);
1645 		kfree(tab->types[i].field_offs);
1646 	}
1647 	kfree(tab);
1648 }
1649 
1650 static void btf_free_struct_meta_tab(struct btf *btf)
1651 {
1652 	struct btf_struct_metas *tab = btf->struct_meta_tab;
1653 
1654 	btf_struct_metas_free(tab);
1655 	btf->struct_meta_tab = NULL;
1656 }
1657 
1658 static void btf_free(struct btf *btf)
1659 {
1660 	btf_free_struct_meta_tab(btf);
1661 	btf_free_dtor_kfunc_tab(btf);
1662 	btf_free_kfunc_set_tab(btf);
1663 	kvfree(btf->types);
1664 	kvfree(btf->resolved_sizes);
1665 	kvfree(btf->resolved_ids);
1666 	kvfree(btf->data);
1667 	kfree(btf);
1668 }
1669 
1670 static void btf_free_rcu(struct rcu_head *rcu)
1671 {
1672 	struct btf *btf = container_of(rcu, struct btf, rcu);
1673 
1674 	btf_free(btf);
1675 }
1676 
1677 void btf_get(struct btf *btf)
1678 {
1679 	refcount_inc(&btf->refcnt);
1680 }
1681 
1682 void btf_put(struct btf *btf)
1683 {
1684 	if (btf && refcount_dec_and_test(&btf->refcnt)) {
1685 		btf_free_id(btf);
1686 		call_rcu(&btf->rcu, btf_free_rcu);
1687 	}
1688 }
1689 
1690 static int env_resolve_init(struct btf_verifier_env *env)
1691 {
1692 	struct btf *btf = env->btf;
1693 	u32 nr_types = btf->nr_types;
1694 	u32 *resolved_sizes = NULL;
1695 	u32 *resolved_ids = NULL;
1696 	u8 *visit_states = NULL;
1697 
1698 	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1699 				  GFP_KERNEL | __GFP_NOWARN);
1700 	if (!resolved_sizes)
1701 		goto nomem;
1702 
1703 	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1704 				GFP_KERNEL | __GFP_NOWARN);
1705 	if (!resolved_ids)
1706 		goto nomem;
1707 
1708 	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1709 				GFP_KERNEL | __GFP_NOWARN);
1710 	if (!visit_states)
1711 		goto nomem;
1712 
1713 	btf->resolved_sizes = resolved_sizes;
1714 	btf->resolved_ids = resolved_ids;
1715 	env->visit_states = visit_states;
1716 
1717 	return 0;
1718 
1719 nomem:
1720 	kvfree(resolved_sizes);
1721 	kvfree(resolved_ids);
1722 	kvfree(visit_states);
1723 	return -ENOMEM;
1724 }
1725 
1726 static void btf_verifier_env_free(struct btf_verifier_env *env)
1727 {
1728 	kvfree(env->visit_states);
1729 	kfree(env);
1730 }
1731 
1732 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1733 				     const struct btf_type *next_type)
1734 {
1735 	switch (env->resolve_mode) {
1736 	case RESOLVE_TBD:
1737 		/* int, enum or void is a sink */
1738 		return !btf_type_needs_resolve(next_type);
1739 	case RESOLVE_PTR:
1740 		/* int, enum, void, struct, array, func or func_proto is a sink
1741 		 * for ptr
1742 		 */
1743 		return !btf_type_is_modifier(next_type) &&
1744 			!btf_type_is_ptr(next_type);
1745 	case RESOLVE_STRUCT_OR_ARRAY:
1746 		/* int, enum, void, ptr, func or func_proto is a sink
1747 		 * for struct and array
1748 		 */
1749 		return !btf_type_is_modifier(next_type) &&
1750 			!btf_type_is_array(next_type) &&
1751 			!btf_type_is_struct(next_type);
1752 	default:
1753 		BUG();
1754 	}
1755 }
1756 
1757 static bool env_type_is_resolved(const struct btf_verifier_env *env,
1758 				 u32 type_id)
1759 {
1760 	/* base BTF types should be resolved by now */
1761 	if (type_id < env->btf->start_id)
1762 		return true;
1763 
1764 	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1765 }
1766 
1767 static int env_stack_push(struct btf_verifier_env *env,
1768 			  const struct btf_type *t, u32 type_id)
1769 {
1770 	const struct btf *btf = env->btf;
1771 	struct resolve_vertex *v;
1772 
1773 	if (env->top_stack == MAX_RESOLVE_DEPTH)
1774 		return -E2BIG;
1775 
1776 	if (type_id < btf->start_id
1777 	    || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1778 		return -EEXIST;
1779 
1780 	env->visit_states[type_id - btf->start_id] = VISITED;
1781 
1782 	v = &env->stack[env->top_stack++];
1783 	v->t = t;
1784 	v->type_id = type_id;
1785 	v->next_member = 0;
1786 
1787 	if (env->resolve_mode == RESOLVE_TBD) {
1788 		if (btf_type_is_ptr(t))
1789 			env->resolve_mode = RESOLVE_PTR;
1790 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1791 			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1792 	}
1793 
1794 	return 0;
1795 }
1796 
1797 static void env_stack_set_next_member(struct btf_verifier_env *env,
1798 				      u16 next_member)
1799 {
1800 	env->stack[env->top_stack - 1].next_member = next_member;
1801 }
1802 
1803 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1804 				   u32 resolved_type_id,
1805 				   u32 resolved_size)
1806 {
1807 	u32 type_id = env->stack[--(env->top_stack)].type_id;
1808 	struct btf *btf = env->btf;
1809 
1810 	type_id -= btf->start_id; /* adjust to local type id */
1811 	btf->resolved_sizes[type_id] = resolved_size;
1812 	btf->resolved_ids[type_id] = resolved_type_id;
1813 	env->visit_states[type_id] = RESOLVED;
1814 }
1815 
1816 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1817 {
1818 	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1819 }
1820 
1821 /* Resolve the size of a passed-in "type"
1822  *
1823  * type: is an array (e.g. u32 array[x][y])
1824  * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1825  * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
1826  *             corresponds to the return type.
1827  * *elem_type: u32
1828  * *elem_id: id of u32
1829  * *total_nelems: (x * y).  Hence, individual elem size is
1830  *                (*type_size / *total_nelems)
1831  * *type_id: id of type if it's changed within the function, 0 if not
1832  *
1833  * type: is not an array (e.g. const struct X)
1834  * return type: type "struct X"
1835  * *type_size: sizeof(struct X)
1836  * *elem_type: same as return type ("struct X")
1837  * *elem_id: 0
1838  * *total_nelems: 1
1839  * *type_id: id of type if it's changed within the function, 0 if not
1840  */
1841 static const struct btf_type *
1842 __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1843 		   u32 *type_size, const struct btf_type **elem_type,
1844 		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
1845 {
1846 	const struct btf_type *array_type = NULL;
1847 	const struct btf_array *array = NULL;
1848 	u32 i, size, nelems = 1, id = 0;
1849 
1850 	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1851 		switch (BTF_INFO_KIND(type->info)) {
1852 		/* type->size can be used */
1853 		case BTF_KIND_INT:
1854 		case BTF_KIND_STRUCT:
1855 		case BTF_KIND_UNION:
1856 		case BTF_KIND_ENUM:
1857 		case BTF_KIND_FLOAT:
1858 		case BTF_KIND_ENUM64:
1859 			size = type->size;
1860 			goto resolved;
1861 
1862 		case BTF_KIND_PTR:
1863 			size = sizeof(void *);
1864 			goto resolved;
1865 
1866 		/* Modifiers */
1867 		case BTF_KIND_TYPEDEF:
1868 		case BTF_KIND_VOLATILE:
1869 		case BTF_KIND_CONST:
1870 		case BTF_KIND_RESTRICT:
1871 		case BTF_KIND_TYPE_TAG:
1872 			id = type->type;
1873 			type = btf_type_by_id(btf, type->type);
1874 			break;
1875 
1876 		case BTF_KIND_ARRAY:
1877 			if (!array_type)
1878 				array_type = type;
1879 			array = btf_type_array(type);
1880 			if (nelems && array->nelems > U32_MAX / nelems)
1881 				return ERR_PTR(-EINVAL);
1882 			nelems *= array->nelems;
1883 			type = btf_type_by_id(btf, array->type);
1884 			break;
1885 
1886 		/* type without size */
1887 		default:
1888 			return ERR_PTR(-EINVAL);
1889 		}
1890 	}
1891 
1892 	return ERR_PTR(-EINVAL);
1893 
1894 resolved:
1895 	if (nelems && size > U32_MAX / nelems)
1896 		return ERR_PTR(-EINVAL);
1897 
1898 	*type_size = nelems * size;
1899 	if (total_nelems)
1900 		*total_nelems = nelems;
1901 	if (elem_type)
1902 		*elem_type = type;
1903 	if (elem_id)
1904 		*elem_id = array ? array->type : 0;
1905 	if (type_id && id)
1906 		*type_id = id;
1907 
1908 	return array_type ? : type;
1909 }
1910 
1911 const struct btf_type *
1912 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1913 		 u32 *type_size)
1914 {
1915 	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
1916 }
1917 
1918 static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
1919 {
1920 	while (type_id < btf->start_id)
1921 		btf = btf->base_btf;
1922 
1923 	return btf->resolved_ids[type_id - btf->start_id];
1924 }
1925 
1926 /* The input param "type_id" must point to a needs_resolve type */
1927 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1928 						  u32 *type_id)
1929 {
1930 	*type_id = btf_resolved_type_id(btf, *type_id);
1931 	return btf_type_by_id(btf, *type_id);
1932 }
1933 
1934 static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
1935 {
1936 	while (type_id < btf->start_id)
1937 		btf = btf->base_btf;
1938 
1939 	return btf->resolved_sizes[type_id - btf->start_id];
1940 }
1941 
1942 const struct btf_type *btf_type_id_size(const struct btf *btf,
1943 					u32 *type_id, u32 *ret_size)
1944 {
1945 	const struct btf_type *size_type;
1946 	u32 size_type_id = *type_id;
1947 	u32 size = 0;
1948 
1949 	size_type = btf_type_by_id(btf, size_type_id);
1950 	if (btf_type_nosize_or_null(size_type))
1951 		return NULL;
1952 
1953 	if (btf_type_has_size(size_type)) {
1954 		size = size_type->size;
1955 	} else if (btf_type_is_array(size_type)) {
1956 		size = btf_resolved_type_size(btf, size_type_id);
1957 	} else if (btf_type_is_ptr(size_type)) {
1958 		size = sizeof(void *);
1959 	} else {
1960 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1961 				 !btf_type_is_var(size_type)))
1962 			return NULL;
1963 
1964 		size_type_id = btf_resolved_type_id(btf, size_type_id);
1965 		size_type = btf_type_by_id(btf, size_type_id);
1966 		if (btf_type_nosize_or_null(size_type))
1967 			return NULL;
1968 		else if (btf_type_has_size(size_type))
1969 			size = size_type->size;
1970 		else if (btf_type_is_array(size_type))
1971 			size = btf_resolved_type_size(btf, size_type_id);
1972 		else if (btf_type_is_ptr(size_type))
1973 			size = sizeof(void *);
1974 		else
1975 			return NULL;
1976 	}
1977 
1978 	*type_id = size_type_id;
1979 	if (ret_size)
1980 		*ret_size = size;
1981 
1982 	return size_type;
1983 }
1984 
1985 static int btf_df_check_member(struct btf_verifier_env *env,
1986 			       const struct btf_type *struct_type,
1987 			       const struct btf_member *member,
1988 			       const struct btf_type *member_type)
1989 {
1990 	btf_verifier_log_basic(env, struct_type,
1991 			       "Unsupported check_member");
1992 	return -EINVAL;
1993 }
1994 
1995 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1996 				     const struct btf_type *struct_type,
1997 				     const struct btf_member *member,
1998 				     const struct btf_type *member_type)
1999 {
2000 	btf_verifier_log_basic(env, struct_type,
2001 			       "Unsupported check_kflag_member");
2002 	return -EINVAL;
2003 }
2004 
2005 /* Used for ptr, array struct/union and float type members.
2006  * int, enum and modifier types have their specific callback functions.
2007  */
2008 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
2009 					  const struct btf_type *struct_type,
2010 					  const struct btf_member *member,
2011 					  const struct btf_type *member_type)
2012 {
2013 	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2014 		btf_verifier_log_member(env, struct_type, member,
2015 					"Invalid member bitfield_size");
2016 		return -EINVAL;
2017 	}
2018 
2019 	/* bitfield size is 0, so member->offset represents bit offset only.
2020 	 * It is safe to call non kflag check_member variants.
2021 	 */
2022 	return btf_type_ops(member_type)->check_member(env, struct_type,
2023 						       member,
2024 						       member_type);
2025 }
2026 
2027 static int btf_df_resolve(struct btf_verifier_env *env,
2028 			  const struct resolve_vertex *v)
2029 {
2030 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2031 	return -EINVAL;
2032 }
2033 
2034 static void btf_df_show(const struct btf *btf, const struct btf_type *t,
2035 			u32 type_id, void *data, u8 bits_offsets,
2036 			struct btf_show *show)
2037 {
2038 	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2039 }
2040 
2041 static int btf_int_check_member(struct btf_verifier_env *env,
2042 				const struct btf_type *struct_type,
2043 				const struct btf_member *member,
2044 				const struct btf_type *member_type)
2045 {
2046 	u32 int_data = btf_type_int(member_type);
2047 	u32 struct_bits_off = member->offset;
2048 	u32 struct_size = struct_type->size;
2049 	u32 nr_copy_bits;
2050 	u32 bytes_offset;
2051 
2052 	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2053 		btf_verifier_log_member(env, struct_type, member,
2054 					"bits_offset exceeds U32_MAX");
2055 		return -EINVAL;
2056 	}
2057 
2058 	struct_bits_off += BTF_INT_OFFSET(int_data);
2059 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2060 	nr_copy_bits = BTF_INT_BITS(int_data) +
2061 		BITS_PER_BYTE_MASKED(struct_bits_off);
2062 
2063 	if (nr_copy_bits > BITS_PER_U128) {
2064 		btf_verifier_log_member(env, struct_type, member,
2065 					"nr_copy_bits exceeds 128");
2066 		return -EINVAL;
2067 	}
2068 
2069 	if (struct_size < bytes_offset ||
2070 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2071 		btf_verifier_log_member(env, struct_type, member,
2072 					"Member exceeds struct_size");
2073 		return -EINVAL;
2074 	}
2075 
2076 	return 0;
2077 }
2078 
2079 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
2080 				      const struct btf_type *struct_type,
2081 				      const struct btf_member *member,
2082 				      const struct btf_type *member_type)
2083 {
2084 	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
2085 	u32 int_data = btf_type_int(member_type);
2086 	u32 struct_size = struct_type->size;
2087 	u32 nr_copy_bits;
2088 
2089 	/* a regular int type is required for the kflag int member */
2090 	if (!btf_type_int_is_regular(member_type)) {
2091 		btf_verifier_log_member(env, struct_type, member,
2092 					"Invalid member base type");
2093 		return -EINVAL;
2094 	}
2095 
2096 	/* check sanity of bitfield size */
2097 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2098 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2099 	nr_int_data_bits = BTF_INT_BITS(int_data);
2100 	if (!nr_bits) {
2101 		/* Not a bitfield member, member offset must be at byte
2102 		 * boundary.
2103 		 */
2104 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2105 			btf_verifier_log_member(env, struct_type, member,
2106 						"Invalid member offset");
2107 			return -EINVAL;
2108 		}
2109 
2110 		nr_bits = nr_int_data_bits;
2111 	} else if (nr_bits > nr_int_data_bits) {
2112 		btf_verifier_log_member(env, struct_type, member,
2113 					"Invalid member bitfield_size");
2114 		return -EINVAL;
2115 	}
2116 
2117 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2118 	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2119 	if (nr_copy_bits > BITS_PER_U128) {
2120 		btf_verifier_log_member(env, struct_type, member,
2121 					"nr_copy_bits exceeds 128");
2122 		return -EINVAL;
2123 	}
2124 
2125 	if (struct_size < bytes_offset ||
2126 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2127 		btf_verifier_log_member(env, struct_type, member,
2128 					"Member exceeds struct_size");
2129 		return -EINVAL;
2130 	}
2131 
2132 	return 0;
2133 }
2134 
2135 static s32 btf_int_check_meta(struct btf_verifier_env *env,
2136 			      const struct btf_type *t,
2137 			      u32 meta_left)
2138 {
2139 	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2140 	u16 encoding;
2141 
2142 	if (meta_left < meta_needed) {
2143 		btf_verifier_log_basic(env, t,
2144 				       "meta_left:%u meta_needed:%u",
2145 				       meta_left, meta_needed);
2146 		return -EINVAL;
2147 	}
2148 
2149 	if (btf_type_vlen(t)) {
2150 		btf_verifier_log_type(env, t, "vlen != 0");
2151 		return -EINVAL;
2152 	}
2153 
2154 	if (btf_type_kflag(t)) {
2155 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2156 		return -EINVAL;
2157 	}
2158 
2159 	int_data = btf_type_int(t);
2160 	if (int_data & ~BTF_INT_MASK) {
2161 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2162 				       int_data);
2163 		return -EINVAL;
2164 	}
2165 
2166 	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2167 
2168 	if (nr_bits > BITS_PER_U128) {
2169 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2170 				      BITS_PER_U128);
2171 		return -EINVAL;
2172 	}
2173 
2174 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2175 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2176 		return -EINVAL;
2177 	}
2178 
2179 	/*
2180 	 * Only one of the encoding bits is allowed and it
2181 	 * should be sufficient for the pretty print purpose (i.e. decoding).
2182 	 * Multiple bits can be allowed later if it is found
2183 	 * to be insufficient.
2184 	 */
2185 	encoding = BTF_INT_ENCODING(int_data);
2186 	if (encoding &&
2187 	    encoding != BTF_INT_SIGNED &&
2188 	    encoding != BTF_INT_CHAR &&
2189 	    encoding != BTF_INT_BOOL) {
2190 		btf_verifier_log_type(env, t, "Unsupported encoding");
2191 		return -ENOTSUPP;
2192 	}
2193 
2194 	btf_verifier_log_type(env, t, NULL);
2195 
2196 	return meta_needed;
2197 }
2198 
2199 static void btf_int_log(struct btf_verifier_env *env,
2200 			const struct btf_type *t)
2201 {
2202 	int int_data = btf_type_int(t);
2203 
2204 	btf_verifier_log(env,
2205 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2206 			 t->size, BTF_INT_OFFSET(int_data),
2207 			 BTF_INT_BITS(int_data),
2208 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2209 }
2210 
2211 static void btf_int128_print(struct btf_show *show, void *data)
2212 {
2213 	/* data points to a __int128 number.
2214 	 * Suppose
2215 	 *     int128_num = *(__int128 *)data;
2216 	 * The below formulas shows what upper_num and lower_num represents:
2217 	 *     upper_num = int128_num >> 64;
2218 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2219 	 */
2220 	u64 upper_num, lower_num;
2221 
2222 #ifdef __BIG_ENDIAN_BITFIELD
2223 	upper_num = *(u64 *)data;
2224 	lower_num = *(u64 *)(data + 8);
2225 #else
2226 	upper_num = *(u64 *)(data + 8);
2227 	lower_num = *(u64 *)data;
2228 #endif
2229 	if (upper_num == 0)
2230 		btf_show_type_value(show, "0x%llx", lower_num);
2231 	else
2232 		btf_show_type_values(show, "0x%llx%016llx", upper_num,
2233 				     lower_num);
2234 }
2235 
2236 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2237 			     u16 right_shift_bits)
2238 {
2239 	u64 upper_num, lower_num;
2240 
2241 #ifdef __BIG_ENDIAN_BITFIELD
2242 	upper_num = print_num[0];
2243 	lower_num = print_num[1];
2244 #else
2245 	upper_num = print_num[1];
2246 	lower_num = print_num[0];
2247 #endif
2248 
2249 	/* shake out un-needed bits by shift/or operations */
2250 	if (left_shift_bits >= 64) {
2251 		upper_num = lower_num << (left_shift_bits - 64);
2252 		lower_num = 0;
2253 	} else {
2254 		upper_num = (upper_num << left_shift_bits) |
2255 			    (lower_num >> (64 - left_shift_bits));
2256 		lower_num = lower_num << left_shift_bits;
2257 	}
2258 
2259 	if (right_shift_bits >= 64) {
2260 		lower_num = upper_num >> (right_shift_bits - 64);
2261 		upper_num = 0;
2262 	} else {
2263 		lower_num = (lower_num >> right_shift_bits) |
2264 			    (upper_num << (64 - right_shift_bits));
2265 		upper_num = upper_num >> right_shift_bits;
2266 	}
2267 
2268 #ifdef __BIG_ENDIAN_BITFIELD
2269 	print_num[0] = upper_num;
2270 	print_num[1] = lower_num;
2271 #else
2272 	print_num[0] = lower_num;
2273 	print_num[1] = upper_num;
2274 #endif
2275 }
2276 
2277 static void btf_bitfield_show(void *data, u8 bits_offset,
2278 			      u8 nr_bits, struct btf_show *show)
2279 {
2280 	u16 left_shift_bits, right_shift_bits;
2281 	u8 nr_copy_bytes;
2282 	u8 nr_copy_bits;
2283 	u64 print_num[2] = {};
2284 
2285 	nr_copy_bits = nr_bits + bits_offset;
2286 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2287 
2288 	memcpy(print_num, data, nr_copy_bytes);
2289 
2290 #ifdef __BIG_ENDIAN_BITFIELD
2291 	left_shift_bits = bits_offset;
2292 #else
2293 	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2294 #endif
2295 	right_shift_bits = BITS_PER_U128 - nr_bits;
2296 
2297 	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2298 	btf_int128_print(show, print_num);
2299 }
2300 
2301 
2302 static void btf_int_bits_show(const struct btf *btf,
2303 			      const struct btf_type *t,
2304 			      void *data, u8 bits_offset,
2305 			      struct btf_show *show)
2306 {
2307 	u32 int_data = btf_type_int(t);
2308 	u8 nr_bits = BTF_INT_BITS(int_data);
2309 	u8 total_bits_offset;
2310 
2311 	/*
2312 	 * bits_offset is at most 7.
2313 	 * BTF_INT_OFFSET() cannot exceed 128 bits.
2314 	 */
2315 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2316 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2317 	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2318 	btf_bitfield_show(data, bits_offset, nr_bits, show);
2319 }
2320 
2321 static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2322 			 u32 type_id, void *data, u8 bits_offset,
2323 			 struct btf_show *show)
2324 {
2325 	u32 int_data = btf_type_int(t);
2326 	u8 encoding = BTF_INT_ENCODING(int_data);
2327 	bool sign = encoding & BTF_INT_SIGNED;
2328 	u8 nr_bits = BTF_INT_BITS(int_data);
2329 	void *safe_data;
2330 
2331 	safe_data = btf_show_start_type(show, t, type_id, data);
2332 	if (!safe_data)
2333 		return;
2334 
2335 	if (bits_offset || BTF_INT_OFFSET(int_data) ||
2336 	    BITS_PER_BYTE_MASKED(nr_bits)) {
2337 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2338 		goto out;
2339 	}
2340 
2341 	switch (nr_bits) {
2342 	case 128:
2343 		btf_int128_print(show, safe_data);
2344 		break;
2345 	case 64:
2346 		if (sign)
2347 			btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2348 		else
2349 			btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2350 		break;
2351 	case 32:
2352 		if (sign)
2353 			btf_show_type_value(show, "%d", *(s32 *)safe_data);
2354 		else
2355 			btf_show_type_value(show, "%u", *(u32 *)safe_data);
2356 		break;
2357 	case 16:
2358 		if (sign)
2359 			btf_show_type_value(show, "%d", *(s16 *)safe_data);
2360 		else
2361 			btf_show_type_value(show, "%u", *(u16 *)safe_data);
2362 		break;
2363 	case 8:
2364 		if (show->state.array_encoding == BTF_INT_CHAR) {
2365 			/* check for null terminator */
2366 			if (show->state.array_terminated)
2367 				break;
2368 			if (*(char *)data == '\0') {
2369 				show->state.array_terminated = 1;
2370 				break;
2371 			}
2372 			if (isprint(*(char *)data)) {
2373 				btf_show_type_value(show, "'%c'",
2374 						    *(char *)safe_data);
2375 				break;
2376 			}
2377 		}
2378 		if (sign)
2379 			btf_show_type_value(show, "%d", *(s8 *)safe_data);
2380 		else
2381 			btf_show_type_value(show, "%u", *(u8 *)safe_data);
2382 		break;
2383 	default:
2384 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2385 		break;
2386 	}
2387 out:
2388 	btf_show_end_type(show);
2389 }
2390 
2391 static const struct btf_kind_operations int_ops = {
2392 	.check_meta = btf_int_check_meta,
2393 	.resolve = btf_df_resolve,
2394 	.check_member = btf_int_check_member,
2395 	.check_kflag_member = btf_int_check_kflag_member,
2396 	.log_details = btf_int_log,
2397 	.show = btf_int_show,
2398 };
2399 
2400 static int btf_modifier_check_member(struct btf_verifier_env *env,
2401 				     const struct btf_type *struct_type,
2402 				     const struct btf_member *member,
2403 				     const struct btf_type *member_type)
2404 {
2405 	const struct btf_type *resolved_type;
2406 	u32 resolved_type_id = member->type;
2407 	struct btf_member resolved_member;
2408 	struct btf *btf = env->btf;
2409 
2410 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2411 	if (!resolved_type) {
2412 		btf_verifier_log_member(env, struct_type, member,
2413 					"Invalid member");
2414 		return -EINVAL;
2415 	}
2416 
2417 	resolved_member = *member;
2418 	resolved_member.type = resolved_type_id;
2419 
2420 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
2421 							 &resolved_member,
2422 							 resolved_type);
2423 }
2424 
2425 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2426 					   const struct btf_type *struct_type,
2427 					   const struct btf_member *member,
2428 					   const struct btf_type *member_type)
2429 {
2430 	const struct btf_type *resolved_type;
2431 	u32 resolved_type_id = member->type;
2432 	struct btf_member resolved_member;
2433 	struct btf *btf = env->btf;
2434 
2435 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2436 	if (!resolved_type) {
2437 		btf_verifier_log_member(env, struct_type, member,
2438 					"Invalid member");
2439 		return -EINVAL;
2440 	}
2441 
2442 	resolved_member = *member;
2443 	resolved_member.type = resolved_type_id;
2444 
2445 	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2446 							       &resolved_member,
2447 							       resolved_type);
2448 }
2449 
2450 static int btf_ptr_check_member(struct btf_verifier_env *env,
2451 				const struct btf_type *struct_type,
2452 				const struct btf_member *member,
2453 				const struct btf_type *member_type)
2454 {
2455 	u32 struct_size, struct_bits_off, bytes_offset;
2456 
2457 	struct_size = struct_type->size;
2458 	struct_bits_off = member->offset;
2459 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2460 
2461 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2462 		btf_verifier_log_member(env, struct_type, member,
2463 					"Member is not byte aligned");
2464 		return -EINVAL;
2465 	}
2466 
2467 	if (struct_size - bytes_offset < sizeof(void *)) {
2468 		btf_verifier_log_member(env, struct_type, member,
2469 					"Member exceeds struct_size");
2470 		return -EINVAL;
2471 	}
2472 
2473 	return 0;
2474 }
2475 
2476 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2477 				   const struct btf_type *t,
2478 				   u32 meta_left)
2479 {
2480 	const char *value;
2481 
2482 	if (btf_type_vlen(t)) {
2483 		btf_verifier_log_type(env, t, "vlen != 0");
2484 		return -EINVAL;
2485 	}
2486 
2487 	if (btf_type_kflag(t)) {
2488 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2489 		return -EINVAL;
2490 	}
2491 
2492 	if (!BTF_TYPE_ID_VALID(t->type)) {
2493 		btf_verifier_log_type(env, t, "Invalid type_id");
2494 		return -EINVAL;
2495 	}
2496 
2497 	/* typedef/type_tag type must have a valid name, and other ref types,
2498 	 * volatile, const, restrict, should have a null name.
2499 	 */
2500 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2501 		if (!t->name_off ||
2502 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
2503 			btf_verifier_log_type(env, t, "Invalid name");
2504 			return -EINVAL;
2505 		}
2506 	} else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2507 		value = btf_name_by_offset(env->btf, t->name_off);
2508 		if (!value || !value[0]) {
2509 			btf_verifier_log_type(env, t, "Invalid name");
2510 			return -EINVAL;
2511 		}
2512 	} else {
2513 		if (t->name_off) {
2514 			btf_verifier_log_type(env, t, "Invalid name");
2515 			return -EINVAL;
2516 		}
2517 	}
2518 
2519 	btf_verifier_log_type(env, t, NULL);
2520 
2521 	return 0;
2522 }
2523 
2524 static int btf_modifier_resolve(struct btf_verifier_env *env,
2525 				const struct resolve_vertex *v)
2526 {
2527 	const struct btf_type *t = v->t;
2528 	const struct btf_type *next_type;
2529 	u32 next_type_id = t->type;
2530 	struct btf *btf = env->btf;
2531 
2532 	next_type = btf_type_by_id(btf, next_type_id);
2533 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2534 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2535 		return -EINVAL;
2536 	}
2537 
2538 	if (!env_type_is_resolve_sink(env, next_type) &&
2539 	    !env_type_is_resolved(env, next_type_id))
2540 		return env_stack_push(env, next_type, next_type_id);
2541 
2542 	/* Figure out the resolved next_type_id with size.
2543 	 * They will be stored in the current modifier's
2544 	 * resolved_ids and resolved_sizes such that it can
2545 	 * save us a few type-following when we use it later (e.g. in
2546 	 * pretty print).
2547 	 */
2548 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2549 		if (env_type_is_resolved(env, next_type_id))
2550 			next_type = btf_type_id_resolve(btf, &next_type_id);
2551 
2552 		/* "typedef void new_void", "const void"...etc */
2553 		if (!btf_type_is_void(next_type) &&
2554 		    !btf_type_is_fwd(next_type) &&
2555 		    !btf_type_is_func_proto(next_type)) {
2556 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2557 			return -EINVAL;
2558 		}
2559 	}
2560 
2561 	env_stack_pop_resolved(env, next_type_id, 0);
2562 
2563 	return 0;
2564 }
2565 
2566 static int btf_var_resolve(struct btf_verifier_env *env,
2567 			   const struct resolve_vertex *v)
2568 {
2569 	const struct btf_type *next_type;
2570 	const struct btf_type *t = v->t;
2571 	u32 next_type_id = t->type;
2572 	struct btf *btf = env->btf;
2573 
2574 	next_type = btf_type_by_id(btf, next_type_id);
2575 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2576 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2577 		return -EINVAL;
2578 	}
2579 
2580 	if (!env_type_is_resolve_sink(env, next_type) &&
2581 	    !env_type_is_resolved(env, next_type_id))
2582 		return env_stack_push(env, next_type, next_type_id);
2583 
2584 	if (btf_type_is_modifier(next_type)) {
2585 		const struct btf_type *resolved_type;
2586 		u32 resolved_type_id;
2587 
2588 		resolved_type_id = next_type_id;
2589 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2590 
2591 		if (btf_type_is_ptr(resolved_type) &&
2592 		    !env_type_is_resolve_sink(env, resolved_type) &&
2593 		    !env_type_is_resolved(env, resolved_type_id))
2594 			return env_stack_push(env, resolved_type,
2595 					      resolved_type_id);
2596 	}
2597 
2598 	/* We must resolve to something concrete at this point, no
2599 	 * forward types or similar that would resolve to size of
2600 	 * zero is allowed.
2601 	 */
2602 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2603 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2604 		return -EINVAL;
2605 	}
2606 
2607 	env_stack_pop_resolved(env, next_type_id, 0);
2608 
2609 	return 0;
2610 }
2611 
2612 static int btf_ptr_resolve(struct btf_verifier_env *env,
2613 			   const struct resolve_vertex *v)
2614 {
2615 	const struct btf_type *next_type;
2616 	const struct btf_type *t = v->t;
2617 	u32 next_type_id = t->type;
2618 	struct btf *btf = env->btf;
2619 
2620 	next_type = btf_type_by_id(btf, next_type_id);
2621 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2622 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2623 		return -EINVAL;
2624 	}
2625 
2626 	if (!env_type_is_resolve_sink(env, next_type) &&
2627 	    !env_type_is_resolved(env, next_type_id))
2628 		return env_stack_push(env, next_type, next_type_id);
2629 
2630 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2631 	 * the modifier may have stopped resolving when it was resolved
2632 	 * to a ptr (last-resolved-ptr).
2633 	 *
2634 	 * We now need to continue from the last-resolved-ptr to
2635 	 * ensure the last-resolved-ptr will not referring back to
2636 	 * the current ptr (t).
2637 	 */
2638 	if (btf_type_is_modifier(next_type)) {
2639 		const struct btf_type *resolved_type;
2640 		u32 resolved_type_id;
2641 
2642 		resolved_type_id = next_type_id;
2643 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2644 
2645 		if (btf_type_is_ptr(resolved_type) &&
2646 		    !env_type_is_resolve_sink(env, resolved_type) &&
2647 		    !env_type_is_resolved(env, resolved_type_id))
2648 			return env_stack_push(env, resolved_type,
2649 					      resolved_type_id);
2650 	}
2651 
2652 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2653 		if (env_type_is_resolved(env, next_type_id))
2654 			next_type = btf_type_id_resolve(btf, &next_type_id);
2655 
2656 		if (!btf_type_is_void(next_type) &&
2657 		    !btf_type_is_fwd(next_type) &&
2658 		    !btf_type_is_func_proto(next_type)) {
2659 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2660 			return -EINVAL;
2661 		}
2662 	}
2663 
2664 	env_stack_pop_resolved(env, next_type_id, 0);
2665 
2666 	return 0;
2667 }
2668 
2669 static void btf_modifier_show(const struct btf *btf,
2670 			      const struct btf_type *t,
2671 			      u32 type_id, void *data,
2672 			      u8 bits_offset, struct btf_show *show)
2673 {
2674 	if (btf->resolved_ids)
2675 		t = btf_type_id_resolve(btf, &type_id);
2676 	else
2677 		t = btf_type_skip_modifiers(btf, type_id, NULL);
2678 
2679 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2680 }
2681 
2682 static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2683 			 u32 type_id, void *data, u8 bits_offset,
2684 			 struct btf_show *show)
2685 {
2686 	t = btf_type_id_resolve(btf, &type_id);
2687 
2688 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2689 }
2690 
2691 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2692 			 u32 type_id, void *data, u8 bits_offset,
2693 			 struct btf_show *show)
2694 {
2695 	void *safe_data;
2696 
2697 	safe_data = btf_show_start_type(show, t, type_id, data);
2698 	if (!safe_data)
2699 		return;
2700 
2701 	/* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2702 	if (show->flags & BTF_SHOW_PTR_RAW)
2703 		btf_show_type_value(show, "0x%px", *(void **)safe_data);
2704 	else
2705 		btf_show_type_value(show, "0x%p", *(void **)safe_data);
2706 	btf_show_end_type(show);
2707 }
2708 
2709 static void btf_ref_type_log(struct btf_verifier_env *env,
2710 			     const struct btf_type *t)
2711 {
2712 	btf_verifier_log(env, "type_id=%u", t->type);
2713 }
2714 
2715 static struct btf_kind_operations modifier_ops = {
2716 	.check_meta = btf_ref_type_check_meta,
2717 	.resolve = btf_modifier_resolve,
2718 	.check_member = btf_modifier_check_member,
2719 	.check_kflag_member = btf_modifier_check_kflag_member,
2720 	.log_details = btf_ref_type_log,
2721 	.show = btf_modifier_show,
2722 };
2723 
2724 static struct btf_kind_operations ptr_ops = {
2725 	.check_meta = btf_ref_type_check_meta,
2726 	.resolve = btf_ptr_resolve,
2727 	.check_member = btf_ptr_check_member,
2728 	.check_kflag_member = btf_generic_check_kflag_member,
2729 	.log_details = btf_ref_type_log,
2730 	.show = btf_ptr_show,
2731 };
2732 
2733 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2734 			      const struct btf_type *t,
2735 			      u32 meta_left)
2736 {
2737 	if (btf_type_vlen(t)) {
2738 		btf_verifier_log_type(env, t, "vlen != 0");
2739 		return -EINVAL;
2740 	}
2741 
2742 	if (t->type) {
2743 		btf_verifier_log_type(env, t, "type != 0");
2744 		return -EINVAL;
2745 	}
2746 
2747 	/* fwd type must have a valid name */
2748 	if (!t->name_off ||
2749 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2750 		btf_verifier_log_type(env, t, "Invalid name");
2751 		return -EINVAL;
2752 	}
2753 
2754 	btf_verifier_log_type(env, t, NULL);
2755 
2756 	return 0;
2757 }
2758 
2759 static void btf_fwd_type_log(struct btf_verifier_env *env,
2760 			     const struct btf_type *t)
2761 {
2762 	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2763 }
2764 
2765 static struct btf_kind_operations fwd_ops = {
2766 	.check_meta = btf_fwd_check_meta,
2767 	.resolve = btf_df_resolve,
2768 	.check_member = btf_df_check_member,
2769 	.check_kflag_member = btf_df_check_kflag_member,
2770 	.log_details = btf_fwd_type_log,
2771 	.show = btf_df_show,
2772 };
2773 
2774 static int btf_array_check_member(struct btf_verifier_env *env,
2775 				  const struct btf_type *struct_type,
2776 				  const struct btf_member *member,
2777 				  const struct btf_type *member_type)
2778 {
2779 	u32 struct_bits_off = member->offset;
2780 	u32 struct_size, bytes_offset;
2781 	u32 array_type_id, array_size;
2782 	struct btf *btf = env->btf;
2783 
2784 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2785 		btf_verifier_log_member(env, struct_type, member,
2786 					"Member is not byte aligned");
2787 		return -EINVAL;
2788 	}
2789 
2790 	array_type_id = member->type;
2791 	btf_type_id_size(btf, &array_type_id, &array_size);
2792 	struct_size = struct_type->size;
2793 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2794 	if (struct_size - bytes_offset < array_size) {
2795 		btf_verifier_log_member(env, struct_type, member,
2796 					"Member exceeds struct_size");
2797 		return -EINVAL;
2798 	}
2799 
2800 	return 0;
2801 }
2802 
2803 static s32 btf_array_check_meta(struct btf_verifier_env *env,
2804 				const struct btf_type *t,
2805 				u32 meta_left)
2806 {
2807 	const struct btf_array *array = btf_type_array(t);
2808 	u32 meta_needed = sizeof(*array);
2809 
2810 	if (meta_left < meta_needed) {
2811 		btf_verifier_log_basic(env, t,
2812 				       "meta_left:%u meta_needed:%u",
2813 				       meta_left, meta_needed);
2814 		return -EINVAL;
2815 	}
2816 
2817 	/* array type should not have a name */
2818 	if (t->name_off) {
2819 		btf_verifier_log_type(env, t, "Invalid name");
2820 		return -EINVAL;
2821 	}
2822 
2823 	if (btf_type_vlen(t)) {
2824 		btf_verifier_log_type(env, t, "vlen != 0");
2825 		return -EINVAL;
2826 	}
2827 
2828 	if (btf_type_kflag(t)) {
2829 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2830 		return -EINVAL;
2831 	}
2832 
2833 	if (t->size) {
2834 		btf_verifier_log_type(env, t, "size != 0");
2835 		return -EINVAL;
2836 	}
2837 
2838 	/* Array elem type and index type cannot be in type void,
2839 	 * so !array->type and !array->index_type are not allowed.
2840 	 */
2841 	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2842 		btf_verifier_log_type(env, t, "Invalid elem");
2843 		return -EINVAL;
2844 	}
2845 
2846 	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2847 		btf_verifier_log_type(env, t, "Invalid index");
2848 		return -EINVAL;
2849 	}
2850 
2851 	btf_verifier_log_type(env, t, NULL);
2852 
2853 	return meta_needed;
2854 }
2855 
2856 static int btf_array_resolve(struct btf_verifier_env *env,
2857 			     const struct resolve_vertex *v)
2858 {
2859 	const struct btf_array *array = btf_type_array(v->t);
2860 	const struct btf_type *elem_type, *index_type;
2861 	u32 elem_type_id, index_type_id;
2862 	struct btf *btf = env->btf;
2863 	u32 elem_size;
2864 
2865 	/* Check array->index_type */
2866 	index_type_id = array->index_type;
2867 	index_type = btf_type_by_id(btf, index_type_id);
2868 	if (btf_type_nosize_or_null(index_type) ||
2869 	    btf_type_is_resolve_source_only(index_type)) {
2870 		btf_verifier_log_type(env, v->t, "Invalid index");
2871 		return -EINVAL;
2872 	}
2873 
2874 	if (!env_type_is_resolve_sink(env, index_type) &&
2875 	    !env_type_is_resolved(env, index_type_id))
2876 		return env_stack_push(env, index_type, index_type_id);
2877 
2878 	index_type = btf_type_id_size(btf, &index_type_id, NULL);
2879 	if (!index_type || !btf_type_is_int(index_type) ||
2880 	    !btf_type_int_is_regular(index_type)) {
2881 		btf_verifier_log_type(env, v->t, "Invalid index");
2882 		return -EINVAL;
2883 	}
2884 
2885 	/* Check array->type */
2886 	elem_type_id = array->type;
2887 	elem_type = btf_type_by_id(btf, elem_type_id);
2888 	if (btf_type_nosize_or_null(elem_type) ||
2889 	    btf_type_is_resolve_source_only(elem_type)) {
2890 		btf_verifier_log_type(env, v->t,
2891 				      "Invalid elem");
2892 		return -EINVAL;
2893 	}
2894 
2895 	if (!env_type_is_resolve_sink(env, elem_type) &&
2896 	    !env_type_is_resolved(env, elem_type_id))
2897 		return env_stack_push(env, elem_type, elem_type_id);
2898 
2899 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2900 	if (!elem_type) {
2901 		btf_verifier_log_type(env, v->t, "Invalid elem");
2902 		return -EINVAL;
2903 	}
2904 
2905 	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2906 		btf_verifier_log_type(env, v->t, "Invalid array of int");
2907 		return -EINVAL;
2908 	}
2909 
2910 	if (array->nelems && elem_size > U32_MAX / array->nelems) {
2911 		btf_verifier_log_type(env, v->t,
2912 				      "Array size overflows U32_MAX");
2913 		return -EINVAL;
2914 	}
2915 
2916 	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2917 
2918 	return 0;
2919 }
2920 
2921 static void btf_array_log(struct btf_verifier_env *env,
2922 			  const struct btf_type *t)
2923 {
2924 	const struct btf_array *array = btf_type_array(t);
2925 
2926 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2927 			 array->type, array->index_type, array->nelems);
2928 }
2929 
2930 static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
2931 			     u32 type_id, void *data, u8 bits_offset,
2932 			     struct btf_show *show)
2933 {
2934 	const struct btf_array *array = btf_type_array(t);
2935 	const struct btf_kind_operations *elem_ops;
2936 	const struct btf_type *elem_type;
2937 	u32 i, elem_size = 0, elem_type_id;
2938 	u16 encoding = 0;
2939 
2940 	elem_type_id = array->type;
2941 	elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
2942 	if (elem_type && btf_type_has_size(elem_type))
2943 		elem_size = elem_type->size;
2944 
2945 	if (elem_type && btf_type_is_int(elem_type)) {
2946 		u32 int_type = btf_type_int(elem_type);
2947 
2948 		encoding = BTF_INT_ENCODING(int_type);
2949 
2950 		/*
2951 		 * BTF_INT_CHAR encoding never seems to be set for
2952 		 * char arrays, so if size is 1 and element is
2953 		 * printable as a char, we'll do that.
2954 		 */
2955 		if (elem_size == 1)
2956 			encoding = BTF_INT_CHAR;
2957 	}
2958 
2959 	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
2960 		return;
2961 
2962 	if (!elem_type)
2963 		goto out;
2964 	elem_ops = btf_type_ops(elem_type);
2965 
2966 	for (i = 0; i < array->nelems; i++) {
2967 
2968 		btf_show_start_array_member(show);
2969 
2970 		elem_ops->show(btf, elem_type, elem_type_id, data,
2971 			       bits_offset, show);
2972 		data += elem_size;
2973 
2974 		btf_show_end_array_member(show);
2975 
2976 		if (show->state.array_terminated)
2977 			break;
2978 	}
2979 out:
2980 	btf_show_end_array_type(show);
2981 }
2982 
2983 static void btf_array_show(const struct btf *btf, const struct btf_type *t,
2984 			   u32 type_id, void *data, u8 bits_offset,
2985 			   struct btf_show *show)
2986 {
2987 	const struct btf_member *m = show->state.member;
2988 
2989 	/*
2990 	 * First check if any members would be shown (are non-zero).
2991 	 * See comments above "struct btf_show" definition for more
2992 	 * details on how this works at a high-level.
2993 	 */
2994 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
2995 		if (!show->state.depth_check) {
2996 			show->state.depth_check = show->state.depth + 1;
2997 			show->state.depth_to_show = 0;
2998 		}
2999 		__btf_array_show(btf, t, type_id, data, bits_offset, show);
3000 		show->state.member = m;
3001 
3002 		if (show->state.depth_check != show->state.depth + 1)
3003 			return;
3004 		show->state.depth_check = 0;
3005 
3006 		if (show->state.depth_to_show <= show->state.depth)
3007 			return;
3008 		/*
3009 		 * Reaching here indicates we have recursed and found
3010 		 * non-zero array member(s).
3011 		 */
3012 	}
3013 	__btf_array_show(btf, t, type_id, data, bits_offset, show);
3014 }
3015 
3016 static struct btf_kind_operations array_ops = {
3017 	.check_meta = btf_array_check_meta,
3018 	.resolve = btf_array_resolve,
3019 	.check_member = btf_array_check_member,
3020 	.check_kflag_member = btf_generic_check_kflag_member,
3021 	.log_details = btf_array_log,
3022 	.show = btf_array_show,
3023 };
3024 
3025 static int btf_struct_check_member(struct btf_verifier_env *env,
3026 				   const struct btf_type *struct_type,
3027 				   const struct btf_member *member,
3028 				   const struct btf_type *member_type)
3029 {
3030 	u32 struct_bits_off = member->offset;
3031 	u32 struct_size, bytes_offset;
3032 
3033 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3034 		btf_verifier_log_member(env, struct_type, member,
3035 					"Member is not byte aligned");
3036 		return -EINVAL;
3037 	}
3038 
3039 	struct_size = struct_type->size;
3040 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3041 	if (struct_size - bytes_offset < member_type->size) {
3042 		btf_verifier_log_member(env, struct_type, member,
3043 					"Member exceeds struct_size");
3044 		return -EINVAL;
3045 	}
3046 
3047 	return 0;
3048 }
3049 
3050 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
3051 				 const struct btf_type *t,
3052 				 u32 meta_left)
3053 {
3054 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3055 	const struct btf_member *member;
3056 	u32 meta_needed, last_offset;
3057 	struct btf *btf = env->btf;
3058 	u32 struct_size = t->size;
3059 	u32 offset;
3060 	u16 i;
3061 
3062 	meta_needed = btf_type_vlen(t) * sizeof(*member);
3063 	if (meta_left < meta_needed) {
3064 		btf_verifier_log_basic(env, t,
3065 				       "meta_left:%u meta_needed:%u",
3066 				       meta_left, meta_needed);
3067 		return -EINVAL;
3068 	}
3069 
3070 	/* struct type either no name or a valid one */
3071 	if (t->name_off &&
3072 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
3073 		btf_verifier_log_type(env, t, "Invalid name");
3074 		return -EINVAL;
3075 	}
3076 
3077 	btf_verifier_log_type(env, t, NULL);
3078 
3079 	last_offset = 0;
3080 	for_each_member(i, t, member) {
3081 		if (!btf_name_offset_valid(btf, member->name_off)) {
3082 			btf_verifier_log_member(env, t, member,
3083 						"Invalid member name_offset:%u",
3084 						member->name_off);
3085 			return -EINVAL;
3086 		}
3087 
3088 		/* struct member either no name or a valid one */
3089 		if (member->name_off &&
3090 		    !btf_name_valid_identifier(btf, member->name_off)) {
3091 			btf_verifier_log_member(env, t, member, "Invalid name");
3092 			return -EINVAL;
3093 		}
3094 		/* A member cannot be in type void */
3095 		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3096 			btf_verifier_log_member(env, t, member,
3097 						"Invalid type_id");
3098 			return -EINVAL;
3099 		}
3100 
3101 		offset = __btf_member_bit_offset(t, member);
3102 		if (is_union && offset) {
3103 			btf_verifier_log_member(env, t, member,
3104 						"Invalid member bits_offset");
3105 			return -EINVAL;
3106 		}
3107 
3108 		/*
3109 		 * ">" instead of ">=" because the last member could be
3110 		 * "char a[0];"
3111 		 */
3112 		if (last_offset > offset) {
3113 			btf_verifier_log_member(env, t, member,
3114 						"Invalid member bits_offset");
3115 			return -EINVAL;
3116 		}
3117 
3118 		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3119 			btf_verifier_log_member(env, t, member,
3120 						"Member bits_offset exceeds its struct size");
3121 			return -EINVAL;
3122 		}
3123 
3124 		btf_verifier_log_member(env, t, member, NULL);
3125 		last_offset = offset;
3126 	}
3127 
3128 	return meta_needed;
3129 }
3130 
3131 static int btf_struct_resolve(struct btf_verifier_env *env,
3132 			      const struct resolve_vertex *v)
3133 {
3134 	const struct btf_member *member;
3135 	int err;
3136 	u16 i;
3137 
3138 	/* Before continue resolving the next_member,
3139 	 * ensure the last member is indeed resolved to a
3140 	 * type with size info.
3141 	 */
3142 	if (v->next_member) {
3143 		const struct btf_type *last_member_type;
3144 		const struct btf_member *last_member;
3145 		u32 last_member_type_id;
3146 
3147 		last_member = btf_type_member(v->t) + v->next_member - 1;
3148 		last_member_type_id = last_member->type;
3149 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
3150 						       last_member_type_id)))
3151 			return -EINVAL;
3152 
3153 		last_member_type = btf_type_by_id(env->btf,
3154 						  last_member_type_id);
3155 		if (btf_type_kflag(v->t))
3156 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3157 								last_member,
3158 								last_member_type);
3159 		else
3160 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
3161 								last_member,
3162 								last_member_type);
3163 		if (err)
3164 			return err;
3165 	}
3166 
3167 	for_each_member_from(i, v->next_member, v->t, member) {
3168 		u32 member_type_id = member->type;
3169 		const struct btf_type *member_type = btf_type_by_id(env->btf,
3170 								member_type_id);
3171 
3172 		if (btf_type_nosize_or_null(member_type) ||
3173 		    btf_type_is_resolve_source_only(member_type)) {
3174 			btf_verifier_log_member(env, v->t, member,
3175 						"Invalid member");
3176 			return -EINVAL;
3177 		}
3178 
3179 		if (!env_type_is_resolve_sink(env, member_type) &&
3180 		    !env_type_is_resolved(env, member_type_id)) {
3181 			env_stack_set_next_member(env, i + 1);
3182 			return env_stack_push(env, member_type, member_type_id);
3183 		}
3184 
3185 		if (btf_type_kflag(v->t))
3186 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3187 									    member,
3188 									    member_type);
3189 		else
3190 			err = btf_type_ops(member_type)->check_member(env, v->t,
3191 								      member,
3192 								      member_type);
3193 		if (err)
3194 			return err;
3195 	}
3196 
3197 	env_stack_pop_resolved(env, 0, 0);
3198 
3199 	return 0;
3200 }
3201 
3202 static void btf_struct_log(struct btf_verifier_env *env,
3203 			   const struct btf_type *t)
3204 {
3205 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3206 }
3207 
3208 enum btf_field_info_type {
3209 	BTF_FIELD_SPIN_LOCK,
3210 	BTF_FIELD_TIMER,
3211 	BTF_FIELD_KPTR,
3212 };
3213 
3214 enum {
3215 	BTF_FIELD_IGNORE = 0,
3216 	BTF_FIELD_FOUND  = 1,
3217 };
3218 
3219 struct btf_field_info {
3220 	enum btf_field_type type;
3221 	u32 off;
3222 	union {
3223 		struct {
3224 			u32 type_id;
3225 		} kptr;
3226 		struct {
3227 			const char *node_name;
3228 			u32 value_btf_id;
3229 		} list_head;
3230 	};
3231 };
3232 
3233 static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3234 			   u32 off, int sz, enum btf_field_type field_type,
3235 			   struct btf_field_info *info)
3236 {
3237 	if (!__btf_type_is_struct(t))
3238 		return BTF_FIELD_IGNORE;
3239 	if (t->size != sz)
3240 		return BTF_FIELD_IGNORE;
3241 	info->type = field_type;
3242 	info->off = off;
3243 	return BTF_FIELD_FOUND;
3244 }
3245 
3246 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3247 			 u32 off, int sz, struct btf_field_info *info)
3248 {
3249 	enum btf_field_type type;
3250 	u32 res_id;
3251 
3252 	/* Permit modifiers on the pointer itself */
3253 	if (btf_type_is_volatile(t))
3254 		t = btf_type_by_id(btf, t->type);
3255 	/* For PTR, sz is always == 8 */
3256 	if (!btf_type_is_ptr(t))
3257 		return BTF_FIELD_IGNORE;
3258 	t = btf_type_by_id(btf, t->type);
3259 
3260 	if (!btf_type_is_type_tag(t))
3261 		return BTF_FIELD_IGNORE;
3262 	/* Reject extra tags */
3263 	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3264 		return -EINVAL;
3265 	if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
3266 		type = BPF_KPTR_UNREF;
3267 	else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off)))
3268 		type = BPF_KPTR_REF;
3269 	else
3270 		return -EINVAL;
3271 
3272 	/* Get the base type */
3273 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
3274 	/* Only pointer to struct is allowed */
3275 	if (!__btf_type_is_struct(t))
3276 		return -EINVAL;
3277 
3278 	info->type = type;
3279 	info->off = off;
3280 	info->kptr.type_id = res_id;
3281 	return BTF_FIELD_FOUND;
3282 }
3283 
3284 static const char *btf_find_decl_tag_value(const struct btf *btf,
3285 					   const struct btf_type *pt,
3286 					   int comp_idx, const char *tag_key)
3287 {
3288 	int i;
3289 
3290 	for (i = 1; i < btf_nr_types(btf); i++) {
3291 		const struct btf_type *t = btf_type_by_id(btf, i);
3292 		int len = strlen(tag_key);
3293 
3294 		if (!btf_type_is_decl_tag(t))
3295 			continue;
3296 		if (pt != btf_type_by_id(btf, t->type) ||
3297 		    btf_type_decl_tag(t)->component_idx != comp_idx)
3298 			continue;
3299 		if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3300 			continue;
3301 		return __btf_name_by_offset(btf, t->name_off) + len;
3302 	}
3303 	return NULL;
3304 }
3305 
3306 static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
3307 			      const struct btf_type *t, int comp_idx,
3308 			      u32 off, int sz, struct btf_field_info *info)
3309 {
3310 	const char *value_type;
3311 	const char *list_node;
3312 	s32 id;
3313 
3314 	if (!__btf_type_is_struct(t))
3315 		return BTF_FIELD_IGNORE;
3316 	if (t->size != sz)
3317 		return BTF_FIELD_IGNORE;
3318 	value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
3319 	if (!value_type)
3320 		return -EINVAL;
3321 	list_node = strstr(value_type, ":");
3322 	if (!list_node)
3323 		return -EINVAL;
3324 	value_type = kstrndup(value_type, list_node - value_type, GFP_KERNEL | __GFP_NOWARN);
3325 	if (!value_type)
3326 		return -ENOMEM;
3327 	id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
3328 	kfree(value_type);
3329 	if (id < 0)
3330 		return id;
3331 	list_node++;
3332 	if (str_is_empty(list_node))
3333 		return -EINVAL;
3334 	info->type = BPF_LIST_HEAD;
3335 	info->off = off;
3336 	info->list_head.value_btf_id = id;
3337 	info->list_head.node_name = list_node;
3338 	return BTF_FIELD_FOUND;
3339 }
3340 
3341 static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
3342 			      int *align, int *sz)
3343 {
3344 	int type = 0;
3345 
3346 	if (field_mask & BPF_SPIN_LOCK) {
3347 		if (!strcmp(name, "bpf_spin_lock")) {
3348 			if (*seen_mask & BPF_SPIN_LOCK)
3349 				return -E2BIG;
3350 			*seen_mask |= BPF_SPIN_LOCK;
3351 			type = BPF_SPIN_LOCK;
3352 			goto end;
3353 		}
3354 	}
3355 	if (field_mask & BPF_TIMER) {
3356 		if (!strcmp(name, "bpf_timer")) {
3357 			if (*seen_mask & BPF_TIMER)
3358 				return -E2BIG;
3359 			*seen_mask |= BPF_TIMER;
3360 			type = BPF_TIMER;
3361 			goto end;
3362 		}
3363 	}
3364 	if (field_mask & BPF_LIST_HEAD) {
3365 		if (!strcmp(name, "bpf_list_head")) {
3366 			type = BPF_LIST_HEAD;
3367 			goto end;
3368 		}
3369 	}
3370 	if (field_mask & BPF_LIST_NODE) {
3371 		if (!strcmp(name, "bpf_list_node")) {
3372 			type = BPF_LIST_NODE;
3373 			goto end;
3374 		}
3375 	}
3376 	/* Only return BPF_KPTR when all other types with matchable names fail */
3377 	if (field_mask & BPF_KPTR) {
3378 		type = BPF_KPTR_REF;
3379 		goto end;
3380 	}
3381 	return 0;
3382 end:
3383 	*sz = btf_field_type_size(type);
3384 	*align = btf_field_type_align(type);
3385 	return type;
3386 }
3387 
3388 static int btf_find_struct_field(const struct btf *btf,
3389 				 const struct btf_type *t, u32 field_mask,
3390 				 struct btf_field_info *info, int info_cnt)
3391 {
3392 	int ret, idx = 0, align, sz, field_type;
3393 	const struct btf_member *member;
3394 	struct btf_field_info tmp;
3395 	u32 i, off, seen_mask = 0;
3396 
3397 	for_each_member(i, t, member) {
3398 		const struct btf_type *member_type = btf_type_by_id(btf,
3399 								    member->type);
3400 
3401 		field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off),
3402 						field_mask, &seen_mask, &align, &sz);
3403 		if (field_type == 0)
3404 			continue;
3405 		if (field_type < 0)
3406 			return field_type;
3407 
3408 		off = __btf_member_bit_offset(t, member);
3409 		if (off % 8)
3410 			/* valid C code cannot generate such BTF */
3411 			return -EINVAL;
3412 		off /= 8;
3413 		if (off % align)
3414 			continue;
3415 
3416 		switch (field_type) {
3417 		case BPF_SPIN_LOCK:
3418 		case BPF_TIMER:
3419 		case BPF_LIST_NODE:
3420 			ret = btf_find_struct(btf, member_type, off, sz, field_type,
3421 					      idx < info_cnt ? &info[idx] : &tmp);
3422 			if (ret < 0)
3423 				return ret;
3424 			break;
3425 		case BPF_KPTR_UNREF:
3426 		case BPF_KPTR_REF:
3427 			ret = btf_find_kptr(btf, member_type, off, sz,
3428 					    idx < info_cnt ? &info[idx] : &tmp);
3429 			if (ret < 0)
3430 				return ret;
3431 			break;
3432 		case BPF_LIST_HEAD:
3433 			ret = btf_find_list_head(btf, t, member_type, i, off, sz,
3434 						 idx < info_cnt ? &info[idx] : &tmp);
3435 			if (ret < 0)
3436 				return ret;
3437 			break;
3438 		default:
3439 			return -EFAULT;
3440 		}
3441 
3442 		if (ret == BTF_FIELD_IGNORE)
3443 			continue;
3444 		if (idx >= info_cnt)
3445 			return -E2BIG;
3446 		++idx;
3447 	}
3448 	return idx;
3449 }
3450 
3451 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3452 				u32 field_mask, struct btf_field_info *info,
3453 				int info_cnt)
3454 {
3455 	int ret, idx = 0, align, sz, field_type;
3456 	const struct btf_var_secinfo *vsi;
3457 	struct btf_field_info tmp;
3458 	u32 i, off, seen_mask = 0;
3459 
3460 	for_each_vsi(i, t, vsi) {
3461 		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3462 		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3463 
3464 		field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off),
3465 						field_mask, &seen_mask, &align, &sz);
3466 		if (field_type == 0)
3467 			continue;
3468 		if (field_type < 0)
3469 			return field_type;
3470 
3471 		off = vsi->offset;
3472 		if (vsi->size != sz)
3473 			continue;
3474 		if (off % align)
3475 			continue;
3476 
3477 		switch (field_type) {
3478 		case BPF_SPIN_LOCK:
3479 		case BPF_TIMER:
3480 		case BPF_LIST_NODE:
3481 			ret = btf_find_struct(btf, var_type, off, sz, field_type,
3482 					      idx < info_cnt ? &info[idx] : &tmp);
3483 			if (ret < 0)
3484 				return ret;
3485 			break;
3486 		case BPF_KPTR_UNREF:
3487 		case BPF_KPTR_REF:
3488 			ret = btf_find_kptr(btf, var_type, off, sz,
3489 					    idx < info_cnt ? &info[idx] : &tmp);
3490 			if (ret < 0)
3491 				return ret;
3492 			break;
3493 		case BPF_LIST_HEAD:
3494 			ret = btf_find_list_head(btf, var, var_type, -1, off, sz,
3495 						 idx < info_cnt ? &info[idx] : &tmp);
3496 			if (ret < 0)
3497 				return ret;
3498 			break;
3499 		default:
3500 			return -EFAULT;
3501 		}
3502 
3503 		if (ret == BTF_FIELD_IGNORE)
3504 			continue;
3505 		if (idx >= info_cnt)
3506 			return -E2BIG;
3507 		++idx;
3508 	}
3509 	return idx;
3510 }
3511 
3512 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3513 			  u32 field_mask, struct btf_field_info *info,
3514 			  int info_cnt)
3515 {
3516 	if (__btf_type_is_struct(t))
3517 		return btf_find_struct_field(btf, t, field_mask, info, info_cnt);
3518 	else if (btf_type_is_datasec(t))
3519 		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt);
3520 	return -EINVAL;
3521 }
3522 
3523 static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
3524 			  struct btf_field_info *info)
3525 {
3526 	struct module *mod = NULL;
3527 	const struct btf_type *t;
3528 	struct btf *kernel_btf;
3529 	int ret;
3530 	s32 id;
3531 
3532 	/* Find type in map BTF, and use it to look up the matching type
3533 	 * in vmlinux or module BTFs, by name and kind.
3534 	 */
3535 	t = btf_type_by_id(btf, info->kptr.type_id);
3536 	id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3537 			     &kernel_btf);
3538 	if (id < 0)
3539 		return id;
3540 
3541 	/* Find and stash the function pointer for the destruction function that
3542 	 * needs to be eventually invoked from the map free path.
3543 	 */
3544 	if (info->type == BPF_KPTR_REF) {
3545 		const struct btf_type *dtor_func;
3546 		const char *dtor_func_name;
3547 		unsigned long addr;
3548 		s32 dtor_btf_id;
3549 
3550 		/* This call also serves as a whitelist of allowed objects that
3551 		 * can be used as a referenced pointer and be stored in a map at
3552 		 * the same time.
3553 		 */
3554 		dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id);
3555 		if (dtor_btf_id < 0) {
3556 			ret = dtor_btf_id;
3557 			goto end_btf;
3558 		}
3559 
3560 		dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id);
3561 		if (!dtor_func) {
3562 			ret = -ENOENT;
3563 			goto end_btf;
3564 		}
3565 
3566 		if (btf_is_module(kernel_btf)) {
3567 			mod = btf_try_get_module(kernel_btf);
3568 			if (!mod) {
3569 				ret = -ENXIO;
3570 				goto end_btf;
3571 			}
3572 		}
3573 
3574 		/* We already verified dtor_func to be btf_type_is_func
3575 		 * in register_btf_id_dtor_kfuncs.
3576 		 */
3577 		dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off);
3578 		addr = kallsyms_lookup_name(dtor_func_name);
3579 		if (!addr) {
3580 			ret = -EINVAL;
3581 			goto end_mod;
3582 		}
3583 		field->kptr.dtor = (void *)addr;
3584 	}
3585 
3586 	field->kptr.btf_id = id;
3587 	field->kptr.btf = kernel_btf;
3588 	field->kptr.module = mod;
3589 	return 0;
3590 end_mod:
3591 	module_put(mod);
3592 end_btf:
3593 	btf_put(kernel_btf);
3594 	return ret;
3595 }
3596 
3597 static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
3598 			       struct btf_field_info *info)
3599 {
3600 	const struct btf_type *t, *n = NULL;
3601 	const struct btf_member *member;
3602 	u32 offset;
3603 	int i;
3604 
3605 	t = btf_type_by_id(btf, info->list_head.value_btf_id);
3606 	/* We've already checked that value_btf_id is a struct type. We
3607 	 * just need to figure out the offset of the list_node, and
3608 	 * verify its type.
3609 	 */
3610 	for_each_member(i, t, member) {
3611 		if (strcmp(info->list_head.node_name, __btf_name_by_offset(btf, member->name_off)))
3612 			continue;
3613 		/* Invalid BTF, two members with same name */
3614 		if (n)
3615 			return -EINVAL;
3616 		n = btf_type_by_id(btf, member->type);
3617 		if (!__btf_type_is_struct(n))
3618 			return -EINVAL;
3619 		if (strcmp("bpf_list_node", __btf_name_by_offset(btf, n->name_off)))
3620 			return -EINVAL;
3621 		offset = __btf_member_bit_offset(n, member);
3622 		if (offset % 8)
3623 			return -EINVAL;
3624 		offset /= 8;
3625 		if (offset % __alignof__(struct bpf_list_node))
3626 			return -EINVAL;
3627 
3628 		field->list_head.btf = (struct btf *)btf;
3629 		field->list_head.value_btf_id = info->list_head.value_btf_id;
3630 		field->list_head.node_offset = offset;
3631 	}
3632 	if (!n)
3633 		return -ENOENT;
3634 	return 0;
3635 }
3636 
3637 struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
3638 				    u32 field_mask, u32 value_size)
3639 {
3640 	struct btf_field_info info_arr[BTF_FIELDS_MAX];
3641 	struct btf_record *rec;
3642 	u32 next_off = 0;
3643 	int ret, i, cnt;
3644 
3645 	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
3646 	if (ret < 0)
3647 		return ERR_PTR(ret);
3648 	if (!ret)
3649 		return NULL;
3650 
3651 	cnt = ret;
3652 	/* This needs to be kzalloc to zero out padding and unused fields, see
3653 	 * comment in btf_record_equal.
3654 	 */
3655 	rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN);
3656 	if (!rec)
3657 		return ERR_PTR(-ENOMEM);
3658 
3659 	rec->spin_lock_off = -EINVAL;
3660 	rec->timer_off = -EINVAL;
3661 	for (i = 0; i < cnt; i++) {
3662 		if (info_arr[i].off + btf_field_type_size(info_arr[i].type) > value_size) {
3663 			WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
3664 			ret = -EFAULT;
3665 			goto end;
3666 		}
3667 		if (info_arr[i].off < next_off) {
3668 			ret = -EEXIST;
3669 			goto end;
3670 		}
3671 		next_off = info_arr[i].off + btf_field_type_size(info_arr[i].type);
3672 
3673 		rec->field_mask |= info_arr[i].type;
3674 		rec->fields[i].offset = info_arr[i].off;
3675 		rec->fields[i].type = info_arr[i].type;
3676 
3677 		switch (info_arr[i].type) {
3678 		case BPF_SPIN_LOCK:
3679 			WARN_ON_ONCE(rec->spin_lock_off >= 0);
3680 			/* Cache offset for faster lookup at runtime */
3681 			rec->spin_lock_off = rec->fields[i].offset;
3682 			break;
3683 		case BPF_TIMER:
3684 			WARN_ON_ONCE(rec->timer_off >= 0);
3685 			/* Cache offset for faster lookup at runtime */
3686 			rec->timer_off = rec->fields[i].offset;
3687 			break;
3688 		case BPF_KPTR_UNREF:
3689 		case BPF_KPTR_REF:
3690 			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
3691 			if (ret < 0)
3692 				goto end;
3693 			break;
3694 		case BPF_LIST_HEAD:
3695 			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
3696 			if (ret < 0)
3697 				goto end;
3698 			break;
3699 		case BPF_LIST_NODE:
3700 			break;
3701 		default:
3702 			ret = -EFAULT;
3703 			goto end;
3704 		}
3705 		rec->cnt++;
3706 	}
3707 
3708 	/* bpf_list_head requires bpf_spin_lock */
3709 	if (btf_record_has_field(rec, BPF_LIST_HEAD) && rec->spin_lock_off < 0) {
3710 		ret = -EINVAL;
3711 		goto end;
3712 	}
3713 
3714 	return rec;
3715 end:
3716 	btf_record_free(rec);
3717 	return ERR_PTR(ret);
3718 }
3719 
3720 int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
3721 {
3722 	int i;
3723 
3724 	/* There are two owning types, kptr_ref and bpf_list_head. The former
3725 	 * only supports storing kernel types, which can never store references
3726 	 * to program allocated local types, atleast not yet. Hence we only need
3727 	 * to ensure that bpf_list_head ownership does not form cycles.
3728 	 */
3729 	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_LIST_HEAD))
3730 		return 0;
3731 	for (i = 0; i < rec->cnt; i++) {
3732 		struct btf_struct_meta *meta;
3733 		u32 btf_id;
3734 
3735 		if (!(rec->fields[i].type & BPF_LIST_HEAD))
3736 			continue;
3737 		btf_id = rec->fields[i].list_head.value_btf_id;
3738 		meta = btf_find_struct_meta(btf, btf_id);
3739 		if (!meta)
3740 			return -EFAULT;
3741 		rec->fields[i].list_head.value_rec = meta->record;
3742 
3743 		if (!(rec->field_mask & BPF_LIST_NODE))
3744 			continue;
3745 
3746 		/* We need to ensure ownership acyclicity among all types. The
3747 		 * proper way to do it would be to topologically sort all BTF
3748 		 * IDs based on the ownership edges, since there can be multiple
3749 		 * bpf_list_head in a type. Instead, we use the following
3750 		 * reasoning:
3751 		 *
3752 		 * - A type can only be owned by another type in user BTF if it
3753 		 *   has a bpf_list_node.
3754 		 * - A type can only _own_ another type in user BTF if it has a
3755 		 *   bpf_list_head.
3756 		 *
3757 		 * We ensure that if a type has both bpf_list_head and
3758 		 * bpf_list_node, its element types cannot be owning types.
3759 		 *
3760 		 * To ensure acyclicity:
3761 		 *
3762 		 * When A only has bpf_list_head, ownership chain can be:
3763 		 *	A -> B -> C
3764 		 * Where:
3765 		 * - B has both bpf_list_head and bpf_list_node.
3766 		 * - C only has bpf_list_node.
3767 		 *
3768 		 * When A has both bpf_list_head and bpf_list_node, some other
3769 		 * type already owns it in the BTF domain, hence it can not own
3770 		 * another owning type through any of the bpf_list_head edges.
3771 		 *	A -> B
3772 		 * Where:
3773 		 * - B only has bpf_list_node.
3774 		 */
3775 		if (meta->record->field_mask & BPF_LIST_HEAD)
3776 			return -ELOOP;
3777 	}
3778 	return 0;
3779 }
3780 
3781 static int btf_field_offs_cmp(const void *_a, const void *_b, const void *priv)
3782 {
3783 	const u32 a = *(const u32 *)_a;
3784 	const u32 b = *(const u32 *)_b;
3785 
3786 	if (a < b)
3787 		return -1;
3788 	else if (a > b)
3789 		return 1;
3790 	return 0;
3791 }
3792 
3793 static void btf_field_offs_swap(void *_a, void *_b, int size, const void *priv)
3794 {
3795 	struct btf_field_offs *foffs = (void *)priv;
3796 	u32 *off_base = foffs->field_off;
3797 	u32 *a = _a, *b = _b;
3798 	u8 *sz_a, *sz_b;
3799 
3800 	sz_a = foffs->field_sz + (a - off_base);
3801 	sz_b = foffs->field_sz + (b - off_base);
3802 
3803 	swap(*a, *b);
3804 	swap(*sz_a, *sz_b);
3805 }
3806 
3807 struct btf_field_offs *btf_parse_field_offs(struct btf_record *rec)
3808 {
3809 	struct btf_field_offs *foffs;
3810 	u32 i, *off;
3811 	u8 *sz;
3812 
3813 	BUILD_BUG_ON(ARRAY_SIZE(foffs->field_off) != ARRAY_SIZE(foffs->field_sz));
3814 	if (IS_ERR_OR_NULL(rec))
3815 		return NULL;
3816 
3817 	foffs = kzalloc(sizeof(*foffs), GFP_KERNEL | __GFP_NOWARN);
3818 	if (!foffs)
3819 		return ERR_PTR(-ENOMEM);
3820 
3821 	off = foffs->field_off;
3822 	sz = foffs->field_sz;
3823 	for (i = 0; i < rec->cnt; i++) {
3824 		off[i] = rec->fields[i].offset;
3825 		sz[i] = btf_field_type_size(rec->fields[i].type);
3826 	}
3827 	foffs->cnt = rec->cnt;
3828 
3829 	if (foffs->cnt == 1)
3830 		return foffs;
3831 	sort_r(foffs->field_off, foffs->cnt, sizeof(foffs->field_off[0]),
3832 	       btf_field_offs_cmp, btf_field_offs_swap, foffs);
3833 	return foffs;
3834 }
3835 
3836 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
3837 			      u32 type_id, void *data, u8 bits_offset,
3838 			      struct btf_show *show)
3839 {
3840 	const struct btf_member *member;
3841 	void *safe_data;
3842 	u32 i;
3843 
3844 	safe_data = btf_show_start_struct_type(show, t, type_id, data);
3845 	if (!safe_data)
3846 		return;
3847 
3848 	for_each_member(i, t, member) {
3849 		const struct btf_type *member_type = btf_type_by_id(btf,
3850 								member->type);
3851 		const struct btf_kind_operations *ops;
3852 		u32 member_offset, bitfield_size;
3853 		u32 bytes_offset;
3854 		u8 bits8_offset;
3855 
3856 		btf_show_start_member(show, member);
3857 
3858 		member_offset = __btf_member_bit_offset(t, member);
3859 		bitfield_size = __btf_member_bitfield_size(t, member);
3860 		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
3861 		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
3862 		if (bitfield_size) {
3863 			safe_data = btf_show_start_type(show, member_type,
3864 							member->type,
3865 							data + bytes_offset);
3866 			if (safe_data)
3867 				btf_bitfield_show(safe_data,
3868 						  bits8_offset,
3869 						  bitfield_size, show);
3870 			btf_show_end_type(show);
3871 		} else {
3872 			ops = btf_type_ops(member_type);
3873 			ops->show(btf, member_type, member->type,
3874 				  data + bytes_offset, bits8_offset, show);
3875 		}
3876 
3877 		btf_show_end_member(show);
3878 	}
3879 
3880 	btf_show_end_struct_type(show);
3881 }
3882 
3883 static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
3884 			    u32 type_id, void *data, u8 bits_offset,
3885 			    struct btf_show *show)
3886 {
3887 	const struct btf_member *m = show->state.member;
3888 
3889 	/*
3890 	 * First check if any members would be shown (are non-zero).
3891 	 * See comments above "struct btf_show" definition for more
3892 	 * details on how this works at a high-level.
3893 	 */
3894 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3895 		if (!show->state.depth_check) {
3896 			show->state.depth_check = show->state.depth + 1;
3897 			show->state.depth_to_show = 0;
3898 		}
3899 		__btf_struct_show(btf, t, type_id, data, bits_offset, show);
3900 		/* Restore saved member data here */
3901 		show->state.member = m;
3902 		if (show->state.depth_check != show->state.depth + 1)
3903 			return;
3904 		show->state.depth_check = 0;
3905 
3906 		if (show->state.depth_to_show <= show->state.depth)
3907 			return;
3908 		/*
3909 		 * Reaching here indicates we have recursed and found
3910 		 * non-zero child values.
3911 		 */
3912 	}
3913 
3914 	__btf_struct_show(btf, t, type_id, data, bits_offset, show);
3915 }
3916 
3917 static struct btf_kind_operations struct_ops = {
3918 	.check_meta = btf_struct_check_meta,
3919 	.resolve = btf_struct_resolve,
3920 	.check_member = btf_struct_check_member,
3921 	.check_kflag_member = btf_generic_check_kflag_member,
3922 	.log_details = btf_struct_log,
3923 	.show = btf_struct_show,
3924 };
3925 
3926 static int btf_enum_check_member(struct btf_verifier_env *env,
3927 				 const struct btf_type *struct_type,
3928 				 const struct btf_member *member,
3929 				 const struct btf_type *member_type)
3930 {
3931 	u32 struct_bits_off = member->offset;
3932 	u32 struct_size, bytes_offset;
3933 
3934 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3935 		btf_verifier_log_member(env, struct_type, member,
3936 					"Member is not byte aligned");
3937 		return -EINVAL;
3938 	}
3939 
3940 	struct_size = struct_type->size;
3941 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3942 	if (struct_size - bytes_offset < member_type->size) {
3943 		btf_verifier_log_member(env, struct_type, member,
3944 					"Member exceeds struct_size");
3945 		return -EINVAL;
3946 	}
3947 
3948 	return 0;
3949 }
3950 
3951 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
3952 				       const struct btf_type *struct_type,
3953 				       const struct btf_member *member,
3954 				       const struct btf_type *member_type)
3955 {
3956 	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
3957 	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
3958 
3959 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
3960 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
3961 	if (!nr_bits) {
3962 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3963 			btf_verifier_log_member(env, struct_type, member,
3964 						"Member is not byte aligned");
3965 			return -EINVAL;
3966 		}
3967 
3968 		nr_bits = int_bitsize;
3969 	} else if (nr_bits > int_bitsize) {
3970 		btf_verifier_log_member(env, struct_type, member,
3971 					"Invalid member bitfield_size");
3972 		return -EINVAL;
3973 	}
3974 
3975 	struct_size = struct_type->size;
3976 	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
3977 	if (struct_size < bytes_end) {
3978 		btf_verifier_log_member(env, struct_type, member,
3979 					"Member exceeds struct_size");
3980 		return -EINVAL;
3981 	}
3982 
3983 	return 0;
3984 }
3985 
3986 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
3987 			       const struct btf_type *t,
3988 			       u32 meta_left)
3989 {
3990 	const struct btf_enum *enums = btf_type_enum(t);
3991 	struct btf *btf = env->btf;
3992 	const char *fmt_str;
3993 	u16 i, nr_enums;
3994 	u32 meta_needed;
3995 
3996 	nr_enums = btf_type_vlen(t);
3997 	meta_needed = nr_enums * sizeof(*enums);
3998 
3999 	if (meta_left < meta_needed) {
4000 		btf_verifier_log_basic(env, t,
4001 				       "meta_left:%u meta_needed:%u",
4002 				       meta_left, meta_needed);
4003 		return -EINVAL;
4004 	}
4005 
4006 	if (t->size > 8 || !is_power_of_2(t->size)) {
4007 		btf_verifier_log_type(env, t, "Unexpected size");
4008 		return -EINVAL;
4009 	}
4010 
4011 	/* enum type either no name or a valid one */
4012 	if (t->name_off &&
4013 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4014 		btf_verifier_log_type(env, t, "Invalid name");
4015 		return -EINVAL;
4016 	}
4017 
4018 	btf_verifier_log_type(env, t, NULL);
4019 
4020 	for (i = 0; i < nr_enums; i++) {
4021 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4022 			btf_verifier_log(env, "\tInvalid name_offset:%u",
4023 					 enums[i].name_off);
4024 			return -EINVAL;
4025 		}
4026 
4027 		/* enum member must have a valid name */
4028 		if (!enums[i].name_off ||
4029 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4030 			btf_verifier_log_type(env, t, "Invalid name");
4031 			return -EINVAL;
4032 		}
4033 
4034 		if (env->log.level == BPF_LOG_KERNEL)
4035 			continue;
4036 		fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
4037 		btf_verifier_log(env, fmt_str,
4038 				 __btf_name_by_offset(btf, enums[i].name_off),
4039 				 enums[i].val);
4040 	}
4041 
4042 	return meta_needed;
4043 }
4044 
4045 static void btf_enum_log(struct btf_verifier_env *env,
4046 			 const struct btf_type *t)
4047 {
4048 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4049 }
4050 
4051 static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
4052 			  u32 type_id, void *data, u8 bits_offset,
4053 			  struct btf_show *show)
4054 {
4055 	const struct btf_enum *enums = btf_type_enum(t);
4056 	u32 i, nr_enums = btf_type_vlen(t);
4057 	void *safe_data;
4058 	int v;
4059 
4060 	safe_data = btf_show_start_type(show, t, type_id, data);
4061 	if (!safe_data)
4062 		return;
4063 
4064 	v = *(int *)safe_data;
4065 
4066 	for (i = 0; i < nr_enums; i++) {
4067 		if (v != enums[i].val)
4068 			continue;
4069 
4070 		btf_show_type_value(show, "%s",
4071 				    __btf_name_by_offset(btf,
4072 							 enums[i].name_off));
4073 
4074 		btf_show_end_type(show);
4075 		return;
4076 	}
4077 
4078 	if (btf_type_kflag(t))
4079 		btf_show_type_value(show, "%d", v);
4080 	else
4081 		btf_show_type_value(show, "%u", v);
4082 	btf_show_end_type(show);
4083 }
4084 
4085 static struct btf_kind_operations enum_ops = {
4086 	.check_meta = btf_enum_check_meta,
4087 	.resolve = btf_df_resolve,
4088 	.check_member = btf_enum_check_member,
4089 	.check_kflag_member = btf_enum_check_kflag_member,
4090 	.log_details = btf_enum_log,
4091 	.show = btf_enum_show,
4092 };
4093 
4094 static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
4095 				 const struct btf_type *t,
4096 				 u32 meta_left)
4097 {
4098 	const struct btf_enum64 *enums = btf_type_enum64(t);
4099 	struct btf *btf = env->btf;
4100 	const char *fmt_str;
4101 	u16 i, nr_enums;
4102 	u32 meta_needed;
4103 
4104 	nr_enums = btf_type_vlen(t);
4105 	meta_needed = nr_enums * sizeof(*enums);
4106 
4107 	if (meta_left < meta_needed) {
4108 		btf_verifier_log_basic(env, t,
4109 				       "meta_left:%u meta_needed:%u",
4110 				       meta_left, meta_needed);
4111 		return -EINVAL;
4112 	}
4113 
4114 	if (t->size > 8 || !is_power_of_2(t->size)) {
4115 		btf_verifier_log_type(env, t, "Unexpected size");
4116 		return -EINVAL;
4117 	}
4118 
4119 	/* enum type either no name or a valid one */
4120 	if (t->name_off &&
4121 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4122 		btf_verifier_log_type(env, t, "Invalid name");
4123 		return -EINVAL;
4124 	}
4125 
4126 	btf_verifier_log_type(env, t, NULL);
4127 
4128 	for (i = 0; i < nr_enums; i++) {
4129 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4130 			btf_verifier_log(env, "\tInvalid name_offset:%u",
4131 					 enums[i].name_off);
4132 			return -EINVAL;
4133 		}
4134 
4135 		/* enum member must have a valid name */
4136 		if (!enums[i].name_off ||
4137 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4138 			btf_verifier_log_type(env, t, "Invalid name");
4139 			return -EINVAL;
4140 		}
4141 
4142 		if (env->log.level == BPF_LOG_KERNEL)
4143 			continue;
4144 
4145 		fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
4146 		btf_verifier_log(env, fmt_str,
4147 				 __btf_name_by_offset(btf, enums[i].name_off),
4148 				 btf_enum64_value(enums + i));
4149 	}
4150 
4151 	return meta_needed;
4152 }
4153 
4154 static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
4155 			    u32 type_id, void *data, u8 bits_offset,
4156 			    struct btf_show *show)
4157 {
4158 	const struct btf_enum64 *enums = btf_type_enum64(t);
4159 	u32 i, nr_enums = btf_type_vlen(t);
4160 	void *safe_data;
4161 	s64 v;
4162 
4163 	safe_data = btf_show_start_type(show, t, type_id, data);
4164 	if (!safe_data)
4165 		return;
4166 
4167 	v = *(u64 *)safe_data;
4168 
4169 	for (i = 0; i < nr_enums; i++) {
4170 		if (v != btf_enum64_value(enums + i))
4171 			continue;
4172 
4173 		btf_show_type_value(show, "%s",
4174 				    __btf_name_by_offset(btf,
4175 							 enums[i].name_off));
4176 
4177 		btf_show_end_type(show);
4178 		return;
4179 	}
4180 
4181 	if (btf_type_kflag(t))
4182 		btf_show_type_value(show, "%lld", v);
4183 	else
4184 		btf_show_type_value(show, "%llu", v);
4185 	btf_show_end_type(show);
4186 }
4187 
4188 static struct btf_kind_operations enum64_ops = {
4189 	.check_meta = btf_enum64_check_meta,
4190 	.resolve = btf_df_resolve,
4191 	.check_member = btf_enum_check_member,
4192 	.check_kflag_member = btf_enum_check_kflag_member,
4193 	.log_details = btf_enum_log,
4194 	.show = btf_enum64_show,
4195 };
4196 
4197 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
4198 				     const struct btf_type *t,
4199 				     u32 meta_left)
4200 {
4201 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
4202 
4203 	if (meta_left < meta_needed) {
4204 		btf_verifier_log_basic(env, t,
4205 				       "meta_left:%u meta_needed:%u",
4206 				       meta_left, meta_needed);
4207 		return -EINVAL;
4208 	}
4209 
4210 	if (t->name_off) {
4211 		btf_verifier_log_type(env, t, "Invalid name");
4212 		return -EINVAL;
4213 	}
4214 
4215 	if (btf_type_kflag(t)) {
4216 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4217 		return -EINVAL;
4218 	}
4219 
4220 	btf_verifier_log_type(env, t, NULL);
4221 
4222 	return meta_needed;
4223 }
4224 
4225 static void btf_func_proto_log(struct btf_verifier_env *env,
4226 			       const struct btf_type *t)
4227 {
4228 	const struct btf_param *args = (const struct btf_param *)(t + 1);
4229 	u16 nr_args = btf_type_vlen(t), i;
4230 
4231 	btf_verifier_log(env, "return=%u args=(", t->type);
4232 	if (!nr_args) {
4233 		btf_verifier_log(env, "void");
4234 		goto done;
4235 	}
4236 
4237 	if (nr_args == 1 && !args[0].type) {
4238 		/* Only one vararg */
4239 		btf_verifier_log(env, "vararg");
4240 		goto done;
4241 	}
4242 
4243 	btf_verifier_log(env, "%u %s", args[0].type,
4244 			 __btf_name_by_offset(env->btf,
4245 					      args[0].name_off));
4246 	for (i = 1; i < nr_args - 1; i++)
4247 		btf_verifier_log(env, ", %u %s", args[i].type,
4248 				 __btf_name_by_offset(env->btf,
4249 						      args[i].name_off));
4250 
4251 	if (nr_args > 1) {
4252 		const struct btf_param *last_arg = &args[nr_args - 1];
4253 
4254 		if (last_arg->type)
4255 			btf_verifier_log(env, ", %u %s", last_arg->type,
4256 					 __btf_name_by_offset(env->btf,
4257 							      last_arg->name_off));
4258 		else
4259 			btf_verifier_log(env, ", vararg");
4260 	}
4261 
4262 done:
4263 	btf_verifier_log(env, ")");
4264 }
4265 
4266 static struct btf_kind_operations func_proto_ops = {
4267 	.check_meta = btf_func_proto_check_meta,
4268 	.resolve = btf_df_resolve,
4269 	/*
4270 	 * BTF_KIND_FUNC_PROTO cannot be directly referred by
4271 	 * a struct's member.
4272 	 *
4273 	 * It should be a function pointer instead.
4274 	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4275 	 *
4276 	 * Hence, there is no btf_func_check_member().
4277 	 */
4278 	.check_member = btf_df_check_member,
4279 	.check_kflag_member = btf_df_check_kflag_member,
4280 	.log_details = btf_func_proto_log,
4281 	.show = btf_df_show,
4282 };
4283 
4284 static s32 btf_func_check_meta(struct btf_verifier_env *env,
4285 			       const struct btf_type *t,
4286 			       u32 meta_left)
4287 {
4288 	if (!t->name_off ||
4289 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4290 		btf_verifier_log_type(env, t, "Invalid name");
4291 		return -EINVAL;
4292 	}
4293 
4294 	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
4295 		btf_verifier_log_type(env, t, "Invalid func linkage");
4296 		return -EINVAL;
4297 	}
4298 
4299 	if (btf_type_kflag(t)) {
4300 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4301 		return -EINVAL;
4302 	}
4303 
4304 	btf_verifier_log_type(env, t, NULL);
4305 
4306 	return 0;
4307 }
4308 
4309 static int btf_func_resolve(struct btf_verifier_env *env,
4310 			    const struct resolve_vertex *v)
4311 {
4312 	const struct btf_type *t = v->t;
4313 	u32 next_type_id = t->type;
4314 	int err;
4315 
4316 	err = btf_func_check(env, t);
4317 	if (err)
4318 		return err;
4319 
4320 	env_stack_pop_resolved(env, next_type_id, 0);
4321 	return 0;
4322 }
4323 
4324 static struct btf_kind_operations func_ops = {
4325 	.check_meta = btf_func_check_meta,
4326 	.resolve = btf_func_resolve,
4327 	.check_member = btf_df_check_member,
4328 	.check_kflag_member = btf_df_check_kflag_member,
4329 	.log_details = btf_ref_type_log,
4330 	.show = btf_df_show,
4331 };
4332 
4333 static s32 btf_var_check_meta(struct btf_verifier_env *env,
4334 			      const struct btf_type *t,
4335 			      u32 meta_left)
4336 {
4337 	const struct btf_var *var;
4338 	u32 meta_needed = sizeof(*var);
4339 
4340 	if (meta_left < meta_needed) {
4341 		btf_verifier_log_basic(env, t,
4342 				       "meta_left:%u meta_needed:%u",
4343 				       meta_left, meta_needed);
4344 		return -EINVAL;
4345 	}
4346 
4347 	if (btf_type_vlen(t)) {
4348 		btf_verifier_log_type(env, t, "vlen != 0");
4349 		return -EINVAL;
4350 	}
4351 
4352 	if (btf_type_kflag(t)) {
4353 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4354 		return -EINVAL;
4355 	}
4356 
4357 	if (!t->name_off ||
4358 	    !__btf_name_valid(env->btf, t->name_off, true)) {
4359 		btf_verifier_log_type(env, t, "Invalid name");
4360 		return -EINVAL;
4361 	}
4362 
4363 	/* A var cannot be in type void */
4364 	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4365 		btf_verifier_log_type(env, t, "Invalid type_id");
4366 		return -EINVAL;
4367 	}
4368 
4369 	var = btf_type_var(t);
4370 	if (var->linkage != BTF_VAR_STATIC &&
4371 	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4372 		btf_verifier_log_type(env, t, "Linkage not supported");
4373 		return -EINVAL;
4374 	}
4375 
4376 	btf_verifier_log_type(env, t, NULL);
4377 
4378 	return meta_needed;
4379 }
4380 
4381 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4382 {
4383 	const struct btf_var *var = btf_type_var(t);
4384 
4385 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4386 }
4387 
4388 static const struct btf_kind_operations var_ops = {
4389 	.check_meta		= btf_var_check_meta,
4390 	.resolve		= btf_var_resolve,
4391 	.check_member		= btf_df_check_member,
4392 	.check_kflag_member	= btf_df_check_kflag_member,
4393 	.log_details		= btf_var_log,
4394 	.show			= btf_var_show,
4395 };
4396 
4397 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
4398 				  const struct btf_type *t,
4399 				  u32 meta_left)
4400 {
4401 	const struct btf_var_secinfo *vsi;
4402 	u64 last_vsi_end_off = 0, sum = 0;
4403 	u32 i, meta_needed;
4404 
4405 	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
4406 	if (meta_left < meta_needed) {
4407 		btf_verifier_log_basic(env, t,
4408 				       "meta_left:%u meta_needed:%u",
4409 				       meta_left, meta_needed);
4410 		return -EINVAL;
4411 	}
4412 
4413 	if (!t->size) {
4414 		btf_verifier_log_type(env, t, "size == 0");
4415 		return -EINVAL;
4416 	}
4417 
4418 	if (btf_type_kflag(t)) {
4419 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4420 		return -EINVAL;
4421 	}
4422 
4423 	if (!t->name_off ||
4424 	    !btf_name_valid_section(env->btf, t->name_off)) {
4425 		btf_verifier_log_type(env, t, "Invalid name");
4426 		return -EINVAL;
4427 	}
4428 
4429 	btf_verifier_log_type(env, t, NULL);
4430 
4431 	for_each_vsi(i, t, vsi) {
4432 		/* A var cannot be in type void */
4433 		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4434 			btf_verifier_log_vsi(env, t, vsi,
4435 					     "Invalid type_id");
4436 			return -EINVAL;
4437 		}
4438 
4439 		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4440 			btf_verifier_log_vsi(env, t, vsi,
4441 					     "Invalid offset");
4442 			return -EINVAL;
4443 		}
4444 
4445 		if (!vsi->size || vsi->size > t->size) {
4446 			btf_verifier_log_vsi(env, t, vsi,
4447 					     "Invalid size");
4448 			return -EINVAL;
4449 		}
4450 
4451 		last_vsi_end_off = vsi->offset + vsi->size;
4452 		if (last_vsi_end_off > t->size) {
4453 			btf_verifier_log_vsi(env, t, vsi,
4454 					     "Invalid offset+size");
4455 			return -EINVAL;
4456 		}
4457 
4458 		btf_verifier_log_vsi(env, t, vsi, NULL);
4459 		sum += vsi->size;
4460 	}
4461 
4462 	if (t->size < sum) {
4463 		btf_verifier_log_type(env, t, "Invalid btf_info size");
4464 		return -EINVAL;
4465 	}
4466 
4467 	return meta_needed;
4468 }
4469 
4470 static int btf_datasec_resolve(struct btf_verifier_env *env,
4471 			       const struct resolve_vertex *v)
4472 {
4473 	const struct btf_var_secinfo *vsi;
4474 	struct btf *btf = env->btf;
4475 	u16 i;
4476 
4477 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
4478 		u32 var_type_id = vsi->type, type_id, type_size = 0;
4479 		const struct btf_type *var_type = btf_type_by_id(env->btf,
4480 								 var_type_id);
4481 		if (!var_type || !btf_type_is_var(var_type)) {
4482 			btf_verifier_log_vsi(env, v->t, vsi,
4483 					     "Not a VAR kind member");
4484 			return -EINVAL;
4485 		}
4486 
4487 		if (!env_type_is_resolve_sink(env, var_type) &&
4488 		    !env_type_is_resolved(env, var_type_id)) {
4489 			env_stack_set_next_member(env, i + 1);
4490 			return env_stack_push(env, var_type, var_type_id);
4491 		}
4492 
4493 		type_id = var_type->type;
4494 		if (!btf_type_id_size(btf, &type_id, &type_size)) {
4495 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4496 			return -EINVAL;
4497 		}
4498 
4499 		if (vsi->size < type_size) {
4500 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4501 			return -EINVAL;
4502 		}
4503 	}
4504 
4505 	env_stack_pop_resolved(env, 0, 0);
4506 	return 0;
4507 }
4508 
4509 static void btf_datasec_log(struct btf_verifier_env *env,
4510 			    const struct btf_type *t)
4511 {
4512 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4513 }
4514 
4515 static void btf_datasec_show(const struct btf *btf,
4516 			     const struct btf_type *t, u32 type_id,
4517 			     void *data, u8 bits_offset,
4518 			     struct btf_show *show)
4519 {
4520 	const struct btf_var_secinfo *vsi;
4521 	const struct btf_type *var;
4522 	u32 i;
4523 
4524 	if (!btf_show_start_type(show, t, type_id, data))
4525 		return;
4526 
4527 	btf_show_type_value(show, "section (\"%s\") = {",
4528 			    __btf_name_by_offset(btf, t->name_off));
4529 	for_each_vsi(i, t, vsi) {
4530 		var = btf_type_by_id(btf, vsi->type);
4531 		if (i)
4532 			btf_show(show, ",");
4533 		btf_type_ops(var)->show(btf, var, vsi->type,
4534 					data + vsi->offset, bits_offset, show);
4535 	}
4536 	btf_show_end_type(show);
4537 }
4538 
4539 static const struct btf_kind_operations datasec_ops = {
4540 	.check_meta		= btf_datasec_check_meta,
4541 	.resolve		= btf_datasec_resolve,
4542 	.check_member		= btf_df_check_member,
4543 	.check_kflag_member	= btf_df_check_kflag_member,
4544 	.log_details		= btf_datasec_log,
4545 	.show			= btf_datasec_show,
4546 };
4547 
4548 static s32 btf_float_check_meta(struct btf_verifier_env *env,
4549 				const struct btf_type *t,
4550 				u32 meta_left)
4551 {
4552 	if (btf_type_vlen(t)) {
4553 		btf_verifier_log_type(env, t, "vlen != 0");
4554 		return -EINVAL;
4555 	}
4556 
4557 	if (btf_type_kflag(t)) {
4558 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4559 		return -EINVAL;
4560 	}
4561 
4562 	if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4563 	    t->size != 16) {
4564 		btf_verifier_log_type(env, t, "Invalid type_size");
4565 		return -EINVAL;
4566 	}
4567 
4568 	btf_verifier_log_type(env, t, NULL);
4569 
4570 	return 0;
4571 }
4572 
4573 static int btf_float_check_member(struct btf_verifier_env *env,
4574 				  const struct btf_type *struct_type,
4575 				  const struct btf_member *member,
4576 				  const struct btf_type *member_type)
4577 {
4578 	u64 start_offset_bytes;
4579 	u64 end_offset_bytes;
4580 	u64 misalign_bits;
4581 	u64 align_bytes;
4582 	u64 align_bits;
4583 
4584 	/* Different architectures have different alignment requirements, so
4585 	 * here we check only for the reasonable minimum. This way we ensure
4586 	 * that types after CO-RE can pass the kernel BTF verifier.
4587 	 */
4588 	align_bytes = min_t(u64, sizeof(void *), member_type->size);
4589 	align_bits = align_bytes * BITS_PER_BYTE;
4590 	div64_u64_rem(member->offset, align_bits, &misalign_bits);
4591 	if (misalign_bits) {
4592 		btf_verifier_log_member(env, struct_type, member,
4593 					"Member is not properly aligned");
4594 		return -EINVAL;
4595 	}
4596 
4597 	start_offset_bytes = member->offset / BITS_PER_BYTE;
4598 	end_offset_bytes = start_offset_bytes + member_type->size;
4599 	if (end_offset_bytes > struct_type->size) {
4600 		btf_verifier_log_member(env, struct_type, member,
4601 					"Member exceeds struct_size");
4602 		return -EINVAL;
4603 	}
4604 
4605 	return 0;
4606 }
4607 
4608 static void btf_float_log(struct btf_verifier_env *env,
4609 			  const struct btf_type *t)
4610 {
4611 	btf_verifier_log(env, "size=%u", t->size);
4612 }
4613 
4614 static const struct btf_kind_operations float_ops = {
4615 	.check_meta = btf_float_check_meta,
4616 	.resolve = btf_df_resolve,
4617 	.check_member = btf_float_check_member,
4618 	.check_kflag_member = btf_generic_check_kflag_member,
4619 	.log_details = btf_float_log,
4620 	.show = btf_df_show,
4621 };
4622 
4623 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
4624 			      const struct btf_type *t,
4625 			      u32 meta_left)
4626 {
4627 	const struct btf_decl_tag *tag;
4628 	u32 meta_needed = sizeof(*tag);
4629 	s32 component_idx;
4630 	const char *value;
4631 
4632 	if (meta_left < meta_needed) {
4633 		btf_verifier_log_basic(env, t,
4634 				       "meta_left:%u meta_needed:%u",
4635 				       meta_left, meta_needed);
4636 		return -EINVAL;
4637 	}
4638 
4639 	value = btf_name_by_offset(env->btf, t->name_off);
4640 	if (!value || !value[0]) {
4641 		btf_verifier_log_type(env, t, "Invalid value");
4642 		return -EINVAL;
4643 	}
4644 
4645 	if (btf_type_vlen(t)) {
4646 		btf_verifier_log_type(env, t, "vlen != 0");
4647 		return -EINVAL;
4648 	}
4649 
4650 	if (btf_type_kflag(t)) {
4651 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4652 		return -EINVAL;
4653 	}
4654 
4655 	component_idx = btf_type_decl_tag(t)->component_idx;
4656 	if (component_idx < -1) {
4657 		btf_verifier_log_type(env, t, "Invalid component_idx");
4658 		return -EINVAL;
4659 	}
4660 
4661 	btf_verifier_log_type(env, t, NULL);
4662 
4663 	return meta_needed;
4664 }
4665 
4666 static int btf_decl_tag_resolve(struct btf_verifier_env *env,
4667 			   const struct resolve_vertex *v)
4668 {
4669 	const struct btf_type *next_type;
4670 	const struct btf_type *t = v->t;
4671 	u32 next_type_id = t->type;
4672 	struct btf *btf = env->btf;
4673 	s32 component_idx;
4674 	u32 vlen;
4675 
4676 	next_type = btf_type_by_id(btf, next_type_id);
4677 	if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
4678 		btf_verifier_log_type(env, v->t, "Invalid type_id");
4679 		return -EINVAL;
4680 	}
4681 
4682 	if (!env_type_is_resolve_sink(env, next_type) &&
4683 	    !env_type_is_resolved(env, next_type_id))
4684 		return env_stack_push(env, next_type, next_type_id);
4685 
4686 	component_idx = btf_type_decl_tag(t)->component_idx;
4687 	if (component_idx != -1) {
4688 		if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
4689 			btf_verifier_log_type(env, v->t, "Invalid component_idx");
4690 			return -EINVAL;
4691 		}
4692 
4693 		if (btf_type_is_struct(next_type)) {
4694 			vlen = btf_type_vlen(next_type);
4695 		} else {
4696 			/* next_type should be a function */
4697 			next_type = btf_type_by_id(btf, next_type->type);
4698 			vlen = btf_type_vlen(next_type);
4699 		}
4700 
4701 		if ((u32)component_idx >= vlen) {
4702 			btf_verifier_log_type(env, v->t, "Invalid component_idx");
4703 			return -EINVAL;
4704 		}
4705 	}
4706 
4707 	env_stack_pop_resolved(env, next_type_id, 0);
4708 
4709 	return 0;
4710 }
4711 
4712 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
4713 {
4714 	btf_verifier_log(env, "type=%u component_idx=%d", t->type,
4715 			 btf_type_decl_tag(t)->component_idx);
4716 }
4717 
4718 static const struct btf_kind_operations decl_tag_ops = {
4719 	.check_meta = btf_decl_tag_check_meta,
4720 	.resolve = btf_decl_tag_resolve,
4721 	.check_member = btf_df_check_member,
4722 	.check_kflag_member = btf_df_check_kflag_member,
4723 	.log_details = btf_decl_tag_log,
4724 	.show = btf_df_show,
4725 };
4726 
4727 static int btf_func_proto_check(struct btf_verifier_env *env,
4728 				const struct btf_type *t)
4729 {
4730 	const struct btf_type *ret_type;
4731 	const struct btf_param *args;
4732 	const struct btf *btf;
4733 	u16 nr_args, i;
4734 	int err;
4735 
4736 	btf = env->btf;
4737 	args = (const struct btf_param *)(t + 1);
4738 	nr_args = btf_type_vlen(t);
4739 
4740 	/* Check func return type which could be "void" (t->type == 0) */
4741 	if (t->type) {
4742 		u32 ret_type_id = t->type;
4743 
4744 		ret_type = btf_type_by_id(btf, ret_type_id);
4745 		if (!ret_type) {
4746 			btf_verifier_log_type(env, t, "Invalid return type");
4747 			return -EINVAL;
4748 		}
4749 
4750 		if (btf_type_is_resolve_source_only(ret_type)) {
4751 			btf_verifier_log_type(env, t, "Invalid return type");
4752 			return -EINVAL;
4753 		}
4754 
4755 		if (btf_type_needs_resolve(ret_type) &&
4756 		    !env_type_is_resolved(env, ret_type_id)) {
4757 			err = btf_resolve(env, ret_type, ret_type_id);
4758 			if (err)
4759 				return err;
4760 		}
4761 
4762 		/* Ensure the return type is a type that has a size */
4763 		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
4764 			btf_verifier_log_type(env, t, "Invalid return type");
4765 			return -EINVAL;
4766 		}
4767 	}
4768 
4769 	if (!nr_args)
4770 		return 0;
4771 
4772 	/* Last func arg type_id could be 0 if it is a vararg */
4773 	if (!args[nr_args - 1].type) {
4774 		if (args[nr_args - 1].name_off) {
4775 			btf_verifier_log_type(env, t, "Invalid arg#%u",
4776 					      nr_args);
4777 			return -EINVAL;
4778 		}
4779 		nr_args--;
4780 	}
4781 
4782 	for (i = 0; i < nr_args; i++) {
4783 		const struct btf_type *arg_type;
4784 		u32 arg_type_id;
4785 
4786 		arg_type_id = args[i].type;
4787 		arg_type = btf_type_by_id(btf, arg_type_id);
4788 		if (!arg_type) {
4789 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4790 			return -EINVAL;
4791 		}
4792 
4793 		if (btf_type_is_resolve_source_only(arg_type)) {
4794 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4795 			return -EINVAL;
4796 		}
4797 
4798 		if (args[i].name_off &&
4799 		    (!btf_name_offset_valid(btf, args[i].name_off) ||
4800 		     !btf_name_valid_identifier(btf, args[i].name_off))) {
4801 			btf_verifier_log_type(env, t,
4802 					      "Invalid arg#%u", i + 1);
4803 			return -EINVAL;
4804 		}
4805 
4806 		if (btf_type_needs_resolve(arg_type) &&
4807 		    !env_type_is_resolved(env, arg_type_id)) {
4808 			err = btf_resolve(env, arg_type, arg_type_id);
4809 			if (err)
4810 				return err;
4811 		}
4812 
4813 		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
4814 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4815 			return -EINVAL;
4816 		}
4817 	}
4818 
4819 	return 0;
4820 }
4821 
4822 static int btf_func_check(struct btf_verifier_env *env,
4823 			  const struct btf_type *t)
4824 {
4825 	const struct btf_type *proto_type;
4826 	const struct btf_param *args;
4827 	const struct btf *btf;
4828 	u16 nr_args, i;
4829 
4830 	btf = env->btf;
4831 	proto_type = btf_type_by_id(btf, t->type);
4832 
4833 	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
4834 		btf_verifier_log_type(env, t, "Invalid type_id");
4835 		return -EINVAL;
4836 	}
4837 
4838 	args = (const struct btf_param *)(proto_type + 1);
4839 	nr_args = btf_type_vlen(proto_type);
4840 	for (i = 0; i < nr_args; i++) {
4841 		if (!args[i].name_off && args[i].type) {
4842 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
4843 			return -EINVAL;
4844 		}
4845 	}
4846 
4847 	return 0;
4848 }
4849 
4850 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
4851 	[BTF_KIND_INT] = &int_ops,
4852 	[BTF_KIND_PTR] = &ptr_ops,
4853 	[BTF_KIND_ARRAY] = &array_ops,
4854 	[BTF_KIND_STRUCT] = &struct_ops,
4855 	[BTF_KIND_UNION] = &struct_ops,
4856 	[BTF_KIND_ENUM] = &enum_ops,
4857 	[BTF_KIND_FWD] = &fwd_ops,
4858 	[BTF_KIND_TYPEDEF] = &modifier_ops,
4859 	[BTF_KIND_VOLATILE] = &modifier_ops,
4860 	[BTF_KIND_CONST] = &modifier_ops,
4861 	[BTF_KIND_RESTRICT] = &modifier_ops,
4862 	[BTF_KIND_FUNC] = &func_ops,
4863 	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
4864 	[BTF_KIND_VAR] = &var_ops,
4865 	[BTF_KIND_DATASEC] = &datasec_ops,
4866 	[BTF_KIND_FLOAT] = &float_ops,
4867 	[BTF_KIND_DECL_TAG] = &decl_tag_ops,
4868 	[BTF_KIND_TYPE_TAG] = &modifier_ops,
4869 	[BTF_KIND_ENUM64] = &enum64_ops,
4870 };
4871 
4872 static s32 btf_check_meta(struct btf_verifier_env *env,
4873 			  const struct btf_type *t,
4874 			  u32 meta_left)
4875 {
4876 	u32 saved_meta_left = meta_left;
4877 	s32 var_meta_size;
4878 
4879 	if (meta_left < sizeof(*t)) {
4880 		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
4881 				 env->log_type_id, meta_left, sizeof(*t));
4882 		return -EINVAL;
4883 	}
4884 	meta_left -= sizeof(*t);
4885 
4886 	if (t->info & ~BTF_INFO_MASK) {
4887 		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
4888 				 env->log_type_id, t->info);
4889 		return -EINVAL;
4890 	}
4891 
4892 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
4893 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
4894 		btf_verifier_log(env, "[%u] Invalid kind:%u",
4895 				 env->log_type_id, BTF_INFO_KIND(t->info));
4896 		return -EINVAL;
4897 	}
4898 
4899 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
4900 		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
4901 				 env->log_type_id, t->name_off);
4902 		return -EINVAL;
4903 	}
4904 
4905 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
4906 	if (var_meta_size < 0)
4907 		return var_meta_size;
4908 
4909 	meta_left -= var_meta_size;
4910 
4911 	return saved_meta_left - meta_left;
4912 }
4913 
4914 static int btf_check_all_metas(struct btf_verifier_env *env)
4915 {
4916 	struct btf *btf = env->btf;
4917 	struct btf_header *hdr;
4918 	void *cur, *end;
4919 
4920 	hdr = &btf->hdr;
4921 	cur = btf->nohdr_data + hdr->type_off;
4922 	end = cur + hdr->type_len;
4923 
4924 	env->log_type_id = btf->base_btf ? btf->start_id : 1;
4925 	while (cur < end) {
4926 		struct btf_type *t = cur;
4927 		s32 meta_size;
4928 
4929 		meta_size = btf_check_meta(env, t, end - cur);
4930 		if (meta_size < 0)
4931 			return meta_size;
4932 
4933 		btf_add_type(env, t);
4934 		cur += meta_size;
4935 		env->log_type_id++;
4936 	}
4937 
4938 	return 0;
4939 }
4940 
4941 static bool btf_resolve_valid(struct btf_verifier_env *env,
4942 			      const struct btf_type *t,
4943 			      u32 type_id)
4944 {
4945 	struct btf *btf = env->btf;
4946 
4947 	if (!env_type_is_resolved(env, type_id))
4948 		return false;
4949 
4950 	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
4951 		return !btf_resolved_type_id(btf, type_id) &&
4952 		       !btf_resolved_type_size(btf, type_id);
4953 
4954 	if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
4955 		return btf_resolved_type_id(btf, type_id) &&
4956 		       !btf_resolved_type_size(btf, type_id);
4957 
4958 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
4959 	    btf_type_is_var(t)) {
4960 		t = btf_type_id_resolve(btf, &type_id);
4961 		return t &&
4962 		       !btf_type_is_modifier(t) &&
4963 		       !btf_type_is_var(t) &&
4964 		       !btf_type_is_datasec(t);
4965 	}
4966 
4967 	if (btf_type_is_array(t)) {
4968 		const struct btf_array *array = btf_type_array(t);
4969 		const struct btf_type *elem_type;
4970 		u32 elem_type_id = array->type;
4971 		u32 elem_size;
4972 
4973 		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
4974 		return elem_type && !btf_type_is_modifier(elem_type) &&
4975 			(array->nelems * elem_size ==
4976 			 btf_resolved_type_size(btf, type_id));
4977 	}
4978 
4979 	return false;
4980 }
4981 
4982 static int btf_resolve(struct btf_verifier_env *env,
4983 		       const struct btf_type *t, u32 type_id)
4984 {
4985 	u32 save_log_type_id = env->log_type_id;
4986 	const struct resolve_vertex *v;
4987 	int err = 0;
4988 
4989 	env->resolve_mode = RESOLVE_TBD;
4990 	env_stack_push(env, t, type_id);
4991 	while (!err && (v = env_stack_peak(env))) {
4992 		env->log_type_id = v->type_id;
4993 		err = btf_type_ops(v->t)->resolve(env, v);
4994 	}
4995 
4996 	env->log_type_id = type_id;
4997 	if (err == -E2BIG) {
4998 		btf_verifier_log_type(env, t,
4999 				      "Exceeded max resolving depth:%u",
5000 				      MAX_RESOLVE_DEPTH);
5001 	} else if (err == -EEXIST) {
5002 		btf_verifier_log_type(env, t, "Loop detected");
5003 	}
5004 
5005 	/* Final sanity check */
5006 	if (!err && !btf_resolve_valid(env, t, type_id)) {
5007 		btf_verifier_log_type(env, t, "Invalid resolve state");
5008 		err = -EINVAL;
5009 	}
5010 
5011 	env->log_type_id = save_log_type_id;
5012 	return err;
5013 }
5014 
5015 static int btf_check_all_types(struct btf_verifier_env *env)
5016 {
5017 	struct btf *btf = env->btf;
5018 	const struct btf_type *t;
5019 	u32 type_id, i;
5020 	int err;
5021 
5022 	err = env_resolve_init(env);
5023 	if (err)
5024 		return err;
5025 
5026 	env->phase++;
5027 	for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5028 		type_id = btf->start_id + i;
5029 		t = btf_type_by_id(btf, type_id);
5030 
5031 		env->log_type_id = type_id;
5032 		if (btf_type_needs_resolve(t) &&
5033 		    !env_type_is_resolved(env, type_id)) {
5034 			err = btf_resolve(env, t, type_id);
5035 			if (err)
5036 				return err;
5037 		}
5038 
5039 		if (btf_type_is_func_proto(t)) {
5040 			err = btf_func_proto_check(env, t);
5041 			if (err)
5042 				return err;
5043 		}
5044 	}
5045 
5046 	return 0;
5047 }
5048 
5049 static int btf_parse_type_sec(struct btf_verifier_env *env)
5050 {
5051 	const struct btf_header *hdr = &env->btf->hdr;
5052 	int err;
5053 
5054 	/* Type section must align to 4 bytes */
5055 	if (hdr->type_off & (sizeof(u32) - 1)) {
5056 		btf_verifier_log(env, "Unaligned type_off");
5057 		return -EINVAL;
5058 	}
5059 
5060 	if (!env->btf->base_btf && !hdr->type_len) {
5061 		btf_verifier_log(env, "No type found");
5062 		return -EINVAL;
5063 	}
5064 
5065 	err = btf_check_all_metas(env);
5066 	if (err)
5067 		return err;
5068 
5069 	return btf_check_all_types(env);
5070 }
5071 
5072 static int btf_parse_str_sec(struct btf_verifier_env *env)
5073 {
5074 	const struct btf_header *hdr;
5075 	struct btf *btf = env->btf;
5076 	const char *start, *end;
5077 
5078 	hdr = &btf->hdr;
5079 	start = btf->nohdr_data + hdr->str_off;
5080 	end = start + hdr->str_len;
5081 
5082 	if (end != btf->data + btf->data_size) {
5083 		btf_verifier_log(env, "String section is not at the end");
5084 		return -EINVAL;
5085 	}
5086 
5087 	btf->strings = start;
5088 
5089 	if (btf->base_btf && !hdr->str_len)
5090 		return 0;
5091 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5092 		btf_verifier_log(env, "Invalid string section");
5093 		return -EINVAL;
5094 	}
5095 	if (!btf->base_btf && start[0]) {
5096 		btf_verifier_log(env, "Invalid string section");
5097 		return -EINVAL;
5098 	}
5099 
5100 	return 0;
5101 }
5102 
5103 static const size_t btf_sec_info_offset[] = {
5104 	offsetof(struct btf_header, type_off),
5105 	offsetof(struct btf_header, str_off),
5106 };
5107 
5108 static int btf_sec_info_cmp(const void *a, const void *b)
5109 {
5110 	const struct btf_sec_info *x = a;
5111 	const struct btf_sec_info *y = b;
5112 
5113 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5114 }
5115 
5116 static int btf_check_sec_info(struct btf_verifier_env *env,
5117 			      u32 btf_data_size)
5118 {
5119 	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
5120 	u32 total, expected_total, i;
5121 	const struct btf_header *hdr;
5122 	const struct btf *btf;
5123 
5124 	btf = env->btf;
5125 	hdr = &btf->hdr;
5126 
5127 	/* Populate the secs from hdr */
5128 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
5129 		secs[i] = *(struct btf_sec_info *)((void *)hdr +
5130 						   btf_sec_info_offset[i]);
5131 
5132 	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
5133 	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5134 
5135 	/* Check for gaps and overlap among sections */
5136 	total = 0;
5137 	expected_total = btf_data_size - hdr->hdr_len;
5138 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
5139 		if (expected_total < secs[i].off) {
5140 			btf_verifier_log(env, "Invalid section offset");
5141 			return -EINVAL;
5142 		}
5143 		if (total < secs[i].off) {
5144 			/* gap */
5145 			btf_verifier_log(env, "Unsupported section found");
5146 			return -EINVAL;
5147 		}
5148 		if (total > secs[i].off) {
5149 			btf_verifier_log(env, "Section overlap found");
5150 			return -EINVAL;
5151 		}
5152 		if (expected_total - total < secs[i].len) {
5153 			btf_verifier_log(env,
5154 					 "Total section length too long");
5155 			return -EINVAL;
5156 		}
5157 		total += secs[i].len;
5158 	}
5159 
5160 	/* There is data other than hdr and known sections */
5161 	if (expected_total != total) {
5162 		btf_verifier_log(env, "Unsupported section found");
5163 		return -EINVAL;
5164 	}
5165 
5166 	return 0;
5167 }
5168 
5169 static int btf_parse_hdr(struct btf_verifier_env *env)
5170 {
5171 	u32 hdr_len, hdr_copy, btf_data_size;
5172 	const struct btf_header *hdr;
5173 	struct btf *btf;
5174 
5175 	btf = env->btf;
5176 	btf_data_size = btf->data_size;
5177 
5178 	if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
5179 		btf_verifier_log(env, "hdr_len not found");
5180 		return -EINVAL;
5181 	}
5182 
5183 	hdr = btf->data;
5184 	hdr_len = hdr->hdr_len;
5185 	if (btf_data_size < hdr_len) {
5186 		btf_verifier_log(env, "btf_header not found");
5187 		return -EINVAL;
5188 	}
5189 
5190 	/* Ensure the unsupported header fields are zero */
5191 	if (hdr_len > sizeof(btf->hdr)) {
5192 		u8 *expected_zero = btf->data + sizeof(btf->hdr);
5193 		u8 *end = btf->data + hdr_len;
5194 
5195 		for (; expected_zero < end; expected_zero++) {
5196 			if (*expected_zero) {
5197 				btf_verifier_log(env, "Unsupported btf_header");
5198 				return -E2BIG;
5199 			}
5200 		}
5201 	}
5202 
5203 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5204 	memcpy(&btf->hdr, btf->data, hdr_copy);
5205 
5206 	hdr = &btf->hdr;
5207 
5208 	btf_verifier_log_hdr(env, btf_data_size);
5209 
5210 	if (hdr->magic != BTF_MAGIC) {
5211 		btf_verifier_log(env, "Invalid magic");
5212 		return -EINVAL;
5213 	}
5214 
5215 	if (hdr->version != BTF_VERSION) {
5216 		btf_verifier_log(env, "Unsupported version");
5217 		return -ENOTSUPP;
5218 	}
5219 
5220 	if (hdr->flags) {
5221 		btf_verifier_log(env, "Unsupported flags");
5222 		return -ENOTSUPP;
5223 	}
5224 
5225 	if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5226 		btf_verifier_log(env, "No data");
5227 		return -EINVAL;
5228 	}
5229 
5230 	return btf_check_sec_info(env, btf_data_size);
5231 }
5232 
5233 static const char *alloc_obj_fields[] = {
5234 	"bpf_spin_lock",
5235 	"bpf_list_head",
5236 	"bpf_list_node",
5237 };
5238 
5239 static struct btf_struct_metas *
5240 btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
5241 {
5242 	union {
5243 		struct btf_id_set set;
5244 		struct {
5245 			u32 _cnt;
5246 			u32 _ids[ARRAY_SIZE(alloc_obj_fields)];
5247 		} _arr;
5248 	} aof;
5249 	struct btf_struct_metas *tab = NULL;
5250 	int i, n, id, ret;
5251 
5252 	BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
5253 	BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
5254 
5255 	memset(&aof, 0, sizeof(aof));
5256 	for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
5257 		/* Try to find whether this special type exists in user BTF, and
5258 		 * if so remember its ID so we can easily find it among members
5259 		 * of structs that we iterate in the next loop.
5260 		 */
5261 		id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5262 		if (id < 0)
5263 			continue;
5264 		aof.set.ids[aof.set.cnt++] = id;
5265 	}
5266 
5267 	if (!aof.set.cnt)
5268 		return NULL;
5269 	sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL);
5270 
5271 	n = btf_nr_types(btf);
5272 	for (i = 1; i < n; i++) {
5273 		struct btf_struct_metas *new_tab;
5274 		const struct btf_member *member;
5275 		struct btf_field_offs *foffs;
5276 		struct btf_struct_meta *type;
5277 		struct btf_record *record;
5278 		const struct btf_type *t;
5279 		int j, tab_cnt;
5280 
5281 		t = btf_type_by_id(btf, i);
5282 		if (!t) {
5283 			ret = -EINVAL;
5284 			goto free;
5285 		}
5286 		if (!__btf_type_is_struct(t))
5287 			continue;
5288 
5289 		cond_resched();
5290 
5291 		for_each_member(j, t, member) {
5292 			if (btf_id_set_contains(&aof.set, member->type))
5293 				goto parse;
5294 		}
5295 		continue;
5296 	parse:
5297 		tab_cnt = tab ? tab->cnt : 0;
5298 		new_tab = krealloc(tab, offsetof(struct btf_struct_metas, types[tab_cnt + 1]),
5299 				   GFP_KERNEL | __GFP_NOWARN);
5300 		if (!new_tab) {
5301 			ret = -ENOMEM;
5302 			goto free;
5303 		}
5304 		if (!tab)
5305 			new_tab->cnt = 0;
5306 		tab = new_tab;
5307 
5308 		type = &tab->types[tab->cnt];
5309 		type->btf_id = i;
5310 		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE, t->size);
5311 		/* The record cannot be unset, treat it as an error if so */
5312 		if (IS_ERR_OR_NULL(record)) {
5313 			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5314 			goto free;
5315 		}
5316 		foffs = btf_parse_field_offs(record);
5317 		/* We need the field_offs to be valid for a valid record,
5318 		 * either both should be set or both should be unset.
5319 		 */
5320 		if (IS_ERR_OR_NULL(foffs)) {
5321 			btf_record_free(record);
5322 			ret = -EFAULT;
5323 			goto free;
5324 		}
5325 		type->record = record;
5326 		type->field_offs = foffs;
5327 		tab->cnt++;
5328 	}
5329 	return tab;
5330 free:
5331 	btf_struct_metas_free(tab);
5332 	return ERR_PTR(ret);
5333 }
5334 
5335 struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
5336 {
5337 	struct btf_struct_metas *tab;
5338 
5339 	BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0);
5340 	tab = btf->struct_meta_tab;
5341 	if (!tab)
5342 		return NULL;
5343 	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5344 }
5345 
5346 static int btf_check_type_tags(struct btf_verifier_env *env,
5347 			       struct btf *btf, int start_id)
5348 {
5349 	int i, n, good_id = start_id - 1;
5350 	bool in_tags;
5351 
5352 	n = btf_nr_types(btf);
5353 	for (i = start_id; i < n; i++) {
5354 		const struct btf_type *t;
5355 		int chain_limit = 32;
5356 		u32 cur_id = i;
5357 
5358 		t = btf_type_by_id(btf, i);
5359 		if (!t)
5360 			return -EINVAL;
5361 		if (!btf_type_is_modifier(t))
5362 			continue;
5363 
5364 		cond_resched();
5365 
5366 		in_tags = btf_type_is_type_tag(t);
5367 		while (btf_type_is_modifier(t)) {
5368 			if (!chain_limit--) {
5369 				btf_verifier_log(env, "Max chain length or cycle detected");
5370 				return -ELOOP;
5371 			}
5372 			if (btf_type_is_type_tag(t)) {
5373 				if (!in_tags) {
5374 					btf_verifier_log(env, "Type tags don't precede modifiers");
5375 					return -EINVAL;
5376 				}
5377 			} else if (in_tags) {
5378 				in_tags = false;
5379 			}
5380 			if (cur_id <= good_id)
5381 				break;
5382 			/* Move to next type */
5383 			cur_id = t->type;
5384 			t = btf_type_by_id(btf, cur_id);
5385 			if (!t)
5386 				return -EINVAL;
5387 		}
5388 		good_id = i;
5389 	}
5390 	return 0;
5391 }
5392 
5393 static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
5394 			     u32 log_level, char __user *log_ubuf, u32 log_size)
5395 {
5396 	struct btf_struct_metas *struct_meta_tab;
5397 	struct btf_verifier_env *env = NULL;
5398 	struct bpf_verifier_log *log;
5399 	struct btf *btf = NULL;
5400 	u8 *data;
5401 	int err;
5402 
5403 	if (btf_data_size > BTF_MAX_SIZE)
5404 		return ERR_PTR(-E2BIG);
5405 
5406 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5407 	if (!env)
5408 		return ERR_PTR(-ENOMEM);
5409 
5410 	log = &env->log;
5411 	if (log_level || log_ubuf || log_size) {
5412 		/* user requested verbose verifier output
5413 		 * and supplied buffer to store the verification trace
5414 		 */
5415 		log->level = log_level;
5416 		log->ubuf = log_ubuf;
5417 		log->len_total = log_size;
5418 
5419 		/* log attributes have to be sane */
5420 		if (!bpf_verifier_log_attr_valid(log)) {
5421 			err = -EINVAL;
5422 			goto errout;
5423 		}
5424 	}
5425 
5426 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5427 	if (!btf) {
5428 		err = -ENOMEM;
5429 		goto errout;
5430 	}
5431 	env->btf = btf;
5432 
5433 	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
5434 	if (!data) {
5435 		err = -ENOMEM;
5436 		goto errout;
5437 	}
5438 
5439 	btf->data = data;
5440 	btf->data_size = btf_data_size;
5441 
5442 	if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
5443 		err = -EFAULT;
5444 		goto errout;
5445 	}
5446 
5447 	err = btf_parse_hdr(env);
5448 	if (err)
5449 		goto errout;
5450 
5451 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5452 
5453 	err = btf_parse_str_sec(env);
5454 	if (err)
5455 		goto errout;
5456 
5457 	err = btf_parse_type_sec(env);
5458 	if (err)
5459 		goto errout;
5460 
5461 	err = btf_check_type_tags(env, btf, 1);
5462 	if (err)
5463 		goto errout;
5464 
5465 	struct_meta_tab = btf_parse_struct_metas(log, btf);
5466 	if (IS_ERR(struct_meta_tab)) {
5467 		err = PTR_ERR(struct_meta_tab);
5468 		goto errout;
5469 	}
5470 	btf->struct_meta_tab = struct_meta_tab;
5471 
5472 	if (struct_meta_tab) {
5473 		int i;
5474 
5475 		for (i = 0; i < struct_meta_tab->cnt; i++) {
5476 			err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5477 			if (err < 0)
5478 				goto errout_meta;
5479 		}
5480 	}
5481 
5482 	if (log->level && bpf_verifier_log_full(log)) {
5483 		err = -ENOSPC;
5484 		goto errout_meta;
5485 	}
5486 
5487 	btf_verifier_env_free(env);
5488 	refcount_set(&btf->refcnt, 1);
5489 	return btf;
5490 
5491 errout_meta:
5492 	btf_free_struct_meta_tab(btf);
5493 errout:
5494 	btf_verifier_env_free(env);
5495 	if (btf)
5496 		btf_free(btf);
5497 	return ERR_PTR(err);
5498 }
5499 
5500 extern char __weak __start_BTF[];
5501 extern char __weak __stop_BTF[];
5502 extern struct btf *btf_vmlinux;
5503 
5504 #define BPF_MAP_TYPE(_id, _ops)
5505 #define BPF_LINK_TYPE(_id, _name)
5506 static union {
5507 	struct bpf_ctx_convert {
5508 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5509 	prog_ctx_type _id##_prog; \
5510 	kern_ctx_type _id##_kern;
5511 #include <linux/bpf_types.h>
5512 #undef BPF_PROG_TYPE
5513 	} *__t;
5514 	/* 't' is written once under lock. Read many times. */
5515 	const struct btf_type *t;
5516 } bpf_ctx_convert;
5517 enum {
5518 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5519 	__ctx_convert##_id,
5520 #include <linux/bpf_types.h>
5521 #undef BPF_PROG_TYPE
5522 	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
5523 };
5524 static u8 bpf_ctx_convert_map[] = {
5525 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5526 	[_id] = __ctx_convert##_id,
5527 #include <linux/bpf_types.h>
5528 #undef BPF_PROG_TYPE
5529 	0, /* avoid empty array */
5530 };
5531 #undef BPF_MAP_TYPE
5532 #undef BPF_LINK_TYPE
5533 
5534 const struct btf_member *
5535 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
5536 		      const struct btf_type *t, enum bpf_prog_type prog_type,
5537 		      int arg)
5538 {
5539 	const struct btf_type *conv_struct;
5540 	const struct btf_type *ctx_struct;
5541 	const struct btf_member *ctx_type;
5542 	const char *tname, *ctx_tname;
5543 
5544 	conv_struct = bpf_ctx_convert.t;
5545 	if (!conv_struct) {
5546 		bpf_log(log, "btf_vmlinux is malformed\n");
5547 		return NULL;
5548 	}
5549 	t = btf_type_by_id(btf, t->type);
5550 	while (btf_type_is_modifier(t))
5551 		t = btf_type_by_id(btf, t->type);
5552 	if (!btf_type_is_struct(t)) {
5553 		/* Only pointer to struct is supported for now.
5554 		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
5555 		 * is not supported yet.
5556 		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
5557 		 */
5558 		return NULL;
5559 	}
5560 	tname = btf_name_by_offset(btf, t->name_off);
5561 	if (!tname) {
5562 		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5563 		return NULL;
5564 	}
5565 	/* prog_type is valid bpf program type. No need for bounds check. */
5566 	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5567 	/* ctx_struct is a pointer to prog_ctx_type in vmlinux.
5568 	 * Like 'struct __sk_buff'
5569 	 */
5570 	ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
5571 	if (!ctx_struct)
5572 		/* should not happen */
5573 		return NULL;
5574 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
5575 	if (!ctx_tname) {
5576 		/* should not happen */
5577 		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5578 		return NULL;
5579 	}
5580 	/* only compare that prog's ctx type name is the same as
5581 	 * kernel expects. No need to compare field by field.
5582 	 * It's ok for bpf prog to do:
5583 	 * struct __sk_buff {};
5584 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
5585 	 * { // no fields of skb are ever used }
5586 	 */
5587 	if (strcmp(ctx_tname, tname))
5588 		return NULL;
5589 	return ctx_type;
5590 }
5591 
5592 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
5593 				     struct btf *btf,
5594 				     const struct btf_type *t,
5595 				     enum bpf_prog_type prog_type,
5596 				     int arg)
5597 {
5598 	const struct btf_member *prog_ctx_type, *kern_ctx_type;
5599 
5600 	prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
5601 	if (!prog_ctx_type)
5602 		return -ENOENT;
5603 	kern_ctx_type = prog_ctx_type + 1;
5604 	return kern_ctx_type->type;
5605 }
5606 
5607 int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
5608 {
5609 	const struct btf_member *kctx_member;
5610 	const struct btf_type *conv_struct;
5611 	const struct btf_type *kctx_type;
5612 	u32 kctx_type_id;
5613 
5614 	conv_struct = bpf_ctx_convert.t;
5615 	/* get member for kernel ctx type */
5616 	kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
5617 	kctx_type_id = kctx_member->type;
5618 	kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id);
5619 	if (!btf_type_is_struct(kctx_type)) {
5620 		bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
5621 		return -EINVAL;
5622 	}
5623 
5624 	return kctx_type_id;
5625 }
5626 
5627 BTF_ID_LIST(bpf_ctx_convert_btf_id)
5628 BTF_ID(struct, bpf_ctx_convert)
5629 
5630 struct btf *btf_parse_vmlinux(void)
5631 {
5632 	struct btf_verifier_env *env = NULL;
5633 	struct bpf_verifier_log *log;
5634 	struct btf *btf = NULL;
5635 	int err;
5636 
5637 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5638 	if (!env)
5639 		return ERR_PTR(-ENOMEM);
5640 
5641 	log = &env->log;
5642 	log->level = BPF_LOG_KERNEL;
5643 
5644 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5645 	if (!btf) {
5646 		err = -ENOMEM;
5647 		goto errout;
5648 	}
5649 	env->btf = btf;
5650 
5651 	btf->data = __start_BTF;
5652 	btf->data_size = __stop_BTF - __start_BTF;
5653 	btf->kernel_btf = true;
5654 	snprintf(btf->name, sizeof(btf->name), "vmlinux");
5655 
5656 	err = btf_parse_hdr(env);
5657 	if (err)
5658 		goto errout;
5659 
5660 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5661 
5662 	err = btf_parse_str_sec(env);
5663 	if (err)
5664 		goto errout;
5665 
5666 	err = btf_check_all_metas(env);
5667 	if (err)
5668 		goto errout;
5669 
5670 	err = btf_check_type_tags(env, btf, 1);
5671 	if (err)
5672 		goto errout;
5673 
5674 	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
5675 	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
5676 
5677 	bpf_struct_ops_init(btf, log);
5678 
5679 	refcount_set(&btf->refcnt, 1);
5680 
5681 	err = btf_alloc_id(btf);
5682 	if (err)
5683 		goto errout;
5684 
5685 	btf_verifier_env_free(env);
5686 	return btf;
5687 
5688 errout:
5689 	btf_verifier_env_free(env);
5690 	if (btf) {
5691 		kvfree(btf->types);
5692 		kfree(btf);
5693 	}
5694 	return ERR_PTR(err);
5695 }
5696 
5697 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
5698 
5699 static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size)
5700 {
5701 	struct btf_verifier_env *env = NULL;
5702 	struct bpf_verifier_log *log;
5703 	struct btf *btf = NULL, *base_btf;
5704 	int err;
5705 
5706 	base_btf = bpf_get_btf_vmlinux();
5707 	if (IS_ERR(base_btf))
5708 		return base_btf;
5709 	if (!base_btf)
5710 		return ERR_PTR(-EINVAL);
5711 
5712 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5713 	if (!env)
5714 		return ERR_PTR(-ENOMEM);
5715 
5716 	log = &env->log;
5717 	log->level = BPF_LOG_KERNEL;
5718 
5719 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5720 	if (!btf) {
5721 		err = -ENOMEM;
5722 		goto errout;
5723 	}
5724 	env->btf = btf;
5725 
5726 	btf->base_btf = base_btf;
5727 	btf->start_id = base_btf->nr_types;
5728 	btf->start_str_off = base_btf->hdr.str_len;
5729 	btf->kernel_btf = true;
5730 	snprintf(btf->name, sizeof(btf->name), "%s", module_name);
5731 
5732 	btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
5733 	if (!btf->data) {
5734 		err = -ENOMEM;
5735 		goto errout;
5736 	}
5737 	memcpy(btf->data, data, data_size);
5738 	btf->data_size = data_size;
5739 
5740 	err = btf_parse_hdr(env);
5741 	if (err)
5742 		goto errout;
5743 
5744 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5745 
5746 	err = btf_parse_str_sec(env);
5747 	if (err)
5748 		goto errout;
5749 
5750 	err = btf_check_all_metas(env);
5751 	if (err)
5752 		goto errout;
5753 
5754 	err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
5755 	if (err)
5756 		goto errout;
5757 
5758 	btf_verifier_env_free(env);
5759 	refcount_set(&btf->refcnt, 1);
5760 	return btf;
5761 
5762 errout:
5763 	btf_verifier_env_free(env);
5764 	if (btf) {
5765 		kvfree(btf->data);
5766 		kvfree(btf->types);
5767 		kfree(btf);
5768 	}
5769 	return ERR_PTR(err);
5770 }
5771 
5772 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
5773 
5774 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
5775 {
5776 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
5777 
5778 	if (tgt_prog)
5779 		return tgt_prog->aux->btf;
5780 	else
5781 		return prog->aux->attach_btf;
5782 }
5783 
5784 static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
5785 {
5786 	/* t comes in already as a pointer */
5787 	t = btf_type_by_id(btf, t->type);
5788 
5789 	/* allow const */
5790 	if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
5791 		t = btf_type_by_id(btf, t->type);
5792 
5793 	return btf_type_is_int(t);
5794 }
5795 
5796 static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
5797 			   int off)
5798 {
5799 	const struct btf_param *args;
5800 	const struct btf_type *t;
5801 	u32 offset = 0, nr_args;
5802 	int i;
5803 
5804 	if (!func_proto)
5805 		return off / 8;
5806 
5807 	nr_args = btf_type_vlen(func_proto);
5808 	args = (const struct btf_param *)(func_proto + 1);
5809 	for (i = 0; i < nr_args; i++) {
5810 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
5811 		offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
5812 		if (off < offset)
5813 			return i;
5814 	}
5815 
5816 	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
5817 	offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
5818 	if (off < offset)
5819 		return nr_args;
5820 
5821 	return nr_args + 1;
5822 }
5823 
5824 static bool prog_args_trusted(const struct bpf_prog *prog)
5825 {
5826 	enum bpf_attach_type atype = prog->expected_attach_type;
5827 
5828 	switch (prog->type) {
5829 	case BPF_PROG_TYPE_TRACING:
5830 		return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER;
5831 	case BPF_PROG_TYPE_LSM:
5832 	case BPF_PROG_TYPE_STRUCT_OPS:
5833 		return true;
5834 	default:
5835 		return false;
5836 	}
5837 }
5838 
5839 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
5840 		    const struct bpf_prog *prog,
5841 		    struct bpf_insn_access_aux *info)
5842 {
5843 	const struct btf_type *t = prog->aux->attach_func_proto;
5844 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
5845 	struct btf *btf = bpf_prog_get_target_btf(prog);
5846 	const char *tname = prog->aux->attach_func_name;
5847 	struct bpf_verifier_log *log = info->log;
5848 	const struct btf_param *args;
5849 	const char *tag_value;
5850 	u32 nr_args, arg;
5851 	int i, ret;
5852 
5853 	if (off % 8) {
5854 		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
5855 			tname, off);
5856 		return false;
5857 	}
5858 	arg = get_ctx_arg_idx(btf, t, off);
5859 	args = (const struct btf_param *)(t + 1);
5860 	/* if (t == NULL) Fall back to default BPF prog with
5861 	 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
5862 	 */
5863 	nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
5864 	if (prog->aux->attach_btf_trace) {
5865 		/* skip first 'void *__data' argument in btf_trace_##name typedef */
5866 		args++;
5867 		nr_args--;
5868 	}
5869 
5870 	if (arg > nr_args) {
5871 		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
5872 			tname, arg + 1);
5873 		return false;
5874 	}
5875 
5876 	if (arg == nr_args) {
5877 		switch (prog->expected_attach_type) {
5878 		case BPF_LSM_CGROUP:
5879 		case BPF_LSM_MAC:
5880 		case BPF_TRACE_FEXIT:
5881 			/* When LSM programs are attached to void LSM hooks
5882 			 * they use FEXIT trampolines and when attached to
5883 			 * int LSM hooks, they use MODIFY_RETURN trampolines.
5884 			 *
5885 			 * While the LSM programs are BPF_MODIFY_RETURN-like
5886 			 * the check:
5887 			 *
5888 			 *	if (ret_type != 'int')
5889 			 *		return -EINVAL;
5890 			 *
5891 			 * is _not_ done here. This is still safe as LSM hooks
5892 			 * have only void and int return types.
5893 			 */
5894 			if (!t)
5895 				return true;
5896 			t = btf_type_by_id(btf, t->type);
5897 			break;
5898 		case BPF_MODIFY_RETURN:
5899 			/* For now the BPF_MODIFY_RETURN can only be attached to
5900 			 * functions that return an int.
5901 			 */
5902 			if (!t)
5903 				return false;
5904 
5905 			t = btf_type_skip_modifiers(btf, t->type, NULL);
5906 			if (!btf_type_is_small_int(t)) {
5907 				bpf_log(log,
5908 					"ret type %s not allowed for fmod_ret\n",
5909 					btf_type_str(t));
5910 				return false;
5911 			}
5912 			break;
5913 		default:
5914 			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
5915 				tname, arg + 1);
5916 			return false;
5917 		}
5918 	} else {
5919 		if (!t)
5920 			/* Default prog with MAX_BPF_FUNC_REG_ARGS args */
5921 			return true;
5922 		t = btf_type_by_id(btf, args[arg].type);
5923 	}
5924 
5925 	/* skip modifiers */
5926 	while (btf_type_is_modifier(t))
5927 		t = btf_type_by_id(btf, t->type);
5928 	if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
5929 		/* accessing a scalar */
5930 		return true;
5931 	if (!btf_type_is_ptr(t)) {
5932 		bpf_log(log,
5933 			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
5934 			tname, arg,
5935 			__btf_name_by_offset(btf, t->name_off),
5936 			btf_type_str(t));
5937 		return false;
5938 	}
5939 
5940 	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
5941 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
5942 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
5943 		u32 type, flag;
5944 
5945 		type = base_type(ctx_arg_info->reg_type);
5946 		flag = type_flag(ctx_arg_info->reg_type);
5947 		if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
5948 		    (flag & PTR_MAYBE_NULL)) {
5949 			info->reg_type = ctx_arg_info->reg_type;
5950 			return true;
5951 		}
5952 	}
5953 
5954 	if (t->type == 0)
5955 		/* This is a pointer to void.
5956 		 * It is the same as scalar from the verifier safety pov.
5957 		 * No further pointer walking is allowed.
5958 		 */
5959 		return true;
5960 
5961 	if (is_int_ptr(btf, t))
5962 		return true;
5963 
5964 	/* this is a pointer to another type */
5965 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
5966 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
5967 
5968 		if (ctx_arg_info->offset == off) {
5969 			if (!ctx_arg_info->btf_id) {
5970 				bpf_log(log,"invalid btf_id for context argument offset %u\n", off);
5971 				return false;
5972 			}
5973 
5974 			info->reg_type = ctx_arg_info->reg_type;
5975 			info->btf = btf_vmlinux;
5976 			info->btf_id = ctx_arg_info->btf_id;
5977 			return true;
5978 		}
5979 	}
5980 
5981 	info->reg_type = PTR_TO_BTF_ID;
5982 	if (prog_args_trusted(prog))
5983 		info->reg_type |= PTR_TRUSTED;
5984 
5985 	if (tgt_prog) {
5986 		enum bpf_prog_type tgt_type;
5987 
5988 		if (tgt_prog->type == BPF_PROG_TYPE_EXT)
5989 			tgt_type = tgt_prog->aux->saved_dst_prog_type;
5990 		else
5991 			tgt_type = tgt_prog->type;
5992 
5993 		ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
5994 		if (ret > 0) {
5995 			info->btf = btf_vmlinux;
5996 			info->btf_id = ret;
5997 			return true;
5998 		} else {
5999 			return false;
6000 		}
6001 	}
6002 
6003 	info->btf = btf;
6004 	info->btf_id = t->type;
6005 	t = btf_type_by_id(btf, t->type);
6006 
6007 	if (btf_type_is_type_tag(t)) {
6008 		tag_value = __btf_name_by_offset(btf, t->name_off);
6009 		if (strcmp(tag_value, "user") == 0)
6010 			info->reg_type |= MEM_USER;
6011 		if (strcmp(tag_value, "percpu") == 0)
6012 			info->reg_type |= MEM_PERCPU;
6013 	}
6014 
6015 	/* skip modifiers */
6016 	while (btf_type_is_modifier(t)) {
6017 		info->btf_id = t->type;
6018 		t = btf_type_by_id(btf, t->type);
6019 	}
6020 	if (!btf_type_is_struct(t)) {
6021 		bpf_log(log,
6022 			"func '%s' arg%d type %s is not a struct\n",
6023 			tname, arg, btf_type_str(t));
6024 		return false;
6025 	}
6026 	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
6027 		tname, arg, info->btf_id, btf_type_str(t),
6028 		__btf_name_by_offset(btf, t->name_off));
6029 	return true;
6030 }
6031 
6032 enum bpf_struct_walk_result {
6033 	/* < 0 error */
6034 	WALK_SCALAR = 0,
6035 	WALK_PTR,
6036 	WALK_STRUCT,
6037 };
6038 
6039 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
6040 			   const struct btf_type *t, int off, int size,
6041 			   u32 *next_btf_id, enum bpf_type_flag *flag)
6042 {
6043 	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
6044 	const struct btf_type *mtype, *elem_type = NULL;
6045 	const struct btf_member *member;
6046 	const char *tname, *mname, *tag_value;
6047 	u32 vlen, elem_id, mid;
6048 
6049 again:
6050 	tname = __btf_name_by_offset(btf, t->name_off);
6051 	if (!btf_type_is_struct(t)) {
6052 		bpf_log(log, "Type '%s' is not a struct\n", tname);
6053 		return -EINVAL;
6054 	}
6055 
6056 	vlen = btf_type_vlen(t);
6057 	if (off + size > t->size) {
6058 		/* If the last element is a variable size array, we may
6059 		 * need to relax the rule.
6060 		 */
6061 		struct btf_array *array_elem;
6062 
6063 		if (vlen == 0)
6064 			goto error;
6065 
6066 		member = btf_type_member(t) + vlen - 1;
6067 		mtype = btf_type_skip_modifiers(btf, member->type,
6068 						NULL);
6069 		if (!btf_type_is_array(mtype))
6070 			goto error;
6071 
6072 		array_elem = (struct btf_array *)(mtype + 1);
6073 		if (array_elem->nelems != 0)
6074 			goto error;
6075 
6076 		moff = __btf_member_bit_offset(t, member) / 8;
6077 		if (off < moff)
6078 			goto error;
6079 
6080 		/* Only allow structure for now, can be relaxed for
6081 		 * other types later.
6082 		 */
6083 		t = btf_type_skip_modifiers(btf, array_elem->type,
6084 					    NULL);
6085 		if (!btf_type_is_struct(t))
6086 			goto error;
6087 
6088 		off = (off - moff) % t->size;
6089 		goto again;
6090 
6091 error:
6092 		bpf_log(log, "access beyond struct %s at off %u size %u\n",
6093 			tname, off, size);
6094 		return -EACCES;
6095 	}
6096 
6097 	for_each_member(i, t, member) {
6098 		/* offset of the field in bytes */
6099 		moff = __btf_member_bit_offset(t, member) / 8;
6100 		if (off + size <= moff)
6101 			/* won't find anything, field is already too far */
6102 			break;
6103 
6104 		if (__btf_member_bitfield_size(t, member)) {
6105 			u32 end_bit = __btf_member_bit_offset(t, member) +
6106 				__btf_member_bitfield_size(t, member);
6107 
6108 			/* off <= moff instead of off == moff because clang
6109 			 * does not generate a BTF member for anonymous
6110 			 * bitfield like the ":16" here:
6111 			 * struct {
6112 			 *	int :16;
6113 			 *	int x:8;
6114 			 * };
6115 			 */
6116 			if (off <= moff &&
6117 			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
6118 				return WALK_SCALAR;
6119 
6120 			/* off may be accessing a following member
6121 			 *
6122 			 * or
6123 			 *
6124 			 * Doing partial access at either end of this
6125 			 * bitfield.  Continue on this case also to
6126 			 * treat it as not accessing this bitfield
6127 			 * and eventually error out as field not
6128 			 * found to keep it simple.
6129 			 * It could be relaxed if there was a legit
6130 			 * partial access case later.
6131 			 */
6132 			continue;
6133 		}
6134 
6135 		/* In case of "off" is pointing to holes of a struct */
6136 		if (off < moff)
6137 			break;
6138 
6139 		/* type of the field */
6140 		mid = member->type;
6141 		mtype = btf_type_by_id(btf, member->type);
6142 		mname = __btf_name_by_offset(btf, member->name_off);
6143 
6144 		mtype = __btf_resolve_size(btf, mtype, &msize,
6145 					   &elem_type, &elem_id, &total_nelems,
6146 					   &mid);
6147 		if (IS_ERR(mtype)) {
6148 			bpf_log(log, "field %s doesn't have size\n", mname);
6149 			return -EFAULT;
6150 		}
6151 
6152 		mtrue_end = moff + msize;
6153 		if (off >= mtrue_end)
6154 			/* no overlap with member, keep iterating */
6155 			continue;
6156 
6157 		if (btf_type_is_array(mtype)) {
6158 			u32 elem_idx;
6159 
6160 			/* __btf_resolve_size() above helps to
6161 			 * linearize a multi-dimensional array.
6162 			 *
6163 			 * The logic here is treating an array
6164 			 * in a struct as the following way:
6165 			 *
6166 			 * struct outer {
6167 			 *	struct inner array[2][2];
6168 			 * };
6169 			 *
6170 			 * looks like:
6171 			 *
6172 			 * struct outer {
6173 			 *	struct inner array_elem0;
6174 			 *	struct inner array_elem1;
6175 			 *	struct inner array_elem2;
6176 			 *	struct inner array_elem3;
6177 			 * };
6178 			 *
6179 			 * When accessing outer->array[1][0], it moves
6180 			 * moff to "array_elem2", set mtype to
6181 			 * "struct inner", and msize also becomes
6182 			 * sizeof(struct inner).  Then most of the
6183 			 * remaining logic will fall through without
6184 			 * caring the current member is an array or
6185 			 * not.
6186 			 *
6187 			 * Unlike mtype/msize/moff, mtrue_end does not
6188 			 * change.  The naming difference ("_true") tells
6189 			 * that it is not always corresponding to
6190 			 * the current mtype/msize/moff.
6191 			 * It is the true end of the current
6192 			 * member (i.e. array in this case).  That
6193 			 * will allow an int array to be accessed like
6194 			 * a scratch space,
6195 			 * i.e. allow access beyond the size of
6196 			 *      the array's element as long as it is
6197 			 *      within the mtrue_end boundary.
6198 			 */
6199 
6200 			/* skip empty array */
6201 			if (moff == mtrue_end)
6202 				continue;
6203 
6204 			msize /= total_nelems;
6205 			elem_idx = (off - moff) / msize;
6206 			moff += elem_idx * msize;
6207 			mtype = elem_type;
6208 			mid = elem_id;
6209 		}
6210 
6211 		/* the 'off' we're looking for is either equal to start
6212 		 * of this field or inside of this struct
6213 		 */
6214 		if (btf_type_is_struct(mtype)) {
6215 			/* our field must be inside that union or struct */
6216 			t = mtype;
6217 
6218 			/* return if the offset matches the member offset */
6219 			if (off == moff) {
6220 				*next_btf_id = mid;
6221 				return WALK_STRUCT;
6222 			}
6223 
6224 			/* adjust offset we're looking for */
6225 			off -= moff;
6226 			goto again;
6227 		}
6228 
6229 		if (btf_type_is_ptr(mtype)) {
6230 			const struct btf_type *stype, *t;
6231 			enum bpf_type_flag tmp_flag = 0;
6232 			u32 id;
6233 
6234 			if (msize != size || off != moff) {
6235 				bpf_log(log,
6236 					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
6237 					mname, moff, tname, off, size);
6238 				return -EACCES;
6239 			}
6240 
6241 			/* check type tag */
6242 			t = btf_type_by_id(btf, mtype->type);
6243 			if (btf_type_is_type_tag(t)) {
6244 				tag_value = __btf_name_by_offset(btf, t->name_off);
6245 				/* check __user tag */
6246 				if (strcmp(tag_value, "user") == 0)
6247 					tmp_flag = MEM_USER;
6248 				/* check __percpu tag */
6249 				if (strcmp(tag_value, "percpu") == 0)
6250 					tmp_flag = MEM_PERCPU;
6251 				/* check __rcu tag */
6252 				if (strcmp(tag_value, "rcu") == 0)
6253 					tmp_flag = MEM_RCU;
6254 			}
6255 
6256 			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
6257 			if (btf_type_is_struct(stype)) {
6258 				*next_btf_id = id;
6259 				*flag = tmp_flag;
6260 				return WALK_PTR;
6261 			}
6262 		}
6263 
6264 		/* Allow more flexible access within an int as long as
6265 		 * it is within mtrue_end.
6266 		 * Since mtrue_end could be the end of an array,
6267 		 * that also allows using an array of int as a scratch
6268 		 * space. e.g. skb->cb[].
6269 		 */
6270 		if (off + size > mtrue_end) {
6271 			bpf_log(log,
6272 				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
6273 				mname, mtrue_end, tname, off, size);
6274 			return -EACCES;
6275 		}
6276 
6277 		return WALK_SCALAR;
6278 	}
6279 	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
6280 	return -EINVAL;
6281 }
6282 
6283 int btf_struct_access(struct bpf_verifier_log *log,
6284 		      const struct bpf_reg_state *reg,
6285 		      int off, int size, enum bpf_access_type atype __maybe_unused,
6286 		      u32 *next_btf_id, enum bpf_type_flag *flag)
6287 {
6288 	const struct btf *btf = reg->btf;
6289 	enum bpf_type_flag tmp_flag = 0;
6290 	const struct btf_type *t;
6291 	u32 id = reg->btf_id;
6292 	int err;
6293 
6294 	while (type_is_alloc(reg->type)) {
6295 		struct btf_struct_meta *meta;
6296 		struct btf_record *rec;
6297 		int i;
6298 
6299 		meta = btf_find_struct_meta(btf, id);
6300 		if (!meta)
6301 			break;
6302 		rec = meta->record;
6303 		for (i = 0; i < rec->cnt; i++) {
6304 			struct btf_field *field = &rec->fields[i];
6305 			u32 offset = field->offset;
6306 			if (off < offset + btf_field_type_size(field->type) && offset < off + size) {
6307 				bpf_log(log,
6308 					"direct access to %s is disallowed\n",
6309 					btf_field_type_name(field->type));
6310 				return -EACCES;
6311 			}
6312 		}
6313 		break;
6314 	}
6315 
6316 	t = btf_type_by_id(btf, id);
6317 	do {
6318 		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
6319 
6320 		switch (err) {
6321 		case WALK_PTR:
6322 			/* For local types, the destination register cannot
6323 			 * become a pointer again.
6324 			 */
6325 			if (type_is_alloc(reg->type))
6326 				return SCALAR_VALUE;
6327 			/* If we found the pointer or scalar on t+off,
6328 			 * we're done.
6329 			 */
6330 			*next_btf_id = id;
6331 			*flag = tmp_flag;
6332 			return PTR_TO_BTF_ID;
6333 		case WALK_SCALAR:
6334 			return SCALAR_VALUE;
6335 		case WALK_STRUCT:
6336 			/* We found nested struct, so continue the search
6337 			 * by diving in it. At this point the offset is
6338 			 * aligned with the new type, so set it to 0.
6339 			 */
6340 			t = btf_type_by_id(btf, id);
6341 			off = 0;
6342 			break;
6343 		default:
6344 			/* It's either error or unknown return value..
6345 			 * scream and leave.
6346 			 */
6347 			if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
6348 				return -EINVAL;
6349 			return err;
6350 		}
6351 	} while (t);
6352 
6353 	return -EINVAL;
6354 }
6355 
6356 /* Check that two BTF types, each specified as an BTF object + id, are exactly
6357  * the same. Trivial ID check is not enough due to module BTFs, because we can
6358  * end up with two different module BTFs, but IDs point to the common type in
6359  * vmlinux BTF.
6360  */
6361 bool btf_types_are_same(const struct btf *btf1, u32 id1,
6362 			const struct btf *btf2, u32 id2)
6363 {
6364 	if (id1 != id2)
6365 		return false;
6366 	if (btf1 == btf2)
6367 		return true;
6368 	return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
6369 }
6370 
6371 bool btf_struct_ids_match(struct bpf_verifier_log *log,
6372 			  const struct btf *btf, u32 id, int off,
6373 			  const struct btf *need_btf, u32 need_type_id,
6374 			  bool strict)
6375 {
6376 	const struct btf_type *type;
6377 	enum bpf_type_flag flag;
6378 	int err;
6379 
6380 	/* Are we already done? */
6381 	if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
6382 		return true;
6383 	/* In case of strict type match, we do not walk struct, the top level
6384 	 * type match must succeed. When strict is true, off should have already
6385 	 * been 0.
6386 	 */
6387 	if (strict)
6388 		return false;
6389 again:
6390 	type = btf_type_by_id(btf, id);
6391 	if (!type)
6392 		return false;
6393 	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
6394 	if (err != WALK_STRUCT)
6395 		return false;
6396 
6397 	/* We found nested struct object. If it matches
6398 	 * the requested ID, we're done. Otherwise let's
6399 	 * continue the search with offset 0 in the new
6400 	 * type.
6401 	 */
6402 	if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
6403 		off = 0;
6404 		goto again;
6405 	}
6406 
6407 	return true;
6408 }
6409 
6410 static int __get_type_size(struct btf *btf, u32 btf_id,
6411 			   const struct btf_type **ret_type)
6412 {
6413 	const struct btf_type *t;
6414 
6415 	*ret_type = btf_type_by_id(btf, 0);
6416 	if (!btf_id)
6417 		/* void */
6418 		return 0;
6419 	t = btf_type_by_id(btf, btf_id);
6420 	while (t && btf_type_is_modifier(t))
6421 		t = btf_type_by_id(btf, t->type);
6422 	if (!t)
6423 		return -EINVAL;
6424 	*ret_type = t;
6425 	if (btf_type_is_ptr(t))
6426 		/* kernel size of pointer. Not BPF's size of pointer*/
6427 		return sizeof(void *);
6428 	if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
6429 		return t->size;
6430 	return -EINVAL;
6431 }
6432 
6433 int btf_distill_func_proto(struct bpf_verifier_log *log,
6434 			   struct btf *btf,
6435 			   const struct btf_type *func,
6436 			   const char *tname,
6437 			   struct btf_func_model *m)
6438 {
6439 	const struct btf_param *args;
6440 	const struct btf_type *t;
6441 	u32 i, nargs;
6442 	int ret;
6443 
6444 	if (!func) {
6445 		/* BTF function prototype doesn't match the verifier types.
6446 		 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
6447 		 */
6448 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
6449 			m->arg_size[i] = 8;
6450 			m->arg_flags[i] = 0;
6451 		}
6452 		m->ret_size = 8;
6453 		m->nr_args = MAX_BPF_FUNC_REG_ARGS;
6454 		return 0;
6455 	}
6456 	args = (const struct btf_param *)(func + 1);
6457 	nargs = btf_type_vlen(func);
6458 	if (nargs > MAX_BPF_FUNC_ARGS) {
6459 		bpf_log(log,
6460 			"The function %s has %d arguments. Too many.\n",
6461 			tname, nargs);
6462 		return -EINVAL;
6463 	}
6464 	ret = __get_type_size(btf, func->type, &t);
6465 	if (ret < 0 || __btf_type_is_struct(t)) {
6466 		bpf_log(log,
6467 			"The function %s return type %s is unsupported.\n",
6468 			tname, btf_type_str(t));
6469 		return -EINVAL;
6470 	}
6471 	m->ret_size = ret;
6472 
6473 	for (i = 0; i < nargs; i++) {
6474 		if (i == nargs - 1 && args[i].type == 0) {
6475 			bpf_log(log,
6476 				"The function %s with variable args is unsupported.\n",
6477 				tname);
6478 			return -EINVAL;
6479 		}
6480 		ret = __get_type_size(btf, args[i].type, &t);
6481 
6482 		/* No support of struct argument size greater than 16 bytes */
6483 		if (ret < 0 || ret > 16) {
6484 			bpf_log(log,
6485 				"The function %s arg%d type %s is unsupported.\n",
6486 				tname, i, btf_type_str(t));
6487 			return -EINVAL;
6488 		}
6489 		if (ret == 0) {
6490 			bpf_log(log,
6491 				"The function %s has malformed void argument.\n",
6492 				tname);
6493 			return -EINVAL;
6494 		}
6495 		m->arg_size[i] = ret;
6496 		m->arg_flags[i] = __btf_type_is_struct(t) ? BTF_FMODEL_STRUCT_ARG : 0;
6497 	}
6498 	m->nr_args = nargs;
6499 	return 0;
6500 }
6501 
6502 /* Compare BTFs of two functions assuming only scalars and pointers to context.
6503  * t1 points to BTF_KIND_FUNC in btf1
6504  * t2 points to BTF_KIND_FUNC in btf2
6505  * Returns:
6506  * EINVAL - function prototype mismatch
6507  * EFAULT - verifier bug
6508  * 0 - 99% match. The last 1% is validated by the verifier.
6509  */
6510 static int btf_check_func_type_match(struct bpf_verifier_log *log,
6511 				     struct btf *btf1, const struct btf_type *t1,
6512 				     struct btf *btf2, const struct btf_type *t2)
6513 {
6514 	const struct btf_param *args1, *args2;
6515 	const char *fn1, *fn2, *s1, *s2;
6516 	u32 nargs1, nargs2, i;
6517 
6518 	fn1 = btf_name_by_offset(btf1, t1->name_off);
6519 	fn2 = btf_name_by_offset(btf2, t2->name_off);
6520 
6521 	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
6522 		bpf_log(log, "%s() is not a global function\n", fn1);
6523 		return -EINVAL;
6524 	}
6525 	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
6526 		bpf_log(log, "%s() is not a global function\n", fn2);
6527 		return -EINVAL;
6528 	}
6529 
6530 	t1 = btf_type_by_id(btf1, t1->type);
6531 	if (!t1 || !btf_type_is_func_proto(t1))
6532 		return -EFAULT;
6533 	t2 = btf_type_by_id(btf2, t2->type);
6534 	if (!t2 || !btf_type_is_func_proto(t2))
6535 		return -EFAULT;
6536 
6537 	args1 = (const struct btf_param *)(t1 + 1);
6538 	nargs1 = btf_type_vlen(t1);
6539 	args2 = (const struct btf_param *)(t2 + 1);
6540 	nargs2 = btf_type_vlen(t2);
6541 
6542 	if (nargs1 != nargs2) {
6543 		bpf_log(log, "%s() has %d args while %s() has %d args\n",
6544 			fn1, nargs1, fn2, nargs2);
6545 		return -EINVAL;
6546 	}
6547 
6548 	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
6549 	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
6550 	if (t1->info != t2->info) {
6551 		bpf_log(log,
6552 			"Return type %s of %s() doesn't match type %s of %s()\n",
6553 			btf_type_str(t1), fn1,
6554 			btf_type_str(t2), fn2);
6555 		return -EINVAL;
6556 	}
6557 
6558 	for (i = 0; i < nargs1; i++) {
6559 		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
6560 		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
6561 
6562 		if (t1->info != t2->info) {
6563 			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
6564 				i, fn1, btf_type_str(t1),
6565 				fn2, btf_type_str(t2));
6566 			return -EINVAL;
6567 		}
6568 		if (btf_type_has_size(t1) && t1->size != t2->size) {
6569 			bpf_log(log,
6570 				"arg%d in %s() has size %d while %s() has %d\n",
6571 				i, fn1, t1->size,
6572 				fn2, t2->size);
6573 			return -EINVAL;
6574 		}
6575 
6576 		/* global functions are validated with scalars and pointers
6577 		 * to context only. And only global functions can be replaced.
6578 		 * Hence type check only those types.
6579 		 */
6580 		if (btf_type_is_int(t1) || btf_is_any_enum(t1))
6581 			continue;
6582 		if (!btf_type_is_ptr(t1)) {
6583 			bpf_log(log,
6584 				"arg%d in %s() has unrecognized type\n",
6585 				i, fn1);
6586 			return -EINVAL;
6587 		}
6588 		t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
6589 		t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
6590 		if (!btf_type_is_struct(t1)) {
6591 			bpf_log(log,
6592 				"arg%d in %s() is not a pointer to context\n",
6593 				i, fn1);
6594 			return -EINVAL;
6595 		}
6596 		if (!btf_type_is_struct(t2)) {
6597 			bpf_log(log,
6598 				"arg%d in %s() is not a pointer to context\n",
6599 				i, fn2);
6600 			return -EINVAL;
6601 		}
6602 		/* This is an optional check to make program writing easier.
6603 		 * Compare names of structs and report an error to the user.
6604 		 * btf_prepare_func_args() already checked that t2 struct
6605 		 * is a context type. btf_prepare_func_args() will check
6606 		 * later that t1 struct is a context type as well.
6607 		 */
6608 		s1 = btf_name_by_offset(btf1, t1->name_off);
6609 		s2 = btf_name_by_offset(btf2, t2->name_off);
6610 		if (strcmp(s1, s2)) {
6611 			bpf_log(log,
6612 				"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
6613 				i, fn1, s1, fn2, s2);
6614 			return -EINVAL;
6615 		}
6616 	}
6617 	return 0;
6618 }
6619 
6620 /* Compare BTFs of given program with BTF of target program */
6621 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
6622 			 struct btf *btf2, const struct btf_type *t2)
6623 {
6624 	struct btf *btf1 = prog->aux->btf;
6625 	const struct btf_type *t1;
6626 	u32 btf_id = 0;
6627 
6628 	if (!prog->aux->func_info) {
6629 		bpf_log(log, "Program extension requires BTF\n");
6630 		return -EINVAL;
6631 	}
6632 
6633 	btf_id = prog->aux->func_info[0].type_id;
6634 	if (!btf_id)
6635 		return -EFAULT;
6636 
6637 	t1 = btf_type_by_id(btf1, btf_id);
6638 	if (!t1 || !btf_type_is_func(t1))
6639 		return -EFAULT;
6640 
6641 	return btf_check_func_type_match(log, btf1, t1, btf2, t2);
6642 }
6643 
6644 static int btf_check_func_arg_match(struct bpf_verifier_env *env,
6645 				    const struct btf *btf, u32 func_id,
6646 				    struct bpf_reg_state *regs,
6647 				    bool ptr_to_mem_ok,
6648 				    bool processing_call)
6649 {
6650 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
6651 	struct bpf_verifier_log *log = &env->log;
6652 	const char *func_name, *ref_tname;
6653 	const struct btf_type *t, *ref_t;
6654 	const struct btf_param *args;
6655 	u32 i, nargs, ref_id;
6656 	int ret;
6657 
6658 	t = btf_type_by_id(btf, func_id);
6659 	if (!t || !btf_type_is_func(t)) {
6660 		/* These checks were already done by the verifier while loading
6661 		 * struct bpf_func_info or in add_kfunc_call().
6662 		 */
6663 		bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n",
6664 			func_id);
6665 		return -EFAULT;
6666 	}
6667 	func_name = btf_name_by_offset(btf, t->name_off);
6668 
6669 	t = btf_type_by_id(btf, t->type);
6670 	if (!t || !btf_type_is_func_proto(t)) {
6671 		bpf_log(log, "Invalid BTF of func %s\n", func_name);
6672 		return -EFAULT;
6673 	}
6674 	args = (const struct btf_param *)(t + 1);
6675 	nargs = btf_type_vlen(t);
6676 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
6677 		bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs,
6678 			MAX_BPF_FUNC_REG_ARGS);
6679 		return -EINVAL;
6680 	}
6681 
6682 	/* check that BTF function arguments match actual types that the
6683 	 * verifier sees.
6684 	 */
6685 	for (i = 0; i < nargs; i++) {
6686 		enum bpf_arg_type arg_type = ARG_DONTCARE;
6687 		u32 regno = i + 1;
6688 		struct bpf_reg_state *reg = &regs[regno];
6689 
6690 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6691 		if (btf_type_is_scalar(t)) {
6692 			if (reg->type == SCALAR_VALUE)
6693 				continue;
6694 			bpf_log(log, "R%d is not a scalar\n", regno);
6695 			return -EINVAL;
6696 		}
6697 
6698 		if (!btf_type_is_ptr(t)) {
6699 			bpf_log(log, "Unrecognized arg#%d type %s\n",
6700 				i, btf_type_str(t));
6701 			return -EINVAL;
6702 		}
6703 
6704 		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
6705 		ref_tname = btf_name_by_offset(btf, ref_t->name_off);
6706 
6707 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
6708 		if (ret < 0)
6709 			return ret;
6710 
6711 		if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
6712 			/* If function expects ctx type in BTF check that caller
6713 			 * is passing PTR_TO_CTX.
6714 			 */
6715 			if (reg->type != PTR_TO_CTX) {
6716 				bpf_log(log,
6717 					"arg#%d expected pointer to ctx, but got %s\n",
6718 					i, btf_type_str(t));
6719 				return -EINVAL;
6720 			}
6721 		} else if (ptr_to_mem_ok && processing_call) {
6722 			const struct btf_type *resolve_ret;
6723 			u32 type_size;
6724 
6725 			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
6726 			if (IS_ERR(resolve_ret)) {
6727 				bpf_log(log,
6728 					"arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6729 					i, btf_type_str(ref_t), ref_tname,
6730 					PTR_ERR(resolve_ret));
6731 				return -EINVAL;
6732 			}
6733 
6734 			if (check_mem_reg(env, reg, regno, type_size))
6735 				return -EINVAL;
6736 		} else {
6737 			bpf_log(log, "reg type unsupported for arg#%d function %s#%d\n", i,
6738 				func_name, func_id);
6739 			return -EINVAL;
6740 		}
6741 	}
6742 
6743 	return 0;
6744 }
6745 
6746 /* Compare BTF of a function declaration with given bpf_reg_state.
6747  * Returns:
6748  * EFAULT - there is a verifier bug. Abort verification.
6749  * EINVAL - there is a type mismatch or BTF is not available.
6750  * 0 - BTF matches with what bpf_reg_state expects.
6751  * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
6752  */
6753 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
6754 				struct bpf_reg_state *regs)
6755 {
6756 	struct bpf_prog *prog = env->prog;
6757 	struct btf *btf = prog->aux->btf;
6758 	bool is_global;
6759 	u32 btf_id;
6760 	int err;
6761 
6762 	if (!prog->aux->func_info)
6763 		return -EINVAL;
6764 
6765 	btf_id = prog->aux->func_info[subprog].type_id;
6766 	if (!btf_id)
6767 		return -EFAULT;
6768 
6769 	if (prog->aux->func_info_aux[subprog].unreliable)
6770 		return -EINVAL;
6771 
6772 	is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6773 	err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, false);
6774 
6775 	/* Compiler optimizations can remove arguments from static functions
6776 	 * or mismatched type can be passed into a global function.
6777 	 * In such cases mark the function as unreliable from BTF point of view.
6778 	 */
6779 	if (err)
6780 		prog->aux->func_info_aux[subprog].unreliable = true;
6781 	return err;
6782 }
6783 
6784 /* Compare BTF of a function call with given bpf_reg_state.
6785  * Returns:
6786  * EFAULT - there is a verifier bug. Abort verification.
6787  * EINVAL - there is a type mismatch or BTF is not available.
6788  * 0 - BTF matches with what bpf_reg_state expects.
6789  * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
6790  *
6791  * NOTE: the code is duplicated from btf_check_subprog_arg_match()
6792  * because btf_check_func_arg_match() is still doing both. Once that
6793  * function is split in 2, we can call from here btf_check_subprog_arg_match()
6794  * first, and then treat the calling part in a new code path.
6795  */
6796 int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
6797 			   struct bpf_reg_state *regs)
6798 {
6799 	struct bpf_prog *prog = env->prog;
6800 	struct btf *btf = prog->aux->btf;
6801 	bool is_global;
6802 	u32 btf_id;
6803 	int err;
6804 
6805 	if (!prog->aux->func_info)
6806 		return -EINVAL;
6807 
6808 	btf_id = prog->aux->func_info[subprog].type_id;
6809 	if (!btf_id)
6810 		return -EFAULT;
6811 
6812 	if (prog->aux->func_info_aux[subprog].unreliable)
6813 		return -EINVAL;
6814 
6815 	is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6816 	err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, true);
6817 
6818 	/* Compiler optimizations can remove arguments from static functions
6819 	 * or mismatched type can be passed into a global function.
6820 	 * In such cases mark the function as unreliable from BTF point of view.
6821 	 */
6822 	if (err)
6823 		prog->aux->func_info_aux[subprog].unreliable = true;
6824 	return err;
6825 }
6826 
6827 /* Convert BTF of a function into bpf_reg_state if possible
6828  * Returns:
6829  * EFAULT - there is a verifier bug. Abort verification.
6830  * EINVAL - cannot convert BTF.
6831  * 0 - Successfully converted BTF into bpf_reg_state
6832  * (either PTR_TO_CTX or SCALAR_VALUE).
6833  */
6834 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
6835 			  struct bpf_reg_state *regs)
6836 {
6837 	struct bpf_verifier_log *log = &env->log;
6838 	struct bpf_prog *prog = env->prog;
6839 	enum bpf_prog_type prog_type = prog->type;
6840 	struct btf *btf = prog->aux->btf;
6841 	const struct btf_param *args;
6842 	const struct btf_type *t, *ref_t;
6843 	u32 i, nargs, btf_id;
6844 	const char *tname;
6845 
6846 	if (!prog->aux->func_info ||
6847 	    prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
6848 		bpf_log(log, "Verifier bug\n");
6849 		return -EFAULT;
6850 	}
6851 
6852 	btf_id = prog->aux->func_info[subprog].type_id;
6853 	if (!btf_id) {
6854 		bpf_log(log, "Global functions need valid BTF\n");
6855 		return -EFAULT;
6856 	}
6857 
6858 	t = btf_type_by_id(btf, btf_id);
6859 	if (!t || !btf_type_is_func(t)) {
6860 		/* These checks were already done by the verifier while loading
6861 		 * struct bpf_func_info
6862 		 */
6863 		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
6864 			subprog);
6865 		return -EFAULT;
6866 	}
6867 	tname = btf_name_by_offset(btf, t->name_off);
6868 
6869 	if (log->level & BPF_LOG_LEVEL)
6870 		bpf_log(log, "Validating %s() func#%d...\n",
6871 			tname, subprog);
6872 
6873 	if (prog->aux->func_info_aux[subprog].unreliable) {
6874 		bpf_log(log, "Verifier bug in function %s()\n", tname);
6875 		return -EFAULT;
6876 	}
6877 	if (prog_type == BPF_PROG_TYPE_EXT)
6878 		prog_type = prog->aux->dst_prog->type;
6879 
6880 	t = btf_type_by_id(btf, t->type);
6881 	if (!t || !btf_type_is_func_proto(t)) {
6882 		bpf_log(log, "Invalid type of function %s()\n", tname);
6883 		return -EFAULT;
6884 	}
6885 	args = (const struct btf_param *)(t + 1);
6886 	nargs = btf_type_vlen(t);
6887 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
6888 		bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
6889 			tname, nargs, MAX_BPF_FUNC_REG_ARGS);
6890 		return -EINVAL;
6891 	}
6892 	/* check that function returns int */
6893 	t = btf_type_by_id(btf, t->type);
6894 	while (btf_type_is_modifier(t))
6895 		t = btf_type_by_id(btf, t->type);
6896 	if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
6897 		bpf_log(log,
6898 			"Global function %s() doesn't return scalar. Only those are supported.\n",
6899 			tname);
6900 		return -EINVAL;
6901 	}
6902 	/* Convert BTF function arguments into verifier types.
6903 	 * Only PTR_TO_CTX and SCALAR are supported atm.
6904 	 */
6905 	for (i = 0; i < nargs; i++) {
6906 		struct bpf_reg_state *reg = &regs[i + 1];
6907 
6908 		t = btf_type_by_id(btf, args[i].type);
6909 		while (btf_type_is_modifier(t))
6910 			t = btf_type_by_id(btf, t->type);
6911 		if (btf_type_is_int(t) || btf_is_any_enum(t)) {
6912 			reg->type = SCALAR_VALUE;
6913 			continue;
6914 		}
6915 		if (btf_type_is_ptr(t)) {
6916 			if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
6917 				reg->type = PTR_TO_CTX;
6918 				continue;
6919 			}
6920 
6921 			t = btf_type_skip_modifiers(btf, t->type, NULL);
6922 
6923 			ref_t = btf_resolve_size(btf, t, &reg->mem_size);
6924 			if (IS_ERR(ref_t)) {
6925 				bpf_log(log,
6926 				    "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6927 				    i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
6928 					PTR_ERR(ref_t));
6929 				return -EINVAL;
6930 			}
6931 
6932 			reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
6933 			reg->id = ++env->id_gen;
6934 
6935 			continue;
6936 		}
6937 		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
6938 			i, btf_type_str(t), tname);
6939 		return -EINVAL;
6940 	}
6941 	return 0;
6942 }
6943 
6944 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
6945 			  struct btf_show *show)
6946 {
6947 	const struct btf_type *t = btf_type_by_id(btf, type_id);
6948 
6949 	show->btf = btf;
6950 	memset(&show->state, 0, sizeof(show->state));
6951 	memset(&show->obj, 0, sizeof(show->obj));
6952 
6953 	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
6954 }
6955 
6956 static void btf_seq_show(struct btf_show *show, const char *fmt,
6957 			 va_list args)
6958 {
6959 	seq_vprintf((struct seq_file *)show->target, fmt, args);
6960 }
6961 
6962 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
6963 			    void *obj, struct seq_file *m, u64 flags)
6964 {
6965 	struct btf_show sseq;
6966 
6967 	sseq.target = m;
6968 	sseq.showfn = btf_seq_show;
6969 	sseq.flags = flags;
6970 
6971 	btf_type_show(btf, type_id, obj, &sseq);
6972 
6973 	return sseq.state.status;
6974 }
6975 
6976 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
6977 		       struct seq_file *m)
6978 {
6979 	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
6980 				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
6981 				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
6982 }
6983 
6984 struct btf_show_snprintf {
6985 	struct btf_show show;
6986 	int len_left;		/* space left in string */
6987 	int len;		/* length we would have written */
6988 };
6989 
6990 static void btf_snprintf_show(struct btf_show *show, const char *fmt,
6991 			      va_list args)
6992 {
6993 	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
6994 	int len;
6995 
6996 	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
6997 
6998 	if (len < 0) {
6999 		ssnprintf->len_left = 0;
7000 		ssnprintf->len = len;
7001 	} else if (len >= ssnprintf->len_left) {
7002 		/* no space, drive on to get length we would have written */
7003 		ssnprintf->len_left = 0;
7004 		ssnprintf->len += len;
7005 	} else {
7006 		ssnprintf->len_left -= len;
7007 		ssnprintf->len += len;
7008 		show->target += len;
7009 	}
7010 }
7011 
7012 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
7013 			   char *buf, int len, u64 flags)
7014 {
7015 	struct btf_show_snprintf ssnprintf;
7016 
7017 	ssnprintf.show.target = buf;
7018 	ssnprintf.show.flags = flags;
7019 	ssnprintf.show.showfn = btf_snprintf_show;
7020 	ssnprintf.len_left = len;
7021 	ssnprintf.len = 0;
7022 
7023 	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
7024 
7025 	/* If we encountered an error, return it. */
7026 	if (ssnprintf.show.state.status)
7027 		return ssnprintf.show.state.status;
7028 
7029 	/* Otherwise return length we would have written */
7030 	return ssnprintf.len;
7031 }
7032 
7033 #ifdef CONFIG_PROC_FS
7034 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
7035 {
7036 	const struct btf *btf = filp->private_data;
7037 
7038 	seq_printf(m, "btf_id:\t%u\n", btf->id);
7039 }
7040 #endif
7041 
7042 static int btf_release(struct inode *inode, struct file *filp)
7043 {
7044 	btf_put(filp->private_data);
7045 	return 0;
7046 }
7047 
7048 const struct file_operations btf_fops = {
7049 #ifdef CONFIG_PROC_FS
7050 	.show_fdinfo	= bpf_btf_show_fdinfo,
7051 #endif
7052 	.release	= btf_release,
7053 };
7054 
7055 static int __btf_new_fd(struct btf *btf)
7056 {
7057 	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
7058 }
7059 
7060 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
7061 {
7062 	struct btf *btf;
7063 	int ret;
7064 
7065 	btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
7066 			attr->btf_size, attr->btf_log_level,
7067 			u64_to_user_ptr(attr->btf_log_buf),
7068 			attr->btf_log_size);
7069 	if (IS_ERR(btf))
7070 		return PTR_ERR(btf);
7071 
7072 	ret = btf_alloc_id(btf);
7073 	if (ret) {
7074 		btf_free(btf);
7075 		return ret;
7076 	}
7077 
7078 	/*
7079 	 * The BTF ID is published to the userspace.
7080 	 * All BTF free must go through call_rcu() from
7081 	 * now on (i.e. free by calling btf_put()).
7082 	 */
7083 
7084 	ret = __btf_new_fd(btf);
7085 	if (ret < 0)
7086 		btf_put(btf);
7087 
7088 	return ret;
7089 }
7090 
7091 struct btf *btf_get_by_fd(int fd)
7092 {
7093 	struct btf *btf;
7094 	struct fd f;
7095 
7096 	f = fdget(fd);
7097 
7098 	if (!f.file)
7099 		return ERR_PTR(-EBADF);
7100 
7101 	if (f.file->f_op != &btf_fops) {
7102 		fdput(f);
7103 		return ERR_PTR(-EINVAL);
7104 	}
7105 
7106 	btf = f.file->private_data;
7107 	refcount_inc(&btf->refcnt);
7108 	fdput(f);
7109 
7110 	return btf;
7111 }
7112 
7113 int btf_get_info_by_fd(const struct btf *btf,
7114 		       const union bpf_attr *attr,
7115 		       union bpf_attr __user *uattr)
7116 {
7117 	struct bpf_btf_info __user *uinfo;
7118 	struct bpf_btf_info info;
7119 	u32 info_copy, btf_copy;
7120 	void __user *ubtf;
7121 	char __user *uname;
7122 	u32 uinfo_len, uname_len, name_len;
7123 	int ret = 0;
7124 
7125 	uinfo = u64_to_user_ptr(attr->info.info);
7126 	uinfo_len = attr->info.info_len;
7127 
7128 	info_copy = min_t(u32, uinfo_len, sizeof(info));
7129 	memset(&info, 0, sizeof(info));
7130 	if (copy_from_user(&info, uinfo, info_copy))
7131 		return -EFAULT;
7132 
7133 	info.id = btf->id;
7134 	ubtf = u64_to_user_ptr(info.btf);
7135 	btf_copy = min_t(u32, btf->data_size, info.btf_size);
7136 	if (copy_to_user(ubtf, btf->data, btf_copy))
7137 		return -EFAULT;
7138 	info.btf_size = btf->data_size;
7139 
7140 	info.kernel_btf = btf->kernel_btf;
7141 
7142 	uname = u64_to_user_ptr(info.name);
7143 	uname_len = info.name_len;
7144 	if (!uname ^ !uname_len)
7145 		return -EINVAL;
7146 
7147 	name_len = strlen(btf->name);
7148 	info.name_len = name_len;
7149 
7150 	if (uname) {
7151 		if (uname_len >= name_len + 1) {
7152 			if (copy_to_user(uname, btf->name, name_len + 1))
7153 				return -EFAULT;
7154 		} else {
7155 			char zero = '\0';
7156 
7157 			if (copy_to_user(uname, btf->name, uname_len - 1))
7158 				return -EFAULT;
7159 			if (put_user(zero, uname + uname_len - 1))
7160 				return -EFAULT;
7161 			/* let user-space know about too short buffer */
7162 			ret = -ENOSPC;
7163 		}
7164 	}
7165 
7166 	if (copy_to_user(uinfo, &info, info_copy) ||
7167 	    put_user(info_copy, &uattr->info.info_len))
7168 		return -EFAULT;
7169 
7170 	return ret;
7171 }
7172 
7173 int btf_get_fd_by_id(u32 id)
7174 {
7175 	struct btf *btf;
7176 	int fd;
7177 
7178 	rcu_read_lock();
7179 	btf = idr_find(&btf_idr, id);
7180 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
7181 		btf = ERR_PTR(-ENOENT);
7182 	rcu_read_unlock();
7183 
7184 	if (IS_ERR(btf))
7185 		return PTR_ERR(btf);
7186 
7187 	fd = __btf_new_fd(btf);
7188 	if (fd < 0)
7189 		btf_put(btf);
7190 
7191 	return fd;
7192 }
7193 
7194 u32 btf_obj_id(const struct btf *btf)
7195 {
7196 	return btf->id;
7197 }
7198 
7199 bool btf_is_kernel(const struct btf *btf)
7200 {
7201 	return btf->kernel_btf;
7202 }
7203 
7204 bool btf_is_module(const struct btf *btf)
7205 {
7206 	return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
7207 }
7208 
7209 enum {
7210 	BTF_MODULE_F_LIVE = (1 << 0),
7211 };
7212 
7213 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7214 struct btf_module {
7215 	struct list_head list;
7216 	struct module *module;
7217 	struct btf *btf;
7218 	struct bin_attribute *sysfs_attr;
7219 	int flags;
7220 };
7221 
7222 static LIST_HEAD(btf_modules);
7223 static DEFINE_MUTEX(btf_module_mutex);
7224 
7225 static ssize_t
7226 btf_module_read(struct file *file, struct kobject *kobj,
7227 		struct bin_attribute *bin_attr,
7228 		char *buf, loff_t off, size_t len)
7229 {
7230 	const struct btf *btf = bin_attr->private;
7231 
7232 	memcpy(buf, btf->data + off, len);
7233 	return len;
7234 }
7235 
7236 static void purge_cand_cache(struct btf *btf);
7237 
7238 static int btf_module_notify(struct notifier_block *nb, unsigned long op,
7239 			     void *module)
7240 {
7241 	struct btf_module *btf_mod, *tmp;
7242 	struct module *mod = module;
7243 	struct btf *btf;
7244 	int err = 0;
7245 
7246 	if (mod->btf_data_size == 0 ||
7247 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
7248 	     op != MODULE_STATE_GOING))
7249 		goto out;
7250 
7251 	switch (op) {
7252 	case MODULE_STATE_COMING:
7253 		btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
7254 		if (!btf_mod) {
7255 			err = -ENOMEM;
7256 			goto out;
7257 		}
7258 		btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
7259 		if (IS_ERR(btf)) {
7260 			pr_warn("failed to validate module [%s] BTF: %ld\n",
7261 				mod->name, PTR_ERR(btf));
7262 			kfree(btf_mod);
7263 			if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
7264 				err = PTR_ERR(btf);
7265 			goto out;
7266 		}
7267 		err = btf_alloc_id(btf);
7268 		if (err) {
7269 			btf_free(btf);
7270 			kfree(btf_mod);
7271 			goto out;
7272 		}
7273 
7274 		purge_cand_cache(NULL);
7275 		mutex_lock(&btf_module_mutex);
7276 		btf_mod->module = module;
7277 		btf_mod->btf = btf;
7278 		list_add(&btf_mod->list, &btf_modules);
7279 		mutex_unlock(&btf_module_mutex);
7280 
7281 		if (IS_ENABLED(CONFIG_SYSFS)) {
7282 			struct bin_attribute *attr;
7283 
7284 			attr = kzalloc(sizeof(*attr), GFP_KERNEL);
7285 			if (!attr)
7286 				goto out;
7287 
7288 			sysfs_bin_attr_init(attr);
7289 			attr->attr.name = btf->name;
7290 			attr->attr.mode = 0444;
7291 			attr->size = btf->data_size;
7292 			attr->private = btf;
7293 			attr->read = btf_module_read;
7294 
7295 			err = sysfs_create_bin_file(btf_kobj, attr);
7296 			if (err) {
7297 				pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
7298 					mod->name, err);
7299 				kfree(attr);
7300 				err = 0;
7301 				goto out;
7302 			}
7303 
7304 			btf_mod->sysfs_attr = attr;
7305 		}
7306 
7307 		break;
7308 	case MODULE_STATE_LIVE:
7309 		mutex_lock(&btf_module_mutex);
7310 		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
7311 			if (btf_mod->module != module)
7312 				continue;
7313 
7314 			btf_mod->flags |= BTF_MODULE_F_LIVE;
7315 			break;
7316 		}
7317 		mutex_unlock(&btf_module_mutex);
7318 		break;
7319 	case MODULE_STATE_GOING:
7320 		mutex_lock(&btf_module_mutex);
7321 		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
7322 			if (btf_mod->module != module)
7323 				continue;
7324 
7325 			list_del(&btf_mod->list);
7326 			if (btf_mod->sysfs_attr)
7327 				sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
7328 			purge_cand_cache(btf_mod->btf);
7329 			btf_put(btf_mod->btf);
7330 			kfree(btf_mod->sysfs_attr);
7331 			kfree(btf_mod);
7332 			break;
7333 		}
7334 		mutex_unlock(&btf_module_mutex);
7335 		break;
7336 	}
7337 out:
7338 	return notifier_from_errno(err);
7339 }
7340 
7341 static struct notifier_block btf_module_nb = {
7342 	.notifier_call = btf_module_notify,
7343 };
7344 
7345 static int __init btf_module_init(void)
7346 {
7347 	register_module_notifier(&btf_module_nb);
7348 	return 0;
7349 }
7350 
7351 fs_initcall(btf_module_init);
7352 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
7353 
7354 struct module *btf_try_get_module(const struct btf *btf)
7355 {
7356 	struct module *res = NULL;
7357 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7358 	struct btf_module *btf_mod, *tmp;
7359 
7360 	mutex_lock(&btf_module_mutex);
7361 	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
7362 		if (btf_mod->btf != btf)
7363 			continue;
7364 
7365 		/* We must only consider module whose __init routine has
7366 		 * finished, hence we must check for BTF_MODULE_F_LIVE flag,
7367 		 * which is set from the notifier callback for
7368 		 * MODULE_STATE_LIVE.
7369 		 */
7370 		if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
7371 			res = btf_mod->module;
7372 
7373 		break;
7374 	}
7375 	mutex_unlock(&btf_module_mutex);
7376 #endif
7377 
7378 	return res;
7379 }
7380 
7381 /* Returns struct btf corresponding to the struct module.
7382  * This function can return NULL or ERR_PTR.
7383  */
7384 static struct btf *btf_get_module_btf(const struct module *module)
7385 {
7386 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7387 	struct btf_module *btf_mod, *tmp;
7388 #endif
7389 	struct btf *btf = NULL;
7390 
7391 	if (!module) {
7392 		btf = bpf_get_btf_vmlinux();
7393 		if (!IS_ERR_OR_NULL(btf))
7394 			btf_get(btf);
7395 		return btf;
7396 	}
7397 
7398 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7399 	mutex_lock(&btf_module_mutex);
7400 	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
7401 		if (btf_mod->module != module)
7402 			continue;
7403 
7404 		btf_get(btf_mod->btf);
7405 		btf = btf_mod->btf;
7406 		break;
7407 	}
7408 	mutex_unlock(&btf_module_mutex);
7409 #endif
7410 
7411 	return btf;
7412 }
7413 
7414 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
7415 {
7416 	struct btf *btf = NULL;
7417 	int btf_obj_fd = 0;
7418 	long ret;
7419 
7420 	if (flags)
7421 		return -EINVAL;
7422 
7423 	if (name_sz <= 1 || name[name_sz - 1])
7424 		return -EINVAL;
7425 
7426 	ret = bpf_find_btf_id(name, kind, &btf);
7427 	if (ret > 0 && btf_is_module(btf)) {
7428 		btf_obj_fd = __btf_new_fd(btf);
7429 		if (btf_obj_fd < 0) {
7430 			btf_put(btf);
7431 			return btf_obj_fd;
7432 		}
7433 		return ret | (((u64)btf_obj_fd) << 32);
7434 	}
7435 	if (ret > 0)
7436 		btf_put(btf);
7437 	return ret;
7438 }
7439 
7440 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
7441 	.func		= bpf_btf_find_by_name_kind,
7442 	.gpl_only	= false,
7443 	.ret_type	= RET_INTEGER,
7444 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
7445 	.arg2_type	= ARG_CONST_SIZE,
7446 	.arg3_type	= ARG_ANYTHING,
7447 	.arg4_type	= ARG_ANYTHING,
7448 };
7449 
7450 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
7451 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
7452 BTF_TRACING_TYPE_xxx
7453 #undef BTF_TRACING_TYPE
7454 
7455 /* Kernel Function (kfunc) BTF ID set registration API */
7456 
7457 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
7458 				  struct btf_id_set8 *add_set)
7459 {
7460 	bool vmlinux_set = !btf_is_module(btf);
7461 	struct btf_kfunc_set_tab *tab;
7462 	struct btf_id_set8 *set;
7463 	u32 set_cnt;
7464 	int ret;
7465 
7466 	if (hook >= BTF_KFUNC_HOOK_MAX) {
7467 		ret = -EINVAL;
7468 		goto end;
7469 	}
7470 
7471 	if (!add_set->cnt)
7472 		return 0;
7473 
7474 	tab = btf->kfunc_set_tab;
7475 	if (!tab) {
7476 		tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
7477 		if (!tab)
7478 			return -ENOMEM;
7479 		btf->kfunc_set_tab = tab;
7480 	}
7481 
7482 	set = tab->sets[hook];
7483 	/* Warn when register_btf_kfunc_id_set is called twice for the same hook
7484 	 * for module sets.
7485 	 */
7486 	if (WARN_ON_ONCE(set && !vmlinux_set)) {
7487 		ret = -EINVAL;
7488 		goto end;
7489 	}
7490 
7491 	/* We don't need to allocate, concatenate, and sort module sets, because
7492 	 * only one is allowed per hook. Hence, we can directly assign the
7493 	 * pointer and return.
7494 	 */
7495 	if (!vmlinux_set) {
7496 		tab->sets[hook] = add_set;
7497 		return 0;
7498 	}
7499 
7500 	/* In case of vmlinux sets, there may be more than one set being
7501 	 * registered per hook. To create a unified set, we allocate a new set
7502 	 * and concatenate all individual sets being registered. While each set
7503 	 * is individually sorted, they may become unsorted when concatenated,
7504 	 * hence re-sorting the final set again is required to make binary
7505 	 * searching the set using btf_id_set8_contains function work.
7506 	 */
7507 	set_cnt = set ? set->cnt : 0;
7508 
7509 	if (set_cnt > U32_MAX - add_set->cnt) {
7510 		ret = -EOVERFLOW;
7511 		goto end;
7512 	}
7513 
7514 	if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
7515 		ret = -E2BIG;
7516 		goto end;
7517 	}
7518 
7519 	/* Grow set */
7520 	set = krealloc(tab->sets[hook],
7521 		       offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]),
7522 		       GFP_KERNEL | __GFP_NOWARN);
7523 	if (!set) {
7524 		ret = -ENOMEM;
7525 		goto end;
7526 	}
7527 
7528 	/* For newly allocated set, initialize set->cnt to 0 */
7529 	if (!tab->sets[hook])
7530 		set->cnt = 0;
7531 	tab->sets[hook] = set;
7532 
7533 	/* Concatenate the two sets */
7534 	memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
7535 	set->cnt += add_set->cnt;
7536 
7537 	sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
7538 
7539 	return 0;
7540 end:
7541 	btf_free_kfunc_set_tab(btf);
7542 	return ret;
7543 }
7544 
7545 static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
7546 					enum btf_kfunc_hook hook,
7547 					u32 kfunc_btf_id)
7548 {
7549 	struct btf_id_set8 *set;
7550 	u32 *id;
7551 
7552 	if (hook >= BTF_KFUNC_HOOK_MAX)
7553 		return NULL;
7554 	if (!btf->kfunc_set_tab)
7555 		return NULL;
7556 	set = btf->kfunc_set_tab->sets[hook];
7557 	if (!set)
7558 		return NULL;
7559 	id = btf_id_set8_contains(set, kfunc_btf_id);
7560 	if (!id)
7561 		return NULL;
7562 	/* The flags for BTF ID are located next to it */
7563 	return id + 1;
7564 }
7565 
7566 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
7567 {
7568 	switch (prog_type) {
7569 	case BPF_PROG_TYPE_UNSPEC:
7570 		return BTF_KFUNC_HOOK_COMMON;
7571 	case BPF_PROG_TYPE_XDP:
7572 		return BTF_KFUNC_HOOK_XDP;
7573 	case BPF_PROG_TYPE_SCHED_CLS:
7574 		return BTF_KFUNC_HOOK_TC;
7575 	case BPF_PROG_TYPE_STRUCT_OPS:
7576 		return BTF_KFUNC_HOOK_STRUCT_OPS;
7577 	case BPF_PROG_TYPE_TRACING:
7578 	case BPF_PROG_TYPE_LSM:
7579 		return BTF_KFUNC_HOOK_TRACING;
7580 	case BPF_PROG_TYPE_SYSCALL:
7581 		return BTF_KFUNC_HOOK_SYSCALL;
7582 	default:
7583 		return BTF_KFUNC_HOOK_MAX;
7584 	}
7585 }
7586 
7587 /* Caution:
7588  * Reference to the module (obtained using btf_try_get_module) corresponding to
7589  * the struct btf *MUST* be held when calling this function from verifier
7590  * context. This is usually true as we stash references in prog's kfunc_btf_tab;
7591  * keeping the reference for the duration of the call provides the necessary
7592  * protection for looking up a well-formed btf->kfunc_set_tab.
7593  */
7594 u32 *btf_kfunc_id_set_contains(const struct btf *btf,
7595 			       enum bpf_prog_type prog_type,
7596 			       u32 kfunc_btf_id)
7597 {
7598 	enum btf_kfunc_hook hook;
7599 	u32 *kfunc_flags;
7600 
7601 	kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id);
7602 	if (kfunc_flags)
7603 		return kfunc_flags;
7604 
7605 	hook = bpf_prog_type_to_kfunc_hook(prog_type);
7606 	return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
7607 }
7608 
7609 /* This function must be invoked only from initcalls/module init functions */
7610 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
7611 			      const struct btf_kfunc_id_set *kset)
7612 {
7613 	enum btf_kfunc_hook hook;
7614 	struct btf *btf;
7615 	int ret;
7616 
7617 	btf = btf_get_module_btf(kset->owner);
7618 	if (!btf) {
7619 		if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
7620 			pr_err("missing vmlinux BTF, cannot register kfuncs\n");
7621 			return -ENOENT;
7622 		}
7623 		if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
7624 			pr_err("missing module BTF, cannot register kfuncs\n");
7625 			return -ENOENT;
7626 		}
7627 		return 0;
7628 	}
7629 	if (IS_ERR(btf))
7630 		return PTR_ERR(btf);
7631 
7632 	hook = bpf_prog_type_to_kfunc_hook(prog_type);
7633 	ret = btf_populate_kfunc_set(btf, hook, kset->set);
7634 	btf_put(btf);
7635 	return ret;
7636 }
7637 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
7638 
7639 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
7640 {
7641 	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
7642 	struct btf_id_dtor_kfunc *dtor;
7643 
7644 	if (!tab)
7645 		return -ENOENT;
7646 	/* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
7647 	 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
7648 	 */
7649 	BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
7650 	dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
7651 	if (!dtor)
7652 		return -ENOENT;
7653 	return dtor->kfunc_btf_id;
7654 }
7655 
7656 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
7657 {
7658 	const struct btf_type *dtor_func, *dtor_func_proto, *t;
7659 	const struct btf_param *args;
7660 	s32 dtor_btf_id;
7661 	u32 nr_args, i;
7662 
7663 	for (i = 0; i < cnt; i++) {
7664 		dtor_btf_id = dtors[i].kfunc_btf_id;
7665 
7666 		dtor_func = btf_type_by_id(btf, dtor_btf_id);
7667 		if (!dtor_func || !btf_type_is_func(dtor_func))
7668 			return -EINVAL;
7669 
7670 		dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
7671 		if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
7672 			return -EINVAL;
7673 
7674 		/* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
7675 		t = btf_type_by_id(btf, dtor_func_proto->type);
7676 		if (!t || !btf_type_is_void(t))
7677 			return -EINVAL;
7678 
7679 		nr_args = btf_type_vlen(dtor_func_proto);
7680 		if (nr_args != 1)
7681 			return -EINVAL;
7682 		args = btf_params(dtor_func_proto);
7683 		t = btf_type_by_id(btf, args[0].type);
7684 		/* Allow any pointer type, as width on targets Linux supports
7685 		 * will be same for all pointer types (i.e. sizeof(void *))
7686 		 */
7687 		if (!t || !btf_type_is_ptr(t))
7688 			return -EINVAL;
7689 	}
7690 	return 0;
7691 }
7692 
7693 /* This function must be invoked only from initcalls/module init functions */
7694 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
7695 				struct module *owner)
7696 {
7697 	struct btf_id_dtor_kfunc_tab *tab;
7698 	struct btf *btf;
7699 	u32 tab_cnt;
7700 	int ret;
7701 
7702 	btf = btf_get_module_btf(owner);
7703 	if (!btf) {
7704 		if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
7705 			pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
7706 			return -ENOENT;
7707 		}
7708 		if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
7709 			pr_err("missing module BTF, cannot register dtor kfuncs\n");
7710 			return -ENOENT;
7711 		}
7712 		return 0;
7713 	}
7714 	if (IS_ERR(btf))
7715 		return PTR_ERR(btf);
7716 
7717 	if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
7718 		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
7719 		ret = -E2BIG;
7720 		goto end;
7721 	}
7722 
7723 	/* Ensure that the prototype of dtor kfuncs being registered is sane */
7724 	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
7725 	if (ret < 0)
7726 		goto end;
7727 
7728 	tab = btf->dtor_kfunc_tab;
7729 	/* Only one call allowed for modules */
7730 	if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
7731 		ret = -EINVAL;
7732 		goto end;
7733 	}
7734 
7735 	tab_cnt = tab ? tab->cnt : 0;
7736 	if (tab_cnt > U32_MAX - add_cnt) {
7737 		ret = -EOVERFLOW;
7738 		goto end;
7739 	}
7740 	if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
7741 		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
7742 		ret = -E2BIG;
7743 		goto end;
7744 	}
7745 
7746 	tab = krealloc(btf->dtor_kfunc_tab,
7747 		       offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
7748 		       GFP_KERNEL | __GFP_NOWARN);
7749 	if (!tab) {
7750 		ret = -ENOMEM;
7751 		goto end;
7752 	}
7753 
7754 	if (!btf->dtor_kfunc_tab)
7755 		tab->cnt = 0;
7756 	btf->dtor_kfunc_tab = tab;
7757 
7758 	memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
7759 	tab->cnt += add_cnt;
7760 
7761 	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
7762 
7763 	return 0;
7764 end:
7765 	btf_free_dtor_kfunc_tab(btf);
7766 	btf_put(btf);
7767 	return ret;
7768 }
7769 EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
7770 
7771 #define MAX_TYPES_ARE_COMPAT_DEPTH 2
7772 
7773 /* Check local and target types for compatibility. This check is used for
7774  * type-based CO-RE relocations and follow slightly different rules than
7775  * field-based relocations. This function assumes that root types were already
7776  * checked for name match. Beyond that initial root-level name check, names
7777  * are completely ignored. Compatibility rules are as follows:
7778  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
7779  *     kind should match for local and target types (i.e., STRUCT is not
7780  *     compatible with UNION);
7781  *   - for ENUMs/ENUM64s, the size is ignored;
7782  *   - for INT, size and signedness are ignored;
7783  *   - for ARRAY, dimensionality is ignored, element types are checked for
7784  *     compatibility recursively;
7785  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
7786  *   - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
7787  *   - FUNC_PROTOs are compatible if they have compatible signature: same
7788  *     number of input args and compatible return and argument types.
7789  * These rules are not set in stone and probably will be adjusted as we get
7790  * more experience with using BPF CO-RE relocations.
7791  */
7792 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
7793 			      const struct btf *targ_btf, __u32 targ_id)
7794 {
7795 	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
7796 					   MAX_TYPES_ARE_COMPAT_DEPTH);
7797 }
7798 
7799 #define MAX_TYPES_MATCH_DEPTH 2
7800 
7801 int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
7802 			 const struct btf *targ_btf, u32 targ_id)
7803 {
7804 	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
7805 				      MAX_TYPES_MATCH_DEPTH);
7806 }
7807 
7808 static bool bpf_core_is_flavor_sep(const char *s)
7809 {
7810 	/* check X___Y name pattern, where X and Y are not underscores */
7811 	return s[0] != '_' &&				      /* X */
7812 	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
7813 	       s[4] != '_';				      /* Y */
7814 }
7815 
7816 size_t bpf_core_essential_name_len(const char *name)
7817 {
7818 	size_t n = strlen(name);
7819 	int i;
7820 
7821 	for (i = n - 5; i >= 0; i--) {
7822 		if (bpf_core_is_flavor_sep(name + i))
7823 			return i + 1;
7824 	}
7825 	return n;
7826 }
7827 
7828 struct bpf_cand_cache {
7829 	const char *name;
7830 	u32 name_len;
7831 	u16 kind;
7832 	u16 cnt;
7833 	struct {
7834 		const struct btf *btf;
7835 		u32 id;
7836 	} cands[];
7837 };
7838 
7839 static void bpf_free_cands(struct bpf_cand_cache *cands)
7840 {
7841 	if (!cands->cnt)
7842 		/* empty candidate array was allocated on stack */
7843 		return;
7844 	kfree(cands);
7845 }
7846 
7847 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
7848 {
7849 	kfree(cands->name);
7850 	kfree(cands);
7851 }
7852 
7853 #define VMLINUX_CAND_CACHE_SIZE 31
7854 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
7855 
7856 #define MODULE_CAND_CACHE_SIZE 31
7857 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
7858 
7859 static DEFINE_MUTEX(cand_cache_mutex);
7860 
7861 static void __print_cand_cache(struct bpf_verifier_log *log,
7862 			       struct bpf_cand_cache **cache,
7863 			       int cache_size)
7864 {
7865 	struct bpf_cand_cache *cc;
7866 	int i, j;
7867 
7868 	for (i = 0; i < cache_size; i++) {
7869 		cc = cache[i];
7870 		if (!cc)
7871 			continue;
7872 		bpf_log(log, "[%d]%s(", i, cc->name);
7873 		for (j = 0; j < cc->cnt; j++) {
7874 			bpf_log(log, "%d", cc->cands[j].id);
7875 			if (j < cc->cnt - 1)
7876 				bpf_log(log, " ");
7877 		}
7878 		bpf_log(log, "), ");
7879 	}
7880 }
7881 
7882 static void print_cand_cache(struct bpf_verifier_log *log)
7883 {
7884 	mutex_lock(&cand_cache_mutex);
7885 	bpf_log(log, "vmlinux_cand_cache:");
7886 	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
7887 	bpf_log(log, "\nmodule_cand_cache:");
7888 	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
7889 	bpf_log(log, "\n");
7890 	mutex_unlock(&cand_cache_mutex);
7891 }
7892 
7893 static u32 hash_cands(struct bpf_cand_cache *cands)
7894 {
7895 	return jhash(cands->name, cands->name_len, 0);
7896 }
7897 
7898 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
7899 					       struct bpf_cand_cache **cache,
7900 					       int cache_size)
7901 {
7902 	struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
7903 
7904 	if (cc && cc->name_len == cands->name_len &&
7905 	    !strncmp(cc->name, cands->name, cands->name_len))
7906 		return cc;
7907 	return NULL;
7908 }
7909 
7910 static size_t sizeof_cands(int cnt)
7911 {
7912 	return offsetof(struct bpf_cand_cache, cands[cnt]);
7913 }
7914 
7915 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
7916 						  struct bpf_cand_cache **cache,
7917 						  int cache_size)
7918 {
7919 	struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
7920 
7921 	if (*cc) {
7922 		bpf_free_cands_from_cache(*cc);
7923 		*cc = NULL;
7924 	}
7925 	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
7926 	if (!new_cands) {
7927 		bpf_free_cands(cands);
7928 		return ERR_PTR(-ENOMEM);
7929 	}
7930 	/* strdup the name, since it will stay in cache.
7931 	 * the cands->name points to strings in prog's BTF and the prog can be unloaded.
7932 	 */
7933 	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
7934 	bpf_free_cands(cands);
7935 	if (!new_cands->name) {
7936 		kfree(new_cands);
7937 		return ERR_PTR(-ENOMEM);
7938 	}
7939 	*cc = new_cands;
7940 	return new_cands;
7941 }
7942 
7943 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7944 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
7945 			       int cache_size)
7946 {
7947 	struct bpf_cand_cache *cc;
7948 	int i, j;
7949 
7950 	for (i = 0; i < cache_size; i++) {
7951 		cc = cache[i];
7952 		if (!cc)
7953 			continue;
7954 		if (!btf) {
7955 			/* when new module is loaded purge all of module_cand_cache,
7956 			 * since new module might have candidates with the name
7957 			 * that matches cached cands.
7958 			 */
7959 			bpf_free_cands_from_cache(cc);
7960 			cache[i] = NULL;
7961 			continue;
7962 		}
7963 		/* when module is unloaded purge cache entries
7964 		 * that match module's btf
7965 		 */
7966 		for (j = 0; j < cc->cnt; j++)
7967 			if (cc->cands[j].btf == btf) {
7968 				bpf_free_cands_from_cache(cc);
7969 				cache[i] = NULL;
7970 				break;
7971 			}
7972 	}
7973 
7974 }
7975 
7976 static void purge_cand_cache(struct btf *btf)
7977 {
7978 	mutex_lock(&cand_cache_mutex);
7979 	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
7980 	mutex_unlock(&cand_cache_mutex);
7981 }
7982 #endif
7983 
7984 static struct bpf_cand_cache *
7985 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
7986 		   int targ_start_id)
7987 {
7988 	struct bpf_cand_cache *new_cands;
7989 	const struct btf_type *t;
7990 	const char *targ_name;
7991 	size_t targ_essent_len;
7992 	int n, i;
7993 
7994 	n = btf_nr_types(targ_btf);
7995 	for (i = targ_start_id; i < n; i++) {
7996 		t = btf_type_by_id(targ_btf, i);
7997 		if (btf_kind(t) != cands->kind)
7998 			continue;
7999 
8000 		targ_name = btf_name_by_offset(targ_btf, t->name_off);
8001 		if (!targ_name)
8002 			continue;
8003 
8004 		/* the resched point is before strncmp to make sure that search
8005 		 * for non-existing name will have a chance to schedule().
8006 		 */
8007 		cond_resched();
8008 
8009 		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
8010 			continue;
8011 
8012 		targ_essent_len = bpf_core_essential_name_len(targ_name);
8013 		if (targ_essent_len != cands->name_len)
8014 			continue;
8015 
8016 		/* most of the time there is only one candidate for a given kind+name pair */
8017 		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
8018 		if (!new_cands) {
8019 			bpf_free_cands(cands);
8020 			return ERR_PTR(-ENOMEM);
8021 		}
8022 
8023 		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
8024 		bpf_free_cands(cands);
8025 		cands = new_cands;
8026 		cands->cands[cands->cnt].btf = targ_btf;
8027 		cands->cands[cands->cnt].id = i;
8028 		cands->cnt++;
8029 	}
8030 	return cands;
8031 }
8032 
8033 static struct bpf_cand_cache *
8034 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
8035 {
8036 	struct bpf_cand_cache *cands, *cc, local_cand = {};
8037 	const struct btf *local_btf = ctx->btf;
8038 	const struct btf_type *local_type;
8039 	const struct btf *main_btf;
8040 	size_t local_essent_len;
8041 	struct btf *mod_btf;
8042 	const char *name;
8043 	int id;
8044 
8045 	main_btf = bpf_get_btf_vmlinux();
8046 	if (IS_ERR(main_btf))
8047 		return ERR_CAST(main_btf);
8048 	if (!main_btf)
8049 		return ERR_PTR(-EINVAL);
8050 
8051 	local_type = btf_type_by_id(local_btf, local_type_id);
8052 	if (!local_type)
8053 		return ERR_PTR(-EINVAL);
8054 
8055 	name = btf_name_by_offset(local_btf, local_type->name_off);
8056 	if (str_is_empty(name))
8057 		return ERR_PTR(-EINVAL);
8058 	local_essent_len = bpf_core_essential_name_len(name);
8059 
8060 	cands = &local_cand;
8061 	cands->name = name;
8062 	cands->kind = btf_kind(local_type);
8063 	cands->name_len = local_essent_len;
8064 
8065 	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8066 	/* cands is a pointer to stack here */
8067 	if (cc) {
8068 		if (cc->cnt)
8069 			return cc;
8070 		goto check_modules;
8071 	}
8072 
8073 	/* Attempt to find target candidates in vmlinux BTF first */
8074 	cands = bpf_core_add_cands(cands, main_btf, 1);
8075 	if (IS_ERR(cands))
8076 		return ERR_CAST(cands);
8077 
8078 	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
8079 
8080 	/* populate cache even when cands->cnt == 0 */
8081 	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8082 	if (IS_ERR(cc))
8083 		return ERR_CAST(cc);
8084 
8085 	/* if vmlinux BTF has any candidate, don't go for module BTFs */
8086 	if (cc->cnt)
8087 		return cc;
8088 
8089 check_modules:
8090 	/* cands is a pointer to stack here and cands->cnt == 0 */
8091 	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8092 	if (cc)
8093 		/* if cache has it return it even if cc->cnt == 0 */
8094 		return cc;
8095 
8096 	/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
8097 	spin_lock_bh(&btf_idr_lock);
8098 	idr_for_each_entry(&btf_idr, mod_btf, id) {
8099 		if (!btf_is_module(mod_btf))
8100 			continue;
8101 		/* linear search could be slow hence unlock/lock
8102 		 * the IDR to avoiding holding it for too long
8103 		 */
8104 		btf_get(mod_btf);
8105 		spin_unlock_bh(&btf_idr_lock);
8106 		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
8107 		if (IS_ERR(cands)) {
8108 			btf_put(mod_btf);
8109 			return ERR_CAST(cands);
8110 		}
8111 		spin_lock_bh(&btf_idr_lock);
8112 		btf_put(mod_btf);
8113 	}
8114 	spin_unlock_bh(&btf_idr_lock);
8115 	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
8116 	 * or pointer to stack if cands->cnd == 0.
8117 	 * Copy it into the cache even when cands->cnt == 0 and
8118 	 * return the result.
8119 	 */
8120 	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8121 }
8122 
8123 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
8124 		   int relo_idx, void *insn)
8125 {
8126 	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
8127 	struct bpf_core_cand_list cands = {};
8128 	struct bpf_core_relo_res targ_res;
8129 	struct bpf_core_spec *specs;
8130 	int err;
8131 
8132 	/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
8133 	 * into arrays of btf_ids of struct fields and array indices.
8134 	 */
8135 	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
8136 	if (!specs)
8137 		return -ENOMEM;
8138 
8139 	if (need_cands) {
8140 		struct bpf_cand_cache *cc;
8141 		int i;
8142 
8143 		mutex_lock(&cand_cache_mutex);
8144 		cc = bpf_core_find_cands(ctx, relo->type_id);
8145 		if (IS_ERR(cc)) {
8146 			bpf_log(ctx->log, "target candidate search failed for %d\n",
8147 				relo->type_id);
8148 			err = PTR_ERR(cc);
8149 			goto out;
8150 		}
8151 		if (cc->cnt) {
8152 			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
8153 			if (!cands.cands) {
8154 				err = -ENOMEM;
8155 				goto out;
8156 			}
8157 		}
8158 		for (i = 0; i < cc->cnt; i++) {
8159 			bpf_log(ctx->log,
8160 				"CO-RE relocating %s %s: found target candidate [%d]\n",
8161 				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
8162 			cands.cands[i].btf = cc->cands[i].btf;
8163 			cands.cands[i].id = cc->cands[i].id;
8164 		}
8165 		cands.len = cc->cnt;
8166 		/* cand_cache_mutex needs to span the cache lookup and
8167 		 * copy of btf pointer into bpf_core_cand_list,
8168 		 * since module can be unloaded while bpf_core_calc_relo_insn
8169 		 * is working with module's btf.
8170 		 */
8171 	}
8172 
8173 	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
8174 				      &targ_res);
8175 	if (err)
8176 		goto out;
8177 
8178 	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
8179 				  &targ_res);
8180 
8181 out:
8182 	kfree(specs);
8183 	if (need_cands) {
8184 		kfree(cands.cands);
8185 		mutex_unlock(&cand_cache_mutex);
8186 		if (ctx->log->level & BPF_LOG_LEVEL2)
8187 			print_cand_cache(ctx->log);
8188 	}
8189 	return err;
8190 }
8191