xref: /linux/kernel/bpf/btf.c (revision 2c63221cd9e5c0dad0424029aeb1c40faada8330)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/types.h>
6 #include <linux/seq_file.h>
7 #include <linux/compiler.h>
8 #include <linux/ctype.h>
9 #include <linux/errno.h>
10 #include <linux/slab.h>
11 #include <linux/anon_inodes.h>
12 #include <linux/file.h>
13 #include <linux/uaccess.h>
14 #include <linux/kernel.h>
15 #include <linux/idr.h>
16 #include <linux/sort.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/btf.h>
19 
20 /* BTF (BPF Type Format) is the metadata format which describes
21  * the data types of BPF programs/maps.  Hence, it basically focuses
22  * on the C programming language, which modern BPF programs
23  * primarily use.
24  *
25  * ELF Section:
26  * ~~~~~~~~~~~
27  * The BTF data is stored under the ".BTF" ELF section
28  *
29  * struct btf_type:
30  * ~~~~~~~~~~~~~~~
31  * Each 'struct btf_type' object describes a C data type.
32  * Depending on the type it is describing, a 'struct btf_type'
33  * object may be followed by more data.  For example,
34  * to describe an array, 'struct btf_type' is followed by
35  * 'struct btf_array'.
36  *
37  * 'struct btf_type' and any extra data following it are
38  * 4 bytes aligned.
39  *
40  * Type section:
41  * ~~~~~~~~~~~~~
42  * The BTF type section contains a list of 'struct btf_type' objects.
43  * Each one describes a C type.  Recall from the above section
44  * that a 'struct btf_type' object could be immediately followed by extra
45  * data in order to describe some particular C types.
46  *
47  * type_id:
48  * ~~~~~~~
49  * Each btf_type object is identified by a type_id.  The type_id
50  * is implied by the location of the btf_type object in
51  * the BTF type section.  The first one has type_id 1.  The second
52  * one has type_id 2...etc.  Hence, an earlier btf_type has
53  * a smaller type_id.
54  *
55  * A btf_type object may refer to another btf_type object by using
56  * type_id (i.e. the "type" in the "struct btf_type").
57  *
58  * NOTE that we cannot assume any reference-order.
59  * A btf_type object can refer to an earlier btf_type object
60  * but it can also refer to a later btf_type object.
61  *
62  * For example, to describe "const void *", a btf_type
63  * object describing "const" may refer to another btf_type
64  * object describing "void *".  This type-reference is done
65  * by specifying type_id:
66  *
67  * [1] CONST (anon) type_id=2
68  * [2] PTR (anon) type_id=0
69  *
70  * The above is the btf_verifier debug log:
71  *   - Each line starting with "[?]" is a btf_type object
72  *   - [?] is the type_id of the btf_type object.
73  *   - CONST/PTR is the BTF_KIND_XXX
74  *   - "(anon)" is the name of the type.  It just
75  *     happens that CONST and PTR have no name.
76  *   - type_id=XXX is the 'u32 type' in btf_type
77  *
78  * NOTE: "void" has type_id 0
79  *
80  * String section:
81  * ~~~~~~~~~~~~~~
82  * The BTF string section contains the names used by the type section.
83  * Each string is referred to by an "offset" from the beginning of the
84  * string section.
85  *
86  * Each string is '\0' terminated.
87  *
88  * The first character in the string section must be '\0'
89  * which is used to mean 'anonymous'. Some btf_type may not
90  * have a name.
91  */
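
/* For illustration only: a rough sketch of how the "const void *"
 * example above could be laid out as raw 'struct btf_type' entries.
 * The kind lives in bits 24-27 of "info" and "type" carries the
 * referred type_id (the array name "raw_example" is made up):
 *
 *	static const struct btf_type raw_example[] = {
 *		{ .name_off = 0, .info = BTF_KIND_CONST << 24, .type = 2 },
 *		{ .name_off = 0, .info = BTF_KIND_PTR << 24, .type = 0 },
 *	};
 *
 * The first entry gets type_id 1 ([1] CONST) and refers to type_id 2
 * ([2] PTR), which in turn refers to type_id 0, i.e. "void".
 */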
92 
93 /* BTF verification:
94  *
95  * To verify BTF data, two passes are needed.
96  *
97  * Pass #1
98  * ~~~~~~~
99  * The first pass is to collect all btf_type objects into
100  * an array: "btf->types".
101  *
102  * Depending on the C type that a btf_type is describing,
103  * a btf_type may be followed by extra data.  We don't know
104  * how many btf_types there are, and more importantly we don't
105  * know where each btf_type is located in the type section.
106  *
107  * Without knowing the location of each type_id, most verifications
108  * cannot be done.  e.g. an earlier btf_type may refer to a later
109  * btf_type (recall the "const void *" above), so we cannot
110  * check this type-reference in the first pass.
111  *
112  * The first pass still does some verifications (e.g.
113  * checking that the name is a valid offset into the string section).
114  *
115  * Pass #2
116  * ~~~~~~~
117  * The main focus is to resolve a btf_type that is referring
118  * to another type.
119  *
120  * We have to ensure the type being referred to:
121  * 1) does exist in the BTF (i.e. in btf->types[])
122  * 2) does not cause a loop:
123  *	struct A {
124  *		struct B b;
125  *	};
126  *
127  *	struct B {
128  *		struct A a;
129  *	};
130  *
131  * btf_type_needs_resolve() decides if a btf_type needs
132  * to be resolved.
133  *
134  * The needs_resolve type implements the "resolve()" ops which
135  * essentially does a DFS and detects back edges.
136  *
137  * During resolve (or DFS), different C types have different
138  * "RESOLVED" conditions.
139  *
140  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
141  * members because a member always refers to another
142  * type.  A struct's member can be treated as "RESOLVED" if
143  * it refers to a BTF_KIND_PTR.  Otherwise, the
144  * following valid C struct would be rejected:
145  *
146  *	struct A {
147  *		int m;
148  *		struct A *a;
149  *	};
150  *
151  * When resolving a BTF_KIND_PTR, resolution needs to continue if
152  * it refers to another BTF_KIND_PTR.  Otherwise, we cannot
153  * detect a pointer loop, e.g.:
154  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
155  *                        ^                                         |
156  *                        +-----------------------------------------+
157  *
158  */
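
/* A rough walk-through of how the struct A/struct B loop above is
 * caught, assuming struct A is type_id 1 and struct B is type_id 2:
 *
 *	resolve A:  env_stack_push(A)  -> visit_states[1] = VISITED
 *	  member b refers to B:
 *	    env_stack_push(B)          -> visit_states[2] = VISITED
 *	      member a refers back to A:
 *	        env_stack_push(A)      -> -EEXIST, because
 *	                                  visit_states[1] != NOT_VISITED
 *
 * Pushing an already-VISITED (i.e. still on the stack) type fails,
 * which is what surfaces the back edge.  A type that finishes
 * resolving is marked RESOLVED instead and can be referenced freely.
 */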
159 
160 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
161 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
162 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
163 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
164 #define BITS_ROUNDUP_BYTES(bits) \
165 	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
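
/* A few worked examples of the bit helpers above (plain arithmetic):
 *
 *	BITS_ROUNDDOWN_BYTES(13) == 1	(13 >> 3)
 *	BITS_PER_BYTE_MASKED(13) == 5	(13 & 7)
 *	BITS_ROUNDUP_BYTES(13)   == 2	(1 + !!5)
 *	BITS_ROUNDUP_BYTES(16)   == 2	(2 + !!0)
 */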
166 
167 #define BTF_INFO_MASK 0x8f00ffff
168 #define BTF_INT_MASK 0x0fffffff
169 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
170 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
171 
172 /* 16MB for 64k structs, each with 16 members, and
173  * a few MB of space for the string section.
174  * The hard limit is S32_MAX.
175  */
176 #define BTF_MAX_SIZE (16 * 1024 * 1024)
177 
178 #define for_each_member(i, struct_type, member)			\
179 	for (i = 0, member = btf_type_member(struct_type);	\
180 	     i < btf_type_vlen(struct_type);			\
181 	     i++, member++)
182 
183 #define for_each_member_from(i, from, struct_type, member)		\
184 	for (i = from, member = btf_type_member(struct_type) + from;	\
185 	     i < btf_type_vlen(struct_type);				\
186 	     i++, member++)
187 
188 #define for_each_vsi(i, struct_type, member)			\
189 	for (i = 0, member = btf_type_var_secinfo(struct_type);	\
190 	     i < btf_type_vlen(struct_type);			\
191 	     i++, member++)
192 
193 #define for_each_vsi_from(i, from, struct_type, member)				\
194 	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
195 	     i < btf_type_vlen(struct_type);					\
196 	     i++, member++)
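
/* A minimal usage sketch of the iterators above (variable names are
 * illustrative): walking the members of a STRUCT/UNION btf_type.
 *
 *	u16 i;
 *	const struct btf_member *member;
 *
 *	for_each_member(i, struct_type, member) {
 *		const struct btf_type *member_type =
 *			btf_type_by_id(btf, member->type);
 *		...
 *	}
 */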
197 
198 DEFINE_IDR(btf_idr);
199 DEFINE_SPINLOCK(btf_idr_lock);
200 
201 struct btf {
202 	void *data;
203 	struct btf_type **types;
204 	u32 *resolved_ids;
205 	u32 *resolved_sizes;
206 	const char *strings;
207 	void *nohdr_data;
208 	struct btf_header hdr;
209 	u32 nr_types;
210 	u32 types_size;
211 	u32 data_size;
212 	refcount_t refcnt;
213 	u32 id;
214 	struct rcu_head rcu;
215 };
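
/* A sketch of how the arrays above are indexed: all of them are
 * indexed by type_id, with types[0] fixed to &btf_void and
 * types[1..nr_types] coming from the type section in order.
 * resolved_ids[i] and resolved_sizes[i] cache the outcome of
 * resolving types[i]; e.g. for an ARRAY of ten 4-byte ints,
 * resolved_sizes[i] would end up as 40 so that later size lookups
 * need not re-follow the element type.
 */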
216 
217 enum verifier_phase {
218 	CHECK_META,
219 	CHECK_TYPE,
220 };
221 
222 struct resolve_vertex {
223 	const struct btf_type *t;
224 	u32 type_id;
225 	u16 next_member;
226 };
227 
228 enum visit_state {
229 	NOT_VISITED,
230 	VISITED,
231 	RESOLVED,
232 };
233 
234 enum resolve_mode {
235 	RESOLVE_TBD,	/* To Be Determined */
236 	RESOLVE_PTR,	/* Resolving for Pointer */
237 	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
238 					 * or array
239 					 */
240 };
241 
242 #define MAX_RESOLVE_DEPTH 32
243 
244 struct btf_sec_info {
245 	u32 off;
246 	u32 len;
247 };
248 
249 struct btf_verifier_env {
250 	struct btf *btf;
251 	u8 *visit_states;
252 	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
253 	struct bpf_verifier_log log;
254 	u32 log_type_id;
255 	u32 top_stack;
256 	enum verifier_phase phase;
257 	enum resolve_mode resolve_mode;
258 };
259 
260 static const char * const btf_kind_str[NR_BTF_KINDS] = {
261 	[BTF_KIND_UNKN]		= "UNKNOWN",
262 	[BTF_KIND_INT]		= "INT",
263 	[BTF_KIND_PTR]		= "PTR",
264 	[BTF_KIND_ARRAY]	= "ARRAY",
265 	[BTF_KIND_STRUCT]	= "STRUCT",
266 	[BTF_KIND_UNION]	= "UNION",
267 	[BTF_KIND_ENUM]		= "ENUM",
268 	[BTF_KIND_FWD]		= "FWD",
269 	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
270 	[BTF_KIND_VOLATILE]	= "VOLATILE",
271 	[BTF_KIND_CONST]	= "CONST",
272 	[BTF_KIND_RESTRICT]	= "RESTRICT",
273 	[BTF_KIND_FUNC]		= "FUNC",
274 	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
275 	[BTF_KIND_VAR]		= "VAR",
276 	[BTF_KIND_DATASEC]	= "DATASEC",
277 };
278 
279 struct btf_kind_operations {
280 	s32 (*check_meta)(struct btf_verifier_env *env,
281 			  const struct btf_type *t,
282 			  u32 meta_left);
283 	int (*resolve)(struct btf_verifier_env *env,
284 		       const struct resolve_vertex *v);
285 	int (*check_member)(struct btf_verifier_env *env,
286 			    const struct btf_type *struct_type,
287 			    const struct btf_member *member,
288 			    const struct btf_type *member_type);
289 	int (*check_kflag_member)(struct btf_verifier_env *env,
290 				  const struct btf_type *struct_type,
291 				  const struct btf_member *member,
292 				  const struct btf_type *member_type);
293 	void (*log_details)(struct btf_verifier_env *env,
294 			    const struct btf_type *t);
295 	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
296 			 u32 type_id, void *data, u8 bits_offsets,
297 			 struct seq_file *m);
298 };
299 
300 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
301 static struct btf_type btf_void;
302 
303 static int btf_resolve(struct btf_verifier_env *env,
304 		       const struct btf_type *t, u32 type_id);
305 
306 static bool btf_type_is_modifier(const struct btf_type *t)
307 {
308 	/* Some of them are not strictly C modifiers,
309 	 * but they are grouped into the same bucket
310 	 * as far as BTF is concerned:
311 	 *   A type (t) that refers to another
312 	 *   type through t->type AND its size cannot
313 	 *   be determined without following the t->type.
314 	 *
315 	 * ptr does not fall into this bucket
316 	 * because its size is always sizeof(void *).
317 	 */
318 	switch (BTF_INFO_KIND(t->info)) {
319 	case BTF_KIND_TYPEDEF:
320 	case BTF_KIND_VOLATILE:
321 	case BTF_KIND_CONST:
322 	case BTF_KIND_RESTRICT:
323 		return true;
324 	}
325 
326 	return false;
327 }
328 
329 bool btf_type_is_void(const struct btf_type *t)
330 {
331 	return t == &btf_void;
332 }
333 
334 static bool btf_type_is_fwd(const struct btf_type *t)
335 {
336 	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
337 }
338 
339 static bool btf_type_nosize(const struct btf_type *t)
340 {
341 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
342 	       btf_type_is_func(t) || btf_type_is_func_proto(t);
343 }
344 
345 static bool btf_type_nosize_or_null(const struct btf_type *t)
346 {
347 	return !t || btf_type_nosize(t);
348 }
349 
350 /* A union is only a special case of a struct:
351  * all of its members have offsetof(member) == 0.
352  */
353 static bool btf_type_is_struct(const struct btf_type *t)
354 {
355 	u8 kind = BTF_INFO_KIND(t->info);
356 
357 	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
358 }
359 
360 static bool __btf_type_is_struct(const struct btf_type *t)
361 {
362 	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
363 }
364 
365 static bool btf_type_is_array(const struct btf_type *t)
366 {
367 	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
368 }
369 
370 static bool btf_type_is_var(const struct btf_type *t)
371 {
372 	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
373 }
374 
375 static bool btf_type_is_datasec(const struct btf_type *t)
376 {
377 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
378 }
379 
380 /* Types that act only as a source, not sink or intermediate
381  * type when resolving.
382  */
383 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
384 {
385 	return btf_type_is_var(t) ||
386 	       btf_type_is_datasec(t);
387 }
388 
389 /* What types need to be resolved?
390  *
391  * btf_type_is_modifier() is an obvious one.
392  *
393  * btf_type_is_struct() because its member refers to
394  * another type (through member->type).
395  *
396  * btf_type_is_var() because the variable refers to
397  * another type. btf_type_is_datasec() holds multiple
398  * btf_type_is_var() types that need resolving.
399  *
400  * btf_type_is_array() because its element (array->type)
401  * refers to another type.  An array can be thought of as a
402  * special case of a struct where the same member type is
403  * repeated array->nelems times.
404  */
405 static bool btf_type_needs_resolve(const struct btf_type *t)
406 {
407 	return btf_type_is_modifier(t) ||
408 	       btf_type_is_ptr(t) ||
409 	       btf_type_is_struct(t) ||
410 	       btf_type_is_array(t) ||
411 	       btf_type_is_var(t) ||
412 	       btf_type_is_datasec(t);
413 }
414 
415 /* t->size can be used */
416 static bool btf_type_has_size(const struct btf_type *t)
417 {
418 	switch (BTF_INFO_KIND(t->info)) {
419 	case BTF_KIND_INT:
420 	case BTF_KIND_STRUCT:
421 	case BTF_KIND_UNION:
422 	case BTF_KIND_ENUM:
423 	case BTF_KIND_DATASEC:
424 		return true;
425 	}
426 
427 	return false;
428 }
429 
430 static const char *btf_int_encoding_str(u8 encoding)
431 {
432 	if (encoding == 0)
433 		return "(none)";
434 	else if (encoding == BTF_INT_SIGNED)
435 		return "SIGNED";
436 	else if (encoding == BTF_INT_CHAR)
437 		return "CHAR";
438 	else if (encoding == BTF_INT_BOOL)
439 		return "BOOL";
440 	else
441 		return "UNKN";
442 }
443 
444 static u16 btf_type_vlen(const struct btf_type *t)
445 {
446 	return BTF_INFO_VLEN(t->info);
447 }
448 
449 static bool btf_type_kflag(const struct btf_type *t)
450 {
451 	return BTF_INFO_KFLAG(t->info);
452 }
453 
454 static u32 btf_member_bit_offset(const struct btf_type *struct_type,
455 			     const struct btf_member *member)
456 {
457 	return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
458 					   : member->offset;
459 }
460 
461 static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
462 				    const struct btf_member *member)
463 {
464 	return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
465 					   : 0;
466 }
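
/* For a kind_flag (kflag) struct, member->offset packs two fields:
 * bits 0-23 hold the bit offset and bits 24-31 hold the bitfield
 * size.  An illustrative encoding for "int x:5" placed at bit
 * offset 10 would be:
 *
 *	member->offset                           == ((5 << 24) | 10)
 *	BTF_MEMBER_BITFIELD_SIZE(member->offset) == 5
 *	BTF_MEMBER_BIT_OFFSET(member->offset)    == 10
 *
 * Without kflag, member->offset is the plain bit offset and the
 * bitfield size is taken to be 0 (see the helpers above).
 */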
467 
468 static u32 btf_type_int(const struct btf_type *t)
469 {
470 	return *(u32 *)(t + 1);
471 }
472 
473 static const struct btf_array *btf_type_array(const struct btf_type *t)
474 {
475 	return (const struct btf_array *)(t + 1);
476 }
477 
478 static const struct btf_member *btf_type_member(const struct btf_type *t)
479 {
480 	return (const struct btf_member *)(t + 1);
481 }
482 
483 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
484 {
485 	return (const struct btf_enum *)(t + 1);
486 }
487 
488 static const struct btf_var *btf_type_var(const struct btf_type *t)
489 {
490 	return (const struct btf_var *)(t + 1);
491 }
492 
493 static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
494 {
495 	return (const struct btf_var_secinfo *)(t + 1);
496 }
497 
498 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
499 {
500 	return kind_ops[BTF_INFO_KIND(t->info)];
501 }
502 
503 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
504 {
505 	return BTF_STR_OFFSET_VALID(offset) &&
506 		offset < btf->hdr.str_len;
507 }
508 
509 static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
510 {
511 	if ((first ? !isalpha(c) :
512 		     !isalnum(c)) &&
513 	    c != '_' &&
514 	    ((c == '.' && !dot_ok) ||
515 	      c != '.'))
516 		return false;
517 	return true;
518 }
519 
520 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
521 {
522 	/* offset must be valid */
523 	const char *src = &btf->strings[offset];
524 	const char *src_limit;
525 
526 	if (!__btf_name_char_ok(*src, true, dot_ok))
527 		return false;
528 
529 	/* set a limit on identifier length */
530 	src_limit = src + KSYM_NAME_LEN;
531 	src++;
532 	while (*src && src < src_limit) {
533 		if (!__btf_name_char_ok(*src, false, dot_ok))
534 			return false;
535 		src++;
536 	}
537 
538 	return !*src;
539 }
540 
541 /* Only C-style identifiers are permitted. This can be relaxed if
542  * necessary.
543  */
544 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
545 {
546 	return __btf_name_valid(btf, offset, false);
547 }
548 
549 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
550 {
551 	return __btf_name_valid(btf, offset, true);
552 }
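
/* A few illustrative inputs for the checks above:
 *	"task_struct"	-> valid identifier and valid section name
 *	"_license"	-> valid (a leading '_' is allowed)
 *	".bss"		-> valid section name only (needs dot_ok)
 *	"9_entries"	-> invalid (must not start with a digit)
 */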
553 
554 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
555 {
556 	if (!offset)
557 		return "(anon)";
558 	else if (offset < btf->hdr.str_len)
559 		return &btf->strings[offset];
560 	else
561 		return "(invalid-name-offset)";
562 }
563 
564 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
565 {
566 	if (offset < btf->hdr.str_len)
567 		return &btf->strings[offset];
568 
569 	return NULL;
570 }
571 
572 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
573 {
574 	if (type_id > btf->nr_types)
575 		return NULL;
576 
577 	return btf->types[type_id];
578 }
579 
580 /*
581  * A regular int is not a bitfield and must be either
582  * u8/u16/u32/u64 or __int128.
583  */
584 static bool btf_type_int_is_regular(const struct btf_type *t)
585 {
586 	u8 nr_bits, nr_bytes;
587 	u32 int_data;
588 
589 	int_data = btf_type_int(t);
590 	nr_bits = BTF_INT_BITS(int_data);
591 	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
592 	if (BITS_PER_BYTE_MASKED(nr_bits) ||
593 	    BTF_INT_OFFSET(int_data) ||
594 	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
595 	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
596 	     nr_bytes != (2 * sizeof(u64)))) {
597 		return false;
598 	}
599 
600 	return true;
601 }
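
/* Illustrative encodings against the check above:
 *	BTF_INT_BITS == 32,  BTF_INT_OFFSET == 0  -> regular (u32-sized)
 *	BTF_INT_BITS == 128, BTF_INT_OFFSET == 0  -> regular (__int128)
 *	BTF_INT_BITS == 12                        -> not regular (12 bits
 *	                                             is not a whole
 *	                                             u8/u16/u32/u64/__int128)
 */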
602 
603 /*
604  * Check that given struct member is a regular int with expected
605  * offset and size.
606  */
607 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
608 			   const struct btf_member *m,
609 			   u32 expected_offset, u32 expected_size)
610 {
611 	const struct btf_type *t;
612 	u32 id, int_data;
613 	u8 nr_bits;
614 
615 	id = m->type;
616 	t = btf_type_id_size(btf, &id, NULL);
617 	if (!t || !btf_type_is_int(t))
618 		return false;
619 
620 	int_data = btf_type_int(t);
621 	nr_bits = BTF_INT_BITS(int_data);
622 	if (btf_type_kflag(s)) {
623 		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
624 		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
625 
626 		/* If kflag is set, the int should be a regular int and
627 		 * the bit offset should be at a byte boundary.
628 		 */
629 		return !bitfield_size &&
630 		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
631 		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
632 	}
633 
634 	if (BTF_INT_OFFSET(int_data) ||
635 	    BITS_PER_BYTE_MASKED(m->offset) ||
636 	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
637 	    BITS_PER_BYTE_MASKED(nr_bits) ||
638 	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
639 		return false;
640 
641 	return true;
642 }
643 
644 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
645 					      const char *fmt, ...)
646 {
647 	va_list args;
648 
649 	va_start(args, fmt);
650 	bpf_verifier_vlog(log, fmt, args);
651 	va_end(args);
652 }
653 
654 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
655 					    const char *fmt, ...)
656 {
657 	struct bpf_verifier_log *log = &env->log;
658 	va_list args;
659 
660 	if (!bpf_verifier_log_needed(log))
661 		return;
662 
663 	va_start(args, fmt);
664 	bpf_verifier_vlog(log, fmt, args);
665 	va_end(args);
666 }
667 
668 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
669 						   const struct btf_type *t,
670 						   bool log_details,
671 						   const char *fmt, ...)
672 {
673 	struct bpf_verifier_log *log = &env->log;
674 	u8 kind = BTF_INFO_KIND(t->info);
675 	struct btf *btf = env->btf;
676 	va_list args;
677 
678 	if (!bpf_verifier_log_needed(log))
679 		return;
680 
681 	/* btf verifier prints all types it is processing via
682 	 * btf_verifier_log_type(..., fmt = NULL).
683 	 * Skip those prints for in-kernel BTF verification.
684 	 */
685 	if (log->level == BPF_LOG_KERNEL && !fmt)
686 		return;
687 
688 	__btf_verifier_log(log, "[%u] %s %s%s",
689 			   env->log_type_id,
690 			   btf_kind_str[kind],
691 			   __btf_name_by_offset(btf, t->name_off),
692 			   log_details ? " " : "");
693 
694 	if (log_details)
695 		btf_type_ops(t)->log_details(env, t);
696 
697 	if (fmt && *fmt) {
698 		__btf_verifier_log(log, " ");
699 		va_start(args, fmt);
700 		bpf_verifier_vlog(log, fmt, args);
701 		va_end(args);
702 	}
703 
704 	__btf_verifier_log(log, "\n");
705 }
706 
707 #define btf_verifier_log_type(env, t, ...) \
708 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
709 #define btf_verifier_log_basic(env, t, ...) \
710 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
711 
712 __printf(4, 5)
713 static void btf_verifier_log_member(struct btf_verifier_env *env,
714 				    const struct btf_type *struct_type,
715 				    const struct btf_member *member,
716 				    const char *fmt, ...)
717 {
718 	struct bpf_verifier_log *log = &env->log;
719 	struct btf *btf = env->btf;
720 	va_list args;
721 
722 	if (!bpf_verifier_log_needed(log))
723 		return;
724 
725 	if (log->level == BPF_LOG_KERNEL && !fmt)
726 		return;
727 	/* The CHECK_META phase already did a btf dump.
728 	 *
729 	 * If a member is logged again, it must have hit an error
730 	 * while parsing this member.  It is useful to print out which
731 	 * struct this member belongs to.
732 	 */
733 	if (env->phase != CHECK_META)
734 		btf_verifier_log_type(env, struct_type, NULL);
735 
736 	if (btf_type_kflag(struct_type))
737 		__btf_verifier_log(log,
738 				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
739 				   __btf_name_by_offset(btf, member->name_off),
740 				   member->type,
741 				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
742 				   BTF_MEMBER_BIT_OFFSET(member->offset));
743 	else
744 		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
745 				   __btf_name_by_offset(btf, member->name_off),
746 				   member->type, member->offset);
747 
748 	if (fmt && *fmt) {
749 		__btf_verifier_log(log, " ");
750 		va_start(args, fmt);
751 		bpf_verifier_vlog(log, fmt, args);
752 		va_end(args);
753 	}
754 
755 	__btf_verifier_log(log, "\n");
756 }
757 
758 __printf(4, 5)
759 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
760 				 const struct btf_type *datasec_type,
761 				 const struct btf_var_secinfo *vsi,
762 				 const char *fmt, ...)
763 {
764 	struct bpf_verifier_log *log = &env->log;
765 	va_list args;
766 
767 	if (!bpf_verifier_log_needed(log))
768 		return;
769 	if (log->level == BPF_LOG_KERNEL && !fmt)
770 		return;
771 	if (env->phase != CHECK_META)
772 		btf_verifier_log_type(env, datasec_type, NULL);
773 
774 	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
775 			   vsi->type, vsi->offset, vsi->size);
776 	if (fmt && *fmt) {
777 		__btf_verifier_log(log, " ");
778 		va_start(args, fmt);
779 		bpf_verifier_vlog(log, fmt, args);
780 		va_end(args);
781 	}
782 
783 	__btf_verifier_log(log, "\n");
784 }
785 
786 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
787 				 u32 btf_data_size)
788 {
789 	struct bpf_verifier_log *log = &env->log;
790 	const struct btf *btf = env->btf;
791 	const struct btf_header *hdr;
792 
793 	if (!bpf_verifier_log_needed(log))
794 		return;
795 
796 	if (log->level == BPF_LOG_KERNEL)
797 		return;
798 	hdr = &btf->hdr;
799 	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
800 	__btf_verifier_log(log, "version: %u\n", hdr->version);
801 	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
802 	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
803 	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
804 	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
805 	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
806 	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
807 	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
808 }
809 
810 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
811 {
812 	struct btf *btf = env->btf;
813 
814 	/* < 2 because +1 for btf_void which is always in btf->types[0].
815 	 * btf_void is not accounted for in btf->nr_types because btf_void
816 	 * does not come from the BTF file.
817 	 */
818 	if (btf->types_size - btf->nr_types < 2) {
819 		/* Expand 'types' array */
820 
821 		struct btf_type **new_types;
822 		u32 expand_by, new_size;
823 
824 		if (btf->types_size == BTF_MAX_TYPE) {
825 			btf_verifier_log(env, "Exceeded max num of types");
826 			return -E2BIG;
827 		}
828 
829 		expand_by = max_t(u32, btf->types_size >> 2, 16);
830 		new_size = min_t(u32, BTF_MAX_TYPE,
831 				 btf->types_size + expand_by);
832 
833 		new_types = kvcalloc(new_size, sizeof(*new_types),
834 				     GFP_KERNEL | __GFP_NOWARN);
835 		if (!new_types)
836 			return -ENOMEM;
837 
838 		if (btf->nr_types == 0)
839 			new_types[0] = &btf_void;
840 		else
841 			memcpy(new_types, btf->types,
842 			       sizeof(*btf->types) * (btf->nr_types + 1));
843 
844 		kvfree(btf->types);
845 		btf->types = new_types;
846 		btf->types_size = new_size;
847 	}
848 
849 	btf->types[++(btf->nr_types)] = t;
850 
851 	return 0;
852 }
853 
854 static int btf_alloc_id(struct btf *btf)
855 {
856 	int id;
857 
858 	idr_preload(GFP_KERNEL);
859 	spin_lock_bh(&btf_idr_lock);
860 	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
861 	if (id > 0)
862 		btf->id = id;
863 	spin_unlock_bh(&btf_idr_lock);
864 	idr_preload_end();
865 
866 	if (WARN_ON_ONCE(!id))
867 		return -ENOSPC;
868 
869 	return id > 0 ? 0 : id;
870 }
871 
872 static void btf_free_id(struct btf *btf)
873 {
874 	unsigned long flags;
875 
876 	/*
877 	 * In map-in-map, calling map_delete_elem() on the outer
878 	 * map will call bpf_map_put() on the inner map.
879 	 * It will then eventually call btf_free_id()
880 	 * on the inner map.  Some map_delete_elem()
881 	 * implementations may run with IRQs disabled, so
882 	 * we need to use the _irqsave() version instead
883 	 * of the _bh() version.
884 	 */
885 	spin_lock_irqsave(&btf_idr_lock, flags);
886 	idr_remove(&btf_idr, btf->id);
887 	spin_unlock_irqrestore(&btf_idr_lock, flags);
888 }
889 
890 static void btf_free(struct btf *btf)
891 {
892 	kvfree(btf->types);
893 	kvfree(btf->resolved_sizes);
894 	kvfree(btf->resolved_ids);
895 	kvfree(btf->data);
896 	kfree(btf);
897 }
898 
899 static void btf_free_rcu(struct rcu_head *rcu)
900 {
901 	struct btf *btf = container_of(rcu, struct btf, rcu);
902 
903 	btf_free(btf);
904 }
905 
906 void btf_put(struct btf *btf)
907 {
908 	if (btf && refcount_dec_and_test(&btf->refcnt)) {
909 		btf_free_id(btf);
910 		call_rcu(&btf->rcu, btf_free_rcu);
911 	}
912 }
913 
914 static int env_resolve_init(struct btf_verifier_env *env)
915 {
916 	struct btf *btf = env->btf;
917 	u32 nr_types = btf->nr_types;
918 	u32 *resolved_sizes = NULL;
919 	u32 *resolved_ids = NULL;
920 	u8 *visit_states = NULL;
921 
922 	/* +1 for btf_void */
923 	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
924 				  GFP_KERNEL | __GFP_NOWARN);
925 	if (!resolved_sizes)
926 		goto nomem;
927 
928 	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
929 				GFP_KERNEL | __GFP_NOWARN);
930 	if (!resolved_ids)
931 		goto nomem;
932 
933 	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
934 				GFP_KERNEL | __GFP_NOWARN);
935 	if (!visit_states)
936 		goto nomem;
937 
938 	btf->resolved_sizes = resolved_sizes;
939 	btf->resolved_ids = resolved_ids;
940 	env->visit_states = visit_states;
941 
942 	return 0;
943 
944 nomem:
945 	kvfree(resolved_sizes);
946 	kvfree(resolved_ids);
947 	kvfree(visit_states);
948 	return -ENOMEM;
949 }
950 
951 static void btf_verifier_env_free(struct btf_verifier_env *env)
952 {
953 	kvfree(env->visit_states);
954 	kfree(env);
955 }
956 
957 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
958 				     const struct btf_type *next_type)
959 {
960 	switch (env->resolve_mode) {
961 	case RESOLVE_TBD:
962 		/* int, enum or void is a sink */
963 		return !btf_type_needs_resolve(next_type);
964 	case RESOLVE_PTR:
965 		/* int, enum, void, struct, array, func or func_proto is a sink
966 		 * for ptr
967 		 */
968 		return !btf_type_is_modifier(next_type) &&
969 			!btf_type_is_ptr(next_type);
970 	case RESOLVE_STRUCT_OR_ARRAY:
971 		/* int, enum, void, ptr, func or func_proto is a sink
972 		 * for struct and array
973 		 */
974 		return !btf_type_is_modifier(next_type) &&
975 			!btf_type_is_array(next_type) &&
976 			!btf_type_is_struct(next_type);
977 	default:
978 		BUG();
979 	}
980 }
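
/* An illustrative case: for "const int *p" the chain is
 * PTR -> CONST -> INT.  Resolving the PTR sets RESOLVE_PTR mode; the
 * CONST is a modifier (not a sink) and gets pushed, but the INT is a
 * sink for RESOLVE_PTR, so the DFS stops there.
 */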
981 
982 static bool env_type_is_resolved(const struct btf_verifier_env *env,
983 				 u32 type_id)
984 {
985 	return env->visit_states[type_id] == RESOLVED;
986 }
987 
988 static int env_stack_push(struct btf_verifier_env *env,
989 			  const struct btf_type *t, u32 type_id)
990 {
991 	struct resolve_vertex *v;
992 
993 	if (env->top_stack == MAX_RESOLVE_DEPTH)
994 		return -E2BIG;
995 
996 	if (env->visit_states[type_id] != NOT_VISITED)
997 		return -EEXIST;
998 
999 	env->visit_states[type_id] = VISITED;
1000 
1001 	v = &env->stack[env->top_stack++];
1002 	v->t = t;
1003 	v->type_id = type_id;
1004 	v->next_member = 0;
1005 
1006 	if (env->resolve_mode == RESOLVE_TBD) {
1007 		if (btf_type_is_ptr(t))
1008 			env->resolve_mode = RESOLVE_PTR;
1009 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1010 			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1011 	}
1012 
1013 	return 0;
1014 }
1015 
1016 static void env_stack_set_next_member(struct btf_verifier_env *env,
1017 				      u16 next_member)
1018 {
1019 	env->stack[env->top_stack - 1].next_member = next_member;
1020 }
1021 
1022 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1023 				   u32 resolved_type_id,
1024 				   u32 resolved_size)
1025 {
1026 	u32 type_id = env->stack[--(env->top_stack)].type_id;
1027 	struct btf *btf = env->btf;
1028 
1029 	btf->resolved_sizes[type_id] = resolved_size;
1030 	btf->resolved_ids[type_id] = resolved_type_id;
1031 	env->visit_states[type_id] = RESOLVED;
1032 }
1033 
1034 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1035 {
1036 	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1037 }
1038 
1039 /* The input param "type_id" must point to a needs_resolve type */
1040 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1041 						  u32 *type_id)
1042 {
1043 	*type_id = btf->resolved_ids[*type_id];
1044 	return btf_type_by_id(btf, *type_id);
1045 }
1046 
1047 const struct btf_type *btf_type_id_size(const struct btf *btf,
1048 					u32 *type_id, u32 *ret_size)
1049 {
1050 	const struct btf_type *size_type;
1051 	u32 size_type_id = *type_id;
1052 	u32 size = 0;
1053 
1054 	size_type = btf_type_by_id(btf, size_type_id);
1055 	if (btf_type_nosize_or_null(size_type))
1056 		return NULL;
1057 
1058 	if (btf_type_has_size(size_type)) {
1059 		size = size_type->size;
1060 	} else if (btf_type_is_array(size_type)) {
1061 		size = btf->resolved_sizes[size_type_id];
1062 	} else if (btf_type_is_ptr(size_type)) {
1063 		size = sizeof(void *);
1064 	} else {
1065 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1066 				 !btf_type_is_var(size_type)))
1067 			return NULL;
1068 
1069 		size_type_id = btf->resolved_ids[size_type_id];
1070 		size_type = btf_type_by_id(btf, size_type_id);
1071 		if (btf_type_nosize_or_null(size_type))
1072 			return NULL;
1073 		else if (btf_type_has_size(size_type))
1074 			size = size_type->size;
1075 		else if (btf_type_is_array(size_type))
1076 			size = btf->resolved_sizes[size_type_id];
1077 		else if (btf_type_is_ptr(size_type))
1078 			size = sizeof(void *);
1079 		else
1080 			return NULL;
1081 	}
1082 
1083 	*type_id = size_type_id;
1084 	if (ret_size)
1085 		*ret_size = size;
1086 
1087 	return size_type;
1088 }
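
/* A sketch of what btf_type_id_size() above returns.  Assume:
 *	[1] INT "int" size=4
 *	[2] TYPEDEF "myint" type_id=1
 *	[3] CONST (anon) type_id=2
 * After the resolve pass, calling it with *type_id == 3 follows
 * resolved_ids to the INT, returns the INT's btf_type, and sets
 * *type_id to 1 and *ret_size to 4.
 */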
1089 
1090 static int btf_df_check_member(struct btf_verifier_env *env,
1091 			       const struct btf_type *struct_type,
1092 			       const struct btf_member *member,
1093 			       const struct btf_type *member_type)
1094 {
1095 	btf_verifier_log_basic(env, struct_type,
1096 			       "Unsupported check_member");
1097 	return -EINVAL;
1098 }
1099 
1100 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1101 				     const struct btf_type *struct_type,
1102 				     const struct btf_member *member,
1103 				     const struct btf_type *member_type)
1104 {
1105 	btf_verifier_log_basic(env, struct_type,
1106 			       "Unsupported check_kflag_member");
1107 	return -EINVAL;
1108 }
1109 
1110 /* Used for ptr, array and struct/union type members.
1111  * int, enum and modifier types have their specific callback functions.
1112  */
1113 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1114 					  const struct btf_type *struct_type,
1115 					  const struct btf_member *member,
1116 					  const struct btf_type *member_type)
1117 {
1118 	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1119 		btf_verifier_log_member(env, struct_type, member,
1120 					"Invalid member bitfield_size");
1121 		return -EINVAL;
1122 	}
1123 
1124 	/* bitfield size is 0, so member->offset represents bit offset only.
1125 	 * It is safe to call the non-kflag check_member variants.
1126 	 */
1127 	return btf_type_ops(member_type)->check_member(env, struct_type,
1128 						       member,
1129 						       member_type);
1130 }
1131 
1132 static int btf_df_resolve(struct btf_verifier_env *env,
1133 			  const struct resolve_vertex *v)
1134 {
1135 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1136 	return -EINVAL;
1137 }
1138 
1139 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1140 			    u32 type_id, void *data, u8 bits_offsets,
1141 			    struct seq_file *m)
1142 {
1143 	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1144 }
1145 
1146 static int btf_int_check_member(struct btf_verifier_env *env,
1147 				const struct btf_type *struct_type,
1148 				const struct btf_member *member,
1149 				const struct btf_type *member_type)
1150 {
1151 	u32 int_data = btf_type_int(member_type);
1152 	u32 struct_bits_off = member->offset;
1153 	u32 struct_size = struct_type->size;
1154 	u32 nr_copy_bits;
1155 	u32 bytes_offset;
1156 
1157 	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1158 		btf_verifier_log_member(env, struct_type, member,
1159 					"bits_offset exceeds U32_MAX");
1160 		return -EINVAL;
1161 	}
1162 
1163 	struct_bits_off += BTF_INT_OFFSET(int_data);
1164 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1165 	nr_copy_bits = BTF_INT_BITS(int_data) +
1166 		BITS_PER_BYTE_MASKED(struct_bits_off);
1167 
1168 	if (nr_copy_bits > BITS_PER_U128) {
1169 		btf_verifier_log_member(env, struct_type, member,
1170 					"nr_copy_bits exceeds 128");
1171 		return -EINVAL;
1172 	}
1173 
1174 	if (struct_size < bytes_offset ||
1175 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1176 		btf_verifier_log_member(env, struct_type, member,
1177 					"Member exceeds struct_size");
1178 		return -EINVAL;
1179 	}
1180 
1181 	return 0;
1182 }
1183 
1184 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1185 				      const struct btf_type *struct_type,
1186 				      const struct btf_member *member,
1187 				      const struct btf_type *member_type)
1188 {
1189 	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1190 	u32 int_data = btf_type_int(member_type);
1191 	u32 struct_size = struct_type->size;
1192 	u32 nr_copy_bits;
1193 
1194 	/* a regular int type is required for the kflag int member */
1195 	if (!btf_type_int_is_regular(member_type)) {
1196 		btf_verifier_log_member(env, struct_type, member,
1197 					"Invalid member base type");
1198 		return -EINVAL;
1199 	}
1200 
1201 	/* check sanity of bitfield size */
1202 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1203 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1204 	nr_int_data_bits = BTF_INT_BITS(int_data);
1205 	if (!nr_bits) {
1206 		/* Not a bitfield member, member offset must be at byte
1207 		 * boundary.
1208 		 */
1209 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1210 			btf_verifier_log_member(env, struct_type, member,
1211 						"Invalid member offset");
1212 			return -EINVAL;
1213 		}
1214 
1215 		nr_bits = nr_int_data_bits;
1216 	} else if (nr_bits > nr_int_data_bits) {
1217 		btf_verifier_log_member(env, struct_type, member,
1218 					"Invalid member bitfield_size");
1219 		return -EINVAL;
1220 	}
1221 
1222 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1223 	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1224 	if (nr_copy_bits > BITS_PER_U128) {
1225 		btf_verifier_log_member(env, struct_type, member,
1226 					"nr_copy_bits exceeds 128");
1227 		return -EINVAL;
1228 	}
1229 
1230 	if (struct_size < bytes_offset ||
1231 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1232 		btf_verifier_log_member(env, struct_type, member,
1233 					"Member exceeds struct_size");
1234 		return -EINVAL;
1235 	}
1236 
1237 	return 0;
1238 }
1239 
1240 static s32 btf_int_check_meta(struct btf_verifier_env *env,
1241 			      const struct btf_type *t,
1242 			      u32 meta_left)
1243 {
1244 	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
1245 	u16 encoding;
1246 
1247 	if (meta_left < meta_needed) {
1248 		btf_verifier_log_basic(env, t,
1249 				       "meta_left:%u meta_needed:%u",
1250 				       meta_left, meta_needed);
1251 		return -EINVAL;
1252 	}
1253 
1254 	if (btf_type_vlen(t)) {
1255 		btf_verifier_log_type(env, t, "vlen != 0");
1256 		return -EINVAL;
1257 	}
1258 
1259 	if (btf_type_kflag(t)) {
1260 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1261 		return -EINVAL;
1262 	}
1263 
1264 	int_data = btf_type_int(t);
1265 	if (int_data & ~BTF_INT_MASK) {
1266 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
1267 				       int_data);
1268 		return -EINVAL;
1269 	}
1270 
1271 	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
1272 
1273 	if (nr_bits > BITS_PER_U128) {
1274 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1275 				      BITS_PER_U128);
1276 		return -EINVAL;
1277 	}
1278 
1279 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1280 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1281 		return -EINVAL;
1282 	}
1283 
1284 	/*
1285 	 * Only one of the encoding bits is allowed and it
1286 	 * should be sufficient for the pretty print purpose (i.e. decoding).
1287 	 * Multiple bits can be allowed later if it is found
1288 	 * to be insufficient.
1289 	 */
1290 	encoding = BTF_INT_ENCODING(int_data);
1291 	if (encoding &&
1292 	    encoding != BTF_INT_SIGNED &&
1293 	    encoding != BTF_INT_CHAR &&
1294 	    encoding != BTF_INT_BOOL) {
1295 		btf_verifier_log_type(env, t, "Unsupported encoding");
1296 		return -ENOTSUPP;
1297 	}
1298 
1299 	btf_verifier_log_type(env, t, NULL);
1300 
1301 	return meta_needed;
1302 }
1303 
1304 static void btf_int_log(struct btf_verifier_env *env,
1305 			const struct btf_type *t)
1306 {
1307 	int int_data = btf_type_int(t);
1308 
1309 	btf_verifier_log(env,
1310 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1311 			 t->size, BTF_INT_OFFSET(int_data),
1312 			 BTF_INT_BITS(int_data),
1313 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1314 }
1315 
1316 static void btf_int128_print(struct seq_file *m, void *data)
1317 {
1318 	/* data points to a __int128 number.
1319 	 * Suppose
1320 	 *     int128_num = *(__int128 *)data;
1321 	 * The formulas below show what upper_num and lower_num represent:
1322 	 *     upper_num = int128_num >> 64;
1323 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
1324 	 */
1325 	u64 upper_num, lower_num;
1326 
1327 #ifdef __BIG_ENDIAN_BITFIELD
1328 	upper_num = *(u64 *)data;
1329 	lower_num = *(u64 *)(data + 8);
1330 #else
1331 	upper_num = *(u64 *)(data + 8);
1332 	lower_num = *(u64 *)data;
1333 #endif
1334 	if (upper_num == 0)
1335 		seq_printf(m, "0x%llx", lower_num);
1336 	else
1337 		seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
1338 }
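
/* An illustrative value: for int128_num == (((__int128)1 << 64) | 5),
 * upper_num is 1 and lower_num is 5, so the function above prints
 * "0x10000000000000005" (the lower half is zero-padded to 16 hex
 * digits); a value that fits in 64 bits prints without padding.
 */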
1339 
1340 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1341 			     u16 right_shift_bits)
1342 {
1343 	u64 upper_num, lower_num;
1344 
1345 #ifdef __BIG_ENDIAN_BITFIELD
1346 	upper_num = print_num[0];
1347 	lower_num = print_num[1];
1348 #else
1349 	upper_num = print_num[1];
1350 	lower_num = print_num[0];
1351 #endif
1352 
1353 	/* shake out unneeded bits by shift/or operations */
1354 	if (left_shift_bits >= 64) {
1355 		upper_num = lower_num << (left_shift_bits - 64);
1356 		lower_num = 0;
1357 	} else {
1358 		upper_num = (upper_num << left_shift_bits) |
1359 			    (lower_num >> (64 - left_shift_bits));
1360 		lower_num = lower_num << left_shift_bits;
1361 	}
1362 
1363 	if (right_shift_bits >= 64) {
1364 		lower_num = upper_num >> (right_shift_bits - 64);
1365 		upper_num = 0;
1366 	} else {
1367 		lower_num = (lower_num >> right_shift_bits) |
1368 			    (upper_num << (64 - right_shift_bits));
1369 		upper_num = upper_num >> right_shift_bits;
1370 	}
1371 
1372 #ifdef __BIG_ENDIAN_BITFIELD
1373 	print_num[0] = upper_num;
1374 	print_num[1] = lower_num;
1375 #else
1376 	print_num[0] = lower_num;
1377 	print_num[1] = upper_num;
1378 #endif
1379 }
1380 
1381 static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1382 				  u8 nr_bits, struct seq_file *m)
1383 {
1384 	u16 left_shift_bits, right_shift_bits;
1385 	u8 nr_copy_bytes;
1386 	u8 nr_copy_bits;
1387 	u64 print_num[2] = {};
1388 
1389 	nr_copy_bits = nr_bits + bits_offset;
1390 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1391 
1392 	memcpy(print_num, data, nr_copy_bytes);
1393 
1394 #ifdef __BIG_ENDIAN_BITFIELD
1395 	left_shift_bits = bits_offset;
1396 #else
1397 	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
1398 #endif
1399 	right_shift_bits = BITS_PER_U128 - nr_bits;
1400 
1401 	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
1402 	btf_int128_print(m, print_num);
1403 }
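
/* A worked example (little endian): printing a 5-bit bitfield that
 * starts at bit 3 of the byte pointed to by "data".  Then
 *	nr_copy_bits     = 5 + 3 = 8, so one byte is copied into print_num;
 *	left_shift_bits  = 128 - 8 = 120;
 *	right_shift_bits = 128 - 5 = 123;
 * and btf_int128_shift() first pushes the copied byte to the very top
 * of the 128-bit value, then shifts back down so only the 5 bitfield
 * bits remain, right aligned, for btf_int128_print().
 */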
1404 
1405 
1406 static void btf_int_bits_seq_show(const struct btf *btf,
1407 				  const struct btf_type *t,
1408 				  void *data, u8 bits_offset,
1409 				  struct seq_file *m)
1410 {
1411 	u32 int_data = btf_type_int(t);
1412 	u8 nr_bits = BTF_INT_BITS(int_data);
1413 	u8 total_bits_offset;
1414 
1415 	/*
1416 	 * bits_offset is at most 7.
1417 	 * BTF_INT_OFFSET() cannot exceed 128 bits.
1418 	 */
1419 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1420 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1421 	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1422 	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1423 }
1424 
1425 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1426 			     u32 type_id, void *data, u8 bits_offset,
1427 			     struct seq_file *m)
1428 {
1429 	u32 int_data = btf_type_int(t);
1430 	u8 encoding = BTF_INT_ENCODING(int_data);
1431 	bool sign = encoding & BTF_INT_SIGNED;
1432 	u8 nr_bits = BTF_INT_BITS(int_data);
1433 
1434 	if (bits_offset || BTF_INT_OFFSET(int_data) ||
1435 	    BITS_PER_BYTE_MASKED(nr_bits)) {
1436 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1437 		return;
1438 	}
1439 
1440 	switch (nr_bits) {
1441 	case 128:
1442 		btf_int128_print(m, data);
1443 		break;
1444 	case 64:
1445 		if (sign)
1446 			seq_printf(m, "%lld", *(s64 *)data);
1447 		else
1448 			seq_printf(m, "%llu", *(u64 *)data);
1449 		break;
1450 	case 32:
1451 		if (sign)
1452 			seq_printf(m, "%d", *(s32 *)data);
1453 		else
1454 			seq_printf(m, "%u", *(u32 *)data);
1455 		break;
1456 	case 16:
1457 		if (sign)
1458 			seq_printf(m, "%d", *(s16 *)data);
1459 		else
1460 			seq_printf(m, "%u", *(u16 *)data);
1461 		break;
1462 	case 8:
1463 		if (sign)
1464 			seq_printf(m, "%d", *(s8 *)data);
1465 		else
1466 			seq_printf(m, "%u", *(u8 *)data);
1467 		break;
1468 	default:
1469 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1470 	}
1471 }
1472 
1473 static const struct btf_kind_operations int_ops = {
1474 	.check_meta = btf_int_check_meta,
1475 	.resolve = btf_df_resolve,
1476 	.check_member = btf_int_check_member,
1477 	.check_kflag_member = btf_int_check_kflag_member,
1478 	.log_details = btf_int_log,
1479 	.seq_show = btf_int_seq_show,
1480 };
1481 
1482 static int btf_modifier_check_member(struct btf_verifier_env *env,
1483 				     const struct btf_type *struct_type,
1484 				     const struct btf_member *member,
1485 				     const struct btf_type *member_type)
1486 {
1487 	const struct btf_type *resolved_type;
1488 	u32 resolved_type_id = member->type;
1489 	struct btf_member resolved_member;
1490 	struct btf *btf = env->btf;
1491 
1492 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1493 	if (!resolved_type) {
1494 		btf_verifier_log_member(env, struct_type, member,
1495 					"Invalid member");
1496 		return -EINVAL;
1497 	}
1498 
1499 	resolved_member = *member;
1500 	resolved_member.type = resolved_type_id;
1501 
1502 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
1503 							 &resolved_member,
1504 							 resolved_type);
1505 }
1506 
1507 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1508 					   const struct btf_type *struct_type,
1509 					   const struct btf_member *member,
1510 					   const struct btf_type *member_type)
1511 {
1512 	const struct btf_type *resolved_type;
1513 	u32 resolved_type_id = member->type;
1514 	struct btf_member resolved_member;
1515 	struct btf *btf = env->btf;
1516 
1517 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1518 	if (!resolved_type) {
1519 		btf_verifier_log_member(env, struct_type, member,
1520 					"Invalid member");
1521 		return -EINVAL;
1522 	}
1523 
1524 	resolved_member = *member;
1525 	resolved_member.type = resolved_type_id;
1526 
1527 	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1528 							       &resolved_member,
1529 							       resolved_type);
1530 }
1531 
1532 static int btf_ptr_check_member(struct btf_verifier_env *env,
1533 				const struct btf_type *struct_type,
1534 				const struct btf_member *member,
1535 				const struct btf_type *member_type)
1536 {
1537 	u32 struct_size, struct_bits_off, bytes_offset;
1538 
1539 	struct_size = struct_type->size;
1540 	struct_bits_off = member->offset;
1541 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1542 
1543 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1544 		btf_verifier_log_member(env, struct_type, member,
1545 					"Member is not byte aligned");
1546 		return -EINVAL;
1547 	}
1548 
1549 	if (struct_size - bytes_offset < sizeof(void *)) {
1550 		btf_verifier_log_member(env, struct_type, member,
1551 					"Member exceeds struct_size");
1552 		return -EINVAL;
1553 	}
1554 
1555 	return 0;
1556 }
1557 
1558 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1559 				   const struct btf_type *t,
1560 				   u32 meta_left)
1561 {
1562 	if (btf_type_vlen(t)) {
1563 		btf_verifier_log_type(env, t, "vlen != 0");
1564 		return -EINVAL;
1565 	}
1566 
1567 	if (btf_type_kflag(t)) {
1568 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1569 		return -EINVAL;
1570 	}
1571 
1572 	if (!BTF_TYPE_ID_VALID(t->type)) {
1573 		btf_verifier_log_type(env, t, "Invalid type_id");
1574 		return -EINVAL;
1575 	}
1576 
1577 	/* A typedef type must have a valid name, while the other ref types
1578 	 * (volatile, const, restrict) should have a null name.
1579 	 */
1580 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1581 		if (!t->name_off ||
1582 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
1583 			btf_verifier_log_type(env, t, "Invalid name");
1584 			return -EINVAL;
1585 		}
1586 	} else {
1587 		if (t->name_off) {
1588 			btf_verifier_log_type(env, t, "Invalid name");
1589 			return -EINVAL;
1590 		}
1591 	}
1592 
1593 	btf_verifier_log_type(env, t, NULL);
1594 
1595 	return 0;
1596 }
1597 
1598 static int btf_modifier_resolve(struct btf_verifier_env *env,
1599 				const struct resolve_vertex *v)
1600 {
1601 	const struct btf_type *t = v->t;
1602 	const struct btf_type *next_type;
1603 	u32 next_type_id = t->type;
1604 	struct btf *btf = env->btf;
1605 
1606 	next_type = btf_type_by_id(btf, next_type_id);
1607 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1608 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1609 		return -EINVAL;
1610 	}
1611 
1612 	if (!env_type_is_resolve_sink(env, next_type) &&
1613 	    !env_type_is_resolved(env, next_type_id))
1614 		return env_stack_push(env, next_type, next_type_id);
1615 
1616 	/* Figure out the resolved next_type_id and its size.
1617 	 * They will be stored in the current modifier's
1618 	 * resolved_ids and resolved_sizes so that we can
1619 	 * save a few type lookups when we use it later (e.g. in
1620 	 * pretty printing).
1621 	 */
1622 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1623 		if (env_type_is_resolved(env, next_type_id))
1624 			next_type = btf_type_id_resolve(btf, &next_type_id);
1625 
1626 		/* "typedef void new_void", "const void"...etc */
1627 		if (!btf_type_is_void(next_type) &&
1628 		    !btf_type_is_fwd(next_type) &&
1629 		    !btf_type_is_func_proto(next_type)) {
1630 			btf_verifier_log_type(env, v->t, "Invalid type_id");
1631 			return -EINVAL;
1632 		}
1633 	}
1634 
1635 	env_stack_pop_resolved(env, next_type_id, 0);
1636 
1637 	return 0;
1638 }
1639 
1640 static int btf_var_resolve(struct btf_verifier_env *env,
1641 			   const struct resolve_vertex *v)
1642 {
1643 	const struct btf_type *next_type;
1644 	const struct btf_type *t = v->t;
1645 	u32 next_type_id = t->type;
1646 	struct btf *btf = env->btf;
1647 
1648 	next_type = btf_type_by_id(btf, next_type_id);
1649 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1650 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1651 		return -EINVAL;
1652 	}
1653 
1654 	if (!env_type_is_resolve_sink(env, next_type) &&
1655 	    !env_type_is_resolved(env, next_type_id))
1656 		return env_stack_push(env, next_type, next_type_id);
1657 
1658 	if (btf_type_is_modifier(next_type)) {
1659 		const struct btf_type *resolved_type;
1660 		u32 resolved_type_id;
1661 
1662 		resolved_type_id = next_type_id;
1663 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1664 
1665 		if (btf_type_is_ptr(resolved_type) &&
1666 		    !env_type_is_resolve_sink(env, resolved_type) &&
1667 		    !env_type_is_resolved(env, resolved_type_id))
1668 			return env_stack_push(env, resolved_type,
1669 					      resolved_type_id);
1670 	}
1671 
1672 	/* We must resolve to something concrete at this point; no
1673 	 * forward types or anything similar that would resolve to a
1674 	 * size of zero is allowed.
1675 	 */
1676 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1677 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1678 		return -EINVAL;
1679 	}
1680 
1681 	env_stack_pop_resolved(env, next_type_id, 0);
1682 
1683 	return 0;
1684 }
1685 
1686 static int btf_ptr_resolve(struct btf_verifier_env *env,
1687 			   const struct resolve_vertex *v)
1688 {
1689 	const struct btf_type *next_type;
1690 	const struct btf_type *t = v->t;
1691 	u32 next_type_id = t->type;
1692 	struct btf *btf = env->btf;
1693 
1694 	next_type = btf_type_by_id(btf, next_type_id);
1695 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1696 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1697 		return -EINVAL;
1698 	}
1699 
1700 	if (!env_type_is_resolve_sink(env, next_type) &&
1701 	    !env_type_is_resolved(env, next_type_id))
1702 		return env_stack_push(env, next_type, next_type_id);
1703 
1704 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1705 	 * the modifier may have stopped resolving when it was resolved
1706 	 * to a ptr (last-resolved-ptr).
1707 	 *
1708 	 * We now need to continue from the last-resolved-ptr to
1709 	 * ensure the last-resolved-ptr does not refer back to
1710 	 * the current ptr (t).
1711 	 */
1712 	if (btf_type_is_modifier(next_type)) {
1713 		const struct btf_type *resolved_type;
1714 		u32 resolved_type_id;
1715 
1716 		resolved_type_id = next_type_id;
1717 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1718 
1719 		if (btf_type_is_ptr(resolved_type) &&
1720 		    !env_type_is_resolve_sink(env, resolved_type) &&
1721 		    !env_type_is_resolved(env, resolved_type_id))
1722 			return env_stack_push(env, resolved_type,
1723 					      resolved_type_id);
1724 	}
1725 
1726 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1727 		if (env_type_is_resolved(env, next_type_id))
1728 			next_type = btf_type_id_resolve(btf, &next_type_id);
1729 
1730 		if (!btf_type_is_void(next_type) &&
1731 		    !btf_type_is_fwd(next_type) &&
1732 		    !btf_type_is_func_proto(next_type)) {
1733 			btf_verifier_log_type(env, v->t, "Invalid type_id");
1734 			return -EINVAL;
1735 		}
1736 	}
1737 
1738 	env_stack_pop_resolved(env, next_type_id, 0);
1739 
1740 	return 0;
1741 }
1742 
1743 static void btf_modifier_seq_show(const struct btf *btf,
1744 				  const struct btf_type *t,
1745 				  u32 type_id, void *data,
1746 				  u8 bits_offset, struct seq_file *m)
1747 {
1748 	t = btf_type_id_resolve(btf, &type_id);
1749 
1750 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1751 }
1752 
1753 static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
1754 			     u32 type_id, void *data, u8 bits_offset,
1755 			     struct seq_file *m)
1756 {
1757 	t = btf_type_id_resolve(btf, &type_id);
1758 
1759 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1760 }
1761 
1762 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1763 			     u32 type_id, void *data, u8 bits_offset,
1764 			     struct seq_file *m)
1765 {
1766 	/* It is a hashed value */
1767 	seq_printf(m, "%p", *(void **)data);
1768 }
1769 
1770 static void btf_ref_type_log(struct btf_verifier_env *env,
1771 			     const struct btf_type *t)
1772 {
1773 	btf_verifier_log(env, "type_id=%u", t->type);
1774 }
1775 
1776 static struct btf_kind_operations modifier_ops = {
1777 	.check_meta = btf_ref_type_check_meta,
1778 	.resolve = btf_modifier_resolve,
1779 	.check_member = btf_modifier_check_member,
1780 	.check_kflag_member = btf_modifier_check_kflag_member,
1781 	.log_details = btf_ref_type_log,
1782 	.seq_show = btf_modifier_seq_show,
1783 };
1784 
1785 static struct btf_kind_operations ptr_ops = {
1786 	.check_meta = btf_ref_type_check_meta,
1787 	.resolve = btf_ptr_resolve,
1788 	.check_member = btf_ptr_check_member,
1789 	.check_kflag_member = btf_generic_check_kflag_member,
1790 	.log_details = btf_ref_type_log,
1791 	.seq_show = btf_ptr_seq_show,
1792 };
1793 
1794 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1795 			      const struct btf_type *t,
1796 			      u32 meta_left)
1797 {
1798 	if (btf_type_vlen(t)) {
1799 		btf_verifier_log_type(env, t, "vlen != 0");
1800 		return -EINVAL;
1801 	}
1802 
1803 	if (t->type) {
1804 		btf_verifier_log_type(env, t, "type != 0");
1805 		return -EINVAL;
1806 	}
1807 
1808 	/* fwd type must have a valid name */
1809 	if (!t->name_off ||
1810 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
1811 		btf_verifier_log_type(env, t, "Invalid name");
1812 		return -EINVAL;
1813 	}
1814 
1815 	btf_verifier_log_type(env, t, NULL);
1816 
1817 	return 0;
1818 }
1819 
1820 static void btf_fwd_type_log(struct btf_verifier_env *env,
1821 			     const struct btf_type *t)
1822 {
1823 	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
1824 }
1825 
1826 static struct btf_kind_operations fwd_ops = {
1827 	.check_meta = btf_fwd_check_meta,
1828 	.resolve = btf_df_resolve,
1829 	.check_member = btf_df_check_member,
1830 	.check_kflag_member = btf_df_check_kflag_member,
1831 	.log_details = btf_fwd_type_log,
1832 	.seq_show = btf_df_seq_show,
1833 };
1834 
1835 static int btf_array_check_member(struct btf_verifier_env *env,
1836 				  const struct btf_type *struct_type,
1837 				  const struct btf_member *member,
1838 				  const struct btf_type *member_type)
1839 {
1840 	u32 struct_bits_off = member->offset;
1841 	u32 struct_size, bytes_offset;
1842 	u32 array_type_id, array_size;
1843 	struct btf *btf = env->btf;
1844 
1845 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1846 		btf_verifier_log_member(env, struct_type, member,
1847 					"Member is not byte aligned");
1848 		return -EINVAL;
1849 	}
1850 
1851 	array_type_id = member->type;
1852 	btf_type_id_size(btf, &array_type_id, &array_size);
1853 	struct_size = struct_type->size;
1854 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1855 	if (struct_size - bytes_offset < array_size) {
1856 		btf_verifier_log_member(env, struct_type, member,
1857 					"Member exceeds struct_size");
1858 		return -EINVAL;
1859 	}
1860 
1861 	return 0;
1862 }
1863 
1864 static s32 btf_array_check_meta(struct btf_verifier_env *env,
1865 				const struct btf_type *t,
1866 				u32 meta_left)
1867 {
1868 	const struct btf_array *array = btf_type_array(t);
1869 	u32 meta_needed = sizeof(*array);
1870 
1871 	if (meta_left < meta_needed) {
1872 		btf_verifier_log_basic(env, t,
1873 				       "meta_left:%u meta_needed:%u",
1874 				       meta_left, meta_needed);
1875 		return -EINVAL;
1876 	}
1877 
1878 	/* array type should not have a name */
1879 	if (t->name_off) {
1880 		btf_verifier_log_type(env, t, "Invalid name");
1881 		return -EINVAL;
1882 	}
1883 
1884 	if (btf_type_vlen(t)) {
1885 		btf_verifier_log_type(env, t, "vlen != 0");
1886 		return -EINVAL;
1887 	}
1888 
1889 	if (btf_type_kflag(t)) {
1890 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1891 		return -EINVAL;
1892 	}
1893 
1894 	if (t->size) {
1895 		btf_verifier_log_type(env, t, "size != 0");
1896 		return -EINVAL;
1897 	}
1898 
1899 	/* Array elem type and index type cannot be in type void,
1900 	 * so !array->type and !array->index_type are not allowed.
1901 	 */
1902 	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
1903 		btf_verifier_log_type(env, t, "Invalid elem");
1904 		return -EINVAL;
1905 	}
1906 
1907 	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
1908 		btf_verifier_log_type(env, t, "Invalid index");
1909 		return -EINVAL;
1910 	}
1911 
1912 	btf_verifier_log_type(env, t, NULL);
1913 
1914 	return meta_needed;
1915 }
1916 
1917 static int btf_array_resolve(struct btf_verifier_env *env,
1918 			     const struct resolve_vertex *v)
1919 {
1920 	const struct btf_array *array = btf_type_array(v->t);
1921 	const struct btf_type *elem_type, *index_type;
1922 	u32 elem_type_id, index_type_id;
1923 	struct btf *btf = env->btf;
1924 	u32 elem_size;
1925 
1926 	/* Check array->index_type */
1927 	index_type_id = array->index_type;
1928 	index_type = btf_type_by_id(btf, index_type_id);
1929 	if (btf_type_nosize_or_null(index_type) ||
1930 	    btf_type_is_resolve_source_only(index_type)) {
1931 		btf_verifier_log_type(env, v->t, "Invalid index");
1932 		return -EINVAL;
1933 	}
1934 
1935 	if (!env_type_is_resolve_sink(env, index_type) &&
1936 	    !env_type_is_resolved(env, index_type_id))
1937 		return env_stack_push(env, index_type, index_type_id);
1938 
1939 	index_type = btf_type_id_size(btf, &index_type_id, NULL);
1940 	if (!index_type || !btf_type_is_int(index_type) ||
1941 	    !btf_type_int_is_regular(index_type)) {
1942 		btf_verifier_log_type(env, v->t, "Invalid index");
1943 		return -EINVAL;
1944 	}
1945 
1946 	/* Check array->type */
1947 	elem_type_id = array->type;
1948 	elem_type = btf_type_by_id(btf, elem_type_id);
1949 	if (btf_type_nosize_or_null(elem_type) ||
1950 	    btf_type_is_resolve_source_only(elem_type)) {
1951 		btf_verifier_log_type(env, v->t,
1952 				      "Invalid elem");
1953 		return -EINVAL;
1954 	}
1955 
1956 	if (!env_type_is_resolve_sink(env, elem_type) &&
1957 	    !env_type_is_resolved(env, elem_type_id))
1958 		return env_stack_push(env, elem_type, elem_type_id);
1959 
1960 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1961 	if (!elem_type) {
1962 		btf_verifier_log_type(env, v->t, "Invalid elem");
1963 		return -EINVAL;
1964 	}
1965 
1966 	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
1967 		btf_verifier_log_type(env, v->t, "Invalid array of int");
1968 		return -EINVAL;
1969 	}
1970 
1971 	if (array->nelems && elem_size > U32_MAX / array->nelems) {
1972 		btf_verifier_log_type(env, v->t,
1973 				      "Array size overflows U32_MAX");
1974 		return -EINVAL;
1975 	}
1976 
1977 	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
1978 
1979 	return 0;
1980 }
1981 
1982 static void btf_array_log(struct btf_verifier_env *env,
1983 			  const struct btf_type *t)
1984 {
1985 	const struct btf_array *array = btf_type_array(t);
1986 
1987 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1988 			 array->type, array->index_type, array->nelems);
1989 }
1990 
1991 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1992 			       u32 type_id, void *data, u8 bits_offset,
1993 			       struct seq_file *m)
1994 {
1995 	const struct btf_array *array = btf_type_array(t);
1996 	const struct btf_kind_operations *elem_ops;
1997 	const struct btf_type *elem_type;
1998 	u32 i, elem_size, elem_type_id;
1999 
2000 	elem_type_id = array->type;
2001 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2002 	elem_ops = btf_type_ops(elem_type);
2003 	seq_puts(m, "[");
2004 	for (i = 0; i < array->nelems; i++) {
2005 		if (i)
2006 			seq_puts(m, ",");
2007 
2008 		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2009 				   bits_offset, m);
2010 		data += elem_size;
2011 	}
2012 	seq_puts(m, "]");
2013 }
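
/* Example (illustrative, type_ids are hypothetical): a C array like
 *
 *	int a[3];
 *
 * is encoded as an anonymous BTF_KIND_ARRAY followed by
 *
 *	struct btf_array { .type = 1, .index_type = 1, .nelems = 3 }
 *
 * where type_id 1 would be a regular "int".  btf_array_resolve() records
 * the resolved size as nelems * elem_size (12 bytes here), and
 * btf_array_seq_show() above renders a value of {1, 2, 3} as "[1,2,3]".
 */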
2014 
2015 static struct btf_kind_operations array_ops = {
2016 	.check_meta = btf_array_check_meta,
2017 	.resolve = btf_array_resolve,
2018 	.check_member = btf_array_check_member,
2019 	.check_kflag_member = btf_generic_check_kflag_member,
2020 	.log_details = btf_array_log,
2021 	.seq_show = btf_array_seq_show,
2022 };
2023 
2024 static int btf_struct_check_member(struct btf_verifier_env *env,
2025 				   const struct btf_type *struct_type,
2026 				   const struct btf_member *member,
2027 				   const struct btf_type *member_type)
2028 {
2029 	u32 struct_bits_off = member->offset;
2030 	u32 struct_size, bytes_offset;
2031 
2032 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2033 		btf_verifier_log_member(env, struct_type, member,
2034 					"Member is not byte aligned");
2035 		return -EINVAL;
2036 	}
2037 
2038 	struct_size = struct_type->size;
2039 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2040 	if (struct_size - bytes_offset < member_type->size) {
2041 		btf_verifier_log_member(env, struct_type, member,
2042 					"Member exceeds struct_size");
2043 		return -EINVAL;
2044 	}
2045 
2046 	return 0;
2047 }
2048 
2049 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2050 				 const struct btf_type *t,
2051 				 u32 meta_left)
2052 {
2053 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2054 	const struct btf_member *member;
2055 	u32 meta_needed, last_offset;
2056 	struct btf *btf = env->btf;
2057 	u32 struct_size = t->size;
2058 	u32 offset;
2059 	u16 i;
2060 
2061 	meta_needed = btf_type_vlen(t) * sizeof(*member);
2062 	if (meta_left < meta_needed) {
2063 		btf_verifier_log_basic(env, t,
2064 				       "meta_left:%u meta_needed:%u",
2065 				       meta_left, meta_needed);
2066 		return -EINVAL;
2067 	}
2068 
2069 	/* struct type has either no name or a valid one */
2070 	if (t->name_off &&
2071 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2072 		btf_verifier_log_type(env, t, "Invalid name");
2073 		return -EINVAL;
2074 	}
2075 
2076 	btf_verifier_log_type(env, t, NULL);
2077 
2078 	last_offset = 0;
2079 	for_each_member(i, t, member) {
2080 		if (!btf_name_offset_valid(btf, member->name_off)) {
2081 			btf_verifier_log_member(env, t, member,
2082 						"Invalid member name_offset:%u",
2083 						member->name_off);
2084 			return -EINVAL;
2085 		}
2086 
2087 		/* struct member has either no name or a valid one */
2088 		if (member->name_off &&
2089 		    !btf_name_valid_identifier(btf, member->name_off)) {
2090 			btf_verifier_log_member(env, t, member, "Invalid name");
2091 			return -EINVAL;
2092 		}
2093 		/* A member cannot be in type void */
2094 		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
2095 			btf_verifier_log_member(env, t, member,
2096 						"Invalid type_id");
2097 			return -EINVAL;
2098 		}
2099 
2100 		offset = btf_member_bit_offset(t, member);
2101 		if (is_union && offset) {
2102 			btf_verifier_log_member(env, t, member,
2103 						"Invalid member bits_offset");
2104 			return -EINVAL;
2105 		}
2106 
2107 		/*
2108 		 * ">" instead of ">=" because the last member could be
2109 		 * "char a[0];"
2110 		 */
2111 		if (last_offset > offset) {
2112 			btf_verifier_log_member(env, t, member,
2113 						"Invalid member bits_offset");
2114 			return -EINVAL;
2115 		}
2116 
2117 		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
2118 			btf_verifier_log_member(env, t, member,
2119 						"Member bits_offset exceeds its struct size");
2120 			return -EINVAL;
2121 		}
2122 
2123 		btf_verifier_log_member(env, t, member, NULL);
2124 		last_offset = offset;
2125 	}
2126 
2127 	return meta_needed;
2128 }
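
/* Example (illustrative): how member->offset is interpreted depends on
 * the struct's kind_flag.  For a plain (kind_flag == 0) struct such as
 *
 *	struct s { int a; int b; };
 *
 * member->offset is simply the bit offset (a at 0, b at 32).  With
 * kind_flag set, e.g. for bitfields like
 *
 *	struct s { int a:4; int b:4; };
 *
 * the upper 8 bits carry BTF_MEMBER_BITFIELD_SIZE() and the lower 24 bits
 * carry BTF_MEMBER_BIT_OFFSET(), which is what btf_member_bit_offset()
 * and btf_member_bitfield_size() unpack in the checks above.
 */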
2129 
2130 static int btf_struct_resolve(struct btf_verifier_env *env,
2131 			      const struct resolve_vertex *v)
2132 {
2133 	const struct btf_member *member;
2134 	int err;
2135 	u16 i;
2136 
2137 	/* Before continuing to resolve the next_member,
2138 	 * ensure the last member has indeed been resolved to a
2139 	 * type with size info.
2140 	 */
2141 	if (v->next_member) {
2142 		const struct btf_type *last_member_type;
2143 		const struct btf_member *last_member;
2144 		u16 last_member_type_id;
2145 
2146 		last_member = btf_type_member(v->t) + v->next_member - 1;
2147 		last_member_type_id = last_member->type;
2148 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
2149 						       last_member_type_id)))
2150 			return -EINVAL;
2151 
2152 		last_member_type = btf_type_by_id(env->btf,
2153 						  last_member_type_id);
2154 		if (btf_type_kflag(v->t))
2155 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2156 								last_member,
2157 								last_member_type);
2158 		else
2159 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
2160 								last_member,
2161 								last_member_type);
2162 		if (err)
2163 			return err;
2164 	}
2165 
2166 	for_each_member_from(i, v->next_member, v->t, member) {
2167 		u32 member_type_id = member->type;
2168 		const struct btf_type *member_type = btf_type_by_id(env->btf,
2169 								member_type_id);
2170 
2171 		if (btf_type_nosize_or_null(member_type) ||
2172 		    btf_type_is_resolve_source_only(member_type)) {
2173 			btf_verifier_log_member(env, v->t, member,
2174 						"Invalid member");
2175 			return -EINVAL;
2176 		}
2177 
2178 		if (!env_type_is_resolve_sink(env, member_type) &&
2179 		    !env_type_is_resolved(env, member_type_id)) {
2180 			env_stack_set_next_member(env, i + 1);
2181 			return env_stack_push(env, member_type, member_type_id);
2182 		}
2183 
2184 		if (btf_type_kflag(v->t))
2185 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2186 									    member,
2187 									    member_type);
2188 		else
2189 			err = btf_type_ops(member_type)->check_member(env, v->t,
2190 								      member,
2191 								      member_type);
2192 		if (err)
2193 			return err;
2194 	}
2195 
2196 	env_stack_pop_resolved(env, 0, 0);
2197 
2198 	return 0;
2199 }
2200 
2201 static void btf_struct_log(struct btf_verifier_env *env,
2202 			   const struct btf_type *t)
2203 {
2204 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2205 }
2206 
2207 /* find 'struct bpf_spin_lock' in map value.
2208  * return its byte offset (>= 0) if found
2209  * and < 0 in case of error
2210  */
2211 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2212 {
2213 	const struct btf_member *member;
2214 	u32 i, off = -ENOENT;
2215 
2216 	if (!__btf_type_is_struct(t))
2217 		return -EINVAL;
2218 
2219 	for_each_member(i, t, member) {
2220 		const struct btf_type *member_type = btf_type_by_id(btf,
2221 								    member->type);
2222 		if (!__btf_type_is_struct(member_type))
2223 			continue;
2224 		if (member_type->size != sizeof(struct bpf_spin_lock))
2225 			continue;
2226 		if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2227 			   "bpf_spin_lock"))
2228 			continue;
2229 		if (off != -ENOENT)
2230 			/* only one 'struct bpf_spin_lock' is allowed */
2231 			return -E2BIG;
2232 		off = btf_member_bit_offset(t, member);
2233 		if (off % 8)
2234 			/* valid C code cannot generate such BTF */
2235 			return -EINVAL;
2236 		off /= 8;
2237 		if (off % __alignof__(struct bpf_spin_lock))
2238 			/* valid struct bpf_spin_lock will be 4 byte aligned */
2239 			return -EINVAL;
2240 	}
2241 	return off;
2242 }
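
/* Example (illustrative): for a map value type such as
 *
 *	struct val {
 *		int cnt;
 *		struct bpf_spin_lock lock;
 *	};
 *
 * btf_find_spin_lock() returns 4, the byte offset of "lock".  A second
 * bpf_spin_lock member would make it return -E2BIG, and a value type
 * without one returns -ENOENT.
 */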
2243 
2244 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2245 				u32 type_id, void *data, u8 bits_offset,
2246 				struct seq_file *m)
2247 {
2248 	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2249 	const struct btf_member *member;
2250 	u32 i;
2251 
2252 	seq_puts(m, "{");
2253 	for_each_member(i, t, member) {
2254 		const struct btf_type *member_type = btf_type_by_id(btf,
2255 								member->type);
2256 		const struct btf_kind_operations *ops;
2257 		u32 member_offset, bitfield_size;
2258 		u32 bytes_offset;
2259 		u8 bits8_offset;
2260 
2261 		if (i)
2262 			seq_puts(m, seq);
2263 
2264 		member_offset = btf_member_bit_offset(t, member);
2265 		bitfield_size = btf_member_bitfield_size(t, member);
2266 		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2267 		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2268 		if (bitfield_size) {
2269 			btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2270 					      bitfield_size, m);
2271 		} else {
2272 			ops = btf_type_ops(member_type);
2273 			ops->seq_show(btf, member_type, member->type,
2274 				      data + bytes_offset, bits8_offset, m);
2275 		}
2276 	}
2277 	seq_puts(m, "}");
2278 }
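
/* Example (illustrative): with btf_struct_seq_show() above, a map value
 * of type
 *
 *	struct s { int a; int b; };
 *
 * holding a = 1, b = 2 is printed as "{1,2}"; members of a union are
 * separated by '|' instead of ','.  Bitfield members are rendered by
 * btf_bitfield_seq_show(), everything else by the member type's own
 * seq_show().
 */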
2279 
2280 static struct btf_kind_operations struct_ops = {
2281 	.check_meta = btf_struct_check_meta,
2282 	.resolve = btf_struct_resolve,
2283 	.check_member = btf_struct_check_member,
2284 	.check_kflag_member = btf_generic_check_kflag_member,
2285 	.log_details = btf_struct_log,
2286 	.seq_show = btf_struct_seq_show,
2287 };
2288 
2289 static int btf_enum_check_member(struct btf_verifier_env *env,
2290 				 const struct btf_type *struct_type,
2291 				 const struct btf_member *member,
2292 				 const struct btf_type *member_type)
2293 {
2294 	u32 struct_bits_off = member->offset;
2295 	u32 struct_size, bytes_offset;
2296 
2297 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2298 		btf_verifier_log_member(env, struct_type, member,
2299 					"Member is not byte aligned");
2300 		return -EINVAL;
2301 	}
2302 
2303 	struct_size = struct_type->size;
2304 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2305 	if (struct_size - bytes_offset < sizeof(int)) {
2306 		btf_verifier_log_member(env, struct_type, member,
2307 					"Member exceeds struct_size");
2308 		return -EINVAL;
2309 	}
2310 
2311 	return 0;
2312 }
2313 
2314 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2315 				       const struct btf_type *struct_type,
2316 				       const struct btf_member *member,
2317 				       const struct btf_type *member_type)
2318 {
2319 	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2320 	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2321 
2322 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2323 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2324 	if (!nr_bits) {
2325 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2326 			btf_verifier_log_member(env, struct_type, member,
2327 						"Member is not byte aligned");
2328 			return -EINVAL;
2329 		}
2330 
2331 		nr_bits = int_bitsize;
2332 	} else if (nr_bits > int_bitsize) {
2333 		btf_verifier_log_member(env, struct_type, member,
2334 					"Invalid member bitfield_size");
2335 		return -EINVAL;
2336 	}
2337 
2338 	struct_size = struct_type->size;
2339 	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2340 	if (struct_size < bytes_end) {
2341 		btf_verifier_log_member(env, struct_type, member,
2342 					"Member exceeds struct_size");
2343 		return -EINVAL;
2344 	}
2345 
2346 	return 0;
2347 }
2348 
2349 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
2350 			       const struct btf_type *t,
2351 			       u32 meta_left)
2352 {
2353 	const struct btf_enum *enums = btf_type_enum(t);
2354 	struct btf *btf = env->btf;
2355 	u16 i, nr_enums;
2356 	u32 meta_needed;
2357 
2358 	nr_enums = btf_type_vlen(t);
2359 	meta_needed = nr_enums * sizeof(*enums);
2360 
2361 	if (meta_left < meta_needed) {
2362 		btf_verifier_log_basic(env, t,
2363 				       "meta_left:%u meta_needed:%u",
2364 				       meta_left, meta_needed);
2365 		return -EINVAL;
2366 	}
2367 
2368 	if (btf_type_kflag(t)) {
2369 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2370 		return -EINVAL;
2371 	}
2372 
2373 	if (t->size > 8 || !is_power_of_2(t->size)) {
2374 		btf_verifier_log_type(env, t, "Unexpected size");
2375 		return -EINVAL;
2376 	}
2377 
2378 	/* enum type has either no name or a valid one */
2379 	if (t->name_off &&
2380 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2381 		btf_verifier_log_type(env, t, "Invalid name");
2382 		return -EINVAL;
2383 	}
2384 
2385 	btf_verifier_log_type(env, t, NULL);
2386 
2387 	for (i = 0; i < nr_enums; i++) {
2388 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
2389 			btf_verifier_log(env, "\tInvalid name_offset:%u",
2390 					 enums[i].name_off);
2391 			return -EINVAL;
2392 		}
2393 
2394 		/* enum member must have a valid name */
2395 		if (!enums[i].name_off ||
2396 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
2397 			btf_verifier_log_type(env, t, "Invalid name");
2398 			return -EINVAL;
2399 		}
2400 
2401 		if (env->log.level == BPF_LOG_KERNEL)
2402 			continue;
2403 		btf_verifier_log(env, "\t%s val=%d\n",
2404 				 __btf_name_by_offset(btf, enums[i].name_off),
2405 				 enums[i].val);
2406 	}
2407 
2408 	return meta_needed;
2409 }
2410 
2411 static void btf_enum_log(struct btf_verifier_env *env,
2412 			 const struct btf_type *t)
2413 {
2414 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2415 }
2416 
2417 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2418 			      u32 type_id, void *data, u8 bits_offset,
2419 			      struct seq_file *m)
2420 {
2421 	const struct btf_enum *enums = btf_type_enum(t);
2422 	u32 i, nr_enums = btf_type_vlen(t);
2423 	int v = *(int *)data;
2424 
2425 	for (i = 0; i < nr_enums; i++) {
2426 		if (v == enums[i].val) {
2427 			seq_printf(m, "%s",
2428 				   __btf_name_by_offset(btf,
2429 							enums[i].name_off));
2430 			return;
2431 		}
2432 	}
2433 
2434 	seq_printf(m, "%d", v);
2435 }
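
/* Example (illustrative): given
 *
 *	enum color { RED = 0, GREEN = 1, BLUE = 2 };
 *
 * btf_enum_seq_show() above prints "GREEN" for a value of 1 and falls
 * back to the raw integer (e.g. "7") when no enumerator matches.
 */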
2436 
2437 static struct btf_kind_operations enum_ops = {
2438 	.check_meta = btf_enum_check_meta,
2439 	.resolve = btf_df_resolve,
2440 	.check_member = btf_enum_check_member,
2441 	.check_kflag_member = btf_enum_check_kflag_member,
2442 	.log_details = btf_enum_log,
2443 	.seq_show = btf_enum_seq_show,
2444 };
2445 
2446 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
2447 				     const struct btf_type *t,
2448 				     u32 meta_left)
2449 {
2450 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
2451 
2452 	if (meta_left < meta_needed) {
2453 		btf_verifier_log_basic(env, t,
2454 				       "meta_left:%u meta_needed:%u",
2455 				       meta_left, meta_needed);
2456 		return -EINVAL;
2457 	}
2458 
2459 	if (t->name_off) {
2460 		btf_verifier_log_type(env, t, "Invalid name");
2461 		return -EINVAL;
2462 	}
2463 
2464 	if (btf_type_kflag(t)) {
2465 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2466 		return -EINVAL;
2467 	}
2468 
2469 	btf_verifier_log_type(env, t, NULL);
2470 
2471 	return meta_needed;
2472 }
2473 
2474 static void btf_func_proto_log(struct btf_verifier_env *env,
2475 			       const struct btf_type *t)
2476 {
2477 	const struct btf_param *args = (const struct btf_param *)(t + 1);
2478 	u16 nr_args = btf_type_vlen(t), i;
2479 
2480 	btf_verifier_log(env, "return=%u args=(", t->type);
2481 	if (!nr_args) {
2482 		btf_verifier_log(env, "void");
2483 		goto done;
2484 	}
2485 
2486 	if (nr_args == 1 && !args[0].type) {
2487 		/* Only one vararg */
2488 		btf_verifier_log(env, "vararg");
2489 		goto done;
2490 	}
2491 
2492 	btf_verifier_log(env, "%u %s", args[0].type,
2493 			 __btf_name_by_offset(env->btf,
2494 					      args[0].name_off));
2495 	for (i = 1; i < nr_args - 1; i++)
2496 		btf_verifier_log(env, ", %u %s", args[i].type,
2497 				 __btf_name_by_offset(env->btf,
2498 						      args[i].name_off));
2499 
2500 	if (nr_args > 1) {
2501 		const struct btf_param *last_arg = &args[nr_args - 1];
2502 
2503 		if (last_arg->type)
2504 			btf_verifier_log(env, ", %u %s", last_arg->type,
2505 					 __btf_name_by_offset(env->btf,
2506 							      last_arg->name_off));
2507 		else
2508 			btf_verifier_log(env, ", vararg");
2509 	}
2510 
2511 done:
2512 	btf_verifier_log(env, ")");
2513 }
2514 
2515 static struct btf_kind_operations func_proto_ops = {
2516 	.check_meta = btf_func_proto_check_meta,
2517 	.resolve = btf_df_resolve,
2518 	/*
2519 	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
2520 	 * a struct's member.
2521 	 *
2522 	 * It should be a function pointer instead.
2523 	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
2524 	 *
2525 	 * Hence, there is no btf_func_check_member().
2526 	 */
2527 	.check_member = btf_df_check_member,
2528 	.check_kflag_member = btf_df_check_kflag_member,
2529 	.log_details = btf_func_proto_log,
2530 	.seq_show = btf_df_seq_show,
2531 };
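
/* Example (illustrative, type_ids are hypothetical): a struct member that
 * is a function pointer, e.g.
 *
 *	struct ops { int (*handler)(int); };
 *
 * is therefore expected to reference a chain like
 *
 *	[2] PTR (anon) type_id=3
 *	[3] FUNC_PROTO (anon) return=1 args=(1 (anon))
 *
 * where type_id 1 would be "int".  The "return=... args=(...)" part is
 * the format emitted by btf_func_proto_log() above.
 */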
2532 
2533 static s32 btf_func_check_meta(struct btf_verifier_env *env,
2534 			       const struct btf_type *t,
2535 			       u32 meta_left)
2536 {
2537 	if (!t->name_off ||
2538 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2539 		btf_verifier_log_type(env, t, "Invalid name");
2540 		return -EINVAL;
2541 	}
2542 
2543 	if (btf_type_vlen(t)) {
2544 		btf_verifier_log_type(env, t, "vlen != 0");
2545 		return -EINVAL;
2546 	}
2547 
2548 	if (btf_type_kflag(t)) {
2549 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2550 		return -EINVAL;
2551 	}
2552 
2553 	btf_verifier_log_type(env, t, NULL);
2554 
2555 	return 0;
2556 }
2557 
2558 static struct btf_kind_operations func_ops = {
2559 	.check_meta = btf_func_check_meta,
2560 	.resolve = btf_df_resolve,
2561 	.check_member = btf_df_check_member,
2562 	.check_kflag_member = btf_df_check_kflag_member,
2563 	.log_details = btf_ref_type_log,
2564 	.seq_show = btf_df_seq_show,
2565 };
2566 
2567 static s32 btf_var_check_meta(struct btf_verifier_env *env,
2568 			      const struct btf_type *t,
2569 			      u32 meta_left)
2570 {
2571 	const struct btf_var *var;
2572 	u32 meta_needed = sizeof(*var);
2573 
2574 	if (meta_left < meta_needed) {
2575 		btf_verifier_log_basic(env, t,
2576 				       "meta_left:%u meta_needed:%u",
2577 				       meta_left, meta_needed);
2578 		return -EINVAL;
2579 	}
2580 
2581 	if (btf_type_vlen(t)) {
2582 		btf_verifier_log_type(env, t, "vlen != 0");
2583 		return -EINVAL;
2584 	}
2585 
2586 	if (btf_type_kflag(t)) {
2587 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2588 		return -EINVAL;
2589 	}
2590 
2591 	if (!t->name_off ||
2592 	    !__btf_name_valid(env->btf, t->name_off, true)) {
2593 		btf_verifier_log_type(env, t, "Invalid name");
2594 		return -EINVAL;
2595 	}
2596 
2597 	/* A var cannot be in type void */
2598 	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
2599 		btf_verifier_log_type(env, t, "Invalid type_id");
2600 		return -EINVAL;
2601 	}
2602 
2603 	var = btf_type_var(t);
2604 	if (var->linkage != BTF_VAR_STATIC &&
2605 	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2606 		btf_verifier_log_type(env, t, "Linkage not supported");
2607 		return -EINVAL;
2608 	}
2609 
2610 	btf_verifier_log_type(env, t, NULL);
2611 
2612 	return meta_needed;
2613 }
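
/* Example (illustrative, type_id hypothetical): a global variable such as
 *
 *	int my_var;
 *
 * is described by a BTF_KIND_VAR named "my_var" whose t->type points at
 * "int" and whose trailing struct btf_var carries the linkage
 * (BTF_VAR_STATIC or BTF_VAR_GLOBAL_ALLOCATED, the only two values
 * accepted above).
 */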
2614 
2615 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2616 {
2617 	const struct btf_var *var = btf_type_var(t);
2618 
2619 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2620 }
2621 
2622 static const struct btf_kind_operations var_ops = {
2623 	.check_meta		= btf_var_check_meta,
2624 	.resolve		= btf_var_resolve,
2625 	.check_member		= btf_df_check_member,
2626 	.check_kflag_member	= btf_df_check_kflag_member,
2627 	.log_details		= btf_var_log,
2628 	.seq_show		= btf_var_seq_show,
2629 };
2630 
2631 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
2632 				  const struct btf_type *t,
2633 				  u32 meta_left)
2634 {
2635 	const struct btf_var_secinfo *vsi;
2636 	u64 last_vsi_end_off = 0, sum = 0;
2637 	u32 i, meta_needed;
2638 
2639 	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
2640 	if (meta_left < meta_needed) {
2641 		btf_verifier_log_basic(env, t,
2642 				       "meta_left:%u meta_needed:%u",
2643 				       meta_left, meta_needed);
2644 		return -EINVAL;
2645 	}
2646 
2647 	if (!btf_type_vlen(t)) {
2648 		btf_verifier_log_type(env, t, "vlen == 0");
2649 		return -EINVAL;
2650 	}
2651 
2652 	if (!t->size) {
2653 		btf_verifier_log_type(env, t, "size == 0");
2654 		return -EINVAL;
2655 	}
2656 
2657 	if (btf_type_kflag(t)) {
2658 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2659 		return -EINVAL;
2660 	}
2661 
2662 	if (!t->name_off ||
2663 	    !btf_name_valid_section(env->btf, t->name_off)) {
2664 		btf_verifier_log_type(env, t, "Invalid name");
2665 		return -EINVAL;
2666 	}
2667 
2668 	btf_verifier_log_type(env, t, NULL);
2669 
2670 	for_each_vsi(i, t, vsi) {
2671 		/* A var cannot be in type void */
2672 		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
2673 			btf_verifier_log_vsi(env, t, vsi,
2674 					     "Invalid type_id");
2675 			return -EINVAL;
2676 		}
2677 
2678 		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
2679 			btf_verifier_log_vsi(env, t, vsi,
2680 					     "Invalid offset");
2681 			return -EINVAL;
2682 		}
2683 
2684 		if (!vsi->size || vsi->size > t->size) {
2685 			btf_verifier_log_vsi(env, t, vsi,
2686 					     "Invalid size");
2687 			return -EINVAL;
2688 		}
2689 
2690 		last_vsi_end_off = vsi->offset + vsi->size;
2691 		if (last_vsi_end_off > t->size) {
2692 			btf_verifier_log_vsi(env, t, vsi,
2693 					     "Invalid offset+size");
2694 			return -EINVAL;
2695 		}
2696 
2697 		btf_verifier_log_vsi(env, t, vsi, NULL);
2698 		sum += vsi->size;
2699 	}
2700 
2701 	if (t->size < sum) {
2702 		btf_verifier_log_type(env, t, "Invalid btf_info size");
2703 		return -EINVAL;
2704 	}
2705 
2706 	return meta_needed;
2707 }
2708 
2709 static int btf_datasec_resolve(struct btf_verifier_env *env,
2710 			       const struct resolve_vertex *v)
2711 {
2712 	const struct btf_var_secinfo *vsi;
2713 	struct btf *btf = env->btf;
2714 	u16 i;
2715 
2716 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
2717 		u32 var_type_id = vsi->type, type_id, type_size = 0;
2718 		const struct btf_type *var_type = btf_type_by_id(env->btf,
2719 								 var_type_id);
2720 		if (!var_type || !btf_type_is_var(var_type)) {
2721 			btf_verifier_log_vsi(env, v->t, vsi,
2722 					     "Not a VAR kind member");
2723 			return -EINVAL;
2724 		}
2725 
2726 		if (!env_type_is_resolve_sink(env, var_type) &&
2727 		    !env_type_is_resolved(env, var_type_id)) {
2728 			env_stack_set_next_member(env, i + 1);
2729 			return env_stack_push(env, var_type, var_type_id);
2730 		}
2731 
2732 		type_id = var_type->type;
2733 		if (!btf_type_id_size(btf, &type_id, &type_size)) {
2734 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
2735 			return -EINVAL;
2736 		}
2737 
2738 		if (vsi->size < type_size) {
2739 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
2740 			return -EINVAL;
2741 		}
2742 	}
2743 
2744 	env_stack_pop_resolved(env, 0, 0);
2745 	return 0;
2746 }
2747 
2748 static void btf_datasec_log(struct btf_verifier_env *env,
2749 			    const struct btf_type *t)
2750 {
2751 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2752 }
2753 
2754 static void btf_datasec_seq_show(const struct btf *btf,
2755 				 const struct btf_type *t, u32 type_id,
2756 				 void *data, u8 bits_offset,
2757 				 struct seq_file *m)
2758 {
2759 	const struct btf_var_secinfo *vsi;
2760 	const struct btf_type *var;
2761 	u32 i;
2762 
2763 	seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2764 	for_each_vsi(i, t, vsi) {
2765 		var = btf_type_by_id(btf, vsi->type);
2766 		if (i)
2767 			seq_puts(m, ",");
2768 		btf_type_ops(var)->seq_show(btf, var, vsi->type,
2769 					    data + vsi->offset, bits_offset, m);
2770 	}
2771 	seq_puts(m, "}");
2772 }
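
/* Example (illustrative, type_ids and offsets hypothetical): the variables
 * of an ELF data section end up in a BTF_KIND_DATASEC named after it,
 * e.g. a ".bss" DATASEC with size=8, vlen=2 and two btf_var_secinfo
 * entries such as
 *
 *	{ .type = 2, .offset = 0, .size = 4 }
 *	{ .type = 3, .offset = 4, .size = 4 }
 *
 * where each entry points at a VAR and records its byte offset and size
 * within the section.  btf_datasec_seq_show() above renders such a
 * section as: section (".bss") = {..., ...}.
 */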
2773 
2774 static const struct btf_kind_operations datasec_ops = {
2775 	.check_meta		= btf_datasec_check_meta,
2776 	.resolve		= btf_datasec_resolve,
2777 	.check_member		= btf_df_check_member,
2778 	.check_kflag_member	= btf_df_check_kflag_member,
2779 	.log_details		= btf_datasec_log,
2780 	.seq_show		= btf_datasec_seq_show,
2781 };
2782 
2783 static int btf_func_proto_check(struct btf_verifier_env *env,
2784 				const struct btf_type *t)
2785 {
2786 	const struct btf_type *ret_type;
2787 	const struct btf_param *args;
2788 	const struct btf *btf;
2789 	u16 nr_args, i;
2790 	int err;
2791 
2792 	btf = env->btf;
2793 	args = (const struct btf_param *)(t + 1);
2794 	nr_args = btf_type_vlen(t);
2795 
2796 	/* Check func return type which could be "void" (t->type == 0) */
2797 	if (t->type) {
2798 		u32 ret_type_id = t->type;
2799 
2800 		ret_type = btf_type_by_id(btf, ret_type_id);
2801 		if (!ret_type) {
2802 			btf_verifier_log_type(env, t, "Invalid return type");
2803 			return -EINVAL;
2804 		}
2805 
2806 		if (btf_type_needs_resolve(ret_type) &&
2807 		    !env_type_is_resolved(env, ret_type_id)) {
2808 			err = btf_resolve(env, ret_type, ret_type_id);
2809 			if (err)
2810 				return err;
2811 		}
2812 
2813 		/* Ensure the return type is a type that has a size */
2814 		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
2815 			btf_verifier_log_type(env, t, "Invalid return type");
2816 			return -EINVAL;
2817 		}
2818 	}
2819 
2820 	if (!nr_args)
2821 		return 0;
2822 
2823 	/* Last func arg type_id could be 0 if it is a vararg */
2824 	if (!args[nr_args - 1].type) {
2825 		if (args[nr_args - 1].name_off) {
2826 			btf_verifier_log_type(env, t, "Invalid arg#%u",
2827 					      nr_args);
2828 			return -EINVAL;
2829 		}
2830 		nr_args--;
2831 	}
2832 
2833 	err = 0;
2834 	for (i = 0; i < nr_args; i++) {
2835 		const struct btf_type *arg_type;
2836 		u32 arg_type_id;
2837 
2838 		arg_type_id = args[i].type;
2839 		arg_type = btf_type_by_id(btf, arg_type_id);
2840 		if (!arg_type) {
2841 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2842 			err = -EINVAL;
2843 			break;
2844 		}
2845 
2846 		if (args[i].name_off &&
2847 		    (!btf_name_offset_valid(btf, args[i].name_off) ||
2848 		     !btf_name_valid_identifier(btf, args[i].name_off))) {
2849 			btf_verifier_log_type(env, t,
2850 					      "Invalid arg#%u", i + 1);
2851 			err = -EINVAL;
2852 			break;
2853 		}
2854 
2855 		if (btf_type_needs_resolve(arg_type) &&
2856 		    !env_type_is_resolved(env, arg_type_id)) {
2857 			err = btf_resolve(env, arg_type, arg_type_id);
2858 			if (err)
2859 				break;
2860 		}
2861 
2862 		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
2863 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2864 			err = -EINVAL;
2865 			break;
2866 		}
2867 	}
2868 
2869 	return err;
2870 }
2871 
2872 static int btf_func_check(struct btf_verifier_env *env,
2873 			  const struct btf_type *t)
2874 {
2875 	const struct btf_type *proto_type;
2876 	const struct btf_param *args;
2877 	const struct btf *btf;
2878 	u16 nr_args, i;
2879 
2880 	btf = env->btf;
2881 	proto_type = btf_type_by_id(btf, t->type);
2882 
2883 	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
2884 		btf_verifier_log_type(env, t, "Invalid type_id");
2885 		return -EINVAL;
2886 	}
2887 
2888 	args = (const struct btf_param *)(proto_type + 1);
2889 	nr_args = btf_type_vlen(proto_type);
2890 	for (i = 0; i < nr_args; i++) {
2891 		if (!args[i].name_off && args[i].type) {
2892 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2893 			return -EINVAL;
2894 		}
2895 	}
2896 
2897 	return 0;
2898 }
2899 
2900 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
2901 	[BTF_KIND_INT] = &int_ops,
2902 	[BTF_KIND_PTR] = &ptr_ops,
2903 	[BTF_KIND_ARRAY] = &array_ops,
2904 	[BTF_KIND_STRUCT] = &struct_ops,
2905 	[BTF_KIND_UNION] = &struct_ops,
2906 	[BTF_KIND_ENUM] = &enum_ops,
2907 	[BTF_KIND_FWD] = &fwd_ops,
2908 	[BTF_KIND_TYPEDEF] = &modifier_ops,
2909 	[BTF_KIND_VOLATILE] = &modifier_ops,
2910 	[BTF_KIND_CONST] = &modifier_ops,
2911 	[BTF_KIND_RESTRICT] = &modifier_ops,
2912 	[BTF_KIND_FUNC] = &func_ops,
2913 	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
2914 	[BTF_KIND_VAR] = &var_ops,
2915 	[BTF_KIND_DATASEC] = &datasec_ops,
2916 };
2917 
2918 static s32 btf_check_meta(struct btf_verifier_env *env,
2919 			  const struct btf_type *t,
2920 			  u32 meta_left)
2921 {
2922 	u32 saved_meta_left = meta_left;
2923 	s32 var_meta_size;
2924 
2925 	if (meta_left < sizeof(*t)) {
2926 		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
2927 				 env->log_type_id, meta_left, sizeof(*t));
2928 		return -EINVAL;
2929 	}
2930 	meta_left -= sizeof(*t);
2931 
2932 	if (t->info & ~BTF_INFO_MASK) {
2933 		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
2934 				 env->log_type_id, t->info);
2935 		return -EINVAL;
2936 	}
2937 
2938 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
2939 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
2940 		btf_verifier_log(env, "[%u] Invalid kind:%u",
2941 				 env->log_type_id, BTF_INFO_KIND(t->info));
2942 		return -EINVAL;
2943 	}
2944 
2945 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
2946 		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
2947 				 env->log_type_id, t->name_off);
2948 		return -EINVAL;
2949 	}
2950 
2951 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
2952 	if (var_meta_size < 0)
2953 		return var_meta_size;
2954 
2955 	meta_left -= var_meta_size;
2956 
2957 	return saved_meta_left - meta_left;
2958 }
2959 
2960 static int btf_check_all_metas(struct btf_verifier_env *env)
2961 {
2962 	struct btf *btf = env->btf;
2963 	struct btf_header *hdr;
2964 	void *cur, *end;
2965 
2966 	hdr = &btf->hdr;
2967 	cur = btf->nohdr_data + hdr->type_off;
2968 	end = cur + hdr->type_len;
2969 
2970 	env->log_type_id = 1;
2971 	while (cur < end) {
2972 		struct btf_type *t = cur;
2973 		s32 meta_size;
2974 
2975 		meta_size = btf_check_meta(env, t, end - cur);
2976 		if (meta_size < 0)
2977 			return meta_size;
2978 
2979 		btf_add_type(env, t);
2980 		cur += meta_size;
2981 		env->log_type_id++;
2982 	}
2983 
2984 	return 0;
2985 }
2986 
2987 static bool btf_resolve_valid(struct btf_verifier_env *env,
2988 			      const struct btf_type *t,
2989 			      u32 type_id)
2990 {
2991 	struct btf *btf = env->btf;
2992 
2993 	if (!env_type_is_resolved(env, type_id))
2994 		return false;
2995 
2996 	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
2997 		return !btf->resolved_ids[type_id] &&
2998 		       !btf->resolved_sizes[type_id];
2999 
3000 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
3001 	    btf_type_is_var(t)) {
3002 		t = btf_type_id_resolve(btf, &type_id);
3003 		return t &&
3004 		       !btf_type_is_modifier(t) &&
3005 		       !btf_type_is_var(t) &&
3006 		       !btf_type_is_datasec(t);
3007 	}
3008 
3009 	if (btf_type_is_array(t)) {
3010 		const struct btf_array *array = btf_type_array(t);
3011 		const struct btf_type *elem_type;
3012 		u32 elem_type_id = array->type;
3013 		u32 elem_size;
3014 
3015 		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3016 		return elem_type && !btf_type_is_modifier(elem_type) &&
3017 			(array->nelems * elem_size ==
3018 			 btf->resolved_sizes[type_id]);
3019 	}
3020 
3021 	return false;
3022 }
3023 
3024 static int btf_resolve(struct btf_verifier_env *env,
3025 		       const struct btf_type *t, u32 type_id)
3026 {
3027 	u32 save_log_type_id = env->log_type_id;
3028 	const struct resolve_vertex *v;
3029 	int err = 0;
3030 
3031 	env->resolve_mode = RESOLVE_TBD;
3032 	env_stack_push(env, t, type_id);
3033 	while (!err && (v = env_stack_peak(env))) {
3034 		env->log_type_id = v->type_id;
3035 		err = btf_type_ops(v->t)->resolve(env, v);
3036 	}
3037 
3038 	env->log_type_id = type_id;
3039 	if (err == -E2BIG) {
3040 		btf_verifier_log_type(env, t,
3041 				      "Exceeded max resolving depth:%u",
3042 				      MAX_RESOLVE_DEPTH);
3043 	} else if (err == -EEXIST) {
3044 		btf_verifier_log_type(env, t, "Loop detected");
3045 	}
3046 
3047 	/* Final sanity check */
3048 	if (!err && !btf_resolve_valid(env, t, type_id)) {
3049 		btf_verifier_log_type(env, t, "Invalid resolve state");
3050 		err = -EINVAL;
3051 	}
3052 
3053 	env->log_type_id = save_log_type_id;
3054 	return err;
3055 }
3056 
3057 static int btf_check_all_types(struct btf_verifier_env *env)
3058 {
3059 	struct btf *btf = env->btf;
3060 	u32 type_id;
3061 	int err;
3062 
3063 	err = env_resolve_init(env);
3064 	if (err)
3065 		return err;
3066 
3067 	env->phase++;
3068 	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3069 		const struct btf_type *t = btf_type_by_id(btf, type_id);
3070 
3071 		env->log_type_id = type_id;
3072 		if (btf_type_needs_resolve(t) &&
3073 		    !env_type_is_resolved(env, type_id)) {
3074 			err = btf_resolve(env, t, type_id);
3075 			if (err)
3076 				return err;
3077 		}
3078 
3079 		if (btf_type_is_func_proto(t)) {
3080 			err = btf_func_proto_check(env, t);
3081 			if (err)
3082 				return err;
3083 		}
3084 
3085 		if (btf_type_is_func(t)) {
3086 			err = btf_func_check(env, t);
3087 			if (err)
3088 				return err;
3089 		}
3090 	}
3091 
3092 	return 0;
3093 }
3094 
3095 static int btf_parse_type_sec(struct btf_verifier_env *env)
3096 {
3097 	const struct btf_header *hdr = &env->btf->hdr;
3098 	int err;
3099 
3100 	/* Type section must be aligned to 4 bytes */
3101 	if (hdr->type_off & (sizeof(u32) - 1)) {
3102 		btf_verifier_log(env, "Unaligned type_off");
3103 		return -EINVAL;
3104 	}
3105 
3106 	if (!hdr->type_len) {
3107 		btf_verifier_log(env, "No type found");
3108 		return -EINVAL;
3109 	}
3110 
3111 	err = btf_check_all_metas(env);
3112 	if (err)
3113 		return err;
3114 
3115 	return btf_check_all_types(env);
3116 }
3117 
3118 static int btf_parse_str_sec(struct btf_verifier_env *env)
3119 {
3120 	const struct btf_header *hdr;
3121 	struct btf *btf = env->btf;
3122 	const char *start, *end;
3123 
3124 	hdr = &btf->hdr;
3125 	start = btf->nohdr_data + hdr->str_off;
3126 	end = start + hdr->str_len;
3127 
3128 	if (end != btf->data + btf->data_size) {
3129 		btf_verifier_log(env, "String section is not at the end");
3130 		return -EINVAL;
3131 	}
3132 
3133 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
3134 	    start[0] || end[-1]) {
3135 		btf_verifier_log(env, "Invalid string section");
3136 		return -EINVAL;
3137 	}
3138 
3139 	btf->strings = start;
3140 
3141 	return 0;
3142 }
3143 
3144 static const size_t btf_sec_info_offset[] = {
3145 	offsetof(struct btf_header, type_off),
3146 	offsetof(struct btf_header, str_off),
3147 };
3148 
3149 static int btf_sec_info_cmp(const void *a, const void *b)
3150 {
3151 	const struct btf_sec_info *x = a;
3152 	const struct btf_sec_info *y = b;
3153 
3154 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3155 }
3156 
3157 static int btf_check_sec_info(struct btf_verifier_env *env,
3158 			      u32 btf_data_size)
3159 {
3160 	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
3161 	u32 total, expected_total, i;
3162 	const struct btf_header *hdr;
3163 	const struct btf *btf;
3164 
3165 	btf = env->btf;
3166 	hdr = &btf->hdr;
3167 
3168 	/* Populate the secs from hdr */
3169 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
3170 		secs[i] = *(struct btf_sec_info *)((void *)hdr +
3171 						   btf_sec_info_offset[i]);
3172 
3173 	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
3174 	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
3175 
3176 	/* Check for gaps and overlap among sections */
3177 	total = 0;
3178 	expected_total = btf_data_size - hdr->hdr_len;
3179 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
3180 		if (expected_total < secs[i].off) {
3181 			btf_verifier_log(env, "Invalid section offset");
3182 			return -EINVAL;
3183 		}
3184 		if (total < secs[i].off) {
3185 			/* gap */
3186 			btf_verifier_log(env, "Unsupported section found");
3187 			return -EINVAL;
3188 		}
3189 		if (total > secs[i].off) {
3190 			btf_verifier_log(env, "Section overlap found");
3191 			return -EINVAL;
3192 		}
3193 		if (expected_total - total < secs[i].len) {
3194 			btf_verifier_log(env,
3195 					 "Total section length too long");
3196 			return -EINVAL;
3197 		}
3198 		total += secs[i].len;
3199 	}
3200 
3201 	/* There is data other than hdr and known sections */
3202 	if (expected_total != total) {
3203 		btf_verifier_log(env, "Unsupported section found");
3204 		return -EINVAL;
3205 	}
3206 
3207 	return 0;
3208 }
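
/* Example (illustrative): with only the type and string sections known,
 * the checks above require the area after the header to be tiled exactly:
 * for a blob of btf_data_size bytes, the header takes hdr_len bytes, the
 * type section sits at type_off for type_len bytes and the string section
 * at str_off for str_len bytes, with type_len + str_len ==
 * btf_data_size - hdr_len (section offsets are relative to the end of the
 * header).  Any gap, overlap or trailing data is rejected.
 */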
3209 
3210 static int btf_parse_hdr(struct btf_verifier_env *env)
3211 {
3212 	u32 hdr_len, hdr_copy, btf_data_size;
3213 	const struct btf_header *hdr;
3214 	struct btf *btf;
3215 	int err;
3216 
3217 	btf = env->btf;
3218 	btf_data_size = btf->data_size;
3219 
3220 	if (btf_data_size <
3221 	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
3222 		btf_verifier_log(env, "hdr_len not found");
3223 		return -EINVAL;
3224 	}
3225 
3226 	hdr = btf->data;
3227 	hdr_len = hdr->hdr_len;
3228 	if (btf_data_size < hdr_len) {
3229 		btf_verifier_log(env, "btf_header not found");
3230 		return -EINVAL;
3231 	}
3232 
3233 	/* Ensure the unsupported header fields are zero */
3234 	if (hdr_len > sizeof(btf->hdr)) {
3235 		u8 *expected_zero = btf->data + sizeof(btf->hdr);
3236 		u8 *end = btf->data + hdr_len;
3237 
3238 		for (; expected_zero < end; expected_zero++) {
3239 			if (*expected_zero) {
3240 				btf_verifier_log(env, "Unsupported btf_header");
3241 				return -E2BIG;
3242 			}
3243 		}
3244 	}
3245 
3246 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
3247 	memcpy(&btf->hdr, btf->data, hdr_copy);
3248 
3249 	hdr = &btf->hdr;
3250 
3251 	btf_verifier_log_hdr(env, btf_data_size);
3252 
3253 	if (hdr->magic != BTF_MAGIC) {
3254 		btf_verifier_log(env, "Invalid magic");
3255 		return -EINVAL;
3256 	}
3257 
3258 	if (hdr->version != BTF_VERSION) {
3259 		btf_verifier_log(env, "Unsupported version");
3260 		return -ENOTSUPP;
3261 	}
3262 
3263 	if (hdr->flags) {
3264 		btf_verifier_log(env, "Unsupported flags");
3265 		return -ENOTSUPP;
3266 	}
3267 
3268 	if (btf_data_size == hdr->hdr_len) {
3269 		btf_verifier_log(env, "No data");
3270 		return -EINVAL;
3271 	}
3272 
3273 	err = btf_check_sec_info(env, btf_data_size);
3274 	if (err)
3275 		return err;
3276 
3277 	return 0;
3278 }
3279 
3280 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
3281 			     u32 log_level, char __user *log_ubuf, u32 log_size)
3282 {
3283 	struct btf_verifier_env *env = NULL;
3284 	struct bpf_verifier_log *log;
3285 	struct btf *btf = NULL;
3286 	u8 *data;
3287 	int err;
3288 
3289 	if (btf_data_size > BTF_MAX_SIZE)
3290 		return ERR_PTR(-E2BIG);
3291 
3292 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3293 	if (!env)
3294 		return ERR_PTR(-ENOMEM);
3295 
3296 	log = &env->log;
3297 	if (log_level || log_ubuf || log_size) {
3298 		/* user requested verbose verifier output
3299 		 * and supplied buffer to store the verification trace
3300 		 */
3301 		log->level = log_level;
3302 		log->ubuf = log_ubuf;
3303 		log->len_total = log_size;
3304 
3305 		/* log attributes have to be sane */
3306 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
3307 		    !log->level || !log->ubuf) {
3308 			err = -EINVAL;
3309 			goto errout;
3310 		}
3311 	}
3312 
3313 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3314 	if (!btf) {
3315 		err = -ENOMEM;
3316 		goto errout;
3317 	}
3318 	env->btf = btf;
3319 
3320 	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
3321 	if (!data) {
3322 		err = -ENOMEM;
3323 		goto errout;
3324 	}
3325 
3326 	btf->data = data;
3327 	btf->data_size = btf_data_size;
3328 
3329 	if (copy_from_user(data, btf_data, btf_data_size)) {
3330 		err = -EFAULT;
3331 		goto errout;
3332 	}
3333 
3334 	err = btf_parse_hdr(env);
3335 	if (err)
3336 		goto errout;
3337 
3338 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3339 
3340 	err = btf_parse_str_sec(env);
3341 	if (err)
3342 		goto errout;
3343 
3344 	err = btf_parse_type_sec(env);
3345 	if (err)
3346 		goto errout;
3347 
3348 	if (log->level && bpf_verifier_log_full(log)) {
3349 		err = -ENOSPC;
3350 		goto errout;
3351 	}
3352 
3353 	btf_verifier_env_free(env);
3354 	refcount_set(&btf->refcnt, 1);
3355 	return btf;
3356 
3357 errout:
3358 	btf_verifier_env_free(env);
3359 	if (btf)
3360 		btf_free(btf);
3361 	return ERR_PTR(err);
3362 }
3363 
3364 extern char __weak _binary__btf_vmlinux_bin_start[];
3365 extern char __weak _binary__btf_vmlinux_bin_end[];
3366 
3367 struct btf *btf_parse_vmlinux(void)
3368 {
3369 	struct btf_verifier_env *env = NULL;
3370 	struct bpf_verifier_log *log;
3371 	struct btf *btf = NULL;
3372 	int err;
3373 
3374 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3375 	if (!env)
3376 		return ERR_PTR(-ENOMEM);
3377 
3378 	log = &env->log;
3379 	log->level = BPF_LOG_KERNEL;
3380 
3381 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3382 	if (!btf) {
3383 		err = -ENOMEM;
3384 		goto errout;
3385 	}
3386 	env->btf = btf;
3387 
3388 	btf->data = _binary__btf_vmlinux_bin_start;
3389 	btf->data_size = _binary__btf_vmlinux_bin_end -
3390 		_binary__btf_vmlinux_bin_start;
3391 
3392 	err = btf_parse_hdr(env);
3393 	if (err)
3394 		goto errout;
3395 
3396 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3397 
3398 	err = btf_parse_str_sec(env);
3399 	if (err)
3400 		goto errout;
3401 
3402 	err = btf_check_all_metas(env);
3403 	if (err)
3404 		goto errout;
3405 
3406 	btf_verifier_env_free(env);
3407 	refcount_set(&btf->refcnt, 1);
3408 	return btf;
3409 
3410 errout:
3411 	btf_verifier_env_free(env);
3412 	if (btf) {
3413 		kvfree(btf->types);
3414 		kfree(btf);
3415 	}
3416 	return ERR_PTR(err);
3417 }
3418 
3419 extern struct btf *btf_vmlinux;
3420 
3421 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
3422 		    const struct bpf_prog *prog,
3423 		    struct bpf_insn_access_aux *info)
3424 {
3425 	const struct btf_type *t = prog->aux->attach_func_proto;
3426 	const char *tname = prog->aux->attach_func_name;
3427 	struct bpf_verifier_log *log = info->log;
3428 	const struct btf_param *args;
3429 	u32 nr_args, arg;
3430 
3431 	if (off % 8) {
3432 		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
3433 			tname, off);
3434 		return false;
3435 	}
3436 	arg = off / 8;
3437 	args = (const struct btf_param *)(t + 1);
3438 	nr_args = btf_type_vlen(t);
3439 	if (prog->aux->attach_btf_trace) {
3440 		/* skip first 'void *__data' argument in btf_trace_##name typedef */
3441 		args++;
3442 		nr_args--;
3443 	}
3444 	if (arg >= nr_args) {
3445 		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
3446 			tname, arg);
3447 		return false;
3448 	}
3449 
3450 	t = btf_type_by_id(btf_vmlinux, args[arg].type);
3451 	/* skip modifiers */
3452 	while (btf_type_is_modifier(t))
3453 		t = btf_type_by_id(btf_vmlinux, t->type);
3454 	if (btf_type_is_int(t))
3455 		/* accessing a scalar */
3456 		return true;
3457 	if (!btf_type_is_ptr(t)) {
3458 		bpf_log(log,
3459 			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
3460 			tname, arg,
3461 			__btf_name_by_offset(btf_vmlinux, t->name_off),
3462 			btf_kind_str[BTF_INFO_KIND(t->info)]);
3463 		return false;
3464 	}
3465 	if (t->type == 0)
3466 		/* This is a pointer to void.
3467 		 * It is the same as scalar from the verifier safety pov.
3468 		 * No further pointer walking is allowed.
3469 		 */
3470 		return true;
3471 
3472 	/* this is a pointer to another type */
3473 	info->reg_type = PTR_TO_BTF_ID;
3474 	info->btf_id = t->type;
3475 
3476 	t = btf_type_by_id(btf_vmlinux, t->type);
3477 	/* skip modifiers */
3478 	while (btf_type_is_modifier(t))
3479 		t = btf_type_by_id(btf_vmlinux, t->type);
3480 	if (!btf_type_is_struct(t)) {
3481 		bpf_log(log,
3482 			"func '%s' arg%d type %s is not a struct\n",
3483 			tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
3484 		return false;
3485 	}
3486 	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
3487 		tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
3488 		__btf_name_by_offset(btf_vmlinux, t->name_off));
3489 	return true;
3490 }
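
/* Example (illustrative): for a BTF-typed tracing program whose attach
 * target is
 *
 *	int kern_func(struct sk_buff *skb, u32 len);
 *
 * the context is treated as an array of 8-byte arguments, so a load of
 * ctx+0 resolves to "skb" (PTR_TO_BTF_ID of struct sk_buff) and a load of
 * ctx+8 resolves to "len" (a scalar).  Unaligned offsets and offsets past
 * the last argument are rejected by btf_ctx_access() above.
 */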
3491 
3492 int btf_struct_access(struct bpf_verifier_log *log,
3493 		      const struct btf_type *t, int off, int size,
3494 		      enum bpf_access_type atype,
3495 		      u32 *next_btf_id)
3496 {
3497 	const struct btf_member *member;
3498 	const struct btf_type *mtype;
3499 	const char *tname, *mname;
3500 	int i, moff = 0, msize;
3501 
3502 again:
3503 	tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3504 	if (!btf_type_is_struct(t)) {
3505 		bpf_log(log, "Type '%s' is not a struct", tname);
3506 		return -EINVAL;
3507 	}
3508 
3509 	for_each_member(i, t, member) {
3510 		/* offset of the field in bits */
3511 		moff = btf_member_bit_offset(t, member);
3512 
3513 		if (btf_member_bitfield_size(t, member))
3514 			/* bitfields are not supported yet */
3515 			continue;
3516 
3517 		if (off + size <= moff / 8)
3518 			/* won't find anything, field is already too far */
3519 			break;
3520 
3521 		/* type of the field */
3522 		mtype = btf_type_by_id(btf_vmlinux, member->type);
3523 		mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
3524 
3525 		/* skip modifiers */
3526 		while (btf_type_is_modifier(mtype))
3527 			mtype = btf_type_by_id(btf_vmlinux, mtype->type);
3528 
3529 		if (btf_type_is_array(mtype))
3530 			/* array deref is not supported yet */
3531 			continue;
3532 
3533 		if (!btf_type_has_size(mtype) && !btf_type_is_ptr(mtype)) {
3534 			bpf_log(log, "field %s doesn't have size\n", mname);
3535 			return -EFAULT;
3536 		}
3537 		if (btf_type_is_ptr(mtype))
3538 			msize = 8;
3539 		else
3540 			msize = mtype->size;
3541 		if (off >= moff / 8 + msize)
3542 			/* no overlap with member, keep iterating */
3543 			continue;
3544 		/* the 'off' we're looking for is either equal to start
3545 		 * of this field or inside of this struct
3546 		 */
3547 		if (btf_type_is_struct(mtype)) {
3548 			/* our field must be inside that union or struct */
3549 			t = mtype;
3550 
3551 			/* adjust offset we're looking for */
3552 			off -= moff / 8;
3553 			goto again;
3554 		}
3555 		if (msize != size) {
3556 			/* field access size doesn't match */
3557 			bpf_log(log,
3558 				"cannot access %d bytes in struct %s field %s that has size %d\n",
3559 				size, tname, mname, msize);
3560 			return -EACCES;
3561 		}
3562 
3563 		if (btf_type_is_ptr(mtype)) {
3564 			const struct btf_type *stype;
3565 
3566 			stype = btf_type_by_id(btf_vmlinux, mtype->type);
3567 			/* skip modifiers */
3568 			while (btf_type_is_modifier(stype))
3569 				stype = btf_type_by_id(btf_vmlinux, stype->type);
3570 			if (btf_type_is_struct(stype)) {
3571 				*next_btf_id = mtype->type;
3572 				return PTR_TO_BTF_ID;
3573 			}
3574 		}
3575 		/* all other fields are treated as scalars */
3576 		return SCALAR_VALUE;
3577 	}
3578 	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
3579 	return -EINVAL;
3580 }
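
/* Example (illustrative): given PTR_TO_BTF_ID access to
 *
 *	struct foo { struct bar *next; u64 cnt; };
 *
 * an 8-byte read at offset 0 returns PTR_TO_BTF_ID with *next_btf_id set
 * to "struct bar", while an 8-byte read at offset 8 returns SCALAR_VALUE.
 * Reads that mismatch a member's size are rejected, and bitfield or array
 * members are skipped, as noted in the loop above.
 */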
3581 
3582 u32 btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, int arg)
3583 {
3584 	char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
3585 	const struct btf_param *args;
3586 	const struct btf_type *t;
3587 	const char *tname, *sym;
3588 	u32 btf_id, i;
3589 
3590 	if (IS_ERR(btf_vmlinux)) {
3591 		bpf_log(log, "btf_vmlinux is malformed\n");
3592 		return -EINVAL;
3593 	}
3594 
3595 	sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
3596 	if (!sym) {
3597 		bpf_log(log, "kernel doesn't have kallsyms\n");
3598 		return -EFAULT;
3599 	}
3600 
3601 	for (i = 1; i <= btf_vmlinux->nr_types; i++) {
3602 		t = btf_type_by_id(btf_vmlinux, i);
3603 		if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
3604 			continue;
3605 		tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3606 		if (!strcmp(tname, fnname))
3607 			break;
3608 	}
3609 	if (i > btf_vmlinux->nr_types) {
3610 		bpf_log(log, "helper %s type is not found\n", fnname);
3611 		return -ENOENT;
3612 	}
3613 
3614 	t = btf_type_by_id(btf_vmlinux, t->type);
3615 	if (!btf_type_is_ptr(t))
3616 		return -EFAULT;
3617 	t = btf_type_by_id(btf_vmlinux, t->type);
3618 	if (!btf_type_is_func_proto(t))
3619 		return -EFAULT;
3620 
3621 	args = (const struct btf_param *)(t + 1);
3622 	if (arg >= btf_type_vlen(t)) {
3623 		bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
3624 			fnname, arg);
3625 		return -EINVAL;
3626 	}
3627 
3628 	t = btf_type_by_id(btf_vmlinux, args[arg].type);
3629 	if (!btf_type_is_ptr(t) || !t->type) {
3630 		/* anything but the pointer to struct is a helper config bug */
3631 		bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
3632 		return -EFAULT;
3633 	}
3634 	btf_id = t->type;
3635 	t = btf_type_by_id(btf_vmlinux, t->type);
3636 	/* skip modifiers */
3637 	while (btf_type_is_modifier(t)) {
3638 		btf_id = t->type;
3639 		t = btf_type_by_id(btf_vmlinux, t->type);
3640 	}
3641 	if (!btf_type_is_struct(t)) {
3642 		bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
3643 		return -EFAULT;
3644 	}
3645 	bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
3646 		arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
3647 	return btf_id;
3648 }
3649 
3650 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
3651 		       struct seq_file *m)
3652 {
3653 	const struct btf_type *t = btf_type_by_id(btf, type_id);
3654 
3655 	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
3656 }
3657 
3658 #ifdef CONFIG_PROC_FS
3659 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
3660 {
3661 	const struct btf *btf = filp->private_data;
3662 
3663 	seq_printf(m, "btf_id:\t%u\n", btf->id);
3664 }
3665 #endif
3666 
3667 static int btf_release(struct inode *inode, struct file *filp)
3668 {
3669 	btf_put(filp->private_data);
3670 	return 0;
3671 }
3672 
3673 const struct file_operations btf_fops = {
3674 #ifdef CONFIG_PROC_FS
3675 	.show_fdinfo	= bpf_btf_show_fdinfo,
3676 #endif
3677 	.release	= btf_release,
3678 };
3679 
3680 static int __btf_new_fd(struct btf *btf)
3681 {
3682 	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
3683 }
3684 
3685 int btf_new_fd(const union bpf_attr *attr)
3686 {
3687 	struct btf *btf;
3688 	int ret;
3689 
3690 	btf = btf_parse(u64_to_user_ptr(attr->btf),
3691 			attr->btf_size, attr->btf_log_level,
3692 			u64_to_user_ptr(attr->btf_log_buf),
3693 			attr->btf_log_size);
3694 	if (IS_ERR(btf))
3695 		return PTR_ERR(btf);
3696 
3697 	ret = btf_alloc_id(btf);
3698 	if (ret) {
3699 		btf_free(btf);
3700 		return ret;
3701 	}
3702 
3703 	/*
3704 	 * The BTF ID is published to the userspace.
3705 	 * All BTF free must go through call_rcu() from
3706 	 * now on (i.e. free by calling btf_put()).
3707 	 */
3708 
3709 	ret = __btf_new_fd(btf);
3710 	if (ret < 0)
3711 		btf_put(btf);
3712 
3713 	return ret;
3714 }
3715 
3716 struct btf *btf_get_by_fd(int fd)
3717 {
3718 	struct btf *btf;
3719 	struct fd f;
3720 
3721 	f = fdget(fd);
3722 
3723 	if (!f.file)
3724 		return ERR_PTR(-EBADF);
3725 
3726 	if (f.file->f_op != &btf_fops) {
3727 		fdput(f);
3728 		return ERR_PTR(-EINVAL);
3729 	}
3730 
3731 	btf = f.file->private_data;
3732 	refcount_inc(&btf->refcnt);
3733 	fdput(f);
3734 
3735 	return btf;
3736 }
3737 
3738 int btf_get_info_by_fd(const struct btf *btf,
3739 		       const union bpf_attr *attr,
3740 		       union bpf_attr __user *uattr)
3741 {
3742 	struct bpf_btf_info __user *uinfo;
3743 	struct bpf_btf_info info = {};
3744 	u32 info_copy, btf_copy;
3745 	void __user *ubtf;
3746 	u32 uinfo_len;
3747 
3748 	uinfo = u64_to_user_ptr(attr->info.info);
3749 	uinfo_len = attr->info.info_len;
3750 
3751 	info_copy = min_t(u32, uinfo_len, sizeof(info));
3752 	if (copy_from_user(&info, uinfo, info_copy))
3753 		return -EFAULT;
3754 
3755 	info.id = btf->id;
3756 	ubtf = u64_to_user_ptr(info.btf);
3757 	btf_copy = min_t(u32, btf->data_size, info.btf_size);
3758 	if (copy_to_user(ubtf, btf->data, btf_copy))
3759 		return -EFAULT;
3760 	info.btf_size = btf->data_size;
3761 
3762 	if (copy_to_user(uinfo, &info, info_copy) ||
3763 	    put_user(info_copy, &uattr->info.info_len))
3764 		return -EFAULT;
3765 
3766 	return 0;
3767 }
3768 
3769 int btf_get_fd_by_id(u32 id)
3770 {
3771 	struct btf *btf;
3772 	int fd;
3773 
3774 	rcu_read_lock();
3775 	btf = idr_find(&btf_idr, id);
3776 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
3777 		btf = ERR_PTR(-ENOENT);
3778 	rcu_read_unlock();
3779 
3780 	if (IS_ERR(btf))
3781 		return PTR_ERR(btf);
3782 
3783 	fd = __btf_new_fd(btf);
3784 	if (fd < 0)
3785 		btf_put(btf);
3786 
3787 	return fd;
3788 }
3789 
3790 u32 btf_id(const struct btf *btf)
3791 {
3792 	return btf->id;
3793 }
3794