xref: /linux/kernel/bpf/btf.c (revision dc0d1c4519095a6c6bbd9ec4a808674aba502741)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/types.h>
6 #include <linux/seq_file.h>
7 #include <linux/compiler.h>
8 #include <linux/errno.h>
9 #include <linux/slab.h>
10 #include <linux/anon_inodes.h>
11 #include <linux/file.h>
12 #include <linux/uaccess.h>
13 #include <linux/kernel.h>
14 #include <linux/idr.h>
15 #include <linux/sort.h>
16 #include <linux/bpf_verifier.h>
17 #include <linux/btf.h>
18 
19 /* BTF (BPF Type Format) is the meta data format which describes
20  * the data types of BPF program/map.  Hence, it basically focus
21  * on the C programming language which the modern BPF is primary
22  * using.
23  *
24  * ELF Section:
25  * ~~~~~~~~~~~
26  * The BTF data is stored under the ".BTF" ELF section
27  *
28  * struct btf_type:
29  * ~~~~~~~~~~~~~~~
30  * Each 'struct btf_type' object describes a C data type.
31  * Depending on the type it is describing, a 'struct btf_type'
32  * object may be followed by more data.  F.e.
33  * To describe an array, 'struct btf_type' is followed by
34  * 'struct btf_array'.
35  *
36  * 'struct btf_type' and any extra data following it are
37  * 4 bytes aligned.
38  *
39  * Type section:
40  * ~~~~~~~~~~~~~
41  * The BTF type section contains a list of 'struct btf_type' objects.
42  * Each one describes a C type.  Recall from the above section
43  * that a 'struct btf_type' object could be immediately followed by extra
44  * data in order to desribe some particular C types.
45  *
46  * type_id:
47  * ~~~~~~~
48  * Each btf_type object is identified by a type_id.  The type_id
49  * is implicitly implied by the location of the btf_type object in
50  * the BTF type section.  The first one has type_id 1.  The second
51  * one has type_id 2...etc.  Hence, an earlier btf_type has
52  * a smaller type_id.
53  *
54  * A btf_type object may refer to another btf_type object by using
55  * type_id (i.e. the "type" in the "struct btf_type").
56  *
57  * NOTE that we cannot assume any reference-order.
58  * A btf_type object can refer to an earlier btf_type object
59  * but it can also refer to a later btf_type object.
60  *
61  * For example, to describe "const void *".  A btf_type
62  * object describing "const" may refer to another btf_type
63  * object describing "void *".  This type-reference is done
64  * by specifying type_id:
65  *
66  * [1] CONST (anon) type_id=2
67  * [2] PTR (anon) type_id=0
68  *
69  * The above is the btf_verifier debug log:
70  *   - Each line started with "[?]" is a btf_type object
71  *   - [?] is the type_id of the btf_type object.
72  *   - CONST/PTR is the BTF_KIND_XXX
73  *   - "(anon)" is the name of the type.  It just
74  *     happens that CONST and PTR has no name.
75  *   - type_id=XXX is the 'u32 type' in btf_type
76  *
77  * NOTE: "void" has type_id 0
78  *
79  * String section:
80  * ~~~~~~~~~~~~~~
81  * The BTF string section contains the names used by the type section.
82  * Each string is referred by an "offset" from the beginning of the
83  * string section.
84  *
85  * Each string is '\0' terminated.
86  *
87  * The first character in the string section must be '\0'
88  * which is used to mean 'anonymous'. Some btf_type may not
89  * have a name.
90  */
91 
92 /* BTF verification:
93  *
94  * To verify BTF data, two passes are needed.
95  *
96  * Pass #1
97  * ~~~~~~~
98  * The first pass is to collect all btf_type objects to
99  * an array: "btf->types".
100  *
101  * Depending on the C type that a btf_type is describing,
102  * a btf_type may be followed by extra data.  We don't know
103  * how many btf_type is there, and more importantly we don't
104  * know where each btf_type is located in the type section.
105  *
106  * Without knowing the location of each type_id, most verifications
107  * cannot be done.  e.g. an earlier btf_type may refer to a later
108  * btf_type (recall the "const void *" above), so we cannot
109  * check this type-reference in the first pass.
110  *
111  * In the first pass, it still does some verifications (e.g.
112  * checking the name is a valid offset to the string section).
113  *
114  * Pass #2
115  * ~~~~~~~
116  * The main focus is to resolve a btf_type that is referring
117  * to another type.
118  *
119  * We have to ensure the referring type:
120  * 1) does exist in the BTF (i.e. in btf->types[])
121  * 2) does not cause a loop:
122  *	struct A {
123  *		struct B b;
124  *	};
125  *
126  *	struct B {
127  *		struct A a;
128  *	};
129  *
130  * btf_type_needs_resolve() decides if a btf_type needs
131  * to be resolved.
132  *
133  * The needs_resolve type implements the "resolve()" ops which
134  * essentially does a DFS and detects backedge.
135  *
136  * During resolve (or DFS), different C types have different
137  * "RESOLVED" conditions.
138  *
139  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
140  * members because a member is always referring to another
141  * type.  A struct's member can be treated as "RESOLVED" if
142  * it is referring to a BTF_KIND_PTR.  Otherwise, the
143  * following valid C struct would be rejected:
144  *
145  *	struct A {
146  *		int m;
147  *		struct A *a;
148  *	};
149  *
150  * When resolving a BTF_KIND_PTR, it needs to keep resolving if
151  * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
152  * detect a pointer loop, e.g.:
153  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
154  *                        ^                                         |
155  *                        +-----------------------------------------+
156  *
157  */
158 
159 #define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
160 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
161 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
162 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
163 #define BITS_ROUNDUP_BYTES(bits) \
164 	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
165 
166 #define BTF_INFO_MASK 0x0f00ffff
167 #define BTF_INT_MASK 0x0fffffff
168 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
169 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
170 
171 /* 16MB for 64k structs and each has 16 members and
172  * a few MB spaces for the string section.
173  * The hard limit is S32_MAX.
174  */
175 #define BTF_MAX_SIZE (16 * 1024 * 1024)
176 
177 #define for_each_member(i, struct_type, member)			\
178 	for (i = 0, member = btf_type_member(struct_type);	\
179 	     i < btf_type_vlen(struct_type);			\
180 	     i++, member++)
181 
182 #define for_each_member_from(i, from, struct_type, member)		\
183 	for (i = from, member = btf_type_member(struct_type) + from;	\
184 	     i < btf_type_vlen(struct_type);				\
185 	     i++, member++)
186 
187 static DEFINE_IDR(btf_idr);
188 static DEFINE_SPINLOCK(btf_idr_lock);
189 
190 struct btf {
191 	void *data;
192 	struct btf_type **types;
193 	u32 *resolved_ids;
194 	u32 *resolved_sizes;
195 	const char *strings;
196 	void *nohdr_data;
197 	struct btf_header hdr;
198 	u32 nr_types;
199 	u32 types_size;
200 	u32 data_size;
201 	refcount_t refcnt;
202 	u32 id;
203 	struct rcu_head rcu;
204 };
205 
206 enum verifier_phase {
207 	CHECK_META,
208 	CHECK_TYPE,
209 };
210 
211 struct resolve_vertex {
212 	const struct btf_type *t;
213 	u32 type_id;
214 	u16 next_member;
215 };
216 
217 enum visit_state {
218 	NOT_VISITED,
219 	VISITED,
220 	RESOLVED,
221 };
222 
223 enum resolve_mode {
224 	RESOLVE_TBD,	/* To Be Determined */
225 	RESOLVE_PTR,	/* Resolving for Pointer */
226 	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
227 					 * or array
228 					 */
229 };
230 
231 #define MAX_RESOLVE_DEPTH 32
232 
233 struct btf_sec_info {
234 	u32 off;
235 	u32 len;
236 };
237 
238 struct btf_verifier_env {
239 	struct btf *btf;
240 	u8 *visit_states;
241 	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
242 	struct bpf_verifier_log log;
243 	u32 log_type_id;
244 	u32 top_stack;
245 	enum verifier_phase phase;
246 	enum resolve_mode resolve_mode;
247 };
248 
249 static const char * const btf_kind_str[NR_BTF_KINDS] = {
250 	[BTF_KIND_UNKN]		= "UNKNOWN",
251 	[BTF_KIND_INT]		= "INT",
252 	[BTF_KIND_PTR]		= "PTR",
253 	[BTF_KIND_ARRAY]	= "ARRAY",
254 	[BTF_KIND_STRUCT]	= "STRUCT",
255 	[BTF_KIND_UNION]	= "UNION",
256 	[BTF_KIND_ENUM]		= "ENUM",
257 	[BTF_KIND_FWD]		= "FWD",
258 	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
259 	[BTF_KIND_VOLATILE]	= "VOLATILE",
260 	[BTF_KIND_CONST]	= "CONST",
261 	[BTF_KIND_RESTRICT]	= "RESTRICT",
262 };
263 
264 struct btf_kind_operations {
265 	s32 (*check_meta)(struct btf_verifier_env *env,
266 			  const struct btf_type *t,
267 			  u32 meta_left);
268 	int (*resolve)(struct btf_verifier_env *env,
269 		       const struct resolve_vertex *v);
270 	int (*check_member)(struct btf_verifier_env *env,
271 			    const struct btf_type *struct_type,
272 			    const struct btf_member *member,
273 			    const struct btf_type *member_type);
274 	void (*log_details)(struct btf_verifier_env *env,
275 			    const struct btf_type *t);
276 	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
277 			 u32 type_id, void *data, u8 bits_offsets,
278 			 struct seq_file *m);
279 };
280 
281 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
282 static struct btf_type btf_void;
283 
284 static bool btf_type_is_modifier(const struct btf_type *t)
285 {
286 	/* Some of them is not strictly a C modifier
287 	 * but they are grouped into the same bucket
288 	 * for BTF concern:
289 	 *   A type (t) that refers to another
290 	 *   type through t->type AND its size cannot
291 	 *   be determined without following the t->type.
292 	 *
293 	 * ptr does not fall into this bucket
294 	 * because its size is always sizeof(void *).
295 	 */
296 	switch (BTF_INFO_KIND(t->info)) {
297 	case BTF_KIND_TYPEDEF:
298 	case BTF_KIND_VOLATILE:
299 	case BTF_KIND_CONST:
300 	case BTF_KIND_RESTRICT:
301 		return true;
302 	}
303 
304 	return false;
305 }
306 
307 static bool btf_type_is_void(const struct btf_type *t)
308 {
309 	/* void => no type and size info.
310 	 * Hence, FWD is also treated as void.
311 	 */
312 	return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
313 }
314 
315 static bool btf_type_is_void_or_null(const struct btf_type *t)
316 {
317 	return !t || btf_type_is_void(t);
318 }
319 
320 /* union is only a special case of struct:
321  * all its offsetof(member) == 0
322  */
323 static bool btf_type_is_struct(const struct btf_type *t)
324 {
325 	u8 kind = BTF_INFO_KIND(t->info);
326 
327 	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
328 }
329 
330 static bool btf_type_is_array(const struct btf_type *t)
331 {
332 	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
333 }
334 
335 static bool btf_type_is_ptr(const struct btf_type *t)
336 {
337 	return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
338 }
339 
340 static bool btf_type_is_int(const struct btf_type *t)
341 {
342 	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
343 }
344 
345 /* What types need to be resolved?
346  *
347  * btf_type_is_modifier() is an obvious one.
348  *
349  * btf_type_is_struct() because its member refers to
350  * another type (through member->type).
351 
352  * btf_type_is_array() because its element (array->type)
353  * refers to another type.  Array can be thought of a
354  * special case of struct while array just has the same
355  * member-type repeated by array->nelems of times.
356  */
357 static bool btf_type_needs_resolve(const struct btf_type *t)
358 {
359 	return btf_type_is_modifier(t) ||
360 		btf_type_is_ptr(t) ||
361 		btf_type_is_struct(t) ||
362 		btf_type_is_array(t);
363 }
364 
365 /* t->size can be used */
366 static bool btf_type_has_size(const struct btf_type *t)
367 {
368 	switch (BTF_INFO_KIND(t->info)) {
369 	case BTF_KIND_INT:
370 	case BTF_KIND_STRUCT:
371 	case BTF_KIND_UNION:
372 	case BTF_KIND_ENUM:
373 		return true;
374 	}
375 
376 	return false;
377 }
378 
379 static const char *btf_int_encoding_str(u8 encoding)
380 {
381 	if (encoding == 0)
382 		return "(none)";
383 	else if (encoding == BTF_INT_SIGNED)
384 		return "SIGNED";
385 	else if (encoding == BTF_INT_CHAR)
386 		return "CHAR";
387 	else if (encoding == BTF_INT_BOOL)
388 		return "BOOL";
389 	else
390 		return "UNKN";
391 }
392 
393 static u16 btf_type_vlen(const struct btf_type *t)
394 {
395 	return BTF_INFO_VLEN(t->info);
396 }
397 
398 static u32 btf_type_int(const struct btf_type *t)
399 {
400 	return *(u32 *)(t + 1);
401 }
402 
403 static const struct btf_array *btf_type_array(const struct btf_type *t)
404 {
405 	return (const struct btf_array *)(t + 1);
406 }
407 
408 static const struct btf_member *btf_type_member(const struct btf_type *t)
409 {
410 	return (const struct btf_member *)(t + 1);
411 }
412 
413 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
414 {
415 	return (const struct btf_enum *)(t + 1);
416 }
417 
418 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
419 {
420 	return kind_ops[BTF_INFO_KIND(t->info)];
421 }
422 
423 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
424 {
425 	return BTF_STR_OFFSET_VALID(offset) &&
426 		offset < btf->hdr.str_len;
427 }
428 
429 static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
430 {
431 	if (!offset)
432 		return "(anon)";
433 	else if (offset < btf->hdr.str_len)
434 		return &btf->strings[offset];
435 	else
436 		return "(invalid-name-offset)";
437 }
438 
439 static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
440 {
441 	if (type_id > btf->nr_types)
442 		return NULL;
443 
444 	return btf->types[type_id];
445 }
446 
447 /*
448  * Regular int is not a bit field and it must be either
449  * u8/u16/u32/u64.
450  */
451 static bool btf_type_int_is_regular(const struct btf_type *t)
452 {
453 	u8 nr_bits, nr_bytes;
454 	u32 int_data;
455 
456 	int_data = btf_type_int(t);
457 	nr_bits = BTF_INT_BITS(int_data);
458 	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
459 	if (BITS_PER_BYTE_MASKED(nr_bits) ||
460 	    BTF_INT_OFFSET(int_data) ||
461 	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
462 	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
463 		return false;
464 	}
465 
466 	return true;
467 }
468 
469 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
470 					      const char *fmt, ...)
471 {
472 	va_list args;
473 
474 	va_start(args, fmt);
475 	bpf_verifier_vlog(log, fmt, args);
476 	va_end(args);
477 }
478 
479 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
480 					    const char *fmt, ...)
481 {
482 	struct bpf_verifier_log *log = &env->log;
483 	va_list args;
484 
485 	if (!bpf_verifier_log_needed(log))
486 		return;
487 
488 	va_start(args, fmt);
489 	bpf_verifier_vlog(log, fmt, args);
490 	va_end(args);
491 }
492 
493 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
494 						   const struct btf_type *t,
495 						   bool log_details,
496 						   const char *fmt, ...)
497 {
498 	struct bpf_verifier_log *log = &env->log;
499 	u8 kind = BTF_INFO_KIND(t->info);
500 	struct btf *btf = env->btf;
501 	va_list args;
502 
503 	if (!bpf_verifier_log_needed(log))
504 		return;
505 
506 	__btf_verifier_log(log, "[%u] %s %s%s",
507 			   env->log_type_id,
508 			   btf_kind_str[kind],
509 			   btf_name_by_offset(btf, t->name_off),
510 			   log_details ? " " : "");
511 
512 	if (log_details)
513 		btf_type_ops(t)->log_details(env, t);
514 
515 	if (fmt && *fmt) {
516 		__btf_verifier_log(log, " ");
517 		va_start(args, fmt);
518 		bpf_verifier_vlog(log, fmt, args);
519 		va_end(args);
520 	}
521 
522 	__btf_verifier_log(log, "\n");
523 }
524 
525 #define btf_verifier_log_type(env, t, ...) \
526 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
527 #define btf_verifier_log_basic(env, t, ...) \
528 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
529 
530 __printf(4, 5)
531 static void btf_verifier_log_member(struct btf_verifier_env *env,
532 				    const struct btf_type *struct_type,
533 				    const struct btf_member *member,
534 				    const char *fmt, ...)
535 {
536 	struct bpf_verifier_log *log = &env->log;
537 	struct btf *btf = env->btf;
538 	va_list args;
539 
540 	if (!bpf_verifier_log_needed(log))
541 		return;
542 
543 	/* The CHECK_META phase already did a btf dump.
544 	 *
545 	 * If member is logged again, it must hit an error in
546 	 * parsing this member.  It is useful to print out which
547 	 * struct this member belongs to.
548 	 */
549 	if (env->phase != CHECK_META)
550 		btf_verifier_log_type(env, struct_type, NULL);
551 
552 	__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
553 			   btf_name_by_offset(btf, member->name_off),
554 			   member->type, member->offset);
555 
556 	if (fmt && *fmt) {
557 		__btf_verifier_log(log, " ");
558 		va_start(args, fmt);
559 		bpf_verifier_vlog(log, fmt, args);
560 		va_end(args);
561 	}
562 
563 	__btf_verifier_log(log, "\n");
564 }
565 
566 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
567 				 u32 btf_data_size)
568 {
569 	struct bpf_verifier_log *log = &env->log;
570 	const struct btf *btf = env->btf;
571 	const struct btf_header *hdr;
572 
573 	if (!bpf_verifier_log_needed(log))
574 		return;
575 
576 	hdr = &btf->hdr;
577 	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
578 	__btf_verifier_log(log, "version: %u\n", hdr->version);
579 	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
580 	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
581 	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
582 	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
583 	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
584 	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
585 	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
586 }
587 
588 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
589 {
590 	struct btf *btf = env->btf;
591 
592 	/* < 2 because +1 for btf_void which is always in btf->types[0].
593 	 * btf_void is not accounted in btf->nr_types because btf_void
594 	 * does not come from the BTF file.
595 	 */
596 	if (btf->types_size - btf->nr_types < 2) {
597 		/* Expand 'types' array */
598 
599 		struct btf_type **new_types;
600 		u32 expand_by, new_size;
601 
602 		if (btf->types_size == BTF_MAX_TYPE) {
603 			btf_verifier_log(env, "Exceeded max num of types");
604 			return -E2BIG;
605 		}
606 
607 		expand_by = max_t(u32, btf->types_size >> 2, 16);
608 		new_size = min_t(u32, BTF_MAX_TYPE,
609 				 btf->types_size + expand_by);
610 
611 		new_types = kvcalloc(new_size, sizeof(*new_types),
612 				     GFP_KERNEL | __GFP_NOWARN);
613 		if (!new_types)
614 			return -ENOMEM;
615 
616 		if (btf->nr_types == 0)
617 			new_types[0] = &btf_void;
618 		else
619 			memcpy(new_types, btf->types,
620 			       sizeof(*btf->types) * (btf->nr_types + 1));
621 
622 		kvfree(btf->types);
623 		btf->types = new_types;
624 		btf->types_size = new_size;
625 	}
626 
627 	btf->types[++(btf->nr_types)] = t;
628 
629 	return 0;
630 }
631 
632 static int btf_alloc_id(struct btf *btf)
633 {
634 	int id;
635 
636 	idr_preload(GFP_KERNEL);
637 	spin_lock_bh(&btf_idr_lock);
638 	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
639 	if (id > 0)
640 		btf->id = id;
641 	spin_unlock_bh(&btf_idr_lock);
642 	idr_preload_end();
643 
644 	if (WARN_ON_ONCE(!id))
645 		return -ENOSPC;
646 
647 	return id > 0 ? 0 : id;
648 }
649 
650 static void btf_free_id(struct btf *btf)
651 {
652 	unsigned long flags;
653 
654 	/*
655 	 * In map-in-map, calling map_delete_elem() on outer
656 	 * map will call bpf_map_put on the inner map.
657 	 * It will then eventually call btf_free_id()
658 	 * on the inner map.  Some of the map_delete_elem()
659 	 * implementation may have irq disabled, so
660 	 * we need to use the _irqsave() version instead
661 	 * of the _bh() version.
662 	 */
663 	spin_lock_irqsave(&btf_idr_lock, flags);
664 	idr_remove(&btf_idr, btf->id);
665 	spin_unlock_irqrestore(&btf_idr_lock, flags);
666 }
667 
668 static void btf_free(struct btf *btf)
669 {
670 	kvfree(btf->types);
671 	kvfree(btf->resolved_sizes);
672 	kvfree(btf->resolved_ids);
673 	kvfree(btf->data);
674 	kfree(btf);
675 }
676 
677 static void btf_free_rcu(struct rcu_head *rcu)
678 {
679 	struct btf *btf = container_of(rcu, struct btf, rcu);
680 
681 	btf_free(btf);
682 }
683 
684 void btf_put(struct btf *btf)
685 {
686 	if (btf && refcount_dec_and_test(&btf->refcnt)) {
687 		btf_free_id(btf);
688 		call_rcu(&btf->rcu, btf_free_rcu);
689 	}
690 }
691 
692 static int env_resolve_init(struct btf_verifier_env *env)
693 {
694 	struct btf *btf = env->btf;
695 	u32 nr_types = btf->nr_types;
696 	u32 *resolved_sizes = NULL;
697 	u32 *resolved_ids = NULL;
698 	u8 *visit_states = NULL;
699 
700 	/* +1 for btf_void */
701 	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
702 				  GFP_KERNEL | __GFP_NOWARN);
703 	if (!resolved_sizes)
704 		goto nomem;
705 
706 	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
707 				GFP_KERNEL | __GFP_NOWARN);
708 	if (!resolved_ids)
709 		goto nomem;
710 
711 	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
712 				GFP_KERNEL | __GFP_NOWARN);
713 	if (!visit_states)
714 		goto nomem;
715 
716 	btf->resolved_sizes = resolved_sizes;
717 	btf->resolved_ids = resolved_ids;
718 	env->visit_states = visit_states;
719 
720 	return 0;
721 
722 nomem:
723 	kvfree(resolved_sizes);
724 	kvfree(resolved_ids);
725 	kvfree(visit_states);
726 	return -ENOMEM;
727 }
728 
729 static void btf_verifier_env_free(struct btf_verifier_env *env)
730 {
731 	kvfree(env->visit_states);
732 	kfree(env);
733 }
734 
735 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
736 				     const struct btf_type *next_type)
737 {
738 	switch (env->resolve_mode) {
739 	case RESOLVE_TBD:
740 		/* int, enum or void is a sink */
741 		return !btf_type_needs_resolve(next_type);
742 	case RESOLVE_PTR:
743 		/* int, enum, void, struct or array is a sink for ptr */
744 		return !btf_type_is_modifier(next_type) &&
745 			!btf_type_is_ptr(next_type);
746 	case RESOLVE_STRUCT_OR_ARRAY:
747 		/* int, enum, void or ptr is a sink for struct and array */
748 		return !btf_type_is_modifier(next_type) &&
749 			!btf_type_is_array(next_type) &&
750 			!btf_type_is_struct(next_type);
751 	default:
752 		BUG();
753 	}
754 }
755 
756 static bool env_type_is_resolved(const struct btf_verifier_env *env,
757 				 u32 type_id)
758 {
759 	return env->visit_states[type_id] == RESOLVED;
760 }
761 
762 static int env_stack_push(struct btf_verifier_env *env,
763 			  const struct btf_type *t, u32 type_id)
764 {
765 	struct resolve_vertex *v;
766 
767 	if (env->top_stack == MAX_RESOLVE_DEPTH)
768 		return -E2BIG;
769 
770 	if (env->visit_states[type_id] != NOT_VISITED)
771 		return -EEXIST;
772 
773 	env->visit_states[type_id] = VISITED;
774 
775 	v = &env->stack[env->top_stack++];
776 	v->t = t;
777 	v->type_id = type_id;
778 	v->next_member = 0;
779 
780 	if (env->resolve_mode == RESOLVE_TBD) {
781 		if (btf_type_is_ptr(t))
782 			env->resolve_mode = RESOLVE_PTR;
783 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
784 			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
785 	}
786 
787 	return 0;
788 }
789 
790 static void env_stack_set_next_member(struct btf_verifier_env *env,
791 				      u16 next_member)
792 {
793 	env->stack[env->top_stack - 1].next_member = next_member;
794 }
795 
796 static void env_stack_pop_resolved(struct btf_verifier_env *env,
797 				   u32 resolved_type_id,
798 				   u32 resolved_size)
799 {
800 	u32 type_id = env->stack[--(env->top_stack)].type_id;
801 	struct btf *btf = env->btf;
802 
803 	btf->resolved_sizes[type_id] = resolved_size;
804 	btf->resolved_ids[type_id] = resolved_type_id;
805 	env->visit_states[type_id] = RESOLVED;
806 }
807 
808 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
809 {
810 	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
811 }
812 
813 /* The input param "type_id" must point to a needs_resolve type */
814 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
815 						  u32 *type_id)
816 {
817 	*type_id = btf->resolved_ids[*type_id];
818 	return btf_type_by_id(btf, *type_id);
819 }
820 
821 const struct btf_type *btf_type_id_size(const struct btf *btf,
822 					u32 *type_id, u32 *ret_size)
823 {
824 	const struct btf_type *size_type;
825 	u32 size_type_id = *type_id;
826 	u32 size = 0;
827 
828 	size_type = btf_type_by_id(btf, size_type_id);
829 	if (btf_type_is_void_or_null(size_type))
830 		return NULL;
831 
832 	if (btf_type_has_size(size_type)) {
833 		size = size_type->size;
834 	} else if (btf_type_is_array(size_type)) {
835 		size = btf->resolved_sizes[size_type_id];
836 	} else if (btf_type_is_ptr(size_type)) {
837 		size = sizeof(void *);
838 	} else {
839 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
840 			return NULL;
841 
842 		size = btf->resolved_sizes[size_type_id];
843 		size_type_id = btf->resolved_ids[size_type_id];
844 		size_type = btf_type_by_id(btf, size_type_id);
845 		if (btf_type_is_void(size_type))
846 			return NULL;
847 	}
848 
849 	*type_id = size_type_id;
850 	if (ret_size)
851 		*ret_size = size;
852 
853 	return size_type;
854 }
855 
856 static int btf_df_check_member(struct btf_verifier_env *env,
857 			       const struct btf_type *struct_type,
858 			       const struct btf_member *member,
859 			       const struct btf_type *member_type)
860 {
861 	btf_verifier_log_basic(env, struct_type,
862 			       "Unsupported check_member");
863 	return -EINVAL;
864 }
865 
866 static int btf_df_resolve(struct btf_verifier_env *env,
867 			  const struct resolve_vertex *v)
868 {
869 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
870 	return -EINVAL;
871 }
872 
873 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
874 			    u32 type_id, void *data, u8 bits_offsets,
875 			    struct seq_file *m)
876 {
877 	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
878 }
879 
880 static int btf_int_check_member(struct btf_verifier_env *env,
881 				const struct btf_type *struct_type,
882 				const struct btf_member *member,
883 				const struct btf_type *member_type)
884 {
885 	u32 int_data = btf_type_int(member_type);
886 	u32 struct_bits_off = member->offset;
887 	u32 struct_size = struct_type->size;
888 	u32 nr_copy_bits;
889 	u32 bytes_offset;
890 
891 	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
892 		btf_verifier_log_member(env, struct_type, member,
893 					"bits_offset exceeds U32_MAX");
894 		return -EINVAL;
895 	}
896 
897 	struct_bits_off += BTF_INT_OFFSET(int_data);
898 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
899 	nr_copy_bits = BTF_INT_BITS(int_data) +
900 		BITS_PER_BYTE_MASKED(struct_bits_off);
901 
902 	if (nr_copy_bits > BITS_PER_U64) {
903 		btf_verifier_log_member(env, struct_type, member,
904 					"nr_copy_bits exceeds 64");
905 		return -EINVAL;
906 	}
907 
908 	if (struct_size < bytes_offset ||
909 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
910 		btf_verifier_log_member(env, struct_type, member,
911 					"Member exceeds struct_size");
912 		return -EINVAL;
913 	}
914 
915 	return 0;
916 }
917 
918 static s32 btf_int_check_meta(struct btf_verifier_env *env,
919 			      const struct btf_type *t,
920 			      u32 meta_left)
921 {
922 	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
923 	u16 encoding;
924 
925 	if (meta_left < meta_needed) {
926 		btf_verifier_log_basic(env, t,
927 				       "meta_left:%u meta_needed:%u",
928 				       meta_left, meta_needed);
929 		return -EINVAL;
930 	}
931 
932 	if (btf_type_vlen(t)) {
933 		btf_verifier_log_type(env, t, "vlen != 0");
934 		return -EINVAL;
935 	}
936 
937 	int_data = btf_type_int(t);
938 	if (int_data & ~BTF_INT_MASK) {
939 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
940 				       int_data);
941 		return -EINVAL;
942 	}
943 
944 	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
945 
946 	if (nr_bits > BITS_PER_U64) {
947 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
948 				      BITS_PER_U64);
949 		return -EINVAL;
950 	}
951 
952 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
953 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
954 		return -EINVAL;
955 	}
956 
957 	/*
958 	 * Only one of the encoding bits is allowed and it
959 	 * should be sufficient for the pretty print purpose (i.e. decoding).
960 	 * Multiple bits can be allowed later if it is found
961 	 * to be insufficient.
962 	 */
963 	encoding = BTF_INT_ENCODING(int_data);
964 	if (encoding &&
965 	    encoding != BTF_INT_SIGNED &&
966 	    encoding != BTF_INT_CHAR &&
967 	    encoding != BTF_INT_BOOL) {
968 		btf_verifier_log_type(env, t, "Unsupported encoding");
969 		return -ENOTSUPP;
970 	}
971 
972 	btf_verifier_log_type(env, t, NULL);
973 
974 	return meta_needed;
975 }
976 
977 static void btf_int_log(struct btf_verifier_env *env,
978 			const struct btf_type *t)
979 {
980 	int int_data = btf_type_int(t);
981 
982 	btf_verifier_log(env,
983 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
984 			 t->size, BTF_INT_OFFSET(int_data),
985 			 BTF_INT_BITS(int_data),
986 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
987 }
988 
989 static void btf_int_bits_seq_show(const struct btf *btf,
990 				  const struct btf_type *t,
991 				  void *data, u8 bits_offset,
992 				  struct seq_file *m)
993 {
994 	u16 left_shift_bits, right_shift_bits;
995 	u32 int_data = btf_type_int(t);
996 	u8 nr_bits = BTF_INT_BITS(int_data);
997 	u8 total_bits_offset;
998 	u8 nr_copy_bytes;
999 	u8 nr_copy_bits;
1000 	u64 print_num;
1001 
1002 	/*
1003 	 * bits_offset is at most 7.
1004 	 * BTF_INT_OFFSET() cannot exceed 64 bits.
1005 	 */
1006 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1007 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1008 	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1009 	nr_copy_bits = nr_bits + bits_offset;
1010 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1011 
1012 	print_num = 0;
1013 	memcpy(&print_num, data, nr_copy_bytes);
1014 
1015 #ifdef __BIG_ENDIAN_BITFIELD
1016 	left_shift_bits = bits_offset;
1017 #else
1018 	left_shift_bits = BITS_PER_U64 - nr_copy_bits;
1019 #endif
1020 	right_shift_bits = BITS_PER_U64 - nr_bits;
1021 
1022 	print_num <<= left_shift_bits;
1023 	print_num >>= right_shift_bits;
1024 
1025 	seq_printf(m, "0x%llx", print_num);
1026 }
1027 
1028 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1029 			     u32 type_id, void *data, u8 bits_offset,
1030 			     struct seq_file *m)
1031 {
1032 	u32 int_data = btf_type_int(t);
1033 	u8 encoding = BTF_INT_ENCODING(int_data);
1034 	bool sign = encoding & BTF_INT_SIGNED;
1035 	u8 nr_bits = BTF_INT_BITS(int_data);
1036 
1037 	if (bits_offset || BTF_INT_OFFSET(int_data) ||
1038 	    BITS_PER_BYTE_MASKED(nr_bits)) {
1039 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1040 		return;
1041 	}
1042 
1043 	switch (nr_bits) {
1044 	case 64:
1045 		if (sign)
1046 			seq_printf(m, "%lld", *(s64 *)data);
1047 		else
1048 			seq_printf(m, "%llu", *(u64 *)data);
1049 		break;
1050 	case 32:
1051 		if (sign)
1052 			seq_printf(m, "%d", *(s32 *)data);
1053 		else
1054 			seq_printf(m, "%u", *(u32 *)data);
1055 		break;
1056 	case 16:
1057 		if (sign)
1058 			seq_printf(m, "%d", *(s16 *)data);
1059 		else
1060 			seq_printf(m, "%u", *(u16 *)data);
1061 		break;
1062 	case 8:
1063 		if (sign)
1064 			seq_printf(m, "%d", *(s8 *)data);
1065 		else
1066 			seq_printf(m, "%u", *(u8 *)data);
1067 		break;
1068 	default:
1069 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1070 	}
1071 }
1072 
1073 static const struct btf_kind_operations int_ops = {
1074 	.check_meta = btf_int_check_meta,
1075 	.resolve = btf_df_resolve,
1076 	.check_member = btf_int_check_member,
1077 	.log_details = btf_int_log,
1078 	.seq_show = btf_int_seq_show,
1079 };
1080 
1081 static int btf_modifier_check_member(struct btf_verifier_env *env,
1082 				     const struct btf_type *struct_type,
1083 				     const struct btf_member *member,
1084 				     const struct btf_type *member_type)
1085 {
1086 	const struct btf_type *resolved_type;
1087 	u32 resolved_type_id = member->type;
1088 	struct btf_member resolved_member;
1089 	struct btf *btf = env->btf;
1090 
1091 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1092 	if (!resolved_type) {
1093 		btf_verifier_log_member(env, struct_type, member,
1094 					"Invalid member");
1095 		return -EINVAL;
1096 	}
1097 
1098 	resolved_member = *member;
1099 	resolved_member.type = resolved_type_id;
1100 
1101 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
1102 							 &resolved_member,
1103 							 resolved_type);
1104 }
1105 
1106 static int btf_ptr_check_member(struct btf_verifier_env *env,
1107 				const struct btf_type *struct_type,
1108 				const struct btf_member *member,
1109 				const struct btf_type *member_type)
1110 {
1111 	u32 struct_size, struct_bits_off, bytes_offset;
1112 
1113 	struct_size = struct_type->size;
1114 	struct_bits_off = member->offset;
1115 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1116 
1117 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1118 		btf_verifier_log_member(env, struct_type, member,
1119 					"Member is not byte aligned");
1120 		return -EINVAL;
1121 	}
1122 
1123 	if (struct_size - bytes_offset < sizeof(void *)) {
1124 		btf_verifier_log_member(env, struct_type, member,
1125 					"Member exceeds struct_size");
1126 		return -EINVAL;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1133 				   const struct btf_type *t,
1134 				   u32 meta_left)
1135 {
1136 	if (btf_type_vlen(t)) {
1137 		btf_verifier_log_type(env, t, "vlen != 0");
1138 		return -EINVAL;
1139 	}
1140 
1141 	if (!BTF_TYPE_ID_VALID(t->type)) {
1142 		btf_verifier_log_type(env, t, "Invalid type_id");
1143 		return -EINVAL;
1144 	}
1145 
1146 	btf_verifier_log_type(env, t, NULL);
1147 
1148 	return 0;
1149 }
1150 
1151 static int btf_modifier_resolve(struct btf_verifier_env *env,
1152 				const struct resolve_vertex *v)
1153 {
1154 	const struct btf_type *t = v->t;
1155 	const struct btf_type *next_type;
1156 	u32 next_type_id = t->type;
1157 	struct btf *btf = env->btf;
1158 	u32 next_type_size = 0;
1159 
1160 	next_type = btf_type_by_id(btf, next_type_id);
1161 	if (!next_type) {
1162 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1163 		return -EINVAL;
1164 	}
1165 
1166 	/* "typedef void new_void", "const void"...etc */
1167 	if (btf_type_is_void(next_type))
1168 		goto resolved;
1169 
1170 	if (!env_type_is_resolve_sink(env, next_type) &&
1171 	    !env_type_is_resolved(env, next_type_id))
1172 		return env_stack_push(env, next_type, next_type_id);
1173 
1174 	/* Figure out the resolved next_type_id with size.
1175 	 * They will be stored in the current modifier's
1176 	 * resolved_ids and resolved_sizes such that it can
1177 	 * save us a few type-following when we use it later (e.g. in
1178 	 * pretty print).
1179 	 */
1180 	if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1181 	    !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1182 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1183 		return -EINVAL;
1184 	}
1185 
1186 resolved:
1187 	env_stack_pop_resolved(env, next_type_id, next_type_size);
1188 
1189 	return 0;
1190 }
1191 
1192 static int btf_ptr_resolve(struct btf_verifier_env *env,
1193 			   const struct resolve_vertex *v)
1194 {
1195 	const struct btf_type *next_type;
1196 	const struct btf_type *t = v->t;
1197 	u32 next_type_id = t->type;
1198 	struct btf *btf = env->btf;
1199 	u32 next_type_size = 0;
1200 
1201 	next_type = btf_type_by_id(btf, next_type_id);
1202 	if (!next_type) {
1203 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1204 		return -EINVAL;
1205 	}
1206 
1207 	/* "void *" */
1208 	if (btf_type_is_void(next_type))
1209 		goto resolved;
1210 
1211 	if (!env_type_is_resolve_sink(env, next_type) &&
1212 	    !env_type_is_resolved(env, next_type_id))
1213 		return env_stack_push(env, next_type, next_type_id);
1214 
1215 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1216 	 * the modifier may have stopped resolving when it was resolved
1217 	 * to a ptr (last-resolved-ptr).
1218 	 *
1219 	 * We now need to continue from the last-resolved-ptr to
1220 	 * ensure the last-resolved-ptr will not referring back to
1221 	 * the currenct ptr (t).
1222 	 */
1223 	if (btf_type_is_modifier(next_type)) {
1224 		const struct btf_type *resolved_type;
1225 		u32 resolved_type_id;
1226 
1227 		resolved_type_id = next_type_id;
1228 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1229 
1230 		if (btf_type_is_ptr(resolved_type) &&
1231 		    !env_type_is_resolve_sink(env, resolved_type) &&
1232 		    !env_type_is_resolved(env, resolved_type_id))
1233 			return env_stack_push(env, resolved_type,
1234 					      resolved_type_id);
1235 	}
1236 
1237 	if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1238 	    !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1239 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1240 		return -EINVAL;
1241 	}
1242 
1243 resolved:
1244 	env_stack_pop_resolved(env, next_type_id, 0);
1245 
1246 	return 0;
1247 }
1248 
1249 static void btf_modifier_seq_show(const struct btf *btf,
1250 				  const struct btf_type *t,
1251 				  u32 type_id, void *data,
1252 				  u8 bits_offset, struct seq_file *m)
1253 {
1254 	t = btf_type_id_resolve(btf, &type_id);
1255 
1256 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1257 }
1258 
1259 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1260 			     u32 type_id, void *data, u8 bits_offset,
1261 			     struct seq_file *m)
1262 {
1263 	/* It is a hashed value */
1264 	seq_printf(m, "%p", *(void **)data);
1265 }
1266 
1267 static void btf_ref_type_log(struct btf_verifier_env *env,
1268 			     const struct btf_type *t)
1269 {
1270 	btf_verifier_log(env, "type_id=%u", t->type);
1271 }
1272 
1273 static struct btf_kind_operations modifier_ops = {
1274 	.check_meta = btf_ref_type_check_meta,
1275 	.resolve = btf_modifier_resolve,
1276 	.check_member = btf_modifier_check_member,
1277 	.log_details = btf_ref_type_log,
1278 	.seq_show = btf_modifier_seq_show,
1279 };
1280 
1281 static struct btf_kind_operations ptr_ops = {
1282 	.check_meta = btf_ref_type_check_meta,
1283 	.resolve = btf_ptr_resolve,
1284 	.check_member = btf_ptr_check_member,
1285 	.log_details = btf_ref_type_log,
1286 	.seq_show = btf_ptr_seq_show,
1287 };
1288 
1289 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1290 			      const struct btf_type *t,
1291 			      u32 meta_left)
1292 {
1293 	if (btf_type_vlen(t)) {
1294 		btf_verifier_log_type(env, t, "vlen != 0");
1295 		return -EINVAL;
1296 	}
1297 
1298 	if (t->type) {
1299 		btf_verifier_log_type(env, t, "type != 0");
1300 		return -EINVAL;
1301 	}
1302 
1303 	btf_verifier_log_type(env, t, NULL);
1304 
1305 	return 0;
1306 }
1307 
1308 static struct btf_kind_operations fwd_ops = {
1309 	.check_meta = btf_fwd_check_meta,
1310 	.resolve = btf_df_resolve,
1311 	.check_member = btf_df_check_member,
1312 	.log_details = btf_ref_type_log,
1313 	.seq_show = btf_df_seq_show,
1314 };
1315 
1316 static int btf_array_check_member(struct btf_verifier_env *env,
1317 				  const struct btf_type *struct_type,
1318 				  const struct btf_member *member,
1319 				  const struct btf_type *member_type)
1320 {
1321 	u32 struct_bits_off = member->offset;
1322 	u32 struct_size, bytes_offset;
1323 	u32 array_type_id, array_size;
1324 	struct btf *btf = env->btf;
1325 
1326 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1327 		btf_verifier_log_member(env, struct_type, member,
1328 					"Member is not byte aligned");
1329 		return -EINVAL;
1330 	}
1331 
1332 	array_type_id = member->type;
1333 	btf_type_id_size(btf, &array_type_id, &array_size);
1334 	struct_size = struct_type->size;
1335 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1336 	if (struct_size - bytes_offset < array_size) {
1337 		btf_verifier_log_member(env, struct_type, member,
1338 					"Member exceeds struct_size");
1339 		return -EINVAL;
1340 	}
1341 
1342 	return 0;
1343 }
1344 
1345 static s32 btf_array_check_meta(struct btf_verifier_env *env,
1346 				const struct btf_type *t,
1347 				u32 meta_left)
1348 {
1349 	const struct btf_array *array = btf_type_array(t);
1350 	u32 meta_needed = sizeof(*array);
1351 
1352 	if (meta_left < meta_needed) {
1353 		btf_verifier_log_basic(env, t,
1354 				       "meta_left:%u meta_needed:%u",
1355 				       meta_left, meta_needed);
1356 		return -EINVAL;
1357 	}
1358 
1359 	if (btf_type_vlen(t)) {
1360 		btf_verifier_log_type(env, t, "vlen != 0");
1361 		return -EINVAL;
1362 	}
1363 
1364 	if (t->size) {
1365 		btf_verifier_log_type(env, t, "size != 0");
1366 		return -EINVAL;
1367 	}
1368 
1369 	/* Array elem type and index type cannot be in type void,
1370 	 * so !array->type and !array->index_type are not allowed.
1371 	 */
1372 	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
1373 		btf_verifier_log_type(env, t, "Invalid elem");
1374 		return -EINVAL;
1375 	}
1376 
1377 	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
1378 		btf_verifier_log_type(env, t, "Invalid index");
1379 		return -EINVAL;
1380 	}
1381 
1382 	btf_verifier_log_type(env, t, NULL);
1383 
1384 	return meta_needed;
1385 }
1386 
1387 static int btf_array_resolve(struct btf_verifier_env *env,
1388 			     const struct resolve_vertex *v)
1389 {
1390 	const struct btf_array *array = btf_type_array(v->t);
1391 	const struct btf_type *elem_type, *index_type;
1392 	u32 elem_type_id, index_type_id;
1393 	struct btf *btf = env->btf;
1394 	u32 elem_size;
1395 
1396 	/* Check array->index_type */
1397 	index_type_id = array->index_type;
1398 	index_type = btf_type_by_id(btf, index_type_id);
1399 	if (btf_type_is_void_or_null(index_type)) {
1400 		btf_verifier_log_type(env, v->t, "Invalid index");
1401 		return -EINVAL;
1402 	}
1403 
1404 	if (!env_type_is_resolve_sink(env, index_type) &&
1405 	    !env_type_is_resolved(env, index_type_id))
1406 		return env_stack_push(env, index_type, index_type_id);
1407 
1408 	index_type = btf_type_id_size(btf, &index_type_id, NULL);
1409 	if (!index_type || !btf_type_is_int(index_type) ||
1410 	    !btf_type_int_is_regular(index_type)) {
1411 		btf_verifier_log_type(env, v->t, "Invalid index");
1412 		return -EINVAL;
1413 	}
1414 
1415 	/* Check array->type */
1416 	elem_type_id = array->type;
1417 	elem_type = btf_type_by_id(btf, elem_type_id);
1418 	if (btf_type_is_void_or_null(elem_type)) {
1419 		btf_verifier_log_type(env, v->t,
1420 				      "Invalid elem");
1421 		return -EINVAL;
1422 	}
1423 
1424 	if (!env_type_is_resolve_sink(env, elem_type) &&
1425 	    !env_type_is_resolved(env, elem_type_id))
1426 		return env_stack_push(env, elem_type, elem_type_id);
1427 
1428 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1429 	if (!elem_type) {
1430 		btf_verifier_log_type(env, v->t, "Invalid elem");
1431 		return -EINVAL;
1432 	}
1433 
1434 	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
1435 		btf_verifier_log_type(env, v->t, "Invalid array of int");
1436 		return -EINVAL;
1437 	}
1438 
1439 	if (array->nelems && elem_size > U32_MAX / array->nelems) {
1440 		btf_verifier_log_type(env, v->t,
1441 				      "Array size overflows U32_MAX");
1442 		return -EINVAL;
1443 	}
1444 
1445 	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
1446 
1447 	return 0;
1448 }
1449 
1450 static void btf_array_log(struct btf_verifier_env *env,
1451 			  const struct btf_type *t)
1452 {
1453 	const struct btf_array *array = btf_type_array(t);
1454 
1455 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1456 			 array->type, array->index_type, array->nelems);
1457 }
1458 
1459 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1460 			       u32 type_id, void *data, u8 bits_offset,
1461 			       struct seq_file *m)
1462 {
1463 	const struct btf_array *array = btf_type_array(t);
1464 	const struct btf_kind_operations *elem_ops;
1465 	const struct btf_type *elem_type;
1466 	u32 i, elem_size, elem_type_id;
1467 
1468 	elem_type_id = array->type;
1469 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1470 	elem_ops = btf_type_ops(elem_type);
1471 	seq_puts(m, "[");
1472 	for (i = 0; i < array->nelems; i++) {
1473 		if (i)
1474 			seq_puts(m, ",");
1475 
1476 		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
1477 				   bits_offset, m);
1478 		data += elem_size;
1479 	}
1480 	seq_puts(m, "]");
1481 }
1482 
1483 static struct btf_kind_operations array_ops = {
1484 	.check_meta = btf_array_check_meta,
1485 	.resolve = btf_array_resolve,
1486 	.check_member = btf_array_check_member,
1487 	.log_details = btf_array_log,
1488 	.seq_show = btf_array_seq_show,
1489 };
1490 
1491 static int btf_struct_check_member(struct btf_verifier_env *env,
1492 				   const struct btf_type *struct_type,
1493 				   const struct btf_member *member,
1494 				   const struct btf_type *member_type)
1495 {
1496 	u32 struct_bits_off = member->offset;
1497 	u32 struct_size, bytes_offset;
1498 
1499 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1500 		btf_verifier_log_member(env, struct_type, member,
1501 					"Member is not byte aligned");
1502 		return -EINVAL;
1503 	}
1504 
1505 	struct_size = struct_type->size;
1506 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1507 	if (struct_size - bytes_offset < member_type->size) {
1508 		btf_verifier_log_member(env, struct_type, member,
1509 					"Member exceeds struct_size");
1510 		return -EINVAL;
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1517 				 const struct btf_type *t,
1518 				 u32 meta_left)
1519 {
1520 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1521 	const struct btf_member *member;
1522 	u32 meta_needed, last_offset;
1523 	struct btf *btf = env->btf;
1524 	u32 struct_size = t->size;
1525 	u16 i;
1526 
1527 	meta_needed = btf_type_vlen(t) * sizeof(*member);
1528 	if (meta_left < meta_needed) {
1529 		btf_verifier_log_basic(env, t,
1530 				       "meta_left:%u meta_needed:%u",
1531 				       meta_left, meta_needed);
1532 		return -EINVAL;
1533 	}
1534 
1535 	btf_verifier_log_type(env, t, NULL);
1536 
1537 	last_offset = 0;
1538 	for_each_member(i, t, member) {
1539 		if (!btf_name_offset_valid(btf, member->name_off)) {
1540 			btf_verifier_log_member(env, t, member,
1541 						"Invalid member name_offset:%u",
1542 						member->name_off);
1543 			return -EINVAL;
1544 		}
1545 
1546 		/* A member cannot be in type void */
1547 		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
1548 			btf_verifier_log_member(env, t, member,
1549 						"Invalid type_id");
1550 			return -EINVAL;
1551 		}
1552 
1553 		if (is_union && member->offset) {
1554 			btf_verifier_log_member(env, t, member,
1555 						"Invalid member bits_offset");
1556 			return -EINVAL;
1557 		}
1558 
1559 		/*
1560 		 * ">" instead of ">=" because the last member could be
1561 		 * "char a[0];"
1562 		 */
1563 		if (last_offset > member->offset) {
1564 			btf_verifier_log_member(env, t, member,
1565 						"Invalid member bits_offset");
1566 			return -EINVAL;
1567 		}
1568 
1569 		if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1570 			btf_verifier_log_member(env, t, member,
1571 						"Memmber bits_offset exceeds its struct size");
1572 			return -EINVAL;
1573 		}
1574 
1575 		btf_verifier_log_member(env, t, member, NULL);
1576 		last_offset = member->offset;
1577 	}
1578 
1579 	return meta_needed;
1580 }
1581 
1582 static int btf_struct_resolve(struct btf_verifier_env *env,
1583 			      const struct resolve_vertex *v)
1584 {
1585 	const struct btf_member *member;
1586 	int err;
1587 	u16 i;
1588 
1589 	/* Before continue resolving the next_member,
1590 	 * ensure the last member is indeed resolved to a
1591 	 * type with size info.
1592 	 */
1593 	if (v->next_member) {
1594 		const struct btf_type *last_member_type;
1595 		const struct btf_member *last_member;
1596 		u16 last_member_type_id;
1597 
1598 		last_member = btf_type_member(v->t) + v->next_member - 1;
1599 		last_member_type_id = last_member->type;
1600 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
1601 						       last_member_type_id)))
1602 			return -EINVAL;
1603 
1604 		last_member_type = btf_type_by_id(env->btf,
1605 						  last_member_type_id);
1606 		err = btf_type_ops(last_member_type)->check_member(env, v->t,
1607 							last_member,
1608 							last_member_type);
1609 		if (err)
1610 			return err;
1611 	}
1612 
1613 	for_each_member_from(i, v->next_member, v->t, member) {
1614 		u32 member_type_id = member->type;
1615 		const struct btf_type *member_type = btf_type_by_id(env->btf,
1616 								member_type_id);
1617 
1618 		if (btf_type_is_void_or_null(member_type)) {
1619 			btf_verifier_log_member(env, v->t, member,
1620 						"Invalid member");
1621 			return -EINVAL;
1622 		}
1623 
1624 		if (!env_type_is_resolve_sink(env, member_type) &&
1625 		    !env_type_is_resolved(env, member_type_id)) {
1626 			env_stack_set_next_member(env, i + 1);
1627 			return env_stack_push(env, member_type, member_type_id);
1628 		}
1629 
1630 		err = btf_type_ops(member_type)->check_member(env, v->t,
1631 							      member,
1632 							      member_type);
1633 		if (err)
1634 			return err;
1635 	}
1636 
1637 	env_stack_pop_resolved(env, 0, 0);
1638 
1639 	return 0;
1640 }
1641 
1642 static void btf_struct_log(struct btf_verifier_env *env,
1643 			   const struct btf_type *t)
1644 {
1645 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1646 }
1647 
1648 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
1649 				u32 type_id, void *data, u8 bits_offset,
1650 				struct seq_file *m)
1651 {
1652 	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
1653 	const struct btf_member *member;
1654 	u32 i;
1655 
1656 	seq_puts(m, "{");
1657 	for_each_member(i, t, member) {
1658 		const struct btf_type *member_type = btf_type_by_id(btf,
1659 								member->type);
1660 		u32 member_offset = member->offset;
1661 		u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
1662 		u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
1663 		const struct btf_kind_operations *ops;
1664 
1665 		if (i)
1666 			seq_puts(m, seq);
1667 
1668 		ops = btf_type_ops(member_type);
1669 		ops->seq_show(btf, member_type, member->type,
1670 			      data + bytes_offset, bits8_offset, m);
1671 	}
1672 	seq_puts(m, "}");
1673 }
1674 
1675 static struct btf_kind_operations struct_ops = {
1676 	.check_meta = btf_struct_check_meta,
1677 	.resolve = btf_struct_resolve,
1678 	.check_member = btf_struct_check_member,
1679 	.log_details = btf_struct_log,
1680 	.seq_show = btf_struct_seq_show,
1681 };
1682 
1683 static int btf_enum_check_member(struct btf_verifier_env *env,
1684 				 const struct btf_type *struct_type,
1685 				 const struct btf_member *member,
1686 				 const struct btf_type *member_type)
1687 {
1688 	u32 struct_bits_off = member->offset;
1689 	u32 struct_size, bytes_offset;
1690 
1691 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1692 		btf_verifier_log_member(env, struct_type, member,
1693 					"Member is not byte aligned");
1694 		return -EINVAL;
1695 	}
1696 
1697 	struct_size = struct_type->size;
1698 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1699 	if (struct_size - bytes_offset < sizeof(int)) {
1700 		btf_verifier_log_member(env, struct_type, member,
1701 					"Member exceeds struct_size");
1702 		return -EINVAL;
1703 	}
1704 
1705 	return 0;
1706 }
1707 
1708 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1709 			       const struct btf_type *t,
1710 			       u32 meta_left)
1711 {
1712 	const struct btf_enum *enums = btf_type_enum(t);
1713 	struct btf *btf = env->btf;
1714 	u16 i, nr_enums;
1715 	u32 meta_needed;
1716 
1717 	nr_enums = btf_type_vlen(t);
1718 	meta_needed = nr_enums * sizeof(*enums);
1719 
1720 	if (meta_left < meta_needed) {
1721 		btf_verifier_log_basic(env, t,
1722 				       "meta_left:%u meta_needed:%u",
1723 				       meta_left, meta_needed);
1724 		return -EINVAL;
1725 	}
1726 
1727 	if (t->size != sizeof(int)) {
1728 		btf_verifier_log_type(env, t, "Expected size:%zu",
1729 				      sizeof(int));
1730 		return -EINVAL;
1731 	}
1732 
1733 	btf_verifier_log_type(env, t, NULL);
1734 
1735 	for (i = 0; i < nr_enums; i++) {
1736 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
1737 			btf_verifier_log(env, "\tInvalid name_offset:%u",
1738 					 enums[i].name_off);
1739 			return -EINVAL;
1740 		}
1741 
1742 		btf_verifier_log(env, "\t%s val=%d\n",
1743 				 btf_name_by_offset(btf, enums[i].name_off),
1744 				 enums[i].val);
1745 	}
1746 
1747 	return meta_needed;
1748 }
1749 
1750 static void btf_enum_log(struct btf_verifier_env *env,
1751 			 const struct btf_type *t)
1752 {
1753 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1754 }
1755 
1756 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
1757 			      u32 type_id, void *data, u8 bits_offset,
1758 			      struct seq_file *m)
1759 {
1760 	const struct btf_enum *enums = btf_type_enum(t);
1761 	u32 i, nr_enums = btf_type_vlen(t);
1762 	int v = *(int *)data;
1763 
1764 	for (i = 0; i < nr_enums; i++) {
1765 		if (v == enums[i].val) {
1766 			seq_printf(m, "%s",
1767 				   btf_name_by_offset(btf, enums[i].name_off));
1768 			return;
1769 		}
1770 	}
1771 
1772 	seq_printf(m, "%d", v);
1773 }
1774 
1775 static struct btf_kind_operations enum_ops = {
1776 	.check_meta = btf_enum_check_meta,
1777 	.resolve = btf_df_resolve,
1778 	.check_member = btf_enum_check_member,
1779 	.log_details = btf_enum_log,
1780 	.seq_show = btf_enum_seq_show,
1781 };
1782 
1783 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
1784 	[BTF_KIND_INT] = &int_ops,
1785 	[BTF_KIND_PTR] = &ptr_ops,
1786 	[BTF_KIND_ARRAY] = &array_ops,
1787 	[BTF_KIND_STRUCT] = &struct_ops,
1788 	[BTF_KIND_UNION] = &struct_ops,
1789 	[BTF_KIND_ENUM] = &enum_ops,
1790 	[BTF_KIND_FWD] = &fwd_ops,
1791 	[BTF_KIND_TYPEDEF] = &modifier_ops,
1792 	[BTF_KIND_VOLATILE] = &modifier_ops,
1793 	[BTF_KIND_CONST] = &modifier_ops,
1794 	[BTF_KIND_RESTRICT] = &modifier_ops,
1795 };
1796 
1797 static s32 btf_check_meta(struct btf_verifier_env *env,
1798 			  const struct btf_type *t,
1799 			  u32 meta_left)
1800 {
1801 	u32 saved_meta_left = meta_left;
1802 	s32 var_meta_size;
1803 
1804 	if (meta_left < sizeof(*t)) {
1805 		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
1806 				 env->log_type_id, meta_left, sizeof(*t));
1807 		return -EINVAL;
1808 	}
1809 	meta_left -= sizeof(*t);
1810 
1811 	if (t->info & ~BTF_INFO_MASK) {
1812 		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
1813 				 env->log_type_id, t->info);
1814 		return -EINVAL;
1815 	}
1816 
1817 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
1818 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
1819 		btf_verifier_log(env, "[%u] Invalid kind:%u",
1820 				 env->log_type_id, BTF_INFO_KIND(t->info));
1821 		return -EINVAL;
1822 	}
1823 
1824 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
1825 		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
1826 				 env->log_type_id, t->name_off);
1827 		return -EINVAL;
1828 	}
1829 
1830 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
1831 	if (var_meta_size < 0)
1832 		return var_meta_size;
1833 
1834 	meta_left -= var_meta_size;
1835 
1836 	return saved_meta_left - meta_left;
1837 }
1838 
1839 static int btf_check_all_metas(struct btf_verifier_env *env)
1840 {
1841 	struct btf *btf = env->btf;
1842 	struct btf_header *hdr;
1843 	void *cur, *end;
1844 
1845 	hdr = &btf->hdr;
1846 	cur = btf->nohdr_data + hdr->type_off;
1847 	end = btf->nohdr_data + hdr->type_len;
1848 
1849 	env->log_type_id = 1;
1850 	while (cur < end) {
1851 		struct btf_type *t = cur;
1852 		s32 meta_size;
1853 
1854 		meta_size = btf_check_meta(env, t, end - cur);
1855 		if (meta_size < 0)
1856 			return meta_size;
1857 
1858 		btf_add_type(env, t);
1859 		cur += meta_size;
1860 		env->log_type_id++;
1861 	}
1862 
1863 	return 0;
1864 }
1865 
1866 static int btf_resolve(struct btf_verifier_env *env,
1867 		       const struct btf_type *t, u32 type_id)
1868 {
1869 	const struct resolve_vertex *v;
1870 	int err = 0;
1871 
1872 	env->resolve_mode = RESOLVE_TBD;
1873 	env_stack_push(env, t, type_id);
1874 	while (!err && (v = env_stack_peak(env))) {
1875 		env->log_type_id = v->type_id;
1876 		err = btf_type_ops(v->t)->resolve(env, v);
1877 	}
1878 
1879 	env->log_type_id = type_id;
1880 	if (err == -E2BIG)
1881 		btf_verifier_log_type(env, t,
1882 				      "Exceeded max resolving depth:%u",
1883 				      MAX_RESOLVE_DEPTH);
1884 	else if (err == -EEXIST)
1885 		btf_verifier_log_type(env, t, "Loop detected");
1886 
1887 	return err;
1888 }
1889 
1890 static bool btf_resolve_valid(struct btf_verifier_env *env,
1891 			      const struct btf_type *t,
1892 			      u32 type_id)
1893 {
1894 	struct btf *btf = env->btf;
1895 
1896 	if (!env_type_is_resolved(env, type_id))
1897 		return false;
1898 
1899 	if (btf_type_is_struct(t))
1900 		return !btf->resolved_ids[type_id] &&
1901 			!btf->resolved_sizes[type_id];
1902 
1903 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
1904 		t = btf_type_id_resolve(btf, &type_id);
1905 		return t && !btf_type_is_modifier(t);
1906 	}
1907 
1908 	if (btf_type_is_array(t)) {
1909 		const struct btf_array *array = btf_type_array(t);
1910 		const struct btf_type *elem_type;
1911 		u32 elem_type_id = array->type;
1912 		u32 elem_size;
1913 
1914 		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1915 		return elem_type && !btf_type_is_modifier(elem_type) &&
1916 			(array->nelems * elem_size ==
1917 			 btf->resolved_sizes[type_id]);
1918 	}
1919 
1920 	return false;
1921 }
1922 
1923 static int btf_check_all_types(struct btf_verifier_env *env)
1924 {
1925 	struct btf *btf = env->btf;
1926 	u32 type_id;
1927 	int err;
1928 
1929 	err = env_resolve_init(env);
1930 	if (err)
1931 		return err;
1932 
1933 	env->phase++;
1934 	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
1935 		const struct btf_type *t = btf_type_by_id(btf, type_id);
1936 
1937 		env->log_type_id = type_id;
1938 		if (btf_type_needs_resolve(t) &&
1939 		    !env_type_is_resolved(env, type_id)) {
1940 			err = btf_resolve(env, t, type_id);
1941 			if (err)
1942 				return err;
1943 		}
1944 
1945 		if (btf_type_needs_resolve(t) &&
1946 		    !btf_resolve_valid(env, t, type_id)) {
1947 			btf_verifier_log_type(env, t, "Invalid resolve state");
1948 			return -EINVAL;
1949 		}
1950 	}
1951 
1952 	return 0;
1953 }
1954 
1955 static int btf_parse_type_sec(struct btf_verifier_env *env)
1956 {
1957 	const struct btf_header *hdr = &env->btf->hdr;
1958 	int err;
1959 
1960 	/* Type section must align to 4 bytes */
1961 	if (hdr->type_off & (sizeof(u32) - 1)) {
1962 		btf_verifier_log(env, "Unaligned type_off");
1963 		return -EINVAL;
1964 	}
1965 
1966 	if (!hdr->type_len) {
1967 		btf_verifier_log(env, "No type found");
1968 		return -EINVAL;
1969 	}
1970 
1971 	err = btf_check_all_metas(env);
1972 	if (err)
1973 		return err;
1974 
1975 	return btf_check_all_types(env);
1976 }
1977 
1978 static int btf_parse_str_sec(struct btf_verifier_env *env)
1979 {
1980 	const struct btf_header *hdr;
1981 	struct btf *btf = env->btf;
1982 	const char *start, *end;
1983 
1984 	hdr = &btf->hdr;
1985 	start = btf->nohdr_data + hdr->str_off;
1986 	end = start + hdr->str_len;
1987 
1988 	if (end != btf->data + btf->data_size) {
1989 		btf_verifier_log(env, "String section is not at the end");
1990 		return -EINVAL;
1991 	}
1992 
1993 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
1994 	    start[0] || end[-1]) {
1995 		btf_verifier_log(env, "Invalid string section");
1996 		return -EINVAL;
1997 	}
1998 
1999 	btf->strings = start;
2000 
2001 	return 0;
2002 }
2003 
2004 static const size_t btf_sec_info_offset[] = {
2005 	offsetof(struct btf_header, type_off),
2006 	offsetof(struct btf_header, str_off),
2007 };
2008 
2009 static int btf_sec_info_cmp(const void *a, const void *b)
2010 {
2011 	const struct btf_sec_info *x = a;
2012 	const struct btf_sec_info *y = b;
2013 
2014 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
2015 }
2016 
2017 static int btf_check_sec_info(struct btf_verifier_env *env,
2018 			      u32 btf_data_size)
2019 {
2020 	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
2021 	u32 total, expected_total, i;
2022 	const struct btf_header *hdr;
2023 	const struct btf *btf;
2024 
2025 	btf = env->btf;
2026 	hdr = &btf->hdr;
2027 
2028 	/* Populate the secs from hdr */
2029 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
2030 		secs[i] = *(struct btf_sec_info *)((void *)hdr +
2031 						   btf_sec_info_offset[i]);
2032 
2033 	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
2034 	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
2035 
2036 	/* Check for gaps and overlap among sections */
2037 	total = 0;
2038 	expected_total = btf_data_size - hdr->hdr_len;
2039 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
2040 		if (expected_total < secs[i].off) {
2041 			btf_verifier_log(env, "Invalid section offset");
2042 			return -EINVAL;
2043 		}
2044 		if (total < secs[i].off) {
2045 			/* gap */
2046 			btf_verifier_log(env, "Unsupported section found");
2047 			return -EINVAL;
2048 		}
2049 		if (total > secs[i].off) {
2050 			btf_verifier_log(env, "Section overlap found");
2051 			return -EINVAL;
2052 		}
2053 		if (expected_total - total < secs[i].len) {
2054 			btf_verifier_log(env,
2055 					 "Total section length too long");
2056 			return -EINVAL;
2057 		}
2058 		total += secs[i].len;
2059 	}
2060 
2061 	/* There is data other than hdr and known sections */
2062 	if (expected_total != total) {
2063 		btf_verifier_log(env, "Unsupported section found");
2064 		return -EINVAL;
2065 	}
2066 
2067 	return 0;
2068 }
2069 
2070 static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data,
2071 			 u32 btf_data_size)
2072 {
2073 	const struct btf_header *hdr;
2074 	u32 hdr_len, hdr_copy;
2075 	/*
2076 	 * Minimal part of the "struct btf_header" that
2077 	 * contains the hdr_len.
2078 	 */
2079 	struct btf_min_header {
2080 		u16	magic;
2081 		u8	version;
2082 		u8	flags;
2083 		u32	hdr_len;
2084 	} __user *min_hdr;
2085 	struct btf *btf;
2086 	int err;
2087 
2088 	btf = env->btf;
2089 	min_hdr = btf_data;
2090 
2091 	if (btf_data_size < sizeof(*min_hdr)) {
2092 		btf_verifier_log(env, "hdr_len not found");
2093 		return -EINVAL;
2094 	}
2095 
2096 	if (get_user(hdr_len, &min_hdr->hdr_len))
2097 		return -EFAULT;
2098 
2099 	if (btf_data_size < hdr_len) {
2100 		btf_verifier_log(env, "btf_header not found");
2101 		return -EINVAL;
2102 	}
2103 
2104 	err = bpf_check_uarg_tail_zero(btf_data, sizeof(btf->hdr), hdr_len);
2105 	if (err) {
2106 		if (err == -E2BIG)
2107 			btf_verifier_log(env, "Unsupported btf_header");
2108 		return err;
2109 	}
2110 
2111 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
2112 	if (copy_from_user(&btf->hdr, btf_data, hdr_copy))
2113 		return -EFAULT;
2114 
2115 	hdr = &btf->hdr;
2116 
2117 	btf_verifier_log_hdr(env, btf_data_size);
2118 
2119 	if (hdr->magic != BTF_MAGIC) {
2120 		btf_verifier_log(env, "Invalid magic");
2121 		return -EINVAL;
2122 	}
2123 
2124 	if (hdr->version != BTF_VERSION) {
2125 		btf_verifier_log(env, "Unsupported version");
2126 		return -ENOTSUPP;
2127 	}
2128 
2129 	if (hdr->flags) {
2130 		btf_verifier_log(env, "Unsupported flags");
2131 		return -ENOTSUPP;
2132 	}
2133 
2134 	if (btf_data_size == hdr->hdr_len) {
2135 		btf_verifier_log(env, "No data");
2136 		return -EINVAL;
2137 	}
2138 
2139 	err = btf_check_sec_info(env, btf_data_size);
2140 	if (err)
2141 		return err;
2142 
2143 	return 0;
2144 }
2145 
2146 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
2147 			     u32 log_level, char __user *log_ubuf, u32 log_size)
2148 {
2149 	struct btf_verifier_env *env = NULL;
2150 	struct bpf_verifier_log *log;
2151 	struct btf *btf = NULL;
2152 	u8 *data;
2153 	int err;
2154 
2155 	if (btf_data_size > BTF_MAX_SIZE)
2156 		return ERR_PTR(-E2BIG);
2157 
2158 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
2159 	if (!env)
2160 		return ERR_PTR(-ENOMEM);
2161 
2162 	log = &env->log;
2163 	if (log_level || log_ubuf || log_size) {
2164 		/* user requested verbose verifier output
2165 		 * and supplied buffer to store the verification trace
2166 		 */
2167 		log->level = log_level;
2168 		log->ubuf = log_ubuf;
2169 		log->len_total = log_size;
2170 
2171 		/* log attributes have to be sane */
2172 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
2173 		    !log->level || !log->ubuf) {
2174 			err = -EINVAL;
2175 			goto errout;
2176 		}
2177 	}
2178 
2179 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
2180 	if (!btf) {
2181 		err = -ENOMEM;
2182 		goto errout;
2183 	}
2184 	env->btf = btf;
2185 
2186 	err = btf_parse_hdr(env, btf_data, btf_data_size);
2187 	if (err)
2188 		goto errout;
2189 
2190 	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
2191 	if (!data) {
2192 		err = -ENOMEM;
2193 		goto errout;
2194 	}
2195 
2196 	btf->data = data;
2197 	btf->data_size = btf_data_size;
2198 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
2199 
2200 	if (copy_from_user(data, btf_data, btf_data_size)) {
2201 		err = -EFAULT;
2202 		goto errout;
2203 	}
2204 
2205 	err = btf_parse_str_sec(env);
2206 	if (err)
2207 		goto errout;
2208 
2209 	err = btf_parse_type_sec(env);
2210 	if (err)
2211 		goto errout;
2212 
2213 	if (log->level && bpf_verifier_log_full(log)) {
2214 		err = -ENOSPC;
2215 		goto errout;
2216 	}
2217 
2218 	btf_verifier_env_free(env);
2219 	refcount_set(&btf->refcnt, 1);
2220 	return btf;
2221 
2222 errout:
2223 	btf_verifier_env_free(env);
2224 	if (btf)
2225 		btf_free(btf);
2226 	return ERR_PTR(err);
2227 }
2228 
2229 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
2230 		       struct seq_file *m)
2231 {
2232 	const struct btf_type *t = btf_type_by_id(btf, type_id);
2233 
2234 	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
2235 }
2236 
2237 static int btf_release(struct inode *inode, struct file *filp)
2238 {
2239 	btf_put(filp->private_data);
2240 	return 0;
2241 }
2242 
2243 const struct file_operations btf_fops = {
2244 	.release	= btf_release,
2245 };
2246 
2247 static int __btf_new_fd(struct btf *btf)
2248 {
2249 	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
2250 }
2251 
2252 int btf_new_fd(const union bpf_attr *attr)
2253 {
2254 	struct btf *btf;
2255 	int ret;
2256 
2257 	btf = btf_parse(u64_to_user_ptr(attr->btf),
2258 			attr->btf_size, attr->btf_log_level,
2259 			u64_to_user_ptr(attr->btf_log_buf),
2260 			attr->btf_log_size);
2261 	if (IS_ERR(btf))
2262 		return PTR_ERR(btf);
2263 
2264 	ret = btf_alloc_id(btf);
2265 	if (ret) {
2266 		btf_free(btf);
2267 		return ret;
2268 	}
2269 
2270 	/*
2271 	 * The BTF ID is published to the userspace.
2272 	 * All BTF free must go through call_rcu() from
2273 	 * now on (i.e. free by calling btf_put()).
2274 	 */
2275 
2276 	ret = __btf_new_fd(btf);
2277 	if (ret < 0)
2278 		btf_put(btf);
2279 
2280 	return ret;
2281 }
2282 
2283 struct btf *btf_get_by_fd(int fd)
2284 {
2285 	struct btf *btf;
2286 	struct fd f;
2287 
2288 	f = fdget(fd);
2289 
2290 	if (!f.file)
2291 		return ERR_PTR(-EBADF);
2292 
2293 	if (f.file->f_op != &btf_fops) {
2294 		fdput(f);
2295 		return ERR_PTR(-EINVAL);
2296 	}
2297 
2298 	btf = f.file->private_data;
2299 	refcount_inc(&btf->refcnt);
2300 	fdput(f);
2301 
2302 	return btf;
2303 }
2304 
2305 int btf_get_info_by_fd(const struct btf *btf,
2306 		       const union bpf_attr *attr,
2307 		       union bpf_attr __user *uattr)
2308 {
2309 	struct bpf_btf_info __user *uinfo;
2310 	struct bpf_btf_info info = {};
2311 	u32 info_copy, btf_copy;
2312 	void __user *ubtf;
2313 	u32 uinfo_len;
2314 
2315 	uinfo = u64_to_user_ptr(attr->info.info);
2316 	uinfo_len = attr->info.info_len;
2317 
2318 	info_copy = min_t(u32, uinfo_len, sizeof(info));
2319 	if (copy_from_user(&info, uinfo, info_copy))
2320 		return -EFAULT;
2321 
2322 	info.id = btf->id;
2323 	ubtf = u64_to_user_ptr(info.btf);
2324 	btf_copy = min_t(u32, btf->data_size, info.btf_size);
2325 	if (copy_to_user(ubtf, btf->data, btf_copy))
2326 		return -EFAULT;
2327 	info.btf_size = btf->data_size;
2328 
2329 	if (copy_to_user(uinfo, &info, info_copy) ||
2330 	    put_user(info_copy, &uattr->info.info_len))
2331 		return -EFAULT;
2332 
2333 	return 0;
2334 }
2335 
2336 int btf_get_fd_by_id(u32 id)
2337 {
2338 	struct btf *btf;
2339 	int fd;
2340 
2341 	rcu_read_lock();
2342 	btf = idr_find(&btf_idr, id);
2343 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
2344 		btf = ERR_PTR(-ENOENT);
2345 	rcu_read_unlock();
2346 
2347 	if (IS_ERR(btf))
2348 		return PTR_ERR(btf);
2349 
2350 	fd = __btf_new_fd(btf);
2351 	if (fd < 0)
2352 		btf_put(btf);
2353 
2354 	return fd;
2355 }
2356 
2357 u32 btf_id(const struct btf *btf)
2358 {
2359 	return btf->id;
2360 }
2361