xref: /linux/kernel/bpf/btf.c (revision df02351331671abb26788bc13f6d276e26ae068f)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/bpf.h>
23 #include <linux/bpf_lsm.h>
24 #include <linux/skmsg.h>
25 #include <linux/perf_event.h>
26 #include <linux/bsearch.h>
27 #include <linux/kobject.h>
28 #include <linux/sysfs.h>
29 
30 #include <net/netfilter/nf_bpf_link.h>
31 
32 #include <net/sock.h>
33 #include <net/xdp.h>
34 #include "../tools/lib/bpf/relo_core.h"
35 
36 /* BTF (BPF Type Format) is the metadata format which describes
37  * the data types of BPF programs/maps.  Hence, it basically focuses
38  * on the C programming language, which modern BPF primarily
39  * uses.
40  *
41  * ELF Section:
42  * ~~~~~~~~~~~
43  * The BTF data is stored under the ".BTF" ELF section
44  *
45  * struct btf_type:
46  * ~~~~~~~~~~~~~~~
47  * Each 'struct btf_type' object describes a C data type.
48  * Depending on the type it is describing, a 'struct btf_type'
49  * object may be followed by more data.  For example,
50  * to describe an array, 'struct btf_type' is followed by
51  * 'struct btf_array'.
52  *
53  * 'struct btf_type' and any extra data following it are
54  * 4 bytes aligned.
55  *
56  * Type section:
57  * ~~~~~~~~~~~~~
58  * The BTF type section contains a list of 'struct btf_type' objects.
59  * Each one describes a C type.  Recall from the above section
60  * that a 'struct btf_type' object could be immediately followed by extra
61  * data in order to describe some particular C types.
62  *
63  * type_id:
64  * ~~~~~~~
65  * Each btf_type object is identified by a type_id.  The type_id
66  * is implied by the location of the btf_type object in
67  * the BTF type section.  The first one has type_id 1.  The second
68  * one has type_id 2, and so on.  Hence, an earlier btf_type has
69  * a smaller type_id.
70  *
71  * A btf_type object may refer to another btf_type object by using
72  * type_id (i.e. the "type" in the "struct btf_type").
73  *
74  * NOTE that we cannot assume any reference-order.
75  * A btf_type object can refer to an earlier btf_type object
76  * but it can also refer to a later btf_type object.
77  *
78  * For example, to describe "const void *", a btf_type
79  * object describing "const" may refer to another btf_type
80  * object describing "void *".  This type-reference is done
81  * by specifying type_id:
82  *
83  * [1] CONST (anon) type_id=2
84  * [2] PTR (anon) type_id=0
85  *
86  * The above is the btf_verifier debug log:
87  *   - Each line starting with "[?]" is a btf_type object
88  *   - [?] is the type_id of the btf_type object.
89  *   - CONST/PTR is the BTF_KIND_XXX
90  *   - "(anon)" is the name of the type.  It just
91  *     happens that CONST and PTR have no name.
92  *   - type_id=XXX is the 'u32 type' in btf_type
93  *
94  * NOTE: "void" has type_id 0
95  *
96  * String section:
97  * ~~~~~~~~~~~~~~
98  * The BTF string section contains the names used by the type section.
99  * Each string is referred to by an "offset" from the beginning of the
100  * string section.
101  *
102  * Each string is '\0' terminated.
103  *
104  * The first character in the string section must be '\0'
105  * which is used to mean 'anonymous'. Some btf_type may not
106  * have a name.
107  */
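
/* As an illustration, "const int *" would need three btf_type objects:
 * a PTR referring to a CONST, which in turn refers to an INT.  In the
 * btf_verifier debug log described above this might appear as:
 *
 * [1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 * [2] CONST (anon) type_id=1
 * [3] PTR (anon) type_id=2
 *
 * (the type_ids shown are arbitrary; they depend on where the objects
 * land in the type section, and only the name "int" needs an entry in
 * the string section)
 */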
108 
109 /* BTF verification:
110  *
111  * To verify BTF data, two passes are needed.
112  *
113  * Pass #1
114  * ~~~~~~~
115  * The first pass is to collect all btf_type objects to
116  * an array: "btf->types".
117  *
118  * Depending on the C type that a btf_type is describing,
119  * a btf_type may be followed by extra data.  We don't know
120  * how many btf_types there are, and more importantly we don't
121  * know where each btf_type is located in the type section.
122  *
123  * Without knowing the location of each type_id, most verifications
124  * cannot be done.  e.g. an earlier btf_type may refer to a later
125  * btf_type (recall the "const void *" above), so we cannot
126  * check this type-reference in the first pass.
127  *
128  * The first pass still does some verifications (e.g.
129  * checking that the name is a valid offset into the string section).
130  *
131  * Pass #2
132  * ~~~~~~~
133  * The main focus is to resolve a btf_type that is referring
134  * to another type.
135  *
136  * We have to ensure the type being referred to:
137  * 1) does exist in the BTF (i.e. in btf->types[])
138  * 2) does not cause a loop:
139  *	struct A {
140  *		struct B b;
141  *	};
142  *
143  *	struct B {
144  *		struct A a;
145  *	};
146  *
147  * btf_type_needs_resolve() decides if a btf_type needs
148  * to be resolved.
149  *
150  * The needs_resolve type implements the "resolve()" ops which
151  * essentially does a DFS and detects backedges.
152  *
153  * During resolve (or DFS), different C types have different
154  * "RESOLVED" conditions.
155  *
156  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
157  * members because a member is always referring to another
158  * type.  A struct's member can be treated as "RESOLVED" if
159  * it is referring to a BTF_KIND_PTR.  Otherwise, the
160  * following valid C struct would be rejected:
161  *
162  *	struct A {
163  *		int m;
164  *		struct A *a;
165  *	};
166  *
167  * When resolving a BTF_KIND_PTR, it needs to keep resolving if
168  * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
169  * detect a pointer loop, e.g.:
170  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
171  *                        ^                                         |
172  *                        +-----------------------------------------+
173  *
174  */
175 
176 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
177 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
178 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
179 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
180 #define BITS_ROUNDUP_BYTES(bits) \
181 	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
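
/* Worked example: for a 12-bit value, BITS_ROUNDDOWN_BYTES(12) == 1 and
 * BITS_PER_BYTE_MASKED(12) == 4 (non-zero), so BITS_ROUNDUP_BYTES(12) == 2.
 * For a whole number of bytes, e.g. 32 bits, the masked part is 0 and
 * BITS_ROUNDUP_BYTES(32) == 4.
 */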
182 
183 #define BTF_INFO_MASK 0x9f00ffff
184 #define BTF_INT_MASK 0x0fffffff
185 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
186 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
187 
188 /* 16MB allows for 64k structs, each with 16 members, and
189  * a few MB of space for the string section.
190  * The hard limit is S32_MAX.
191  */
192 #define BTF_MAX_SIZE (16 * 1024 * 1024)
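
/* Rough sanity check of the estimate above, assuming 12-byte 'struct
 * btf_type' headers and 12-byte 'struct btf_member' entries:
 * 64k * (12 + 16 * 12) bytes is roughly 12.75MB, leaving about 3MB of
 * the 16MB budget for the string section.
 */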
193 
194 #define for_each_member_from(i, from, struct_type, member)		\
195 	for (i = from, member = btf_type_member(struct_type) + from;	\
196 	     i < btf_type_vlen(struct_type);				\
197 	     i++, member++)
198 
199 #define for_each_vsi_from(i, from, struct_type, member)				\
200 	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
201 	     i < btf_type_vlen(struct_type);					\
202 	     i++, member++)
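
/* Minimal usage sketch (caller-side names are hypothetical): resuming a
 * member walk from a saved position, as the struct resolve logic does
 * with v->next_member:
 *
 *	const struct btf_member *member;
 *	u32 i;
 *
 *	for_each_member_from(i, v->next_member, struct_type, member) {
 *		... examine member->type and member->offset ...
 *	}
 */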
203 
204 DEFINE_IDR(btf_idr);
205 DEFINE_SPINLOCK(btf_idr_lock);
206 
207 enum btf_kfunc_hook {
208 	BTF_KFUNC_HOOK_COMMON,
209 	BTF_KFUNC_HOOK_XDP,
210 	BTF_KFUNC_HOOK_TC,
211 	BTF_KFUNC_HOOK_STRUCT_OPS,
212 	BTF_KFUNC_HOOK_TRACING,
213 	BTF_KFUNC_HOOK_SYSCALL,
214 	BTF_KFUNC_HOOK_FMODRET,
215 	BTF_KFUNC_HOOK_CGROUP,
216 	BTF_KFUNC_HOOK_SCHED_ACT,
217 	BTF_KFUNC_HOOK_SK_SKB,
218 	BTF_KFUNC_HOOK_SOCKET_FILTER,
219 	BTF_KFUNC_HOOK_LWT,
220 	BTF_KFUNC_HOOK_NETFILTER,
221 	BTF_KFUNC_HOOK_KPROBE,
222 	BTF_KFUNC_HOOK_MAX,
223 };
224 
225 enum {
226 	BTF_KFUNC_SET_MAX_CNT = 256,
227 	BTF_DTOR_KFUNC_MAX_CNT = 256,
228 	BTF_KFUNC_FILTER_MAX_CNT = 16,
229 };
230 
231 struct btf_kfunc_hook_filter {
232 	btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
233 	u32 nr_filters;
234 };
235 
236 struct btf_kfunc_set_tab {
237 	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
238 	struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
239 };
240 
241 struct btf_id_dtor_kfunc_tab {
242 	u32 cnt;
243 	struct btf_id_dtor_kfunc dtors[];
244 };
245 
246 struct btf_struct_ops_tab {
247 	u32 cnt;
248 	u32 capacity;
249 	struct bpf_struct_ops_desc ops[];
250 };
251 
252 struct btf {
253 	void *data;
254 	struct btf_type **types;
255 	u32 *resolved_ids;
256 	u32 *resolved_sizes;
257 	const char *strings;
258 	void *nohdr_data;
259 	struct btf_header hdr;
260 	u32 nr_types; /* includes VOID for base BTF */
261 	u32 types_size;
262 	u32 data_size;
263 	refcount_t refcnt;
264 	u32 id;
265 	struct rcu_head rcu;
266 	struct btf_kfunc_set_tab *kfunc_set_tab;
267 	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
268 	struct btf_struct_metas *struct_meta_tab;
269 	struct btf_struct_ops_tab *struct_ops_tab;
270 
271 	/* split BTF support */
272 	struct btf *base_btf;
273 	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
274 	u32 start_str_off; /* first string offset (0 for base BTF) */
275 	char name[MODULE_NAME_LEN];
276 	bool kernel_btf;
277 	__u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
278 };
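
/* Split BTF example (illustrative): a kernel module's BTF sets base_btf to
 * the vmlinux BTF and continues its numbering, i.e. start_id is the number
 * of types in the base BTF and start_str_off is the base string section
 * length, so smaller type_ids and string offsets are looked up in the base
 * BTF instead.
 */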
279 
280 enum verifier_phase {
281 	CHECK_META,
282 	CHECK_TYPE,
283 };
284 
285 struct resolve_vertex {
286 	const struct btf_type *t;
287 	u32 type_id;
288 	u16 next_member;
289 };
290 
291 enum visit_state {
292 	NOT_VISITED,
293 	VISITED,
294 	RESOLVED,
295 };
296 
297 enum resolve_mode {
298 	RESOLVE_TBD,	/* To Be Determined */
299 	RESOLVE_PTR,	/* Resolving for Pointer */
300 	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
301 					 * or array
302 					 */
303 };
304 
305 #define MAX_RESOLVE_DEPTH 32
306 
307 struct btf_sec_info {
308 	u32 off;
309 	u32 len;
310 };
311 
312 struct btf_verifier_env {
313 	struct btf *btf;
314 	u8 *visit_states;
315 	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
316 	struct bpf_verifier_log log;
317 	u32 log_type_id;
318 	u32 top_stack;
319 	enum verifier_phase phase;
320 	enum resolve_mode resolve_mode;
321 };
322 
323 static const char * const btf_kind_str[NR_BTF_KINDS] = {
324 	[BTF_KIND_UNKN]		= "UNKNOWN",
325 	[BTF_KIND_INT]		= "INT",
326 	[BTF_KIND_PTR]		= "PTR",
327 	[BTF_KIND_ARRAY]	= "ARRAY",
328 	[BTF_KIND_STRUCT]	= "STRUCT",
329 	[BTF_KIND_UNION]	= "UNION",
330 	[BTF_KIND_ENUM]		= "ENUM",
331 	[BTF_KIND_FWD]		= "FWD",
332 	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
333 	[BTF_KIND_VOLATILE]	= "VOLATILE",
334 	[BTF_KIND_CONST]	= "CONST",
335 	[BTF_KIND_RESTRICT]	= "RESTRICT",
336 	[BTF_KIND_FUNC]		= "FUNC",
337 	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
338 	[BTF_KIND_VAR]		= "VAR",
339 	[BTF_KIND_DATASEC]	= "DATASEC",
340 	[BTF_KIND_FLOAT]	= "FLOAT",
341 	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
342 	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
343 	[BTF_KIND_ENUM64]	= "ENUM64",
344 };
345 
346 const char *btf_type_str(const struct btf_type *t)
347 {
348 	return btf_kind_str[BTF_INFO_KIND(t->info)];
349 }
350 
351 /* Chunk size we use in safe copy of data to be shown. */
352 #define BTF_SHOW_OBJ_SAFE_SIZE		32
353 
354 /*
355  * This is the maximum size of a base type value (equivalent to a
356  * 128-bit int); if we are at the end of our safe buffer and have
357  * less than 16 bytes of space we can't be assured of being able
358  * to copy the next type safely, so in such cases we will initiate
359  * a new copy.
360  */
361 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE	16
362 
363 /* Type name size */
364 #define BTF_SHOW_NAME_SIZE		80
365 
366 /*
367  * The suffix of a type that indicates it cannot alias another type when
368  * comparing BTF IDs for kfunc invocations.
369  */
370 #define NOCAST_ALIAS_SUFFIX		"___init"
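
/* For example, a kfunc argument typed as "struct nf_conn___init *" is not
 * considered interchangeable with a plain "struct nf_conn *" argument,
 * even though the names match once the "___init" suffix is stripped.
 */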
371 
372 /*
373  * Common data to all BTF show operations. Private show functions can add
374  * their own data to a structure containing a struct btf_show and consult it
375  * in the show callback.  See btf_type_show() below.
376  *
377  * One challenge with showing nested data is we want to skip 0-valued
378  * data, but in order to figure out whether a nested object is all zeros
379  * we need to walk through it.  As a result, we need to make two passes
380  * when handling structs, unions and arrays; the first pass simply looks
381  * for nonzero data, while the second actually does the display.  The first
382  * pass is signalled by show->state.depth_check being set, and if we
383  * encounter a non-zero value we set show->state.depth_to_show to
384  * the depth at which we encountered it.  When we have completed the
385  * first pass, we will know if anything needs to be displayed if
386  * depth_to_show > depth.  See btf_[struct,array]_show() for the
387  * implementation of this.
388  *
389  * Another problem is we want to ensure the data for display is safe to
390  * access.  To support this, the anonymous "struct {} obj" tracks the data
391  * object and our safe copy of it.  We copy portions of the data needed
392  * to the object "copy" buffer, but because its size is limited to
393  * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
394  * traverse larger objects for display.
395  *
396  * The various data type show functions all start with a call to
397  * btf_show_start_type() which returns a pointer to the safe copy
398  * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
399  * raw data itself).  btf_show_obj_safe() is responsible for
400  * using copy_from_kernel_nofault() to update the safe data if necessary
401  * as we traverse the object's data.  skbuff-like semantics are
402  * used:
403  *
404  * - obj.head points to the start of the toplevel object for display
405  * - obj.size is the size of the toplevel object
406  * - obj.data points to the current point in the original data at
407  *   which our safe data starts.  obj.data will advance as we copy
408  *   portions of the data.
409  *
410  * In most cases a single copy will suffice, but larger data structures
411  * such as "struct task_struct" will require many copies.  The logic in
412  * btf_show_obj_safe() determines whether a new
413  * copy_from_kernel_nofault() is needed.
414  */
415 struct btf_show {
416 	u64 flags;
417 	void *target;	/* target of show operation (seq file, buffer) */
418 	__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
419 	const struct btf *btf;
420 	/* below are used during iteration */
421 	struct {
422 		u8 depth;
423 		u8 depth_to_show;
424 		u8 depth_check;
425 		u8 array_member:1,
426 		   array_terminated:1;
427 		u16 array_encoding;
428 		u32 type_id;
429 		int status;			/* non-zero for error */
430 		const struct btf_type *type;
431 		const struct btf_member *member;
432 		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
433 	} state;
434 	struct {
435 		u32 size;
436 		void *head;
437 		void *data;
438 		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
439 	} obj;
440 };
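
/* As a rough illustration (exact formatting aside), showing a value of a
 * hypothetical "struct pair { int a, b; }" holding {1, 2} with
 * BTF_SHOW_COMPACT set would produce output along the lines of:
 *
 *	(struct pair){.a = (int)1,.b = (int)2,}
 *
 * while the default mode spreads the same members over indented lines.
 */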
441 
442 struct btf_kind_operations {
443 	s32 (*check_meta)(struct btf_verifier_env *env,
444 			  const struct btf_type *t,
445 			  u32 meta_left);
446 	int (*resolve)(struct btf_verifier_env *env,
447 		       const struct resolve_vertex *v);
448 	int (*check_member)(struct btf_verifier_env *env,
449 			    const struct btf_type *struct_type,
450 			    const struct btf_member *member,
451 			    const struct btf_type *member_type);
452 	int (*check_kflag_member)(struct btf_verifier_env *env,
453 				  const struct btf_type *struct_type,
454 				  const struct btf_member *member,
455 				  const struct btf_type *member_type);
456 	void (*log_details)(struct btf_verifier_env *env,
457 			    const struct btf_type *t);
458 	void (*show)(const struct btf *btf, const struct btf_type *t,
459 			 u32 type_id, void *data, u8 bits_offsets,
460 			 struct btf_show *show);
461 };
462 
463 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
464 static struct btf_type btf_void;
465 
466 static int btf_resolve(struct btf_verifier_env *env,
467 		       const struct btf_type *t, u32 type_id);
468 
469 static int btf_func_check(struct btf_verifier_env *env,
470 			  const struct btf_type *t);
471 
472 static bool btf_type_is_modifier(const struct btf_type *t)
473 {
474 	/* Some of them are not strictly C modifiers,
475 	 * but they are grouped into the same bucket
476 	 * for BTF's concern:
477 	 *   A type (t) that refers to another
478 	 *   type through t->type AND its size cannot
479 	 *   be determined without following the t->type.
480 	 *
481 	 * ptr does not fall into this bucket
482 	 * because its size is always sizeof(void *).
483 	 */
484 	switch (BTF_INFO_KIND(t->info)) {
485 	case BTF_KIND_TYPEDEF:
486 	case BTF_KIND_VOLATILE:
487 	case BTF_KIND_CONST:
488 	case BTF_KIND_RESTRICT:
489 	case BTF_KIND_TYPE_TAG:
490 		return true;
491 	}
492 
493 	return false;
494 }
495 
496 bool btf_type_is_void(const struct btf_type *t)
497 {
498 	return t == &btf_void;
499 }
500 
501 static bool btf_type_is_datasec(const struct btf_type *t)
502 {
503 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
504 }
505 
506 static bool btf_type_is_decl_tag(const struct btf_type *t)
507 {
508 	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
509 }
510 
511 static bool btf_type_nosize(const struct btf_type *t)
512 {
513 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
514 	       btf_type_is_func(t) || btf_type_is_func_proto(t) ||
515 	       btf_type_is_decl_tag(t);
516 }
517 
518 static bool btf_type_nosize_or_null(const struct btf_type *t)
519 {
520 	return !t || btf_type_nosize(t);
521 }
522 
523 static bool btf_type_is_decl_tag_target(const struct btf_type *t)
524 {
525 	return btf_type_is_func(t) || btf_type_is_struct(t) ||
526 	       btf_type_is_var(t) || btf_type_is_typedef(t);
527 }
528 
529 bool btf_is_vmlinux(const struct btf *btf)
530 {
531 	return btf->kernel_btf && !btf->base_btf;
532 }
533 
534 u32 btf_nr_types(const struct btf *btf)
535 {
536 	u32 total = 0;
537 
538 	while (btf) {
539 		total += btf->nr_types;
540 		btf = btf->base_btf;
541 	}
542 
543 	return total;
544 }
545 
546 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
547 {
548 	const struct btf_type *t;
549 	const char *tname;
550 	u32 i, total;
551 
552 	total = btf_nr_types(btf);
553 	for (i = 1; i < total; i++) {
554 		t = btf_type_by_id(btf, i);
555 		if (BTF_INFO_KIND(t->info) != kind)
556 			continue;
557 
558 		tname = btf_name_by_offset(btf, t->name_off);
559 		if (!strcmp(tname, name))
560 			return i;
561 	}
562 
563 	return -ENOENT;
564 }
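
/* Minimal usage sketch, assuming "btf" is the vmlinux BTF returned by
 * bpf_get_btf_vmlinux() (error handling elided):
 *
 *	const struct btf_type *t;
 *	s32 id;
 *
 *	id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *	if (id > 0)
 *		t = btf_type_by_id(btf, id);
 */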
565 
566 s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
567 {
568 	struct btf *btf;
569 	s32 ret;
570 	int id;
571 
572 	btf = bpf_get_btf_vmlinux();
573 	if (IS_ERR(btf))
574 		return PTR_ERR(btf);
575 	if (!btf)
576 		return -EINVAL;
577 
578 	ret = btf_find_by_name_kind(btf, name, kind);
579 	/* ret is never zero, since btf_find_by_name_kind returns
580 	 * positive btf_id or negative error.
581 	 */
582 	if (ret > 0) {
583 		btf_get(btf);
584 		*btf_p = btf;
585 		return ret;
586 	}
587 
588 	/* If name is not found in vmlinux's BTF then search in module's BTFs */
589 	spin_lock_bh(&btf_idr_lock);
590 	idr_for_each_entry(&btf_idr, btf, id) {
591 		if (!btf_is_module(btf))
592 			continue;
593 		/* linear search could be slow hence unlock/lock
594 		 * the IDR to avoid holding it for too long
595 		 */
596 		btf_get(btf);
597 		spin_unlock_bh(&btf_idr_lock);
598 		ret = btf_find_by_name_kind(btf, name, kind);
599 		if (ret > 0) {
600 			*btf_p = btf;
601 			return ret;
602 		}
603 		btf_put(btf);
604 		spin_lock_bh(&btf_idr_lock);
605 	}
606 	spin_unlock_bh(&btf_idr_lock);
607 	return ret;
608 }
609 EXPORT_SYMBOL_GPL(bpf_find_btf_id);
610 
611 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
612 					       u32 id, u32 *res_id)
613 {
614 	const struct btf_type *t = btf_type_by_id(btf, id);
615 
616 	while (btf_type_is_modifier(t)) {
617 		id = t->type;
618 		t = btf_type_by_id(btf, t->type);
619 	}
620 
621 	if (res_id)
622 		*res_id = id;
623 
624 	return t;
625 }
626 
627 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
628 					    u32 id, u32 *res_id)
629 {
630 	const struct btf_type *t;
631 
632 	t = btf_type_skip_modifiers(btf, id, NULL);
633 	if (!btf_type_is_ptr(t))
634 		return NULL;
635 
636 	return btf_type_skip_modifiers(btf, t->type, res_id);
637 }
638 
639 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
640 						 u32 id, u32 *res_id)
641 {
642 	const struct btf_type *ptype;
643 
644 	ptype = btf_type_resolve_ptr(btf, id, res_id);
645 	if (ptype && btf_type_is_func_proto(ptype))
646 		return ptype;
647 
648 	return NULL;
649 }
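
/* For example, for a struct_ops-style member declared as
 * "int (*release)(struct sock *sk)", resolving the member's type here skips
 * the PTR (and any modifiers) and returns the underlying FUNC_PROTO, or
 * NULL if the member is not a function pointer after all.
 */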
650 
651 /* Types that act only as a source, not sink or intermediate
652  * type when resolving.
653  */
654 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
655 {
656 	return btf_type_is_var(t) ||
657 	       btf_type_is_decl_tag(t) ||
658 	       btf_type_is_datasec(t);
659 }
660 
661 /* What types need to be resolved?
662  *
663  * btf_type_is_modifier() is an obvious one.
664  *
665  * btf_type_is_struct() because its member refers to
666  * another type (through member->type).
667  *
668  * btf_type_is_var() because the variable refers to
669  * another type. btf_type_is_datasec() holds multiple
670  * btf_type_is_var() types that need resolving.
671  *
672  * btf_type_is_array() because its element (array->type)
673  * refers to another type.  An array can be thought of as a
674  * special case of struct where the array just has the same
675  * member type repeated array->nelems times.
676  */
677 static bool btf_type_needs_resolve(const struct btf_type *t)
678 {
679 	return btf_type_is_modifier(t) ||
680 	       btf_type_is_ptr(t) ||
681 	       btf_type_is_struct(t) ||
682 	       btf_type_is_array(t) ||
683 	       btf_type_is_var(t) ||
684 	       btf_type_is_func(t) ||
685 	       btf_type_is_decl_tag(t) ||
686 	       btf_type_is_datasec(t);
687 }
688 
689 /* t->size can be used */
690 static bool btf_type_has_size(const struct btf_type *t)
691 {
692 	switch (BTF_INFO_KIND(t->info)) {
693 	case BTF_KIND_INT:
694 	case BTF_KIND_STRUCT:
695 	case BTF_KIND_UNION:
696 	case BTF_KIND_ENUM:
697 	case BTF_KIND_DATASEC:
698 	case BTF_KIND_FLOAT:
699 	case BTF_KIND_ENUM64:
700 		return true;
701 	}
702 
703 	return false;
704 }
705 
706 static const char *btf_int_encoding_str(u8 encoding)
707 {
708 	if (encoding == 0)
709 		return "(none)";
710 	else if (encoding == BTF_INT_SIGNED)
711 		return "SIGNED";
712 	else if (encoding == BTF_INT_CHAR)
713 		return "CHAR";
714 	else if (encoding == BTF_INT_BOOL)
715 		return "BOOL";
716 	else
717 		return "UNKN";
718 }
719 
720 static u32 btf_type_int(const struct btf_type *t)
721 {
722 	return *(u32 *)(t + 1);
723 }
724 
725 static const struct btf_array *btf_type_array(const struct btf_type *t)
726 {
727 	return (const struct btf_array *)(t + 1);
728 }
729 
730 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
731 {
732 	return (const struct btf_enum *)(t + 1);
733 }
734 
735 static const struct btf_var *btf_type_var(const struct btf_type *t)
736 {
737 	return (const struct btf_var *)(t + 1);
738 }
739 
740 static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
741 {
742 	return (const struct btf_decl_tag *)(t + 1);
743 }
744 
745 static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
746 {
747 	return (const struct btf_enum64 *)(t + 1);
748 }
749 
750 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
751 {
752 	return kind_ops[BTF_INFO_KIND(t->info)];
753 }
754 
755 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
756 {
757 	if (!BTF_STR_OFFSET_VALID(offset))
758 		return false;
759 
760 	while (offset < btf->start_str_off)
761 		btf = btf->base_btf;
762 
763 	offset -= btf->start_str_off;
764 	return offset < btf->hdr.str_len;
765 }
766 
767 static bool __btf_name_char_ok(char c, bool first)
768 {
769 	if ((first ? !isalpha(c) :
770 		     !isalnum(c)) &&
771 	    c != '_' &&
772 	    c != '.')
773 		return false;
774 	return true;
775 }
776 
777 const char *btf_str_by_offset(const struct btf *btf, u32 offset)
778 {
779 	while (offset < btf->start_str_off)
780 		btf = btf->base_btf;
781 
782 	offset -= btf->start_str_off;
783 	if (offset < btf->hdr.str_len)
784 		return &btf->strings[offset];
785 
786 	return NULL;
787 }
788 
789 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
790 {
791 	/* offset must be valid */
792 	const char *src = btf_str_by_offset(btf, offset);
793 	const char *src_limit;
794 
795 	if (!__btf_name_char_ok(*src, true))
796 		return false;
797 
798 	/* set a limit on identifier length */
799 	src_limit = src + KSYM_NAME_LEN;
800 	src++;
801 	while (*src && src < src_limit) {
802 		if (!__btf_name_char_ok(*src, false))
803 			return false;
804 		src++;
805 	}
806 
807 	return !*src;
808 }
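
/* For example, "task_struct", "__sk_buff" and "foo.bar" are accepted
 * (letters, digits, '_' and '.', not starting with a digit), while
 * "9lives" or a name containing a space is rejected.
 */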
809 
810 /* Allow any printable character in DATASEC names */
811 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
812 {
813 	/* offset must be valid */
814 	const char *src = btf_str_by_offset(btf, offset);
815 	const char *src_limit;
816 
817 	if (!*src)
818 		return false;
819 
820 	/* set a limit on identifier length */
821 	src_limit = src + KSYM_NAME_LEN;
822 	while (*src && src < src_limit) {
823 		if (!isprint(*src))
824 			return false;
825 		src++;
826 	}
827 
828 	return !*src;
829 }
830 
831 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
832 {
833 	const char *name;
834 
835 	if (!offset)
836 		return "(anon)";
837 
838 	name = btf_str_by_offset(btf, offset);
839 	return name ?: "(invalid-name-offset)";
840 }
841 
842 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
843 {
844 	return btf_str_by_offset(btf, offset);
845 }
846 
847 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
848 {
849 	while (type_id < btf->start_id)
850 		btf = btf->base_btf;
851 
852 	type_id -= btf->start_id;
853 	if (type_id >= btf->nr_types)
854 		return NULL;
855 	return btf->types[type_id];
856 }
857 EXPORT_SYMBOL_GPL(btf_type_by_id);
858 
859 /*
860  * Regular int is not a bit field and it must be either
861  * u8/u16/u32/u64 or __int128.
862  */
863 static bool btf_type_int_is_regular(const struct btf_type *t)
864 {
865 	u8 nr_bits, nr_bytes;
866 	u32 int_data;
867 
868 	int_data = btf_type_int(t);
869 	nr_bits = BTF_INT_BITS(int_data);
870 	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
871 	if (BITS_PER_BYTE_MASKED(nr_bits) ||
872 	    BTF_INT_OFFSET(int_data) ||
873 	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
874 	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
875 	     nr_bytes != (2 * sizeof(u64)))) {
876 		return false;
877 	}
878 
879 	return true;
880 }
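
/* For example, an INT with nr_bits == 32 and BTF_INT_OFFSET == 0 (a plain
 * u32/int) is regular; a 12-bit INT is not (not a whole number of bytes),
 * and neither is one carrying a non-zero BTF_INT_OFFSET.
 */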
881 
882 /*
883  * Check that a given struct member is a regular int with the expected
884  * offset and size.
885  */
886 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
887 			   const struct btf_member *m,
888 			   u32 expected_offset, u32 expected_size)
889 {
890 	const struct btf_type *t;
891 	u32 id, int_data;
892 	u8 nr_bits;
893 
894 	id = m->type;
895 	t = btf_type_id_size(btf, &id, NULL);
896 	if (!t || !btf_type_is_int(t))
897 		return false;
898 
899 	int_data = btf_type_int(t);
900 	nr_bits = BTF_INT_BITS(int_data);
901 	if (btf_type_kflag(s)) {
902 		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
903 		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
904 
905 		/* if kflag is set, the int should be a regular int and
906 		 * the bit offset should be at a byte boundary.
907 		 */
908 		return !bitfield_size &&
909 		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
910 		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
911 	}
912 
913 	if (BTF_INT_OFFSET(int_data) ||
914 	    BITS_PER_BYTE_MASKED(m->offset) ||
915 	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
916 	    BITS_PER_BYTE_MASKED(nr_bits) ||
917 	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
918 		return false;
919 
920 	return true;
921 }
922 
923 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
924 static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
925 						       u32 id)
926 {
927 	const struct btf_type *t = btf_type_by_id(btf, id);
928 
929 	while (btf_type_is_modifier(t) &&
930 	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
931 		t = btf_type_by_id(btf, t->type);
932 	}
933 
934 	return t;
935 }
936 
937 #define BTF_SHOW_MAX_ITER	10
938 
939 #define BTF_KIND_BIT(kind)	(1ULL << kind)
940 
941 /*
942  * Populate show->state.name with type name information.
943  * Format of type name is
944  *
945  * [.member_name = ] (type_name)
946  */
947 static const char *btf_show_name(struct btf_show *show)
948 {
949 	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
950 	const char *array_suffixes = "[][][][][][][][][][]";
951 	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
952 	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
953 	const char *ptr_suffixes = "**********";
954 	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
955 	const char *name = NULL, *prefix = "", *parens = "";
956 	const struct btf_member *m = show->state.member;
957 	const struct btf_type *t;
958 	const struct btf_array *array;
959 	u32 id = show->state.type_id;
960 	const char *member = NULL;
961 	bool show_member = false;
962 	u64 kinds = 0;
963 	int i;
964 
965 	show->state.name[0] = '\0';
966 
967 	/*
968 	 * Don't show type name if we're showing an array member;
969 	 * in that case we show the array type, so we don't need to repeat
970 	 * ourselves for each member.
971 	 */
972 	if (show->state.array_member)
973 		return "";
974 
975 	/* Retrieve member name, if any. */
976 	if (m) {
977 		member = btf_name_by_offset(show->btf, m->name_off);
978 		show_member = strlen(member) > 0;
979 		id = m->type;
980 	}
981 
982 	/*
983 	 * Start with type_id, as we have resolved the struct btf_type *
984 	 * via btf_modifier_show() past the parent typedef to the child
985 	 * struct, int etc it is defined as.  In such cases, the type_id
986 	 * still represents the starting type while the struct btf_type *
987 	 * in our show->state points at the resolved type of the typedef.
988 	 */
989 	t = btf_type_by_id(show->btf, id);
990 	if (!t)
991 		return "";
992 
993 	/*
994 	 * The goal here is to build up the right number of pointer and
995 	 * array suffixes while ensuring the type name for a typedef
996 	 * is represented.  Along the way we accumulate a list of
997 	 * BTF kinds we have encountered, since these will inform later
998 	 * display; for example, pointer types will not require an
999 	 * opening "{" for struct, we will just display the pointer value.
1000 	 *
1001 	 * We also want to accumulate the right number of pointer or array
1002 	 * indices in the format string while iterating until we get to
1003 	 * the typedef/pointee/array member target type.
1004 	 *
1005 	 * We start by pointing at the end of pointer and array suffix
1006 	 * strings; as we accumulate pointers and arrays we move the pointer
1007 	 * or array string backwards so it will show the expected number of
1008 	 * '*' or '[]' for the type.  BTF_SHOW_MAX_ITER levels of nesting of
1009 	 * pointers and/or arrays and typedefs are supported as a precaution.
1010 	 *
1011 	 * We also want to get the typedef name while proceeding to resolve
1012 	 * the type it points to, so that we can add parentheses if it is a
1013 	 * "typedef struct" etc.
1014 	 */
1015 	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
1016 
1017 		switch (BTF_INFO_KIND(t->info)) {
1018 		case BTF_KIND_TYPEDEF:
1019 			if (!name)
1020 				name = btf_name_by_offset(show->btf,
1021 							       t->name_off);
1022 			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
1023 			id = t->type;
1024 			break;
1025 		case BTF_KIND_ARRAY:
1026 			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
1027 			parens = "[";
1028 			if (!t)
1029 				return "";
1030 			array = btf_type_array(t);
1031 			if (array_suffix > array_suffixes)
1032 				array_suffix -= 2;
1033 			id = array->type;
1034 			break;
1035 		case BTF_KIND_PTR:
1036 			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
1037 			if (ptr_suffix > ptr_suffixes)
1038 				ptr_suffix -= 1;
1039 			id = t->type;
1040 			break;
1041 		default:
1042 			id = 0;
1043 			break;
1044 		}
1045 		if (!id)
1046 			break;
1047 		t = btf_type_skip_qualifiers(show->btf, id);
1048 	}
1049 	/* We may not be able to represent this type; bail to be safe */
1050 	if (i == BTF_SHOW_MAX_ITER)
1051 		return "";
1052 
1053 	if (!name)
1054 		name = btf_name_by_offset(show->btf, t->name_off);
1055 
1056 	switch (BTF_INFO_KIND(t->info)) {
1057 	case BTF_KIND_STRUCT:
1058 	case BTF_KIND_UNION:
1059 		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1060 			 "struct" : "union";
1061 		/* if it's an array of struct/union, parens is already set */
1062 		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
1063 			parens = "{";
1064 		break;
1065 	case BTF_KIND_ENUM:
1066 	case BTF_KIND_ENUM64:
1067 		prefix = "enum";
1068 		break;
1069 	default:
1070 		break;
1071 	}
1072 
1073 	/* pointer does not require parens */
1074 	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
1075 		parens = "";
1076 	/* typedef does not require struct/union/enum prefix */
1077 	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
1078 		prefix = "";
1079 
1080 	if (!name)
1081 		name = "";
1082 
1083 	/* Even if we don't want type name info, we want parentheses etc */
1084 	if (show->flags & BTF_SHOW_NONAME)
1085 		snprintf(show->state.name, sizeof(show->state.name), "%s",
1086 			 parens);
1087 	else
1088 		snprintf(show->state.name, sizeof(show->state.name),
1089 			 "%s%s%s(%s%s%s%s%s%s)%s",
1090 			 /* first 3 strings comprise ".member = " */
1091 			 show_member ? "." : "",
1092 			 show_member ? member : "",
1093 			 show_member ? " = " : "",
1094 			 /* ...next is our prefix (struct, enum, etc) */
1095 			 prefix,
1096 			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1097 			 /* ...this is the type name itself */
1098 			 name,
1099 			 /* ...suffixed by the appropriate '*', '[]' suffixes */
1100 			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
1101 			 array_suffix, parens);
1102 
1103 	return show->state.name;
1104 }
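
/* Illustrative results, assuming typical type chains: a member declared as
 * "struct sk_buff *skbs[4]" yields the name ".skbs = (struct sk_buff *[])",
 * while a typedef'd member such as "u32 len" yields ".len = (u32)".
 */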
1105 
1106 static const char *__btf_show_indent(struct btf_show *show)
1107 {
1108 	const char *indents = "                                ";
1109 	const char *indent = &indents[strlen(indents)];
1110 
1111 	if ((indent - show->state.depth) >= indents)
1112 		return indent - show->state.depth;
1113 	return indents;
1114 }
1115 
1116 static const char *btf_show_indent(struct btf_show *show)
1117 {
1118 	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1119 }
1120 
1121 static const char *btf_show_newline(struct btf_show *show)
1122 {
1123 	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1124 }
1125 
1126 static const char *btf_show_delim(struct btf_show *show)
1127 {
1128 	if (show->state.depth == 0)
1129 		return "";
1130 
1131 	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1132 		BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1133 		return "|";
1134 
1135 	return ",";
1136 }
1137 
1138 __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1139 {
1140 	va_list args;
1141 
1142 	if (!show->state.depth_check) {
1143 		va_start(args, fmt);
1144 		show->showfn(show, fmt, args);
1145 		va_end(args);
1146 	}
1147 }
1148 
1149 /* Macros are used here as btf_show_type_value[s]() prepends and appends
1150  * format specifiers to the format specifier passed in; these do the work of
1151  * adding indentation, delimiters etc while the caller simply has to specify
1152  * the type value(s) in the format specifier + value(s).
1153  */
1154 #define btf_show_type_value(show, fmt, value)				       \
1155 	do {								       \
1156 		if ((value) != (__typeof__(value))0 ||			       \
1157 		    (show->flags & BTF_SHOW_ZERO) ||			       \
1158 		    show->state.depth == 0) {				       \
1159 			btf_show(show, "%s%s" fmt "%s%s",		       \
1160 				 btf_show_indent(show),			       \
1161 				 btf_show_name(show),			       \
1162 				 value, btf_show_delim(show),		       \
1163 				 btf_show_newline(show));		       \
1164 			if (show->state.depth > show->state.depth_to_show)     \
1165 				show->state.depth_to_show = show->state.depth; \
1166 		}							       \
1167 	} while (0)
1168 
1169 #define btf_show_type_values(show, fmt, ...)				       \
1170 	do {								       \
1171 		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
1172 			 btf_show_name(show),				       \
1173 			 __VA_ARGS__, btf_show_delim(show),		       \
1174 			 btf_show_newline(show));			       \
1175 		if (show->state.depth > show->state.depth_to_show)	       \
1176 			show->state.depth_to_show = show->state.depth;	       \
1177 	} while (0)
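
/* A (hypothetical) kind-show helper would use these roughly as:
 *
 *	btf_show_type_value(show, "%d", *(s32 *)safe_data);
 *
 * which emits one line combining the indentation, the "[.member = ] (type)"
 * name built by btf_show_name() and the trailing delimiter/newline around
 * the formatted value.
 */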
1178 
1179 /* How much is left to copy to safe buffer after @data? */
1180 static int btf_show_obj_size_left(struct btf_show *show, void *data)
1181 {
1182 	return show->obj.head + show->obj.size - data;
1183 }
1184 
1185 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1186 static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1187 {
1188 	return data >= show->obj.data &&
1189 	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1190 }
1191 
1192 /*
1193  * If object pointed to by @data of @size falls within our safe buffer, return
1194  * the equivalent pointer to the same safe data.  Assumes
1195  * copy_from_kernel_nofault() has already happened and our safe buffer is
1196  * populated.
1197  */
1198 static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1199 {
1200 	if (btf_show_obj_is_safe(show, data, size))
1201 		return show->obj.safe + (data - show->obj.data);
1202 	return NULL;
1203 }
1204 
1205 /*
1206  * Return a safe-to-access version of data pointed to by @data.
1207  * We do this by copying the relevant amount of information
1208  * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1209  *
1210  * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1211  * safe copy is needed.
1212  *
1213  * Otherwise we need to determine if we have the required amount
1214  * of data (determined by the @data pointer and the size of the
1215  * largest base type we can encounter (represented by
1216  * BTF_SHOW_OBJ_BASE_TYPE_SIZE)). Having that much data ensures
1217  * that we will be able to print some of the current object,
1218  * and if more is needed a copy will be triggered.
1219  * Some objects such as structs will not fit into the buffer;
1220  * in such cases additional copies when we iterate over their
1221  * members may be needed.
1222  *
1223  * btf_show_obj_safe() is used to return a safe buffer for
1224  * btf_show_start_type(); this ensures that as we recurse into
1225  * nested types we always have safe data for the given type.
1226  * This approach is somewhat wasteful; it's possible for example
1227  * that when iterating over a large union we'll end up copying the
1228  * same data repeatedly, but the goal is safety not performance.
1229  * We use stack data as opposed to per-CPU buffers because the
1230  * iteration over a type can take some time, and preemption handling
1231  * would greatly complicate use of the safe buffer.
1232  */
1233 static void *btf_show_obj_safe(struct btf_show *show,
1234 			       const struct btf_type *t,
1235 			       void *data)
1236 {
1237 	const struct btf_type *rt;
1238 	int size_left, size;
1239 	void *safe = NULL;
1240 
1241 	if (show->flags & BTF_SHOW_UNSAFE)
1242 		return data;
1243 
1244 	rt = btf_resolve_size(show->btf, t, &size);
1245 	if (IS_ERR(rt)) {
1246 		show->state.status = PTR_ERR(rt);
1247 		return NULL;
1248 	}
1249 
1250 	/*
1251 	 * Is this toplevel object? If so, set total object size and
1252 	 * initialize pointers.  Otherwise check if we still fall within
1253 	 * our safe object data.
1254 	 */
1255 	if (show->state.depth == 0) {
1256 		show->obj.size = size;
1257 		show->obj.head = data;
1258 	} else {
1259 		/*
1260 		 * If the size of the current object is > our remaining
1261 		 * safe buffer we _may_ need to do a new copy.  However
1262 		 * consider the case of a nested struct; its size pushes
1263 		 * us over the safe buffer limit, but showing any individual
1264 		 * struct members does not.  In such cases, we don't need
1265 		 * to initiate a fresh copy yet; however we definitely need
1266 		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1267 		 * in our buffer, regardless of the current object size.
1268 		 * The logic here is that as we resolve types we will
1269 		 * hit a base type at some point, and we need to be sure
1270 		 * the next chunk of data is safely available to display
1271 		 * that type info safely.  We cannot rely on the size of
1272 		 * the current object here because it may be much larger
1273 		 * than our current buffer (e.g. task_struct is 8k).
1274 		 * All we want to do here is ensure that we can print the
1275 		 * next basic type, which we can if either
1276 		 * - the current type size is within the safe buffer; or
1277 		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1278 		 *   the safe buffer.
1279 		 */
1280 		safe = __btf_show_obj_safe(show, data,
1281 					   min(size,
1282 					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1283 	}
1284 
1285 	/*
1286 	 * We need a new copy to our safe object, either because we haven't
1287 	 * yet copied and are initializing safe data, or because the data
1288 	 * we want falls outside the boundaries of the safe object.
1289 	 */
1290 	if (!safe) {
1291 		size_left = btf_show_obj_size_left(show, data);
1292 		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1293 			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1294 		show->state.status = copy_from_kernel_nofault(show->obj.safe,
1295 							      data, size_left);
1296 		if (!show->state.status) {
1297 			show->obj.data = data;
1298 			safe = show->obj.safe;
1299 		}
1300 	}
1301 
1302 	return safe;
1303 }
1304 
1305 /*
1306  * Set the type we are starting to show and return a safe data pointer
1307  * to be used for showing the associated data.
1308  */
1309 static void *btf_show_start_type(struct btf_show *show,
1310 				 const struct btf_type *t,
1311 				 u32 type_id, void *data)
1312 {
1313 	show->state.type = t;
1314 	show->state.type_id = type_id;
1315 	show->state.name[0] = '\0';
1316 
1317 	return btf_show_obj_safe(show, t, data);
1318 }
1319 
1320 static void btf_show_end_type(struct btf_show *show)
1321 {
1322 	show->state.type = NULL;
1323 	show->state.type_id = 0;
1324 	show->state.name[0] = '\0';
1325 }
1326 
1327 static void *btf_show_start_aggr_type(struct btf_show *show,
1328 				      const struct btf_type *t,
1329 				      u32 type_id, void *data)
1330 {
1331 	void *safe_data = btf_show_start_type(show, t, type_id, data);
1332 
1333 	if (!safe_data)
1334 		return safe_data;
1335 
1336 	btf_show(show, "%s%s%s", btf_show_indent(show),
1337 		 btf_show_name(show),
1338 		 btf_show_newline(show));
1339 	show->state.depth++;
1340 	return safe_data;
1341 }
1342 
1343 static void btf_show_end_aggr_type(struct btf_show *show,
1344 				   const char *suffix)
1345 {
1346 	show->state.depth--;
1347 	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1348 		 btf_show_delim(show), btf_show_newline(show));
1349 	btf_show_end_type(show);
1350 }
1351 
1352 static void btf_show_start_member(struct btf_show *show,
1353 				  const struct btf_member *m)
1354 {
1355 	show->state.member = m;
1356 }
1357 
1358 static void btf_show_start_array_member(struct btf_show *show)
1359 {
1360 	show->state.array_member = 1;
1361 	btf_show_start_member(show, NULL);
1362 }
1363 
1364 static void btf_show_end_member(struct btf_show *show)
1365 {
1366 	show->state.member = NULL;
1367 }
1368 
1369 static void btf_show_end_array_member(struct btf_show *show)
1370 {
1371 	show->state.array_member = 0;
1372 	btf_show_end_member(show);
1373 }
1374 
1375 static void *btf_show_start_array_type(struct btf_show *show,
1376 				       const struct btf_type *t,
1377 				       u32 type_id,
1378 				       u16 array_encoding,
1379 				       void *data)
1380 {
1381 	show->state.array_encoding = array_encoding;
1382 	show->state.array_terminated = 0;
1383 	return btf_show_start_aggr_type(show, t, type_id, data);
1384 }
1385 
1386 static void btf_show_end_array_type(struct btf_show *show)
1387 {
1388 	show->state.array_encoding = 0;
1389 	show->state.array_terminated = 0;
1390 	btf_show_end_aggr_type(show, "]");
1391 }
1392 
1393 static void *btf_show_start_struct_type(struct btf_show *show,
1394 					const struct btf_type *t,
1395 					u32 type_id,
1396 					void *data)
1397 {
1398 	return btf_show_start_aggr_type(show, t, type_id, data);
1399 }
1400 
1401 static void btf_show_end_struct_type(struct btf_show *show)
1402 {
1403 	btf_show_end_aggr_type(show, "}");
1404 }
1405 
1406 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1407 					      const char *fmt, ...)
1408 {
1409 	va_list args;
1410 
1411 	va_start(args, fmt);
1412 	bpf_verifier_vlog(log, fmt, args);
1413 	va_end(args);
1414 }
1415 
1416 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1417 					    const char *fmt, ...)
1418 {
1419 	struct bpf_verifier_log *log = &env->log;
1420 	va_list args;
1421 
1422 	if (!bpf_verifier_log_needed(log))
1423 		return;
1424 
1425 	va_start(args, fmt);
1426 	bpf_verifier_vlog(log, fmt, args);
1427 	va_end(args);
1428 }
1429 
1430 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1431 						   const struct btf_type *t,
1432 						   bool log_details,
1433 						   const char *fmt, ...)
1434 {
1435 	struct bpf_verifier_log *log = &env->log;
1436 	struct btf *btf = env->btf;
1437 	va_list args;
1438 
1439 	if (!bpf_verifier_log_needed(log))
1440 		return;
1441 
1442 	if (log->level == BPF_LOG_KERNEL) {
1443 		/* btf verifier prints all types it is processing via
1444 		 * btf_verifier_log_type(..., fmt = NULL).
1445 		 * Skip those prints for in-kernel BTF verification.
1446 		 */
1447 		if (!fmt)
1448 			return;
1449 
1450 		/* Skip logging when loading module BTF with mismatches permitted */
1451 		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1452 			return;
1453 	}
1454 
1455 	__btf_verifier_log(log, "[%u] %s %s%s",
1456 			   env->log_type_id,
1457 			   btf_type_str(t),
1458 			   __btf_name_by_offset(btf, t->name_off),
1459 			   log_details ? " " : "");
1460 
1461 	if (log_details)
1462 		btf_type_ops(t)->log_details(env, t);
1463 
1464 	if (fmt && *fmt) {
1465 		__btf_verifier_log(log, " ");
1466 		va_start(args, fmt);
1467 		bpf_verifier_vlog(log, fmt, args);
1468 		va_end(args);
1469 	}
1470 
1471 	__btf_verifier_log(log, "\n");
1472 }
1473 
1474 #define btf_verifier_log_type(env, t, ...) \
1475 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1476 #define btf_verifier_log_basic(env, t, ...) \
1477 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1478 
1479 __printf(4, 5)
1480 static void btf_verifier_log_member(struct btf_verifier_env *env,
1481 				    const struct btf_type *struct_type,
1482 				    const struct btf_member *member,
1483 				    const char *fmt, ...)
1484 {
1485 	struct bpf_verifier_log *log = &env->log;
1486 	struct btf *btf = env->btf;
1487 	va_list args;
1488 
1489 	if (!bpf_verifier_log_needed(log))
1490 		return;
1491 
1492 	if (log->level == BPF_LOG_KERNEL) {
1493 		if (!fmt)
1494 			return;
1495 
1496 		/* Skip logging when loading module BTF with mismatches permitted */
1497 		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1498 			return;
1499 	}
1500 
1501 	/* The CHECK_META phase already did a btf dump.
1502 	 *
1503 	 * If the member is logged again, it must have hit an error in
1504 	 * parsing this member.  It is useful to print out which
1505 	 * struct this member belongs to.
1506 	 */
1507 	if (env->phase != CHECK_META)
1508 		btf_verifier_log_type(env, struct_type, NULL);
1509 
1510 	if (btf_type_kflag(struct_type))
1511 		__btf_verifier_log(log,
1512 				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1513 				   __btf_name_by_offset(btf, member->name_off),
1514 				   member->type,
1515 				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
1516 				   BTF_MEMBER_BIT_OFFSET(member->offset));
1517 	else
1518 		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1519 				   __btf_name_by_offset(btf, member->name_off),
1520 				   member->type, member->offset);
1521 
1522 	if (fmt && *fmt) {
1523 		__btf_verifier_log(log, " ");
1524 		va_start(args, fmt);
1525 		bpf_verifier_vlog(log, fmt, args);
1526 		va_end(args);
1527 	}
1528 
1529 	__btf_verifier_log(log, "\n");
1530 }
1531 
1532 __printf(4, 5)
1533 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1534 				 const struct btf_type *datasec_type,
1535 				 const struct btf_var_secinfo *vsi,
1536 				 const char *fmt, ...)
1537 {
1538 	struct bpf_verifier_log *log = &env->log;
1539 	va_list args;
1540 
1541 	if (!bpf_verifier_log_needed(log))
1542 		return;
1543 	if (log->level == BPF_LOG_KERNEL && !fmt)
1544 		return;
1545 	if (env->phase != CHECK_META)
1546 		btf_verifier_log_type(env, datasec_type, NULL);
1547 
1548 	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1549 			   vsi->type, vsi->offset, vsi->size);
1550 	if (fmt && *fmt) {
1551 		__btf_verifier_log(log, " ");
1552 		va_start(args, fmt);
1553 		bpf_verifier_vlog(log, fmt, args);
1554 		va_end(args);
1555 	}
1556 
1557 	__btf_verifier_log(log, "\n");
1558 }
1559 
1560 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1561 				 u32 btf_data_size)
1562 {
1563 	struct bpf_verifier_log *log = &env->log;
1564 	const struct btf *btf = env->btf;
1565 	const struct btf_header *hdr;
1566 
1567 	if (!bpf_verifier_log_needed(log))
1568 		return;
1569 
1570 	if (log->level == BPF_LOG_KERNEL)
1571 		return;
1572 	hdr = &btf->hdr;
1573 	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1574 	__btf_verifier_log(log, "version: %u\n", hdr->version);
1575 	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1576 	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1577 	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1578 	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1579 	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1580 	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1581 	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1582 }
1583 
1584 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1585 {
1586 	struct btf *btf = env->btf;
1587 
1588 	if (btf->types_size == btf->nr_types) {
1589 		/* Expand 'types' array */
1590 
1591 		struct btf_type **new_types;
1592 		u32 expand_by, new_size;
1593 
1594 		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1595 			btf_verifier_log(env, "Exceeded max num of types");
1596 			return -E2BIG;
1597 		}
1598 
1599 		expand_by = max_t(u32, btf->types_size >> 2, 16);
1600 		new_size = min_t(u32, BTF_MAX_TYPE,
1601 				 btf->types_size + expand_by);
1602 
1603 		new_types = kvcalloc(new_size, sizeof(*new_types),
1604 				     GFP_KERNEL | __GFP_NOWARN);
1605 		if (!new_types)
1606 			return -ENOMEM;
1607 
1608 		if (btf->nr_types == 0) {
1609 			if (!btf->base_btf) {
1610 				/* lazily init VOID type */
1611 				new_types[0] = &btf_void;
1612 				btf->nr_types++;
1613 			}
1614 		} else {
1615 			memcpy(new_types, btf->types,
1616 			       sizeof(*btf->types) * btf->nr_types);
1617 		}
1618 
1619 		kvfree(btf->types);
1620 		btf->types = new_types;
1621 		btf->types_size = new_size;
1622 	}
1623 
1624 	btf->types[btf->nr_types++] = t;
1625 
1626 	return 0;
1627 }
1628 
1629 static int btf_alloc_id(struct btf *btf)
1630 {
1631 	int id;
1632 
1633 	idr_preload(GFP_KERNEL);
1634 	spin_lock_bh(&btf_idr_lock);
1635 	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1636 	if (id > 0)
1637 		btf->id = id;
1638 	spin_unlock_bh(&btf_idr_lock);
1639 	idr_preload_end();
1640 
1641 	if (WARN_ON_ONCE(!id))
1642 		return -ENOSPC;
1643 
1644 	return id > 0 ? 0 : id;
1645 }
1646 
1647 static void btf_free_id(struct btf *btf)
1648 {
1649 	unsigned long flags;
1650 
1651 	/*
1652 	 * In map-in-map, calling map_delete_elem() on the outer
1653 	 * map will call bpf_map_put() on the inner map.
1654 	 * That will then eventually call btf_free_id()
1655 	 * on the inner map.  Some map_delete_elem()
1656 	 * implementations may run with irqs disabled, so
1657 	 * we need to use the _irqsave() version instead
1658 	 * of the _bh() version.
1659 	 */
1660 	spin_lock_irqsave(&btf_idr_lock, flags);
1661 	idr_remove(&btf_idr, btf->id);
1662 	spin_unlock_irqrestore(&btf_idr_lock, flags);
1663 }
1664 
1665 static void btf_free_kfunc_set_tab(struct btf *btf)
1666 {
1667 	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1668 	int hook;
1669 
1670 	if (!tab)
1671 		return;
1672 	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1673 		kfree(tab->sets[hook]);
1674 	kfree(tab);
1675 	btf->kfunc_set_tab = NULL;
1676 }
1677 
1678 static void btf_free_dtor_kfunc_tab(struct btf *btf)
1679 {
1680 	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1681 
1682 	if (!tab)
1683 		return;
1684 	kfree(tab);
1685 	btf->dtor_kfunc_tab = NULL;
1686 }
1687 
1688 static void btf_struct_metas_free(struct btf_struct_metas *tab)
1689 {
1690 	int i;
1691 
1692 	if (!tab)
1693 		return;
1694 	for (i = 0; i < tab->cnt; i++)
1695 		btf_record_free(tab->types[i].record);
1696 	kfree(tab);
1697 }
1698 
1699 static void btf_free_struct_meta_tab(struct btf *btf)
1700 {
1701 	struct btf_struct_metas *tab = btf->struct_meta_tab;
1702 
1703 	btf_struct_metas_free(tab);
1704 	btf->struct_meta_tab = NULL;
1705 }
1706 
1707 static void btf_free_struct_ops_tab(struct btf *btf)
1708 {
1709 	struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
1710 	u32 i;
1711 
1712 	if (!tab)
1713 		return;
1714 
1715 	for (i = 0; i < tab->cnt; i++)
1716 		bpf_struct_ops_desc_release(&tab->ops[i]);
1717 
1718 	kfree(tab);
1719 	btf->struct_ops_tab = NULL;
1720 }
1721 
1722 static void btf_free(struct btf *btf)
1723 {
1724 	btf_free_struct_meta_tab(btf);
1725 	btf_free_dtor_kfunc_tab(btf);
1726 	btf_free_kfunc_set_tab(btf);
1727 	btf_free_struct_ops_tab(btf);
1728 	kvfree(btf->types);
1729 	kvfree(btf->resolved_sizes);
1730 	kvfree(btf->resolved_ids);
1731 	/* vmlinux does not allocate btf->data, it simply points it at
1732 	 * __start_BTF.
1733 	 */
1734 	if (!btf_is_vmlinux(btf))
1735 		kvfree(btf->data);
1736 	kvfree(btf->base_id_map);
1737 	kfree(btf);
1738 }
1739 
1740 static void btf_free_rcu(struct rcu_head *rcu)
1741 {
1742 	struct btf *btf = container_of(rcu, struct btf, rcu);
1743 
1744 	btf_free(btf);
1745 }
1746 
1747 const char *btf_get_name(const struct btf *btf)
1748 {
1749 	return btf->name;
1750 }
1751 
1752 void btf_get(struct btf *btf)
1753 {
1754 	refcount_inc(&btf->refcnt);
1755 }
1756 
1757 void btf_put(struct btf *btf)
1758 {
1759 	if (btf && refcount_dec_and_test(&btf->refcnt)) {
1760 		btf_free_id(btf);
1761 		call_rcu(&btf->rcu, btf_free_rcu);
1762 	}
1763 }
1764 
1765 struct btf *btf_base_btf(const struct btf *btf)
1766 {
1767 	return btf->base_btf;
1768 }
1769 
1770 const struct btf_header *btf_header(const struct btf *btf)
1771 {
1772 	return &btf->hdr;
1773 }
1774 
1775 void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
1776 {
1777 	btf->base_btf = (struct btf *)base_btf;
1778 	btf->start_id = btf_nr_types(base_btf);
1779 	btf->start_str_off = base_btf->hdr.str_len;
1780 }
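/* Editor's illustrative sketch (not part of the original source): for split
 * BTF, e.g. a kernel module whose base is vmlinux BTF, the module's own type
 * ids and string offsets start right after the base's:
 *
 *	btf_set_base_btf(mod_btf, vmlinux_btf);
 *	// mod_btf->start_id      == btf_nr_types(vmlinux_btf)
 *	// mod_btf->start_str_off == vmlinux_btf->hdr.str_len
 *
 * so id and string lookups below start_id/start_str_off are forwarded to the
 * base BTF.
 */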
1781 
1782 static int env_resolve_init(struct btf_verifier_env *env)
1783 {
1784 	struct btf *btf = env->btf;
1785 	u32 nr_types = btf->nr_types;
1786 	u32 *resolved_sizes = NULL;
1787 	u32 *resolved_ids = NULL;
1788 	u8 *visit_states = NULL;
1789 
1790 	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1791 				  GFP_KERNEL | __GFP_NOWARN);
1792 	if (!resolved_sizes)
1793 		goto nomem;
1794 
1795 	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1796 				GFP_KERNEL | __GFP_NOWARN);
1797 	if (!resolved_ids)
1798 		goto nomem;
1799 
1800 	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1801 				GFP_KERNEL | __GFP_NOWARN);
1802 	if (!visit_states)
1803 		goto nomem;
1804 
1805 	btf->resolved_sizes = resolved_sizes;
1806 	btf->resolved_ids = resolved_ids;
1807 	env->visit_states = visit_states;
1808 
1809 	return 0;
1810 
1811 nomem:
1812 	kvfree(resolved_sizes);
1813 	kvfree(resolved_ids);
1814 	kvfree(visit_states);
1815 	return -ENOMEM;
1816 }
1817 
1818 static void btf_verifier_env_free(struct btf_verifier_env *env)
1819 {
1820 	kvfree(env->visit_states);
1821 	kfree(env);
1822 }
1823 
1824 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1825 				     const struct btf_type *next_type)
1826 {
1827 	switch (env->resolve_mode) {
1828 	case RESOLVE_TBD:
1829 		/* int, enum or void is a sink */
1830 		return !btf_type_needs_resolve(next_type);
1831 	case RESOLVE_PTR:
1832 		/* int, enum, void, struct, array, func or func_proto is a sink
1833 		 * for ptr
1834 		 */
1835 		return !btf_type_is_modifier(next_type) &&
1836 			!btf_type_is_ptr(next_type);
1837 	case RESOLVE_STRUCT_OR_ARRAY:
1838 		/* int, enum, void, ptr, func or func_proto is a sink
1839 		 * for struct and array
1840 		 */
1841 		return !btf_type_is_modifier(next_type) &&
1842 			!btf_type_is_array(next_type) &&
1843 			!btf_type_is_struct(next_type);
1844 	default:
1845 		BUG();
1846 	}
1847 }
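/* Editor's illustrative note (not from the original source): while resolving
 * a chain such as "const volatile int", the walk starts in RESOLVE_TBD,
 * pushes CONST and then VOLATILE onto the resolve stack, and stops at INT
 * because an int is a sink for every resolve mode; the VOLATILE and CONST
 * entries are then popped via env_stack_pop_resolved() with the int's
 * type_id and size recorded in resolved_ids/resolved_sizes.
 */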
1848 
1849 static bool env_type_is_resolved(const struct btf_verifier_env *env,
1850 				 u32 type_id)
1851 {
1852 	/* base BTF types should be resolved by now */
1853 	if (type_id < env->btf->start_id)
1854 		return true;
1855 
1856 	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1857 }
1858 
1859 static int env_stack_push(struct btf_verifier_env *env,
1860 			  const struct btf_type *t, u32 type_id)
1861 {
1862 	const struct btf *btf = env->btf;
1863 	struct resolve_vertex *v;
1864 
1865 	if (env->top_stack == MAX_RESOLVE_DEPTH)
1866 		return -E2BIG;
1867 
1868 	if (type_id < btf->start_id
1869 	    || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1870 		return -EEXIST;
1871 
1872 	env->visit_states[type_id - btf->start_id] = VISITED;
1873 
1874 	v = &env->stack[env->top_stack++];
1875 	v->t = t;
1876 	v->type_id = type_id;
1877 	v->next_member = 0;
1878 
1879 	if (env->resolve_mode == RESOLVE_TBD) {
1880 		if (btf_type_is_ptr(t))
1881 			env->resolve_mode = RESOLVE_PTR;
1882 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1883 			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1884 	}
1885 
1886 	return 0;
1887 }
1888 
1889 static void env_stack_set_next_member(struct btf_verifier_env *env,
1890 				      u16 next_member)
1891 {
1892 	env->stack[env->top_stack - 1].next_member = next_member;
1893 }
1894 
1895 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1896 				   u32 resolved_type_id,
1897 				   u32 resolved_size)
1898 {
1899 	u32 type_id = env->stack[--(env->top_stack)].type_id;
1900 	struct btf *btf = env->btf;
1901 
1902 	type_id -= btf->start_id; /* adjust to local type id */
1903 	btf->resolved_sizes[type_id] = resolved_size;
1904 	btf->resolved_ids[type_id] = resolved_type_id;
1905 	env->visit_states[type_id] = RESOLVED;
1906 }
1907 
1908 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1909 {
1910 	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1911 }
1912 
1913 /* Resolve the size of a passed-in "type"
1914  *
1915  * type: is an array (e.g. u32 array[x][y])
1916  * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1917  * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
1918  *             corresponds to the return type.
1919  * *elem_type: u32
1920  * *elem_id: id of u32
1921  * *total_nelems: (x * y).  Hence, individual elem size is
1922  *                (*type_size / *total_nelems)
1923  * *type_id: id of type if it's changed within the function, 0 if not
1924  *
1925  * type: is not an array (e.g. const struct X)
1926  * return type: type "struct X"
1927  * *type_size: sizeof(struct X)
1928  * *elem_type: same as return type ("struct X")
1929  * *elem_id: 0
1930  * *total_nelems: 1
1931  * *type_id: id of type if it's changed within the function, 0 if not
1932  */
1933 static const struct btf_type *
1934 __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1935 		   u32 *type_size, const struct btf_type **elem_type,
1936 		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
1937 {
1938 	const struct btf_type *array_type = NULL;
1939 	const struct btf_array *array = NULL;
1940 	u32 i, size, nelems = 1, id = 0;
1941 
1942 	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1943 		switch (BTF_INFO_KIND(type->info)) {
1944 		/* type->size can be used */
1945 		case BTF_KIND_INT:
1946 		case BTF_KIND_STRUCT:
1947 		case BTF_KIND_UNION:
1948 		case BTF_KIND_ENUM:
1949 		case BTF_KIND_FLOAT:
1950 		case BTF_KIND_ENUM64:
1951 			size = type->size;
1952 			goto resolved;
1953 
1954 		case BTF_KIND_PTR:
1955 			size = sizeof(void *);
1956 			goto resolved;
1957 
1958 		/* Modifiers */
1959 		case BTF_KIND_TYPEDEF:
1960 		case BTF_KIND_VOLATILE:
1961 		case BTF_KIND_CONST:
1962 		case BTF_KIND_RESTRICT:
1963 		case BTF_KIND_TYPE_TAG:
1964 			id = type->type;
1965 			type = btf_type_by_id(btf, type->type);
1966 			break;
1967 
1968 		case BTF_KIND_ARRAY:
1969 			if (!array_type)
1970 				array_type = type;
1971 			array = btf_type_array(type);
1972 			if (nelems && array->nelems > U32_MAX / nelems)
1973 				return ERR_PTR(-EINVAL);
1974 			nelems *= array->nelems;
1975 			type = btf_type_by_id(btf, array->type);
1976 			break;
1977 
1978 		/* type without size */
1979 		default:
1980 			return ERR_PTR(-EINVAL);
1981 		}
1982 	}
1983 
1984 	return ERR_PTR(-EINVAL);
1985 
1986 resolved:
1987 	if (nelems && size > U32_MAX / nelems)
1988 		return ERR_PTR(-EINVAL);
1989 
1990 	*type_size = nelems * size;
1991 	if (total_nelems)
1992 		*total_nelems = nelems;
1993 	if (elem_type)
1994 		*elem_type = type;
1995 	if (elem_id)
1996 		*elem_id = array ? array->type : 0;
1997 	if (type_id && id)
1998 		*type_id = id;
1999 
2000 	return array_type ? : type;
2001 }
2002 
2003 const struct btf_type *
2004 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
2005 		 u32 *type_size)
2006 {
2007 	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
2008 }
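/* Editor's usage sketch (illustrative only, assuming a type "u32 arr[2][3]"):
 *
 *	u32 sz;
 *	const struct btf_type *rt = btf_resolve_size(btf, t, &sz);
 *
 *	if (!IS_ERR(rt))
 *		;	// rt is the outermost ARRAY type, sz == 2 * 3 * 4 == 24
 *
 * Errors are reported as ERR_PTR() values, mirroring __btf_resolve_size().
 */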
2009 
2010 static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
2011 {
2012 	while (type_id < btf->start_id)
2013 		btf = btf->base_btf;
2014 
2015 	return btf->resolved_ids[type_id - btf->start_id];
2016 }
2017 
2018 /* The input param "type_id" must point to a needs_resolve type */
2019 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
2020 						  u32 *type_id)
2021 {
2022 	*type_id = btf_resolved_type_id(btf, *type_id);
2023 	return btf_type_by_id(btf, *type_id);
2024 }
2025 
2026 static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
2027 {
2028 	while (type_id < btf->start_id)
2029 		btf = btf->base_btf;
2030 
2031 	return btf->resolved_sizes[type_id - btf->start_id];
2032 }
2033 
2034 const struct btf_type *btf_type_id_size(const struct btf *btf,
2035 					u32 *type_id, u32 *ret_size)
2036 {
2037 	const struct btf_type *size_type;
2038 	u32 size_type_id = *type_id;
2039 	u32 size = 0;
2040 
2041 	size_type = btf_type_by_id(btf, size_type_id);
2042 	if (btf_type_nosize_or_null(size_type))
2043 		return NULL;
2044 
2045 	if (btf_type_has_size(size_type)) {
2046 		size = size_type->size;
2047 	} else if (btf_type_is_array(size_type)) {
2048 		size = btf_resolved_type_size(btf, size_type_id);
2049 	} else if (btf_type_is_ptr(size_type)) {
2050 		size = sizeof(void *);
2051 	} else {
2052 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
2053 				 !btf_type_is_var(size_type)))
2054 			return NULL;
2055 
2056 		size_type_id = btf_resolved_type_id(btf, size_type_id);
2057 		size_type = btf_type_by_id(btf, size_type_id);
2058 		if (btf_type_nosize_or_null(size_type))
2059 			return NULL;
2060 		else if (btf_type_has_size(size_type))
2061 			size = size_type->size;
2062 		else if (btf_type_is_array(size_type))
2063 			size = btf_resolved_type_size(btf, size_type_id);
2064 		else if (btf_type_is_ptr(size_type))
2065 			size = sizeof(void *);
2066 		else
2067 			return NULL;
2068 	}
2069 
2070 	*type_id = size_type_id;
2071 	if (ret_size)
2072 		*ret_size = size;
2073 
2074 	return size_type;
2075 }
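/* Editor's illustrative note (not part of the original source): for a
 * modifier chain such as "typedef struct foo foo_t", calling
 * btf_type_id_size() with *type_id pointing at the TYPEDEF follows the
 * resolved_ids mapping built during resolve, updates *type_id to the id of
 * "struct foo" and sets *ret_size to its size, returning the struct type.
 */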
2076 
2077 static int btf_df_check_member(struct btf_verifier_env *env,
2078 			       const struct btf_type *struct_type,
2079 			       const struct btf_member *member,
2080 			       const struct btf_type *member_type)
2081 {
2082 	btf_verifier_log_basic(env, struct_type,
2083 			       "Unsupported check_member");
2084 	return -EINVAL;
2085 }
2086 
2087 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
2088 				     const struct btf_type *struct_type,
2089 				     const struct btf_member *member,
2090 				     const struct btf_type *member_type)
2091 {
2092 	btf_verifier_log_basic(env, struct_type,
2093 			       "Unsupported check_kflag_member");
2094 	return -EINVAL;
2095 }
2096 
2097 /* Used for ptr, array, struct/union and float type members.
2098  * int, enum and modifier types have their specific callback functions.
2099  */
2100 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
2101 					  const struct btf_type *struct_type,
2102 					  const struct btf_member *member,
2103 					  const struct btf_type *member_type)
2104 {
2105 	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2106 		btf_verifier_log_member(env, struct_type, member,
2107 					"Invalid member bitfield_size");
2108 		return -EINVAL;
2109 	}
2110 
2111 	/* bitfield size is 0, so member->offset represents bit offset only.
2112 	 * It is safe to call the non-kflag check_member variants.
2113 	 */
2114 	return btf_type_ops(member_type)->check_member(env, struct_type,
2115 						       member,
2116 						       member_type);
2117 }
2118 
2119 static int btf_df_resolve(struct btf_verifier_env *env,
2120 			  const struct resolve_vertex *v)
2121 {
2122 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2123 	return -EINVAL;
2124 }
2125 
2126 static void btf_df_show(const struct btf *btf, const struct btf_type *t,
2127 			u32 type_id, void *data, u8 bits_offsets,
2128 			struct btf_show *show)
2129 {
2130 	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2131 }
2132 
2133 static int btf_int_check_member(struct btf_verifier_env *env,
2134 				const struct btf_type *struct_type,
2135 				const struct btf_member *member,
2136 				const struct btf_type *member_type)
2137 {
2138 	u32 int_data = btf_type_int(member_type);
2139 	u32 struct_bits_off = member->offset;
2140 	u32 struct_size = struct_type->size;
2141 	u32 nr_copy_bits;
2142 	u32 bytes_offset;
2143 
2144 	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2145 		btf_verifier_log_member(env, struct_type, member,
2146 					"bits_offset exceeds U32_MAX");
2147 		return -EINVAL;
2148 	}
2149 
2150 	struct_bits_off += BTF_INT_OFFSET(int_data);
2151 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2152 	nr_copy_bits = BTF_INT_BITS(int_data) +
2153 		BITS_PER_BYTE_MASKED(struct_bits_off);
2154 
2155 	if (nr_copy_bits > BITS_PER_U128) {
2156 		btf_verifier_log_member(env, struct_type, member,
2157 					"nr_copy_bits exceeds 128");
2158 		return -EINVAL;
2159 	}
2160 
2161 	if (struct_size < bytes_offset ||
2162 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2163 		btf_verifier_log_member(env, struct_type, member,
2164 					"Member exceeds struct_size");
2165 		return -EINVAL;
2166 	}
2167 
2168 	return 0;
2169 }
2170 
2171 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
2172 				      const struct btf_type *struct_type,
2173 				      const struct btf_member *member,
2174 				      const struct btf_type *member_type)
2175 {
2176 	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
2177 	u32 int_data = btf_type_int(member_type);
2178 	u32 struct_size = struct_type->size;
2179 	u32 nr_copy_bits;
2180 
2181 	/* a regular int type is required for the kflag int member */
2182 	if (!btf_type_int_is_regular(member_type)) {
2183 		btf_verifier_log_member(env, struct_type, member,
2184 					"Invalid member base type");
2185 		return -EINVAL;
2186 	}
2187 
2188 	/* check sanity of bitfield size */
2189 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2190 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2191 	nr_int_data_bits = BTF_INT_BITS(int_data);
2192 	if (!nr_bits) {
2193 		/* Not a bitfield member, member offset must be at byte
2194 		 * boundary.
2195 		 */
2196 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2197 			btf_verifier_log_member(env, struct_type, member,
2198 						"Invalid member offset");
2199 			return -EINVAL;
2200 		}
2201 
2202 		nr_bits = nr_int_data_bits;
2203 	} else if (nr_bits > nr_int_data_bits) {
2204 		btf_verifier_log_member(env, struct_type, member,
2205 					"Invalid member bitfield_size");
2206 		return -EINVAL;
2207 	}
2208 
2209 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2210 	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2211 	if (nr_copy_bits > BITS_PER_U128) {
2212 		btf_verifier_log_member(env, struct_type, member,
2213 					"nr_copy_bits exceeds 128");
2214 		return -EINVAL;
2215 	}
2216 
2217 	if (struct_size < bytes_offset ||
2218 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2219 		btf_verifier_log_member(env, struct_type, member,
2220 					"Member exceeds struct_size");
2221 		return -EINVAL;
2222 	}
2223 
2224 	return 0;
2225 }
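/* Editor's illustrative example (not part of the original source): with
 * kind_flag set on the struct, member->offset packs both the bitfield size
 * and the bit offset.  For a member like "int a:5" placed at bit 32:
 *
 *	member->offset == (5 << 24) | 32;
 *	// BTF_MEMBER_BITFIELD_SIZE(member->offset) == 5
 *	// BTF_MEMBER_BIT_OFFSET(member->offset)    == 32
 *
 * which is exactly what the checks above validate against the int's own
 * BTF_INT_BITS() and the enclosing struct size.
 */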
2226 
2227 static s32 btf_int_check_meta(struct btf_verifier_env *env,
2228 			      const struct btf_type *t,
2229 			      u32 meta_left)
2230 {
2231 	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2232 	u16 encoding;
2233 
2234 	if (meta_left < meta_needed) {
2235 		btf_verifier_log_basic(env, t,
2236 				       "meta_left:%u meta_needed:%u",
2237 				       meta_left, meta_needed);
2238 		return -EINVAL;
2239 	}
2240 
2241 	if (btf_type_vlen(t)) {
2242 		btf_verifier_log_type(env, t, "vlen != 0");
2243 		return -EINVAL;
2244 	}
2245 
2246 	if (btf_type_kflag(t)) {
2247 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2248 		return -EINVAL;
2249 	}
2250 
2251 	int_data = btf_type_int(t);
2252 	if (int_data & ~BTF_INT_MASK) {
2253 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2254 				       int_data);
2255 		return -EINVAL;
2256 	}
2257 
2258 	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2259 
2260 	if (nr_bits > BITS_PER_U128) {
2261 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2262 				      BITS_PER_U128);
2263 		return -EINVAL;
2264 	}
2265 
2266 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2267 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2268 		return -EINVAL;
2269 	}
2270 
2271 	/*
2272 	 * Only one of the encoding bits is allowed and it
2273 	 * should be sufficient for the pretty print purpose (i.e. decoding).
2274 	 * Multiple bits can be allowed later if it is found
2275 	 * to be insufficient.
2276 	 */
2277 	encoding = BTF_INT_ENCODING(int_data);
2278 	if (encoding &&
2279 	    encoding != BTF_INT_SIGNED &&
2280 	    encoding != BTF_INT_CHAR &&
2281 	    encoding != BTF_INT_BOOL) {
2282 		btf_verifier_log_type(env, t, "Unsupported encoding");
2283 		return -ENOTSUPP;
2284 	}
2285 
2286 	btf_verifier_log_type(env, t, NULL);
2287 
2288 	return meta_needed;
2289 }
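/* Editor's illustrative example (not part of the original source): int_data
 * packs the encoding, bit offset and bit size as defined in
 * include/uapi/linux/btf.h.  A plain signed 32-bit int would typically be
 * encoded as:
 *
 *	int_data == (BTF_INT_SIGNED << 24) | (0 << 16) | 32;
 *	// BTF_INT_ENCODING(int_data) == BTF_INT_SIGNED
 *	// BTF_INT_OFFSET(int_data)   == 0
 *	// BTF_INT_BITS(int_data)     == 32
 *
 * with t->size == 4, which satisfies the checks above.
 */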
2290 
2291 static void btf_int_log(struct btf_verifier_env *env,
2292 			const struct btf_type *t)
2293 {
2294 	int int_data = btf_type_int(t);
2295 
2296 	btf_verifier_log(env,
2297 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2298 			 t->size, BTF_INT_OFFSET(int_data),
2299 			 BTF_INT_BITS(int_data),
2300 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2301 }
2302 
2303 static void btf_int128_print(struct btf_show *show, void *data)
2304 {
2305 	/* data points to a __int128 number.
2306 	 * Suppose
2307 	 *     int128_num = *(__int128 *)data;
2308 	 * The formulas below show what upper_num and lower_num represent:
2309 	 *     upper_num = int128_num >> 64;
2310 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2311 	 */
2312 	u64 upper_num, lower_num;
2313 
2314 #ifdef __BIG_ENDIAN_BITFIELD
2315 	upper_num = *(u64 *)data;
2316 	lower_num = *(u64 *)(data + 8);
2317 #else
2318 	upper_num = *(u64 *)(data + 8);
2319 	lower_num = *(u64 *)data;
2320 #endif
2321 	if (upper_num == 0)
2322 		btf_show_type_value(show, "0x%llx", lower_num);
2323 	else
2324 		btf_show_type_values(show, "0x%llx%016llx", upper_num,
2325 				     lower_num);
2326 }
2327 
2328 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2329 			     u16 right_shift_bits)
2330 {
2331 	u64 upper_num, lower_num;
2332 
2333 #ifdef __BIG_ENDIAN_BITFIELD
2334 	upper_num = print_num[0];
2335 	lower_num = print_num[1];
2336 #else
2337 	upper_num = print_num[1];
2338 	lower_num = print_num[0];
2339 #endif
2340 
2341 	/* shake out unneeded bits by shift/or operations */
2342 	if (left_shift_bits >= 64) {
2343 		upper_num = lower_num << (left_shift_bits - 64);
2344 		lower_num = 0;
2345 	} else {
2346 		upper_num = (upper_num << left_shift_bits) |
2347 			    (lower_num >> (64 - left_shift_bits));
2348 		lower_num = lower_num << left_shift_bits;
2349 	}
2350 
2351 	if (right_shift_bits >= 64) {
2352 		lower_num = upper_num >> (right_shift_bits - 64);
2353 		upper_num = 0;
2354 	} else {
2355 		lower_num = (lower_num >> right_shift_bits) |
2356 			    (upper_num << (64 - right_shift_bits));
2357 		upper_num = upper_num >> right_shift_bits;
2358 	}
2359 
2360 #ifdef __BIG_ENDIAN_BITFIELD
2361 	print_num[0] = upper_num;
2362 	print_num[1] = lower_num;
2363 #else
2364 	print_num[0] = lower_num;
2365 	print_num[1] = upper_num;
2366 #endif
2367 }
2368 
2369 static void btf_bitfield_show(void *data, u8 bits_offset,
2370 			      u8 nr_bits, struct btf_show *show)
2371 {
2372 	u16 left_shift_bits, right_shift_bits;
2373 	u8 nr_copy_bytes;
2374 	u8 nr_copy_bits;
2375 	u64 print_num[2] = {};
2376 
2377 	nr_copy_bits = nr_bits + bits_offset;
2378 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2379 
2380 	memcpy(print_num, data, nr_copy_bytes);
2381 
2382 #ifdef __BIG_ENDIAN_BITFIELD
2383 	left_shift_bits = bits_offset;
2384 #else
2385 	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2386 #endif
2387 	right_shift_bits = BITS_PER_U128 - nr_bits;
2388 
2389 	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2390 	btf_int128_print(show, print_num);
2391 }
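/* Editor's worked example (illustrative only): for a little-endian bitfield
 * with bits_offset == 5 and nr_bits == 3, btf_bitfield_show() copies
 * BITS_ROUNDUP_BYTES(3 + 5) == 1 byte into print_num, shifts left by
 * BITS_PER_U128 - 8 == 120 bits so the copied byte sits at the top of the
 * 128-bit value, then shifts right by BITS_PER_U128 - 3 == 125 bits so only
 * the three field bits remain, right-aligned, before handing the result to
 * btf_int128_print().
 */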
2392 
2393 
2394 static void btf_int_bits_show(const struct btf *btf,
2395 			      const struct btf_type *t,
2396 			      void *data, u8 bits_offset,
2397 			      struct btf_show *show)
2398 {
2399 	u32 int_data = btf_type_int(t);
2400 	u8 nr_bits = BTF_INT_BITS(int_data);
2401 	u8 total_bits_offset;
2402 
2403 	/*
2404 	 * bits_offset is at most 7.
2405 	 * BTF_INT_OFFSET() cannot exceed 128 bits.
2406 	 */
2407 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2408 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2409 	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2410 	btf_bitfield_show(data, bits_offset, nr_bits, show);
2411 }
2412 
2413 static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2414 			 u32 type_id, void *data, u8 bits_offset,
2415 			 struct btf_show *show)
2416 {
2417 	u32 int_data = btf_type_int(t);
2418 	u8 encoding = BTF_INT_ENCODING(int_data);
2419 	bool sign = encoding & BTF_INT_SIGNED;
2420 	u8 nr_bits = BTF_INT_BITS(int_data);
2421 	void *safe_data;
2422 
2423 	safe_data = btf_show_start_type(show, t, type_id, data);
2424 	if (!safe_data)
2425 		return;
2426 
2427 	if (bits_offset || BTF_INT_OFFSET(int_data) ||
2428 	    BITS_PER_BYTE_MASKED(nr_bits)) {
2429 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2430 		goto out;
2431 	}
2432 
2433 	switch (nr_bits) {
2434 	case 128:
2435 		btf_int128_print(show, safe_data);
2436 		break;
2437 	case 64:
2438 		if (sign)
2439 			btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2440 		else
2441 			btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2442 		break;
2443 	case 32:
2444 		if (sign)
2445 			btf_show_type_value(show, "%d", *(s32 *)safe_data);
2446 		else
2447 			btf_show_type_value(show, "%u", *(u32 *)safe_data);
2448 		break;
2449 	case 16:
2450 		if (sign)
2451 			btf_show_type_value(show, "%d", *(s16 *)safe_data);
2452 		else
2453 			btf_show_type_value(show, "%u", *(u16 *)safe_data);
2454 		break;
2455 	case 8:
2456 		if (show->state.array_encoding == BTF_INT_CHAR) {
2457 			/* check for null terminator */
2458 			if (show->state.array_terminated)
2459 				break;
2460 			if (*(char *)data == '\0') {
2461 				show->state.array_terminated = 1;
2462 				break;
2463 			}
2464 			if (isprint(*(char *)data)) {
2465 				btf_show_type_value(show, "'%c'",
2466 						    *(char *)safe_data);
2467 				break;
2468 			}
2469 		}
2470 		if (sign)
2471 			btf_show_type_value(show, "%d", *(s8 *)safe_data);
2472 		else
2473 			btf_show_type_value(show, "%u", *(u8 *)safe_data);
2474 		break;
2475 	default:
2476 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2477 		break;
2478 	}
2479 out:
2480 	btf_show_end_type(show);
2481 }
2482 
2483 static const struct btf_kind_operations int_ops = {
2484 	.check_meta = btf_int_check_meta,
2485 	.resolve = btf_df_resolve,
2486 	.check_member = btf_int_check_member,
2487 	.check_kflag_member = btf_int_check_kflag_member,
2488 	.log_details = btf_int_log,
2489 	.show = btf_int_show,
2490 };
2491 
2492 static int btf_modifier_check_member(struct btf_verifier_env *env,
2493 				     const struct btf_type *struct_type,
2494 				     const struct btf_member *member,
2495 				     const struct btf_type *member_type)
2496 {
2497 	const struct btf_type *resolved_type;
2498 	u32 resolved_type_id = member->type;
2499 	struct btf_member resolved_member;
2500 	struct btf *btf = env->btf;
2501 
2502 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2503 	if (!resolved_type) {
2504 		btf_verifier_log_member(env, struct_type, member,
2505 					"Invalid member");
2506 		return -EINVAL;
2507 	}
2508 
2509 	resolved_member = *member;
2510 	resolved_member.type = resolved_type_id;
2511 
2512 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
2513 							 &resolved_member,
2514 							 resolved_type);
2515 }
2516 
2517 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2518 					   const struct btf_type *struct_type,
2519 					   const struct btf_member *member,
2520 					   const struct btf_type *member_type)
2521 {
2522 	const struct btf_type *resolved_type;
2523 	u32 resolved_type_id = member->type;
2524 	struct btf_member resolved_member;
2525 	struct btf *btf = env->btf;
2526 
2527 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2528 	if (!resolved_type) {
2529 		btf_verifier_log_member(env, struct_type, member,
2530 					"Invalid member");
2531 		return -EINVAL;
2532 	}
2533 
2534 	resolved_member = *member;
2535 	resolved_member.type = resolved_type_id;
2536 
2537 	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2538 							       &resolved_member,
2539 							       resolved_type);
2540 }
2541 
2542 static int btf_ptr_check_member(struct btf_verifier_env *env,
2543 				const struct btf_type *struct_type,
2544 				const struct btf_member *member,
2545 				const struct btf_type *member_type)
2546 {
2547 	u32 struct_size, struct_bits_off, bytes_offset;
2548 
2549 	struct_size = struct_type->size;
2550 	struct_bits_off = member->offset;
2551 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2552 
2553 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2554 		btf_verifier_log_member(env, struct_type, member,
2555 					"Member is not byte aligned");
2556 		return -EINVAL;
2557 	}
2558 
2559 	if (struct_size - bytes_offset < sizeof(void *)) {
2560 		btf_verifier_log_member(env, struct_type, member,
2561 					"Member exceeds struct_size");
2562 		return -EINVAL;
2563 	}
2564 
2565 	return 0;
2566 }
2567 
2568 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2569 				   const struct btf_type *t,
2570 				   u32 meta_left)
2571 {
2572 	const char *value;
2573 
2574 	if (btf_type_vlen(t)) {
2575 		btf_verifier_log_type(env, t, "vlen != 0");
2576 		return -EINVAL;
2577 	}
2578 
2579 	if (btf_type_kflag(t) && !btf_type_is_type_tag(t)) {
2580 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2581 		return -EINVAL;
2582 	}
2583 
2584 	if (!BTF_TYPE_ID_VALID(t->type)) {
2585 		btf_verifier_log_type(env, t, "Invalid type_id");
2586 		return -EINVAL;
2587 	}
2588 
2589 	/* A typedef/type_tag type must have a valid name, while the other ref
2590 	 * types (volatile, const, restrict) should have a null name.
2591 	 */
2592 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2593 		if (!t->name_off ||
2594 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
2595 			btf_verifier_log_type(env, t, "Invalid name");
2596 			return -EINVAL;
2597 		}
2598 	} else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2599 		value = btf_name_by_offset(env->btf, t->name_off);
2600 		if (!value || !value[0]) {
2601 			btf_verifier_log_type(env, t, "Invalid name");
2602 			return -EINVAL;
2603 		}
2604 	} else {
2605 		if (t->name_off) {
2606 			btf_verifier_log_type(env, t, "Invalid name");
2607 			return -EINVAL;
2608 		}
2609 	}
2610 
2611 	btf_verifier_log_type(env, t, NULL);
2612 
2613 	return 0;
2614 }
2615 
2616 static int btf_modifier_resolve(struct btf_verifier_env *env,
2617 				const struct resolve_vertex *v)
2618 {
2619 	const struct btf_type *t = v->t;
2620 	const struct btf_type *next_type;
2621 	u32 next_type_id = t->type;
2622 	struct btf *btf = env->btf;
2623 
2624 	next_type = btf_type_by_id(btf, next_type_id);
2625 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2626 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2627 		return -EINVAL;
2628 	}
2629 
2630 	if (!env_type_is_resolve_sink(env, next_type) &&
2631 	    !env_type_is_resolved(env, next_type_id))
2632 		return env_stack_push(env, next_type, next_type_id);
2633 
2634 	/* Figure out the resolved next_type_id with size.
2635 	 * They will be stored in the current modifier's
2636 	 * resolved_ids and resolved_sizes such that they can
2637 	 * save us a few type-following lookups when we use them later (e.g. in
2638 	 * pretty print).
2639 	 */
2640 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2641 		if (env_type_is_resolved(env, next_type_id))
2642 			next_type = btf_type_id_resolve(btf, &next_type_id);
2643 
2644 		/* "typedef void new_void", "const void"...etc */
2645 		if (!btf_type_is_void(next_type) &&
2646 		    !btf_type_is_fwd(next_type) &&
2647 		    !btf_type_is_func_proto(next_type)) {
2648 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2649 			return -EINVAL;
2650 		}
2651 	}
2652 
2653 	env_stack_pop_resolved(env, next_type_id, 0);
2654 
2655 	return 0;
2656 }
2657 
2658 static int btf_var_resolve(struct btf_verifier_env *env,
2659 			   const struct resolve_vertex *v)
2660 {
2661 	const struct btf_type *next_type;
2662 	const struct btf_type *t = v->t;
2663 	u32 next_type_id = t->type;
2664 	struct btf *btf = env->btf;
2665 
2666 	next_type = btf_type_by_id(btf, next_type_id);
2667 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2668 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2669 		return -EINVAL;
2670 	}
2671 
2672 	if (!env_type_is_resolve_sink(env, next_type) &&
2673 	    !env_type_is_resolved(env, next_type_id))
2674 		return env_stack_push(env, next_type, next_type_id);
2675 
2676 	if (btf_type_is_modifier(next_type)) {
2677 		const struct btf_type *resolved_type;
2678 		u32 resolved_type_id;
2679 
2680 		resolved_type_id = next_type_id;
2681 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2682 
2683 		if (btf_type_is_ptr(resolved_type) &&
2684 		    !env_type_is_resolve_sink(env, resolved_type) &&
2685 		    !env_type_is_resolved(env, resolved_type_id))
2686 			return env_stack_push(env, resolved_type,
2687 					      resolved_type_id);
2688 	}
2689 
2690 	/* We must resolve to something concrete at this point; no
2691 	 * forward types or anything similar that would resolve to a
2692 	 * size of zero is allowed.
2693 	 */
2694 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2695 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2696 		return -EINVAL;
2697 	}
2698 
2699 	env_stack_pop_resolved(env, next_type_id, 0);
2700 
2701 	return 0;
2702 }
2703 
2704 static int btf_ptr_resolve(struct btf_verifier_env *env,
2705 			   const struct resolve_vertex *v)
2706 {
2707 	const struct btf_type *next_type;
2708 	const struct btf_type *t = v->t;
2709 	u32 next_type_id = t->type;
2710 	struct btf *btf = env->btf;
2711 
2712 	next_type = btf_type_by_id(btf, next_type_id);
2713 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2714 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2715 		return -EINVAL;
2716 	}
2717 
2718 	if (!env_type_is_resolve_sink(env, next_type) &&
2719 	    !env_type_is_resolved(env, next_type_id))
2720 		return env_stack_push(env, next_type, next_type_id);
2721 
2722 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2723 	 * the modifier may have stopped resolving when it was resolved
2724 	 * to a ptr (last-resolved-ptr).
2725 	 *
2726 	 * We now need to continue from the last-resolved-ptr to
2727 	 * ensure the last-resolved-ptr does not refer back to
2728 	 * the current ptr (t).
2729 	 */
2730 	if (btf_type_is_modifier(next_type)) {
2731 		const struct btf_type *resolved_type;
2732 		u32 resolved_type_id;
2733 
2734 		resolved_type_id = next_type_id;
2735 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2736 
2737 		if (btf_type_is_ptr(resolved_type) &&
2738 		    !env_type_is_resolve_sink(env, resolved_type) &&
2739 		    !env_type_is_resolved(env, resolved_type_id))
2740 			return env_stack_push(env, resolved_type,
2741 					      resolved_type_id);
2742 	}
2743 
2744 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2745 		if (env_type_is_resolved(env, next_type_id))
2746 			next_type = btf_type_id_resolve(btf, &next_type_id);
2747 
2748 		if (!btf_type_is_void(next_type) &&
2749 		    !btf_type_is_fwd(next_type) &&
2750 		    !btf_type_is_func_proto(next_type)) {
2751 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2752 			return -EINVAL;
2753 		}
2754 	}
2755 
2756 	env_stack_pop_resolved(env, next_type_id, 0);
2757 
2758 	return 0;
2759 }
2760 
2761 static void btf_modifier_show(const struct btf *btf,
2762 			      const struct btf_type *t,
2763 			      u32 type_id, void *data,
2764 			      u8 bits_offset, struct btf_show *show)
2765 {
2766 	if (btf->resolved_ids)
2767 		t = btf_type_id_resolve(btf, &type_id);
2768 	else
2769 		t = btf_type_skip_modifiers(btf, type_id, NULL);
2770 
2771 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2772 }
2773 
2774 static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2775 			 u32 type_id, void *data, u8 bits_offset,
2776 			 struct btf_show *show)
2777 {
2778 	t = btf_type_id_resolve(btf, &type_id);
2779 
2780 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2781 }
2782 
2783 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2784 			 u32 type_id, void *data, u8 bits_offset,
2785 			 struct btf_show *show)
2786 {
2787 	void *safe_data;
2788 
2789 	safe_data = btf_show_start_type(show, t, type_id, data);
2790 	if (!safe_data)
2791 		return;
2792 
2793 	/* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2794 	if (show->flags & BTF_SHOW_PTR_RAW)
2795 		btf_show_type_value(show, "0x%px", *(void **)safe_data);
2796 	else
2797 		btf_show_type_value(show, "0x%p", *(void **)safe_data);
2798 	btf_show_end_type(show);
2799 }
2800 
2801 static void btf_ref_type_log(struct btf_verifier_env *env,
2802 			     const struct btf_type *t)
2803 {
2804 	btf_verifier_log(env, "type_id=%u", t->type);
2805 }
2806 
2807 static const struct btf_kind_operations modifier_ops = {
2808 	.check_meta = btf_ref_type_check_meta,
2809 	.resolve = btf_modifier_resolve,
2810 	.check_member = btf_modifier_check_member,
2811 	.check_kflag_member = btf_modifier_check_kflag_member,
2812 	.log_details = btf_ref_type_log,
2813 	.show = btf_modifier_show,
2814 };
2815 
2816 static const struct btf_kind_operations ptr_ops = {
2817 	.check_meta = btf_ref_type_check_meta,
2818 	.resolve = btf_ptr_resolve,
2819 	.check_member = btf_ptr_check_member,
2820 	.check_kflag_member = btf_generic_check_kflag_member,
2821 	.log_details = btf_ref_type_log,
2822 	.show = btf_ptr_show,
2823 };
2824 
2825 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2826 			      const struct btf_type *t,
2827 			      u32 meta_left)
2828 {
2829 	if (btf_type_vlen(t)) {
2830 		btf_verifier_log_type(env, t, "vlen != 0");
2831 		return -EINVAL;
2832 	}
2833 
2834 	if (t->type) {
2835 		btf_verifier_log_type(env, t, "type != 0");
2836 		return -EINVAL;
2837 	}
2838 
2839 	/* fwd type must have a valid name */
2840 	if (!t->name_off ||
2841 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2842 		btf_verifier_log_type(env, t, "Invalid name");
2843 		return -EINVAL;
2844 	}
2845 
2846 	btf_verifier_log_type(env, t, NULL);
2847 
2848 	return 0;
2849 }
2850 
2851 static void btf_fwd_type_log(struct btf_verifier_env *env,
2852 			     const struct btf_type *t)
2853 {
2854 	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2855 }
2856 
2857 static const struct btf_kind_operations fwd_ops = {
2858 	.check_meta = btf_fwd_check_meta,
2859 	.resolve = btf_df_resolve,
2860 	.check_member = btf_df_check_member,
2861 	.check_kflag_member = btf_df_check_kflag_member,
2862 	.log_details = btf_fwd_type_log,
2863 	.show = btf_df_show,
2864 };
2865 
2866 static int btf_array_check_member(struct btf_verifier_env *env,
2867 				  const struct btf_type *struct_type,
2868 				  const struct btf_member *member,
2869 				  const struct btf_type *member_type)
2870 {
2871 	u32 struct_bits_off = member->offset;
2872 	u32 struct_size, bytes_offset;
2873 	u32 array_type_id, array_size;
2874 	struct btf *btf = env->btf;
2875 
2876 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2877 		btf_verifier_log_member(env, struct_type, member,
2878 					"Member is not byte aligned");
2879 		return -EINVAL;
2880 	}
2881 
2882 	array_type_id = member->type;
2883 	btf_type_id_size(btf, &array_type_id, &array_size);
2884 	struct_size = struct_type->size;
2885 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2886 	if (struct_size - bytes_offset < array_size) {
2887 		btf_verifier_log_member(env, struct_type, member,
2888 					"Member exceeds struct_size");
2889 		return -EINVAL;
2890 	}
2891 
2892 	return 0;
2893 }
2894 
2895 static s32 btf_array_check_meta(struct btf_verifier_env *env,
2896 				const struct btf_type *t,
2897 				u32 meta_left)
2898 {
2899 	const struct btf_array *array = btf_type_array(t);
2900 	u32 meta_needed = sizeof(*array);
2901 
2902 	if (meta_left < meta_needed) {
2903 		btf_verifier_log_basic(env, t,
2904 				       "meta_left:%u meta_needed:%u",
2905 				       meta_left, meta_needed);
2906 		return -EINVAL;
2907 	}
2908 
2909 	/* array type should not have a name */
2910 	if (t->name_off) {
2911 		btf_verifier_log_type(env, t, "Invalid name");
2912 		return -EINVAL;
2913 	}
2914 
2915 	if (btf_type_vlen(t)) {
2916 		btf_verifier_log_type(env, t, "vlen != 0");
2917 		return -EINVAL;
2918 	}
2919 
2920 	if (btf_type_kflag(t)) {
2921 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2922 		return -EINVAL;
2923 	}
2924 
2925 	if (t->size) {
2926 		btf_verifier_log_type(env, t, "size != 0");
2927 		return -EINVAL;
2928 	}
2929 
2930 	/* The array elem type and index type cannot be void,
2931 	 * so !array->type and !array->index_type are not allowed.
2932 	 */
2933 	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2934 		btf_verifier_log_type(env, t, "Invalid elem");
2935 		return -EINVAL;
2936 	}
2937 
2938 	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2939 		btf_verifier_log_type(env, t, "Invalid index");
2940 		return -EINVAL;
2941 	}
2942 
2943 	btf_verifier_log_type(env, t, NULL);
2944 
2945 	return meta_needed;
2946 }
2947 
2948 static int btf_array_resolve(struct btf_verifier_env *env,
2949 			     const struct resolve_vertex *v)
2950 {
2951 	const struct btf_array *array = btf_type_array(v->t);
2952 	const struct btf_type *elem_type, *index_type;
2953 	u32 elem_type_id, index_type_id;
2954 	struct btf *btf = env->btf;
2955 	u32 elem_size;
2956 
2957 	/* Check array->index_type */
2958 	index_type_id = array->index_type;
2959 	index_type = btf_type_by_id(btf, index_type_id);
2960 	if (btf_type_nosize_or_null(index_type) ||
2961 	    btf_type_is_resolve_source_only(index_type)) {
2962 		btf_verifier_log_type(env, v->t, "Invalid index");
2963 		return -EINVAL;
2964 	}
2965 
2966 	if (!env_type_is_resolve_sink(env, index_type) &&
2967 	    !env_type_is_resolved(env, index_type_id))
2968 		return env_stack_push(env, index_type, index_type_id);
2969 
2970 	index_type = btf_type_id_size(btf, &index_type_id, NULL);
2971 	if (!index_type || !btf_type_is_int(index_type) ||
2972 	    !btf_type_int_is_regular(index_type)) {
2973 		btf_verifier_log_type(env, v->t, "Invalid index");
2974 		return -EINVAL;
2975 	}
2976 
2977 	/* Check array->type */
2978 	elem_type_id = array->type;
2979 	elem_type = btf_type_by_id(btf, elem_type_id);
2980 	if (btf_type_nosize_or_null(elem_type) ||
2981 	    btf_type_is_resolve_source_only(elem_type)) {
2982 		btf_verifier_log_type(env, v->t,
2983 				      "Invalid elem");
2984 		return -EINVAL;
2985 	}
2986 
2987 	if (!env_type_is_resolve_sink(env, elem_type) &&
2988 	    !env_type_is_resolved(env, elem_type_id))
2989 		return env_stack_push(env, elem_type, elem_type_id);
2990 
2991 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2992 	if (!elem_type) {
2993 		btf_verifier_log_type(env, v->t, "Invalid elem");
2994 		return -EINVAL;
2995 	}
2996 
2997 	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2998 		btf_verifier_log_type(env, v->t, "Invalid array of int");
2999 		return -EINVAL;
3000 	}
3001 
3002 	if (array->nelems && elem_size > U32_MAX / array->nelems) {
3003 		btf_verifier_log_type(env, v->t,
3004 				      "Array size overflows U32_MAX");
3005 		return -EINVAL;
3006 	}
3007 
3008 	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
3009 
3010 	return 0;
3011 }
3012 
3013 static void btf_array_log(struct btf_verifier_env *env,
3014 			  const struct btf_type *t)
3015 {
3016 	const struct btf_array *array = btf_type_array(t);
3017 
3018 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
3019 			 array->type, array->index_type, array->nelems);
3020 }
3021 
3022 static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
3023 			     u32 type_id, void *data, u8 bits_offset,
3024 			     struct btf_show *show)
3025 {
3026 	const struct btf_array *array = btf_type_array(t);
3027 	const struct btf_kind_operations *elem_ops;
3028 	const struct btf_type *elem_type;
3029 	u32 i, elem_size = 0, elem_type_id;
3030 	u16 encoding = 0;
3031 
3032 	elem_type_id = array->type;
3033 	elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
3034 	if (elem_type && btf_type_has_size(elem_type))
3035 		elem_size = elem_type->size;
3036 
3037 	if (elem_type && btf_type_is_int(elem_type)) {
3038 		u32 int_type = btf_type_int(elem_type);
3039 
3040 		encoding = BTF_INT_ENCODING(int_type);
3041 
3042 		/*
3043 		 * BTF_INT_CHAR encoding never seems to be set for
3044 		 * char arrays, so if size is 1 and element is
3045 		 * printable as a char, we'll do that.
3046 		 */
3047 		if (elem_size == 1)
3048 			encoding = BTF_INT_CHAR;
3049 	}
3050 
3051 	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
3052 		return;
3053 
3054 	if (!elem_type)
3055 		goto out;
3056 	elem_ops = btf_type_ops(elem_type);
3057 
3058 	for (i = 0; i < array->nelems; i++) {
3059 
3060 		btf_show_start_array_member(show);
3061 
3062 		elem_ops->show(btf, elem_type, elem_type_id, data,
3063 			       bits_offset, show);
3064 		data += elem_size;
3065 
3066 		btf_show_end_array_member(show);
3067 
3068 		if (show->state.array_terminated)
3069 			break;
3070 	}
3071 out:
3072 	btf_show_end_array_type(show);
3073 }
3074 
3075 static void btf_array_show(const struct btf *btf, const struct btf_type *t,
3076 			   u32 type_id, void *data, u8 bits_offset,
3077 			   struct btf_show *show)
3078 {
3079 	const struct btf_member *m = show->state.member;
3080 
3081 	/*
3082 	 * First check if any members would be shown (are non-zero).
3083 	 * See comments above "struct btf_show" definition for more
3084 	 * details on how this works at a high-level.
3085 	 */
3086 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3087 		if (!show->state.depth_check) {
3088 			show->state.depth_check = show->state.depth + 1;
3089 			show->state.depth_to_show = 0;
3090 		}
3091 		__btf_array_show(btf, t, type_id, data, bits_offset, show);
3092 		show->state.member = m;
3093 
3094 		if (show->state.depth_check != show->state.depth + 1)
3095 			return;
3096 		show->state.depth_check = 0;
3097 
3098 		if (show->state.depth_to_show <= show->state.depth)
3099 			return;
3100 		/*
3101 		 * Reaching here indicates we have recursed and found
3102 		 * non-zero array member(s).
3103 		 */
3104 	}
3105 	__btf_array_show(btf, t, type_id, data, bits_offset, show);
3106 }
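/* Editor's illustrative note (not from the original source): when
 * BTF_SHOW_ZERO is not set and we are below the top level, the first
 * __btf_array_show() pass above runs with show->state.depth_check set only
 * to learn whether any element is non-zero (tracked via depth_to_show); the
 * second pass then actually emits output.  E.g. a member "int a[4]" holding
 * {0, 0, 3, 0} is printed because element 2 pushes depth_to_show past the
 * current depth, while an all-zero array is skipped entirely.
 */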
3107 
3108 static const struct btf_kind_operations array_ops = {
3109 	.check_meta = btf_array_check_meta,
3110 	.resolve = btf_array_resolve,
3111 	.check_member = btf_array_check_member,
3112 	.check_kflag_member = btf_generic_check_kflag_member,
3113 	.log_details = btf_array_log,
3114 	.show = btf_array_show,
3115 };
3116 
3117 static int btf_struct_check_member(struct btf_verifier_env *env,
3118 				   const struct btf_type *struct_type,
3119 				   const struct btf_member *member,
3120 				   const struct btf_type *member_type)
3121 {
3122 	u32 struct_bits_off = member->offset;
3123 	u32 struct_size, bytes_offset;
3124 
3125 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3126 		btf_verifier_log_member(env, struct_type, member,
3127 					"Member is not byte aligned");
3128 		return -EINVAL;
3129 	}
3130 
3131 	struct_size = struct_type->size;
3132 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3133 	if (struct_size - bytes_offset < member_type->size) {
3134 		btf_verifier_log_member(env, struct_type, member,
3135 					"Member exceeds struct_size");
3136 		return -EINVAL;
3137 	}
3138 
3139 	return 0;
3140 }
3141 
3142 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
3143 				 const struct btf_type *t,
3144 				 u32 meta_left)
3145 {
3146 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3147 	const struct btf_member *member;
3148 	u32 meta_needed, last_offset;
3149 	struct btf *btf = env->btf;
3150 	u32 struct_size = t->size;
3151 	u32 offset;
3152 	u16 i;
3153 
3154 	meta_needed = btf_type_vlen(t) * sizeof(*member);
3155 	if (meta_left < meta_needed) {
3156 		btf_verifier_log_basic(env, t,
3157 				       "meta_left:%u meta_needed:%u",
3158 				       meta_left, meta_needed);
3159 		return -EINVAL;
3160 	}
3161 
3162 	/* struct type either no name or a valid one */
3163 	if (t->name_off &&
3164 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
3165 		btf_verifier_log_type(env, t, "Invalid name");
3166 		return -EINVAL;
3167 	}
3168 
3169 	btf_verifier_log_type(env, t, NULL);
3170 
3171 	last_offset = 0;
3172 	for_each_member(i, t, member) {
3173 		if (!btf_name_offset_valid(btf, member->name_off)) {
3174 			btf_verifier_log_member(env, t, member,
3175 						"Invalid member name_offset:%u",
3176 						member->name_off);
3177 			return -EINVAL;
3178 		}
3179 
3180 		/* struct member either no name or a valid one */
3181 		if (member->name_off &&
3182 		    !btf_name_valid_identifier(btf, member->name_off)) {
3183 			btf_verifier_log_member(env, t, member, "Invalid name");
3184 			return -EINVAL;
3185 		}
3186 		/* A member cannot be of type void */
3187 		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3188 			btf_verifier_log_member(env, t, member,
3189 						"Invalid type_id");
3190 			return -EINVAL;
3191 		}
3192 
3193 		offset = __btf_member_bit_offset(t, member);
3194 		if (is_union && offset) {
3195 			btf_verifier_log_member(env, t, member,
3196 						"Invalid member bits_offset");
3197 			return -EINVAL;
3198 		}
3199 
3200 		/*
3201 		 * ">" instead of ">=" because the last member could be
3202 		 * "char a[0];"
3203 		 */
3204 		if (last_offset > offset) {
3205 			btf_verifier_log_member(env, t, member,
3206 						"Invalid member bits_offset");
3207 			return -EINVAL;
3208 		}
3209 
3210 		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3211 			btf_verifier_log_member(env, t, member,
3212 						"Member bits_offset exceeds its struct size");
3213 			return -EINVAL;
3214 		}
3215 
3216 		btf_verifier_log_member(env, t, member, NULL);
3217 		last_offset = offset;
3218 	}
3219 
3220 	return meta_needed;
3221 }
3222 
3223 static int btf_struct_resolve(struct btf_verifier_env *env,
3224 			      const struct resolve_vertex *v)
3225 {
3226 	const struct btf_member *member;
3227 	int err;
3228 	u16 i;
3229 
3230 	/* Before continuing to resolve the next_member,
3231 	 * ensure the last member is indeed resolved to a
3232 	 * type with size info.
3233 	 */
3234 	if (v->next_member) {
3235 		const struct btf_type *last_member_type;
3236 		const struct btf_member *last_member;
3237 		u32 last_member_type_id;
3238 
3239 		last_member = btf_type_member(v->t) + v->next_member - 1;
3240 		last_member_type_id = last_member->type;
3241 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
3242 						       last_member_type_id)))
3243 			return -EINVAL;
3244 
3245 		last_member_type = btf_type_by_id(env->btf,
3246 						  last_member_type_id);
3247 		if (btf_type_kflag(v->t))
3248 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3249 								last_member,
3250 								last_member_type);
3251 		else
3252 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
3253 								last_member,
3254 								last_member_type);
3255 		if (err)
3256 			return err;
3257 	}
3258 
3259 	for_each_member_from(i, v->next_member, v->t, member) {
3260 		u32 member_type_id = member->type;
3261 		const struct btf_type *member_type = btf_type_by_id(env->btf,
3262 								member_type_id);
3263 
3264 		if (btf_type_nosize_or_null(member_type) ||
3265 		    btf_type_is_resolve_source_only(member_type)) {
3266 			btf_verifier_log_member(env, v->t, member,
3267 						"Invalid member");
3268 			return -EINVAL;
3269 		}
3270 
3271 		if (!env_type_is_resolve_sink(env, member_type) &&
3272 		    !env_type_is_resolved(env, member_type_id)) {
3273 			env_stack_set_next_member(env, i + 1);
3274 			return env_stack_push(env, member_type, member_type_id);
3275 		}
3276 
3277 		if (btf_type_kflag(v->t))
3278 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3279 									    member,
3280 									    member_type);
3281 		else
3282 			err = btf_type_ops(member_type)->check_member(env, v->t,
3283 								      member,
3284 								      member_type);
3285 		if (err)
3286 			return err;
3287 	}
3288 
3289 	env_stack_pop_resolved(env, 0, 0);
3290 
3291 	return 0;
3292 }
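/* Resolution sketch (purely illustrative BTF ids, not from any real kernel):
 *   [2] STRUCT 'outer' with members of type [3] and [4]
 *   [3] STRUCT 'inner' (not yet resolved)
 *   [4] INT 'int'
 * On the first pass over [2], member type [3] is neither a resolve sink nor
 * already resolved, so env_stack_set_next_member() records the position and
 * [3] is pushed with env_stack_push().  When [2] is re-entered after [3]
 * resolves, the block at the top of btf_struct_resolve() re-runs
 * check_member() on that last member, now with size info available, before
 * the loop finishes the remaining members and env_stack_pop_resolved()
 * marks [2] resolved.
 */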
3293 
3294 static void btf_struct_log(struct btf_verifier_env *env,
3295 			   const struct btf_type *t)
3296 {
3297 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3298 }
3299 
3300 enum {
3301 	BTF_FIELD_IGNORE = 0,
3302 	BTF_FIELD_FOUND  = 1,
3303 };
3304 
3305 struct btf_field_info {
3306 	enum btf_field_type type;
3307 	u32 off;
3308 	union {
3309 		struct {
3310 			u32 type_id;
3311 		} kptr;
3312 		struct {
3313 			const char *node_name;
3314 			u32 value_btf_id;
3315 		} graph_root;
3316 	};
3317 };
3318 
3319 static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3320 			   u32 off, int sz, enum btf_field_type field_type,
3321 			   struct btf_field_info *info)
3322 {
3323 	if (!__btf_type_is_struct(t))
3324 		return BTF_FIELD_IGNORE;
3325 	if (t->size != sz)
3326 		return BTF_FIELD_IGNORE;
3327 	info->type = field_type;
3328 	info->off = off;
3329 	return BTF_FIELD_FOUND;
3330 }
3331 
3332 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3333 			 u32 off, int sz, struct btf_field_info *info, u32 field_mask)
3334 {
3335 	enum btf_field_type type;
3336 	const char *tag_value;
3337 	bool is_type_tag;
3338 	u32 res_id;
3339 
3340 	/* Permit modifiers on the pointer itself */
3341 	if (btf_type_is_volatile(t))
3342 		t = btf_type_by_id(btf, t->type);
3343 	/* For PTR, sz is always == 8 */
3344 	if (!btf_type_is_ptr(t))
3345 		return BTF_FIELD_IGNORE;
3346 	t = btf_type_by_id(btf, t->type);
3347 	is_type_tag = btf_type_is_type_tag(t) && !btf_type_kflag(t);
3348 	if (!is_type_tag)
3349 		return BTF_FIELD_IGNORE;
3350 	/* Reject extra tags */
3351 	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3352 		return -EINVAL;
3353 	tag_value = __btf_name_by_offset(btf, t->name_off);
3354 	if (!strcmp("kptr_untrusted", tag_value))
3355 		type = BPF_KPTR_UNREF;
3356 	else if (!strcmp("kptr", tag_value))
3357 		type = BPF_KPTR_REF;
3358 	else if (!strcmp("percpu_kptr", tag_value))
3359 		type = BPF_KPTR_PERCPU;
3360 	else if (!strcmp("uptr", tag_value))
3361 		type = BPF_UPTR;
3362 	else
3363 		return -EINVAL;
3364 
3365 	if (!(type & field_mask))
3366 		return BTF_FIELD_IGNORE;
3367 
3368 	/* Get the base type */
3369 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
3370 	/* Only pointer to struct is allowed */
3371 	if (!__btf_type_is_struct(t))
3372 		return -EINVAL;
3373 
3374 	info->type = type;
3375 	info->off = off;
3376 	info->kptr.type_id = res_id;
3377 	return BTF_FIELD_FOUND;
3378 }
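/* Illustration of the tag strings matched above.  In a BPF program such
 * fields are typically declared with the bpf_helpers.h macros (assumed
 * here), which expand to btf_type_tag attributes:
 *
 *   struct map_value {
 *           struct task_struct __kptr *task;         // "kptr"           -> BPF_KPTR_REF
 *           struct task_struct __kptr_untrusted *t;  // "kptr_untrusted" -> BPF_KPTR_UNREF
 *           struct val __percpu_kptr *pc;            // "percpu_kptr"    -> BPF_KPTR_PERCPU
 *           struct udata __uptr *u;                  // "uptr"           -> BPF_UPTR
 *   };
 *
 * (struct val and struct udata are made-up names.)  The walk above is
 * PTR -> TYPE_TAG -> (other modifiers) -> STRUCT, and the resolved struct's
 * id ends up in info->kptr.type_id.
 */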
3379 
3380 int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
3381 			   int comp_idx, const char *tag_key, int last_id)
3382 {
3383 	int len = strlen(tag_key);
3384 	int i, n;
3385 
3386 	for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) {
3387 		const struct btf_type *t = btf_type_by_id(btf, i);
3388 
3389 		if (!btf_type_is_decl_tag(t))
3390 			continue;
3391 		if (pt != btf_type_by_id(btf, t->type))
3392 			continue;
3393 		if (btf_type_decl_tag(t)->component_idx != comp_idx)
3394 			continue;
3395 		if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3396 			continue;
3397 		return i;
3398 	}
3399 	return -ENOENT;
3400 }
3401 
3402 const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
3403 				    int comp_idx, const char *tag_key)
3404 {
3405 	const char *value = NULL;
3406 	const struct btf_type *t;
3407 	int len, id;
3408 
3409 	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0);
3410 	if (id < 0)
3411 		return ERR_PTR(id);
3412 
3413 	t = btf_type_by_id(btf, id);
3414 	len = strlen(tag_key);
3415 	value = __btf_name_by_offset(btf, t->name_off) + len;
3416 
3417 	/* Prevent duplicate entries for same type */
3418 	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
3419 	if (id >= 0)
3420 		return ERR_PTR(-EEXIST);
3421 
3422 	return value;
3423 }
3424 
3425 static int
3426 btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
3427 		    const struct btf_type *t, int comp_idx, u32 off,
3428 		    int sz, struct btf_field_info *info,
3429 		    enum btf_field_type head_type)
3430 {
3431 	const char *node_field_name;
3432 	const char *value_type;
3433 	s32 id;
3434 
3435 	if (!__btf_type_is_struct(t))
3436 		return BTF_FIELD_IGNORE;
3437 	if (t->size != sz)
3438 		return BTF_FIELD_IGNORE;
3439 	value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
3440 	if (IS_ERR(value_type))
3441 		return -EINVAL;
3442 	node_field_name = strstr(value_type, ":");
3443 	if (!node_field_name)
3444 		return -EINVAL;
3445 	value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
3446 	if (!value_type)
3447 		return -ENOMEM;
3448 	id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
3449 	kfree(value_type);
3450 	if (id < 0)
3451 		return id;
3452 	node_field_name++;
3453 	if (str_is_empty(node_field_name))
3454 		return -EINVAL;
3455 	info->type = head_type;
3456 	info->off = off;
3457 	info->graph_root.value_btf_id = id;
3458 	info->graph_root.node_name = node_field_name;
3459 	return BTF_FIELD_FOUND;
3460 }
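/* Illustration of the decl tag parsed above.  Assuming the selftests'
 * __contains() macro, which emits
 * btf_decl_tag("contains:<value-type>:<node-field>"):
 *
 *   struct node_data { long key; struct bpf_rb_node node; };
 *   struct bpf_rb_root groot __contains(node_data, node);
 *
 * btf_find_decl_tag_value() returns "node_data:node"; the part before ':'
 * is looked up as a STRUCT by name and stored in
 * info->graph_root.value_btf_id, while the part after ':' names the
 * bpf_rb_node/bpf_list_node member and is kept in
 * info->graph_root.node_name.
 */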
3461 
3462 #define field_mask_test_name(field_type, field_type_str) \
3463 	if (field_mask & field_type && !strcmp(name, field_type_str)) { \
3464 		type = field_type;					\
3465 		goto end;						\
3466 	}
3467 
3468 static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
3469 			      u32 field_mask, u32 *seen_mask,
3470 			      int *align, int *sz)
3471 {
3472 	int type = 0;
3473 	const char *name = __btf_name_by_offset(btf, var_type->name_off);
3474 
3475 	if (field_mask & BPF_SPIN_LOCK) {
3476 		if (!strcmp(name, "bpf_spin_lock")) {
3477 			if (*seen_mask & BPF_SPIN_LOCK)
3478 				return -E2BIG;
3479 			*seen_mask |= BPF_SPIN_LOCK;
3480 			type = BPF_SPIN_LOCK;
3481 			goto end;
3482 		}
3483 	}
3484 	if (field_mask & BPF_RES_SPIN_LOCK) {
3485 		if (!strcmp(name, "bpf_res_spin_lock")) {
3486 			if (*seen_mask & BPF_RES_SPIN_LOCK)
3487 				return -E2BIG;
3488 			*seen_mask |= BPF_RES_SPIN_LOCK;
3489 			type = BPF_RES_SPIN_LOCK;
3490 			goto end;
3491 		}
3492 	}
3493 	if (field_mask & BPF_TIMER) {
3494 		if (!strcmp(name, "bpf_timer")) {
3495 			if (*seen_mask & BPF_TIMER)
3496 				return -E2BIG;
3497 			*seen_mask |= BPF_TIMER;
3498 			type = BPF_TIMER;
3499 			goto end;
3500 		}
3501 	}
3502 	if (field_mask & BPF_WORKQUEUE) {
3503 		if (!strcmp(name, "bpf_wq")) {
3504 			if (*seen_mask & BPF_WORKQUEUE)
3505 				return -E2BIG;
3506 			*seen_mask |= BPF_WORKQUEUE;
3507 			type = BPF_WORKQUEUE;
3508 			goto end;
3509 		}
3510 	}
3511 	field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
3512 	field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
3513 	field_mask_test_name(BPF_RB_ROOT,   "bpf_rb_root");
3514 	field_mask_test_name(BPF_RB_NODE,   "bpf_rb_node");
3515 	field_mask_test_name(BPF_REFCOUNT,  "bpf_refcount");
3516 
3517 	/* Only return BPF_KPTR when all other types with matchable names fail */
3518 	if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
3519 		type = BPF_KPTR_REF;
3520 		goto end;
3521 	}
3522 	return 0;
3523 end:
3524 	*sz = btf_field_type_size(type);
3525 	*align = btf_field_type_align(type);
3526 	return type;
3527 }
3528 
3529 #undef field_mask_test_name
3530 
3531 /* Repeat a number of fields a specified number of times.
3532  *
3533  * Copy the fields starting from the first field and repeat them
3534  * repeat_cnt times. Each repeated copy of a field has its offset
3535  * increased by
3536  *   (i + 1) * elem_size
3537  * where i is the repeat index and elem_size is the size of an element.
3538  */
3539 static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
3540 			     u32 field_cnt, u32 repeat_cnt, u32 elem_size)
3541 {
3542 	u32 i, j;
3543 	u32 cur;
3544 
3545 	/* Make sure we are not repeating fields that must not be repeated. */
3546 	for (i = 0; i < field_cnt; i++) {
3547 		switch (info[i].type) {
3548 		case BPF_KPTR_UNREF:
3549 		case BPF_KPTR_REF:
3550 		case BPF_KPTR_PERCPU:
3551 		case BPF_UPTR:
3552 		case BPF_LIST_HEAD:
3553 		case BPF_RB_ROOT:
3554 			break;
3555 		default:
3556 			return -EINVAL;
3557 		}
3558 	}
3559 
3560 	/* The type of struct size or variable size is u32,
3561 	 * so the multiplication will not overflow.
3562 	 */
3563 	if (field_cnt * (repeat_cnt + 1) > info_cnt)
3564 		return -E2BIG;
3565 
3566 	cur = field_cnt;
3567 	for (i = 0; i < repeat_cnt; i++) {
3568 		memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
3569 		for (j = 0; j < field_cnt; j++)
3570 			info[cur++].off += (i + 1) * elem_size;
3571 	}
3572 
3573 	return 0;
3574 }
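/* Worked example of the arithmetic above: with field_cnt == 2 (offsets 0
 * and 8 within one element), elem_size == 16 and repeat_cnt == 2, the
 * copies land at offsets 16, 24 (i == 0) and 32, 40 (i == 1), six entries
 * in total -- which is why the caller must provide
 * info_cnt >= field_cnt * (repeat_cnt + 1).
 */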
3575 
3576 static int btf_find_struct_field(const struct btf *btf,
3577 				 const struct btf_type *t, u32 field_mask,
3578 				 struct btf_field_info *info, int info_cnt,
3579 				 u32 level);
3580 
3581 /* Find special fields in the struct type of a field.
3582  *
3583  * This function is used to find fields of special types that are not
3584  * global variables or direct fields of a struct type. It also handles
3585  * the repetition when such a struct is the element type of an array.
3586  */
3587 static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
3588 				  u32 off, u32 nelems,
3589 				  u32 field_mask, struct btf_field_info *info,
3590 				  int info_cnt, u32 level)
3591 {
3592 	int ret, err, i;
3593 
3594 	level++;
3595 	if (level >= MAX_RESOLVE_DEPTH)
3596 		return -E2BIG;
3597 
3598 	ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level);
3599 
3600 	if (ret <= 0)
3601 		return ret;
3602 
3603 	/* Shift the offsets of the nested struct fields so that they are
3604 	 * relative to the container.
3605 	 */
3606 	for (i = 0; i < ret; i++)
3607 		info[i].off += off;
3608 
3609 	if (nelems > 1) {
3610 		err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3611 		if (err == 0)
3612 			ret *= nelems;
3613 		else
3614 			ret = err;
3615 	}
3616 
3617 	return ret;
3618 }
3619 
3620 static int btf_find_field_one(const struct btf *btf,
3621 			      const struct btf_type *var,
3622 			      const struct btf_type *var_type,
3623 			      int var_idx,
3624 			      u32 off, u32 expected_size,
3625 			      u32 field_mask, u32 *seen_mask,
3626 			      struct btf_field_info *info, int info_cnt,
3627 			      u32 level)
3628 {
3629 	int ret, align, sz, field_type;
3630 	struct btf_field_info tmp;
3631 	const struct btf_array *array;
3632 	u32 i, nelems = 1;
3633 
3634 	/* Walk into array types to find the element type and the number of
3635 	 * elements in the (flattened) array.
3636 	 */
3637 	for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
3638 		array = btf_array(var_type);
3639 		nelems *= array->nelems;
3640 		var_type = btf_type_by_id(btf, array->type);
3641 	}
3642 	if (i == MAX_RESOLVE_DEPTH)
3643 		return -E2BIG;
3644 	if (nelems == 0)
3645 		return 0;
3646 
3647 	field_type = btf_get_field_type(btf, var_type,
3648 					field_mask, seen_mask, &align, &sz);
3649 	/* Look into variables of struct types */
3650 	if (!field_type && __btf_type_is_struct(var_type)) {
3651 		sz = var_type->size;
3652 		if (expected_size && expected_size != sz * nelems)
3653 			return 0;
3654 		ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
3655 					     &info[0], info_cnt, level);
3656 		return ret;
3657 	}
3658 
3659 	if (field_type == 0)
3660 		return 0;
3661 	if (field_type < 0)
3662 		return field_type;
3663 
3664 	if (expected_size && expected_size != sz * nelems)
3665 		return 0;
3666 	if (off % align)
3667 		return 0;
3668 
3669 	switch (field_type) {
3670 	case BPF_SPIN_LOCK:
3671 	case BPF_RES_SPIN_LOCK:
3672 	case BPF_TIMER:
3673 	case BPF_WORKQUEUE:
3674 	case BPF_LIST_NODE:
3675 	case BPF_RB_NODE:
3676 	case BPF_REFCOUNT:
3677 		ret = btf_find_struct(btf, var_type, off, sz, field_type,
3678 				      info_cnt ? &info[0] : &tmp);
3679 		if (ret < 0)
3680 			return ret;
3681 		break;
3682 	case BPF_KPTR_UNREF:
3683 	case BPF_KPTR_REF:
3684 	case BPF_KPTR_PERCPU:
3685 	case BPF_UPTR:
3686 		ret = btf_find_kptr(btf, var_type, off, sz,
3687 				    info_cnt ? &info[0] : &tmp, field_mask);
3688 		if (ret < 0)
3689 			return ret;
3690 		break;
3691 	case BPF_LIST_HEAD:
3692 	case BPF_RB_ROOT:
3693 		ret = btf_find_graph_root(btf, var, var_type,
3694 					  var_idx, off, sz,
3695 					  info_cnt ? &info[0] : &tmp,
3696 					  field_type);
3697 		if (ret < 0)
3698 			return ret;
3699 		break;
3700 	default:
3701 		return -EFAULT;
3702 	}
3703 
3704 	if (ret == BTF_FIELD_IGNORE)
3705 		return 0;
3706 	if (!info_cnt)
3707 		return -E2BIG;
3708 	if (nelems > 1) {
3709 		ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3710 		if (ret < 0)
3711 			return ret;
3712 	}
3713 	return nelems;
3714 }
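/* Example of the array flattening at the top of this function.  For a map
 * value member declared in a BPF program (illustrative) as
 *
 *   struct foo __kptr *ptrs[2][3];
 *
 * the loop collapses both ARRAY levels into nelems == 6 with var_type left
 * at the element type, btf_find_kptr() fills info[0], and
 * btf_repeat_fields() replicates that entry five more times, each copy
 * shifted by the 8-byte kptr field size.
 */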
3715 
3716 static int btf_find_struct_field(const struct btf *btf,
3717 				 const struct btf_type *t, u32 field_mask,
3718 				 struct btf_field_info *info, int info_cnt,
3719 				 u32 level)
3720 {
3721 	int ret, idx = 0;
3722 	const struct btf_member *member;
3723 	u32 i, off, seen_mask = 0;
3724 
3725 	for_each_member(i, t, member) {
3726 		const struct btf_type *member_type = btf_type_by_id(btf,
3727 								    member->type);
3728 
3729 		off = __btf_member_bit_offset(t, member);
3730 		if (off % 8)
3731 			/* valid C code cannot generate such BTF */
3732 			return -EINVAL;
3733 		off /= 8;
3734 
3735 		ret = btf_find_field_one(btf, t, member_type, i,
3736 					 off, 0,
3737 					 field_mask, &seen_mask,
3738 					 &info[idx], info_cnt - idx, level);
3739 		if (ret < 0)
3740 			return ret;
3741 		idx += ret;
3742 	}
3743 	return idx;
3744 }
3745 
3746 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3747 				u32 field_mask, struct btf_field_info *info,
3748 				int info_cnt, u32 level)
3749 {
3750 	int ret, idx = 0;
3751 	const struct btf_var_secinfo *vsi;
3752 	u32 i, off, seen_mask = 0;
3753 
3754 	for_each_vsi(i, t, vsi) {
3755 		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3756 		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3757 
3758 		off = vsi->offset;
3759 		ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
3760 					 field_mask, &seen_mask,
3761 					 &info[idx], info_cnt - idx,
3762 					 level);
3763 		if (ret < 0)
3764 			return ret;
3765 		idx += ret;
3766 	}
3767 	return idx;
3768 }
3769 
3770 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3771 			  u32 field_mask, struct btf_field_info *info,
3772 			  int info_cnt)
3773 {
3774 	if (__btf_type_is_struct(t))
3775 		return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
3776 	else if (btf_type_is_datasec(t))
3777 		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
3778 	return -EINVAL;
3779 }
3780 
3781 /* Callers have to ensure the life cycle of btf if it is program BTF */
3782 static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
3783 			  struct btf_field_info *info)
3784 {
3785 	struct module *mod = NULL;
3786 	const struct btf_type *t;
3787 	/* If a matching btf type is found in kernel or module BTFs, kptr_btf
3788 	 * is that BTF, otherwise it is the program's own BTF
3789 	 */
3790 	struct btf *kptr_btf;
3791 	int ret;
3792 	s32 id;
3793 
3794 	/* Find type in map BTF, and use it to look up the matching type
3795 	 * in vmlinux or module BTFs, by name and kind.
3796 	 */
3797 	t = btf_type_by_id(btf, info->kptr.type_id);
3798 	id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3799 			     &kptr_btf);
3800 	if (id == -ENOENT) {
3801 		/* btf_parse_kptr should only be called w/ btf = program BTF */
3802 		WARN_ON_ONCE(btf_is_kernel(btf));
3803 
3804 		/* Type exists only in program BTF. Assume that it's a MEM_ALLOC
3805 		 * kptr allocated via bpf_obj_new
3806 		 */
3807 		field->kptr.dtor = NULL;
3808 		id = info->kptr.type_id;
3809 		kptr_btf = (struct btf *)btf;
3810 		goto found_dtor;
3811 	}
3812 	if (id < 0)
3813 		return id;
3814 
3815 	/* Find and stash the function pointer for the destruction function that
3816 	 * needs to be eventually invoked from the map free path.
3817 	 */
3818 	if (info->type == BPF_KPTR_REF) {
3819 		const struct btf_type *dtor_func;
3820 		const char *dtor_func_name;
3821 		unsigned long addr;
3822 		s32 dtor_btf_id;
3823 
3824 		/* This call also serves as a whitelist of allowed objects that
3825 		 * can be used as a referenced pointer and be stored in a map at
3826 		 * the same time.
3827 		 */
3828 		dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
3829 		if (dtor_btf_id < 0) {
3830 			ret = dtor_btf_id;
3831 			goto end_btf;
3832 		}
3833 
3834 		dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id);
3835 		if (!dtor_func) {
3836 			ret = -ENOENT;
3837 			goto end_btf;
3838 		}
3839 
3840 		if (btf_is_module(kptr_btf)) {
3841 			mod = btf_try_get_module(kptr_btf);
3842 			if (!mod) {
3843 				ret = -ENXIO;
3844 				goto end_btf;
3845 			}
3846 		}
3847 
3848 		/* We already verified dtor_func to be btf_type_is_func
3849 		 * in register_btf_id_dtor_kfuncs.
3850 		 */
3851 		dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
3852 		addr = kallsyms_lookup_name(dtor_func_name);
3853 		if (!addr) {
3854 			ret = -EINVAL;
3855 			goto end_mod;
3856 		}
3857 		field->kptr.dtor = (void *)addr;
3858 	}
3859 
3860 found_dtor:
3861 	field->kptr.btf_id = id;
3862 	field->kptr.btf = kptr_btf;
3863 	field->kptr.module = mod;
3864 	return 0;
3865 end_mod:
3866 	module_put(mod);
3867 end_btf:
3868 	btf_put(kptr_btf);
3869 	return ret;
3870 }
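/* Sketch of the two outcomes above.  For a kernel type, e.g.
 *   struct task_struct __kptr *task;
 * the name/kind lookup lands in vmlinux (or module) BTF, the destructor
 * previously registered via register_btf_id_dtor_kfuncs() is resolved to an
 * address through kallsyms, and kptr.{btf,btf_id,dtor} all refer to kernel
 * BTF.  For a type that exists only in the program's own BTF (objects
 * allocated with bpf_obj_new()), the lookup returns -ENOENT and the field
 * keeps the program BTF id with a NULL dtor.
 */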
3871 
3872 static int btf_parse_graph_root(const struct btf *btf,
3873 				struct btf_field *field,
3874 				struct btf_field_info *info,
3875 				const char *node_type_name,
3876 				size_t node_type_align)
3877 {
3878 	const struct btf_type *t, *n = NULL;
3879 	const struct btf_member *member;
3880 	u32 offset;
3881 	int i;
3882 
3883 	t = btf_type_by_id(btf, info->graph_root.value_btf_id);
3884 	/* We've already checked that value_btf_id is a struct type. We
3885 	 * just need to figure out the offset of the list_node, and
3886 	 * verify its type.
3887 	 */
3888 	for_each_member(i, t, member) {
3889 		if (strcmp(info->graph_root.node_name,
3890 			   __btf_name_by_offset(btf, member->name_off)))
3891 			continue;
3892 		/* Invalid BTF, two members with same name */
3893 		if (n)
3894 			return -EINVAL;
3895 		n = btf_type_by_id(btf, member->type);
3896 		if (!__btf_type_is_struct(n))
3897 			return -EINVAL;
3898 		if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
3899 			return -EINVAL;
3900 		offset = __btf_member_bit_offset(n, member);
3901 		if (offset % 8)
3902 			return -EINVAL;
3903 		offset /= 8;
3904 		if (offset % node_type_align)
3905 			return -EINVAL;
3906 
3907 		field->graph_root.btf = (struct btf *)btf;
3908 		field->graph_root.value_btf_id = info->graph_root.value_btf_id;
3909 		field->graph_root.node_offset = offset;
3910 	}
3911 	if (!n)
3912 		return -ENOENT;
3913 	return 0;
3914 }
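/* Continuing the earlier illustration, for
 *   struct node_data { long key; struct bpf_rb_node node; };
 * the loop above finds the member named by info->graph_root.node_name
 * ("node"), checks that it really is a struct with the expected name
 * (bpf_rb_node here, bpf_list_node for list heads) and suitable alignment,
 * and records its byte offset (8 on a 64-bit kernel) as
 * field->graph_root.node_offset.
 */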
3915 
3916 static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
3917 			       struct btf_field_info *info)
3918 {
3919 	return btf_parse_graph_root(btf, field, info, "bpf_list_node",
3920 					    __alignof__(struct bpf_list_node));
3921 }
3922 
3923 static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
3924 			     struct btf_field_info *info)
3925 {
3926 	return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
3927 					    __alignof__(struct bpf_rb_node));
3928 }
3929 
3930 static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
3931 {
3932 	const struct btf_field *a = (const struct btf_field *)_a;
3933 	const struct btf_field *b = (const struct btf_field *)_b;
3934 
3935 	if (a->offset < b->offset)
3936 		return -1;
3937 	else if (a->offset > b->offset)
3938 		return 1;
3939 	return 0;
3940 }
3941 
3942 struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
3943 				    u32 field_mask, u32 value_size)
3944 {
3945 	struct btf_field_info info_arr[BTF_FIELDS_MAX];
3946 	u32 next_off = 0, field_type_size;
3947 	struct btf_record *rec;
3948 	int ret, i, cnt;
3949 
3950 	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
3951 	if (ret < 0)
3952 		return ERR_PTR(ret);
3953 	if (!ret)
3954 		return NULL;
3955 
3956 	cnt = ret;
3957 	/* This needs to be kzalloc to zero out padding and unused fields, see
3958 	 * comment in btf_record_equal.
3959 	 */
3960 	rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN);
3961 	if (!rec)
3962 		return ERR_PTR(-ENOMEM);
3963 
3964 	rec->spin_lock_off = -EINVAL;
3965 	rec->res_spin_lock_off = -EINVAL;
3966 	rec->timer_off = -EINVAL;
3967 	rec->wq_off = -EINVAL;
3968 	rec->refcount_off = -EINVAL;
3969 	for (i = 0; i < cnt; i++) {
3970 		field_type_size = btf_field_type_size(info_arr[i].type);
3971 		if (info_arr[i].off + field_type_size > value_size) {
3972 			WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
3973 			ret = -EFAULT;
3974 			goto end;
3975 		}
3976 		if (info_arr[i].off < next_off) {
3977 			ret = -EEXIST;
3978 			goto end;
3979 		}
3980 		next_off = info_arr[i].off + field_type_size;
3981 
3982 		rec->field_mask |= info_arr[i].type;
3983 		rec->fields[i].offset = info_arr[i].off;
3984 		rec->fields[i].type = info_arr[i].type;
3985 		rec->fields[i].size = field_type_size;
3986 
3987 		switch (info_arr[i].type) {
3988 		case BPF_SPIN_LOCK:
3989 			WARN_ON_ONCE(rec->spin_lock_off >= 0);
3990 			/* Cache offset for faster lookup at runtime */
3991 			rec->spin_lock_off = rec->fields[i].offset;
3992 			break;
3993 		case BPF_RES_SPIN_LOCK:
3994 			WARN_ON_ONCE(rec->spin_lock_off >= 0);
3995 			/* Cache offset for faster lookup at runtime */
3996 			rec->res_spin_lock_off = rec->fields[i].offset;
3997 			break;
3998 		case BPF_TIMER:
3999 			WARN_ON_ONCE(rec->timer_off >= 0);
4000 			/* Cache offset for faster lookup at runtime */
4001 			rec->timer_off = rec->fields[i].offset;
4002 			break;
4003 		case BPF_WORKQUEUE:
4004 			WARN_ON_ONCE(rec->wq_off >= 0);
4005 			/* Cache offset for faster lookup at runtime */
4006 			rec->wq_off = rec->fields[i].offset;
4007 			break;
4008 		case BPF_REFCOUNT:
4009 			WARN_ON_ONCE(rec->refcount_off >= 0);
4010 			/* Cache offset for faster lookup at runtime */
4011 			rec->refcount_off = rec->fields[i].offset;
4012 			break;
4013 		case BPF_KPTR_UNREF:
4014 		case BPF_KPTR_REF:
4015 		case BPF_KPTR_PERCPU:
4016 		case BPF_UPTR:
4017 			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
4018 			if (ret < 0)
4019 				goto end;
4020 			break;
4021 		case BPF_LIST_HEAD:
4022 			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4023 			if (ret < 0)
4024 				goto end;
4025 			break;
4026 		case BPF_RB_ROOT:
4027 			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4028 			if (ret < 0)
4029 				goto end;
4030 			break;
4031 		case BPF_LIST_NODE:
4032 		case BPF_RB_NODE:
4033 			break;
4034 		default:
4035 			ret = -EFAULT;
4036 			goto end;
4037 		}
4038 		rec->cnt++;
4039 	}
4040 
4041 	if (rec->spin_lock_off >= 0 && rec->res_spin_lock_off >= 0) {
4042 		ret = -EINVAL;
4043 		goto end;
4044 	}
4045 
4046 	/* bpf_{list_head, rb_root} require bpf_spin_lock */
4047 	if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
4048 	     btf_record_has_field(rec, BPF_RB_ROOT)) &&
4049 		 (rec->spin_lock_off < 0 && rec->res_spin_lock_off < 0)) {
4050 		ret = -EINVAL;
4051 		goto end;
4052 	}
4053 
4054 	if (rec->refcount_off < 0 &&
4055 	    btf_record_has_field(rec, BPF_LIST_NODE) &&
4056 	    btf_record_has_field(rec, BPF_RB_NODE)) {
4057 		ret = -EINVAL;
4058 		goto end;
4059 	}
4060 
4061 	sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
4062 	       NULL, rec);
4063 
4064 	return rec;
4065 end:
4066 	btf_record_free(rec);
4067 	return ERR_PTR(ret);
4068 }
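/* End-to-end sketch of what btf_parse_fields() builds for a map value such
 * as (illustrative, using the helper macros mentioned earlier):
 *
 *   struct map_value {
 *           struct bpf_spin_lock lock;
 *           struct bpf_list_head head __contains(node_data, node);
 *           struct task_struct __kptr *task;
 *   };
 *
 * btf_find_field() reports three fields; spin_lock_off is cached, the kptr
 * and list_head entries are completed by btf_parse_kptr() and
 * btf_parse_list_head(), the cross-field checks run (a list_head or rb_root
 * without any spin lock is rejected), and the fields are finally sorted by
 * offset so runtime lookups can bsearch the record.
 */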
4069 
4070 int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
4071 {
4072 	int i;
4073 
4074 	/* There are three types that signify ownership of some other type:
4075 	 *  kptr_ref, bpf_list_head, bpf_rb_root.
4076 	 * kptr_ref only supports storing kernel types, which can't store
4077 	 * references to program allocated local types.
4078 	 *
4079 	 * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
4080 	 * does not form cycles.
4081 	 */
4082 	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4083 		return 0;
4084 	for (i = 0; i < rec->cnt; i++) {
4085 		struct btf_struct_meta *meta;
4086 		const struct btf_type *t;
4087 		u32 btf_id;
4088 
4089 		if (rec->fields[i].type == BPF_UPTR) {
4090 			/* The uptr only supports pinning one page and cannot
4091 			 * point to a kernel struct
4092 			 */
4093 			if (btf_is_kernel(rec->fields[i].kptr.btf))
4094 				return -EINVAL;
4095 			t = btf_type_by_id(rec->fields[i].kptr.btf,
4096 					   rec->fields[i].kptr.btf_id);
4097 			if (!t->size)
4098 				return -EINVAL;
4099 			if (t->size > PAGE_SIZE)
4100 				return -E2BIG;
4101 			continue;
4102 		}
4103 
4104 		if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
4105 			continue;
4106 		btf_id = rec->fields[i].graph_root.value_btf_id;
4107 		meta = btf_find_struct_meta(btf, btf_id);
4108 		if (!meta)
4109 			return -EFAULT;
4110 		rec->fields[i].graph_root.value_rec = meta->record;
4111 
4112 		/* We need to set value_rec for all root types, but no need
4113 		 * to check ownership cycle for a type unless it's also a
4114 		 * node type.
4115 		 */
4116 		if (!(rec->field_mask & BPF_GRAPH_NODE))
4117 			continue;
4118 
4119 		/* We need to ensure ownership acyclicity among all types. The
4120 		 * proper way to do it would be to topologically sort all BTF
4121 		 * IDs based on the ownership edges, since there can be multiple
4122 		 * bpf_{list_head,rb_node} in a type. Instead, we use the
4123 		 * following reasoning:
4124 		 *
4125 		 * - A type can only be owned by another type in user BTF if it
4126 		 *   has a bpf_{list,rb}_node. Let's call these node types.
4127 		 * - A type can only _own_ another type in user BTF if it has a
4128 		 *   bpf_{list_head,rb_root}. Let's call these root types.
4129 		 *
4130 		 * We ensure that if a type is both a root and node, its
4131 		 * element types cannot be root types.
4132 		 *
4133 		 * To ensure acyclicity:
4134 		 *
4135 		 * When A is a root type but not a node, its ownership
4136 		 * chain can be:
4137 		 *	A -> B -> C
4138 		 * Where:
4139 		 * - A is only a root, e.g. has bpf_rb_root.
4140 		 * - B is both a root and a node, e.g. has bpf_rb_node and
4141 		 *   bpf_list_head.
4142 		 * - C is only a node, e.g. has bpf_list_node.
4143 		 *
4144 		 * When A is both a root and a node, some other type already
4145 		 * owns it in the BTF domain, hence it cannot own
4146 		 * another root type through any of the ownership edges.
4147 		 *	A -> B
4148 		 * Where:
4149 		 * - A is both a root and a node.
4150 		 * - B is only a node.
4151 		 */
4152 		if (meta->record->field_mask & BPF_GRAPH_ROOT)
4153 			return -ELOOP;
4154 	}
4155 	return 0;
4156 }
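/* Example of the ownership cycle that the BPF_GRAPH_ROOT check above
 * rejects (illustrative program-BTF types):
 *
 *   struct a {
 *           struct bpf_list_node node;
 *           struct bpf_list_head head __contains(b, node);
 *   };
 *   struct b {
 *           struct bpf_list_node node;
 *           struct bpf_list_head head __contains(a, node);
 *   };
 *
 * Each type is both a root and a node, and the value type it owns is itself
 * a root, so meta->record->field_mask has BPF_GRAPH_ROOT set and the
 * function returns -ELOOP.
 */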
4157 
4158 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
4159 			      u32 type_id, void *data, u8 bits_offset,
4160 			      struct btf_show *show)
4161 {
4162 	const struct btf_member *member;
4163 	void *safe_data;
4164 	u32 i;
4165 
4166 	safe_data = btf_show_start_struct_type(show, t, type_id, data);
4167 	if (!safe_data)
4168 		return;
4169 
4170 	for_each_member(i, t, member) {
4171 		const struct btf_type *member_type = btf_type_by_id(btf,
4172 								member->type);
4173 		const struct btf_kind_operations *ops;
4174 		u32 member_offset, bitfield_size;
4175 		u32 bytes_offset;
4176 		u8 bits8_offset;
4177 
4178 		btf_show_start_member(show, member);
4179 
4180 		member_offset = __btf_member_bit_offset(t, member);
4181 		bitfield_size = __btf_member_bitfield_size(t, member);
4182 		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
4183 		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
4184 		if (bitfield_size) {
4185 			safe_data = btf_show_start_type(show, member_type,
4186 							member->type,
4187 							data + bytes_offset);
4188 			if (safe_data)
4189 				btf_bitfield_show(safe_data,
4190 						  bits8_offset,
4191 						  bitfield_size, show);
4192 			btf_show_end_type(show);
4193 		} else {
4194 			ops = btf_type_ops(member_type);
4195 			ops->show(btf, member_type, member->type,
4196 				  data + bytes_offset, bits8_offset, show);
4197 		}
4198 
4199 		btf_show_end_member(show);
4200 	}
4201 
4202 	btf_show_end_struct_type(show);
4203 }
4204 
4205 static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
4206 			    u32 type_id, void *data, u8 bits_offset,
4207 			    struct btf_show *show)
4208 {
4209 	const struct btf_member *m = show->state.member;
4210 
4211 	/*
4212 	 * First check if any members would be shown (are non-zero).
4213 	 * See comments above "struct btf_show" definition for more
4214 	 * details on how this works at a high-level.
4215 	 */
4216 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
4217 		if (!show->state.depth_check) {
4218 			show->state.depth_check = show->state.depth + 1;
4219 			show->state.depth_to_show = 0;
4220 		}
4221 		__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4222 		/* Restore saved member data here */
4223 		show->state.member = m;
4224 		if (show->state.depth_check != show->state.depth + 1)
4225 			return;
4226 		show->state.depth_check = 0;
4227 
4228 		if (show->state.depth_to_show <= show->state.depth)
4229 			return;
4230 		/*
4231 		 * Reaching here indicates we have recursed and found
4232 		 * non-zero child values.
4233 		 */
4234 	}
4235 
4236 	__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4237 }
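/* Note on the flow above: without BTF_SHOW_ZERO, nested structs are shown
 * with a two-pass scheme.  The first __btf_struct_show() call runs with
 * state.depth_check armed, so the show helpers only record in depth_to_show
 * whether any member at this depth is non-zero instead of emitting output;
 * the second call only happens (and actually prints) when something
 * non-zero was found, which is how all-zero nested members get skipped.
 */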
4238 
4239 static const struct btf_kind_operations struct_ops = {
4240 	.check_meta = btf_struct_check_meta,
4241 	.resolve = btf_struct_resolve,
4242 	.check_member = btf_struct_check_member,
4243 	.check_kflag_member = btf_generic_check_kflag_member,
4244 	.log_details = btf_struct_log,
4245 	.show = btf_struct_show,
4246 };
4247 
4248 static int btf_enum_check_member(struct btf_verifier_env *env,
4249 				 const struct btf_type *struct_type,
4250 				 const struct btf_member *member,
4251 				 const struct btf_type *member_type)
4252 {
4253 	u32 struct_bits_off = member->offset;
4254 	u32 struct_size, bytes_offset;
4255 
4256 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4257 		btf_verifier_log_member(env, struct_type, member,
4258 					"Member is not byte aligned");
4259 		return -EINVAL;
4260 	}
4261 
4262 	struct_size = struct_type->size;
4263 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
4264 	if (struct_size - bytes_offset < member_type->size) {
4265 		btf_verifier_log_member(env, struct_type, member,
4266 					"Member exceeds struct_size");
4267 		return -EINVAL;
4268 	}
4269 
4270 	return 0;
4271 }
4272 
4273 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
4274 				       const struct btf_type *struct_type,
4275 				       const struct btf_member *member,
4276 				       const struct btf_type *member_type)
4277 {
4278 	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
4279 	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
4280 
4281 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
4282 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
4283 	if (!nr_bits) {
4284 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4285 			btf_verifier_log_member(env, struct_type, member,
4286 						"Member is not byte aligned");
4287 			return -EINVAL;
4288 		}
4289 
4290 		nr_bits = int_bitsize;
4291 	} else if (nr_bits > int_bitsize) {
4292 		btf_verifier_log_member(env, struct_type, member,
4293 					"Invalid member bitfield_size");
4294 		return -EINVAL;
4295 	}
4296 
4297 	struct_size = struct_type->size;
4298 	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
4299 	if (struct_size < bytes_end) {
4300 		btf_verifier_log_member(env, struct_type, member,
4301 					"Member exceeds struct_size");
4302 		return -EINVAL;
4303 	}
4304 
4305 	return 0;
4306 }
4307 
4308 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
4309 			       const struct btf_type *t,
4310 			       u32 meta_left)
4311 {
4312 	const struct btf_enum *enums = btf_type_enum(t);
4313 	struct btf *btf = env->btf;
4314 	const char *fmt_str;
4315 	u16 i, nr_enums;
4316 	u32 meta_needed;
4317 
4318 	nr_enums = btf_type_vlen(t);
4319 	meta_needed = nr_enums * sizeof(*enums);
4320 
4321 	if (meta_left < meta_needed) {
4322 		btf_verifier_log_basic(env, t,
4323 				       "meta_left:%u meta_needed:%u",
4324 				       meta_left, meta_needed);
4325 		return -EINVAL;
4326 	}
4327 
4328 	if (t->size > 8 || !is_power_of_2(t->size)) {
4329 		btf_verifier_log_type(env, t, "Unexpected size");
4330 		return -EINVAL;
4331 	}
4332 
4333 	/* enum type either no name or a valid one */
4334 	if (t->name_off &&
4335 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4336 		btf_verifier_log_type(env, t, "Invalid name");
4337 		return -EINVAL;
4338 	}
4339 
4340 	btf_verifier_log_type(env, t, NULL);
4341 
4342 	for (i = 0; i < nr_enums; i++) {
4343 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4344 			btf_verifier_log(env, "\tInvalid name_offset:%u",
4345 					 enums[i].name_off);
4346 			return -EINVAL;
4347 		}
4348 
4349 		/* enum member must have a valid name */
4350 		if (!enums[i].name_off ||
4351 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4352 			btf_verifier_log_type(env, t, "Invalid name");
4353 			return -EINVAL;
4354 		}
4355 
4356 		if (env->log.level == BPF_LOG_KERNEL)
4357 			continue;
4358 		fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
4359 		btf_verifier_log(env, fmt_str,
4360 				 __btf_name_by_offset(btf, enums[i].name_off),
4361 				 enums[i].val);
4362 	}
4363 
4364 	return meta_needed;
4365 }
4366 
4367 static void btf_enum_log(struct btf_verifier_env *env,
4368 			 const struct btf_type *t)
4369 {
4370 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4371 }
4372 
4373 static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
4374 			  u32 type_id, void *data, u8 bits_offset,
4375 			  struct btf_show *show)
4376 {
4377 	const struct btf_enum *enums = btf_type_enum(t);
4378 	u32 i, nr_enums = btf_type_vlen(t);
4379 	void *safe_data;
4380 	int v;
4381 
4382 	safe_data = btf_show_start_type(show, t, type_id, data);
4383 	if (!safe_data)
4384 		return;
4385 
4386 	v = *(int *)safe_data;
4387 
4388 	for (i = 0; i < nr_enums; i++) {
4389 		if (v != enums[i].val)
4390 			continue;
4391 
4392 		btf_show_type_value(show, "%s",
4393 				    __btf_name_by_offset(btf,
4394 							 enums[i].name_off));
4395 
4396 		btf_show_end_type(show);
4397 		return;
4398 	}
4399 
4400 	if (btf_type_kflag(t))
4401 		btf_show_type_value(show, "%d", v);
4402 	else
4403 		btf_show_type_value(show, "%u", v);
4404 	btf_show_end_type(show);
4405 }
4406 
4407 static const struct btf_kind_operations enum_ops = {
4408 	.check_meta = btf_enum_check_meta,
4409 	.resolve = btf_df_resolve,
4410 	.check_member = btf_enum_check_member,
4411 	.check_kflag_member = btf_enum_check_kflag_member,
4412 	.log_details = btf_enum_log,
4413 	.show = btf_enum_show,
4414 };
4415 
4416 static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
4417 				 const struct btf_type *t,
4418 				 u32 meta_left)
4419 {
4420 	const struct btf_enum64 *enums = btf_type_enum64(t);
4421 	struct btf *btf = env->btf;
4422 	const char *fmt_str;
4423 	u16 i, nr_enums;
4424 	u32 meta_needed;
4425 
4426 	nr_enums = btf_type_vlen(t);
4427 	meta_needed = nr_enums * sizeof(*enums);
4428 
4429 	if (meta_left < meta_needed) {
4430 		btf_verifier_log_basic(env, t,
4431 				       "meta_left:%u meta_needed:%u",
4432 				       meta_left, meta_needed);
4433 		return -EINVAL;
4434 	}
4435 
4436 	if (t->size > 8 || !is_power_of_2(t->size)) {
4437 		btf_verifier_log_type(env, t, "Unexpected size");
4438 		return -EINVAL;
4439 	}
4440 
4441 	/* enum type either no name or a valid one */
4442 	if (t->name_off &&
4443 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4444 		btf_verifier_log_type(env, t, "Invalid name");
4445 		return -EINVAL;
4446 	}
4447 
4448 	btf_verifier_log_type(env, t, NULL);
4449 
4450 	for (i = 0; i < nr_enums; i++) {
4451 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4452 			btf_verifier_log(env, "\tInvalid name_offset:%u",
4453 					 enums[i].name_off);
4454 			return -EINVAL;
4455 		}
4456 
4457 		/* enum member must have a valid name */
4458 		if (!enums[i].name_off ||
4459 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4460 			btf_verifier_log_type(env, t, "Invalid name");
4461 			return -EINVAL;
4462 		}
4463 
4464 		if (env->log.level == BPF_LOG_KERNEL)
4465 			continue;
4466 
4467 		fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
4468 		btf_verifier_log(env, fmt_str,
4469 				 __btf_name_by_offset(btf, enums[i].name_off),
4470 				 btf_enum64_value(enums + i));
4471 	}
4472 
4473 	return meta_needed;
4474 }
4475 
4476 static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
4477 			    u32 type_id, void *data, u8 bits_offset,
4478 			    struct btf_show *show)
4479 {
4480 	const struct btf_enum64 *enums = btf_type_enum64(t);
4481 	u32 i, nr_enums = btf_type_vlen(t);
4482 	void *safe_data;
4483 	s64 v;
4484 
4485 	safe_data = btf_show_start_type(show, t, type_id, data);
4486 	if (!safe_data)
4487 		return;
4488 
4489 	v = *(u64 *)safe_data;
4490 
4491 	for (i = 0; i < nr_enums; i++) {
4492 		if (v != btf_enum64_value(enums + i))
4493 			continue;
4494 
4495 		btf_show_type_value(show, "%s",
4496 				    __btf_name_by_offset(btf,
4497 							 enums[i].name_off));
4498 
4499 		btf_show_end_type(show);
4500 		return;
4501 	}
4502 
4503 	if (btf_type_kflag(t))
4504 		btf_show_type_value(show, "%lld", v);
4505 	else
4506 		btf_show_type_value(show, "%llu", v);
4507 	btf_show_end_type(show);
4508 }
4509 
4510 static const struct btf_kind_operations enum64_ops = {
4511 	.check_meta = btf_enum64_check_meta,
4512 	.resolve = btf_df_resolve,
4513 	.check_member = btf_enum_check_member,
4514 	.check_kflag_member = btf_enum_check_kflag_member,
4515 	.log_details = btf_enum_log,
4516 	.show = btf_enum64_show,
4517 };
4518 
4519 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
4520 				     const struct btf_type *t,
4521 				     u32 meta_left)
4522 {
4523 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
4524 
4525 	if (meta_left < meta_needed) {
4526 		btf_verifier_log_basic(env, t,
4527 				       "meta_left:%u meta_needed:%u",
4528 				       meta_left, meta_needed);
4529 		return -EINVAL;
4530 	}
4531 
4532 	if (t->name_off) {
4533 		btf_verifier_log_type(env, t, "Invalid name");
4534 		return -EINVAL;
4535 	}
4536 
4537 	if (btf_type_kflag(t)) {
4538 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4539 		return -EINVAL;
4540 	}
4541 
4542 	btf_verifier_log_type(env, t, NULL);
4543 
4544 	return meta_needed;
4545 }
4546 
4547 static void btf_func_proto_log(struct btf_verifier_env *env,
4548 			       const struct btf_type *t)
4549 {
4550 	const struct btf_param *args = (const struct btf_param *)(t + 1);
4551 	u16 nr_args = btf_type_vlen(t), i;
4552 
4553 	btf_verifier_log(env, "return=%u args=(", t->type);
4554 	if (!nr_args) {
4555 		btf_verifier_log(env, "void");
4556 		goto done;
4557 	}
4558 
4559 	if (nr_args == 1 && !args[0].type) {
4560 		/* Only one vararg */
4561 		btf_verifier_log(env, "vararg");
4562 		goto done;
4563 	}
4564 
4565 	btf_verifier_log(env, "%u %s", args[0].type,
4566 			 __btf_name_by_offset(env->btf,
4567 					      args[0].name_off));
4568 	for (i = 1; i < nr_args - 1; i++)
4569 		btf_verifier_log(env, ", %u %s", args[i].type,
4570 				 __btf_name_by_offset(env->btf,
4571 						      args[i].name_off));
4572 
4573 	if (nr_args > 1) {
4574 		const struct btf_param *last_arg = &args[nr_args - 1];
4575 
4576 		if (last_arg->type)
4577 			btf_verifier_log(env, ", %u %s", last_arg->type,
4578 					 __btf_name_by_offset(env->btf,
4579 							      last_arg->name_off));
4580 		else
4581 			btf_verifier_log(env, ", vararg");
4582 	}
4583 
4584 done:
4585 	btf_verifier_log(env, ")");
4586 }
4587 
4588 static const struct btf_kind_operations func_proto_ops = {
4589 	.check_meta = btf_func_proto_check_meta,
4590 	.resolve = btf_df_resolve,
4591 	/*
4592 	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
4593 	 * a struct's member.
4594 	 *
4595 	 * It should be a function pointer instead.
4596 	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4597 	 *
4598 	 * Hence, there is no btf_func_check_member().
4599 	 */
4600 	.check_member = btf_df_check_member,
4601 	.check_kflag_member = btf_df_check_kflag_member,
4602 	.log_details = btf_func_proto_log,
4603 	.show = btf_df_show,
4604 };
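/* Shape described by the comment above: a struct member that is a function
 * pointer, e.g.
 *   struct ops { int (*handler)(int); };
 * is encoded as STRUCT -> member -> PTR -> FUNC_PROTO, so the member check
 * is handled by the PTR kind and func_proto_ops can leave check_member as
 * btf_df_check_member (which simply rejects).
 */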
4605 
4606 static s32 btf_func_check_meta(struct btf_verifier_env *env,
4607 			       const struct btf_type *t,
4608 			       u32 meta_left)
4609 {
4610 	if (!t->name_off ||
4611 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4612 		btf_verifier_log_type(env, t, "Invalid name");
4613 		return -EINVAL;
4614 	}
4615 
4616 	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
4617 		btf_verifier_log_type(env, t, "Invalid func linkage");
4618 		return -EINVAL;
4619 	}
4620 
4621 	if (btf_type_kflag(t)) {
4622 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4623 		return -EINVAL;
4624 	}
4625 
4626 	btf_verifier_log_type(env, t, NULL);
4627 
4628 	return 0;
4629 }
4630 
4631 static int btf_func_resolve(struct btf_verifier_env *env,
4632 			    const struct resolve_vertex *v)
4633 {
4634 	const struct btf_type *t = v->t;
4635 	u32 next_type_id = t->type;
4636 	int err;
4637 
4638 	err = btf_func_check(env, t);
4639 	if (err)
4640 		return err;
4641 
4642 	env_stack_pop_resolved(env, next_type_id, 0);
4643 	return 0;
4644 }
4645 
4646 static const struct btf_kind_operations func_ops = {
4647 	.check_meta = btf_func_check_meta,
4648 	.resolve = btf_func_resolve,
4649 	.check_member = btf_df_check_member,
4650 	.check_kflag_member = btf_df_check_kflag_member,
4651 	.log_details = btf_ref_type_log,
4652 	.show = btf_df_show,
4653 };
4654 
4655 static s32 btf_var_check_meta(struct btf_verifier_env *env,
4656 			      const struct btf_type *t,
4657 			      u32 meta_left)
4658 {
4659 	const struct btf_var *var;
4660 	u32 meta_needed = sizeof(*var);
4661 
4662 	if (meta_left < meta_needed) {
4663 		btf_verifier_log_basic(env, t,
4664 				       "meta_left:%u meta_needed:%u",
4665 				       meta_left, meta_needed);
4666 		return -EINVAL;
4667 	}
4668 
4669 	if (btf_type_vlen(t)) {
4670 		btf_verifier_log_type(env, t, "vlen != 0");
4671 		return -EINVAL;
4672 	}
4673 
4674 	if (btf_type_kflag(t)) {
4675 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4676 		return -EINVAL;
4677 	}
4678 
4679 	if (!t->name_off ||
4680 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4681 		btf_verifier_log_type(env, t, "Invalid name");
4682 		return -EINVAL;
4683 	}
4684 
4685 	/* A var cannot be in type void */
4686 	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4687 		btf_verifier_log_type(env, t, "Invalid type_id");
4688 		return -EINVAL;
4689 	}
4690 
4691 	var = btf_type_var(t);
4692 	if (var->linkage != BTF_VAR_STATIC &&
4693 	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4694 		btf_verifier_log_type(env, t, "Linkage not supported");
4695 		return -EINVAL;
4696 	}
4697 
4698 	btf_verifier_log_type(env, t, NULL);
4699 
4700 	return meta_needed;
4701 }
4702 
4703 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4704 {
4705 	const struct btf_var *var = btf_type_var(t);
4706 
4707 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4708 }
4709 
4710 static const struct btf_kind_operations var_ops = {
4711 	.check_meta		= btf_var_check_meta,
4712 	.resolve		= btf_var_resolve,
4713 	.check_member		= btf_df_check_member,
4714 	.check_kflag_member	= btf_df_check_kflag_member,
4715 	.log_details		= btf_var_log,
4716 	.show			= btf_var_show,
4717 };
4718 
4719 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
4720 				  const struct btf_type *t,
4721 				  u32 meta_left)
4722 {
4723 	const struct btf_var_secinfo *vsi;
4724 	u64 last_vsi_end_off = 0, sum = 0;
4725 	u32 i, meta_needed;
4726 
4727 	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
4728 	if (meta_left < meta_needed) {
4729 		btf_verifier_log_basic(env, t,
4730 				       "meta_left:%u meta_needed:%u",
4731 				       meta_left, meta_needed);
4732 		return -EINVAL;
4733 	}
4734 
4735 	if (!t->size) {
4736 		btf_verifier_log_type(env, t, "size == 0");
4737 		return -EINVAL;
4738 	}
4739 
4740 	if (btf_type_kflag(t)) {
4741 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4742 		return -EINVAL;
4743 	}
4744 
4745 	if (!t->name_off ||
4746 	    !btf_name_valid_section(env->btf, t->name_off)) {
4747 		btf_verifier_log_type(env, t, "Invalid name");
4748 		return -EINVAL;
4749 	}
4750 
4751 	btf_verifier_log_type(env, t, NULL);
4752 
4753 	for_each_vsi(i, t, vsi) {
4754 		/* A var cannot be in type void */
4755 		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4756 			btf_verifier_log_vsi(env, t, vsi,
4757 					     "Invalid type_id");
4758 			return -EINVAL;
4759 		}
4760 
4761 		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4762 			btf_verifier_log_vsi(env, t, vsi,
4763 					     "Invalid offset");
4764 			return -EINVAL;
4765 		}
4766 
4767 		if (!vsi->size || vsi->size > t->size) {
4768 			btf_verifier_log_vsi(env, t, vsi,
4769 					     "Invalid size");
4770 			return -EINVAL;
4771 		}
4772 
4773 		last_vsi_end_off = vsi->offset + vsi->size;
4774 		if (last_vsi_end_off > t->size) {
4775 			btf_verifier_log_vsi(env, t, vsi,
4776 					     "Invalid offset+size");
4777 			return -EINVAL;
4778 		}
4779 
4780 		btf_verifier_log_vsi(env, t, vsi, NULL);
4781 		sum += vsi->size;
4782 	}
4783 
4784 	if (t->size < sum) {
4785 		btf_verifier_log_type(env, t, "Invalid btf_info size");
4786 		return -EINVAL;
4787 	}
4788 
4789 	return meta_needed;
4790 }
4791 
4792 static int btf_datasec_resolve(struct btf_verifier_env *env,
4793 			       const struct resolve_vertex *v)
4794 {
4795 	const struct btf_var_secinfo *vsi;
4796 	struct btf *btf = env->btf;
4797 	u16 i;
4798 
4799 	env->resolve_mode = RESOLVE_TBD;
4800 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
4801 		u32 var_type_id = vsi->type, type_id, type_size = 0;
4802 		const struct btf_type *var_type = btf_type_by_id(env->btf,
4803 								 var_type_id);
4804 		if (!var_type || !btf_type_is_var(var_type)) {
4805 			btf_verifier_log_vsi(env, v->t, vsi,
4806 					     "Not a VAR kind member");
4807 			return -EINVAL;
4808 		}
4809 
4810 		if (!env_type_is_resolve_sink(env, var_type) &&
4811 		    !env_type_is_resolved(env, var_type_id)) {
4812 			env_stack_set_next_member(env, i + 1);
4813 			return env_stack_push(env, var_type, var_type_id);
4814 		}
4815 
4816 		type_id = var_type->type;
4817 		if (!btf_type_id_size(btf, &type_id, &type_size)) {
4818 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4819 			return -EINVAL;
4820 		}
4821 
4822 		if (vsi->size < type_size) {
4823 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4824 			return -EINVAL;
4825 		}
4826 	}
4827 
4828 	env_stack_pop_resolved(env, 0, 0);
4829 	return 0;
4830 }
4831 
4832 static void btf_datasec_log(struct btf_verifier_env *env,
4833 			    const struct btf_type *t)
4834 {
4835 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4836 }
4837 
4838 static void btf_datasec_show(const struct btf *btf,
4839 			     const struct btf_type *t, u32 type_id,
4840 			     void *data, u8 bits_offset,
4841 			     struct btf_show *show)
4842 {
4843 	const struct btf_var_secinfo *vsi;
4844 	const struct btf_type *var;
4845 	u32 i;
4846 
4847 	if (!btf_show_start_type(show, t, type_id, data))
4848 		return;
4849 
4850 	btf_show_type_value(show, "section (\"%s\") = {",
4851 			    __btf_name_by_offset(btf, t->name_off));
4852 	for_each_vsi(i, t, vsi) {
4853 		var = btf_type_by_id(btf, vsi->type);
4854 		if (i)
4855 			btf_show(show, ",");
4856 		btf_type_ops(var)->show(btf, var, vsi->type,
4857 					data + vsi->offset, bits_offset, show);
4858 	}
4859 	btf_show_end_type(show);
4860 }
4861 
4862 static const struct btf_kind_operations datasec_ops = {
4863 	.check_meta		= btf_datasec_check_meta,
4864 	.resolve		= btf_datasec_resolve,
4865 	.check_member		= btf_df_check_member,
4866 	.check_kflag_member	= btf_df_check_kflag_member,
4867 	.log_details		= btf_datasec_log,
4868 	.show			= btf_datasec_show,
4869 };
4870 
4871 static s32 btf_float_check_meta(struct btf_verifier_env *env,
4872 				const struct btf_type *t,
4873 				u32 meta_left)
4874 {
4875 	if (btf_type_vlen(t)) {
4876 		btf_verifier_log_type(env, t, "vlen != 0");
4877 		return -EINVAL;
4878 	}
4879 
4880 	if (btf_type_kflag(t)) {
4881 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4882 		return -EINVAL;
4883 	}
4884 
4885 	if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4886 	    t->size != 16) {
4887 		btf_verifier_log_type(env, t, "Invalid type_size");
4888 		return -EINVAL;
4889 	}
4890 
4891 	btf_verifier_log_type(env, t, NULL);
4892 
4893 	return 0;
4894 }
4895 
4896 static int btf_float_check_member(struct btf_verifier_env *env,
4897 				  const struct btf_type *struct_type,
4898 				  const struct btf_member *member,
4899 				  const struct btf_type *member_type)
4900 {
4901 	u64 start_offset_bytes;
4902 	u64 end_offset_bytes;
4903 	u64 misalign_bits;
4904 	u64 align_bytes;
4905 	u64 align_bits;
4906 
4907 	/* Different architectures have different alignment requirements, so
4908 	 * here we check only for the reasonable minimum. This way we ensure
4909 	 * that types after CO-RE can pass the kernel BTF verifier.
4910 	 */
4911 	align_bytes = min_t(u64, sizeof(void *), member_type->size);
4912 	align_bits = align_bytes * BITS_PER_BYTE;
4913 	div64_u64_rem(member->offset, align_bits, &misalign_bits);
4914 	if (misalign_bits) {
4915 		btf_verifier_log_member(env, struct_type, member,
4916 					"Member is not properly aligned");
4917 		return -EINVAL;
4918 	}
4919 
4920 	start_offset_bytes = member->offset / BITS_PER_BYTE;
4921 	end_offset_bytes = start_offset_bytes + member_type->size;
4922 	if (end_offset_bytes > struct_type->size) {
4923 		btf_verifier_log_member(env, struct_type, member,
4924 					"Member exceeds struct_size");
4925 		return -EINVAL;
4926 	}
4927 
4928 	return 0;
4929 }
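/* Worked instance of the minimum-alignment rule above: for a 16-byte
 * long double member on a 64-bit kernel, align_bytes = min(8, 16) = 8, so
 * the offset only needs 64-bit alignment even where the ABI would align the
 * type to 16 bytes; a 4-byte float member only needs 32-bit alignment.
 */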
4930 
4931 static void btf_float_log(struct btf_verifier_env *env,
4932 			  const struct btf_type *t)
4933 {
4934 	btf_verifier_log(env, "size=%u", t->size);
4935 }
4936 
4937 static const struct btf_kind_operations float_ops = {
4938 	.check_meta = btf_float_check_meta,
4939 	.resolve = btf_df_resolve,
4940 	.check_member = btf_float_check_member,
4941 	.check_kflag_member = btf_generic_check_kflag_member,
4942 	.log_details = btf_float_log,
4943 	.show = btf_df_show,
4944 };
4945 
4946 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
4947 			      const struct btf_type *t,
4948 			      u32 meta_left)
4949 {
4950 	const struct btf_decl_tag *tag;
4951 	u32 meta_needed = sizeof(*tag);
4952 	s32 component_idx;
4953 	const char *value;
4954 
4955 	if (meta_left < meta_needed) {
4956 		btf_verifier_log_basic(env, t,
4957 				       "meta_left:%u meta_needed:%u",
4958 				       meta_left, meta_needed);
4959 		return -EINVAL;
4960 	}
4961 
4962 	value = btf_name_by_offset(env->btf, t->name_off);
4963 	if (!value || !value[0]) {
4964 		btf_verifier_log_type(env, t, "Invalid value");
4965 		return -EINVAL;
4966 	}
4967 
4968 	if (btf_type_vlen(t)) {
4969 		btf_verifier_log_type(env, t, "vlen != 0");
4970 		return -EINVAL;
4971 	}
4972 
4973 	component_idx = btf_type_decl_tag(t)->component_idx;
4974 	if (component_idx < -1) {
4975 		btf_verifier_log_type(env, t, "Invalid component_idx");
4976 		return -EINVAL;
4977 	}
4978 
4979 	btf_verifier_log_type(env, t, NULL);
4980 
4981 	return meta_needed;
4982 }
4983 
4984 static int btf_decl_tag_resolve(struct btf_verifier_env *env,
4985 			   const struct resolve_vertex *v)
4986 {
4987 	const struct btf_type *next_type;
4988 	const struct btf_type *t = v->t;
4989 	u32 next_type_id = t->type;
4990 	struct btf *btf = env->btf;
4991 	s32 component_idx;
4992 	u32 vlen;
4993 
4994 	next_type = btf_type_by_id(btf, next_type_id);
4995 	if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
4996 		btf_verifier_log_type(env, v->t, "Invalid type_id");
4997 		return -EINVAL;
4998 	}
4999 
5000 	if (!env_type_is_resolve_sink(env, next_type) &&
5001 	    !env_type_is_resolved(env, next_type_id))
5002 		return env_stack_push(env, next_type, next_type_id);
5003 
5004 	component_idx = btf_type_decl_tag(t)->component_idx;
5005 	if (component_idx != -1) {
5006 		if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
5007 			btf_verifier_log_type(env, v->t, "Invalid component_idx");
5008 			return -EINVAL;
5009 		}
5010 
5011 		if (btf_type_is_struct(next_type)) {
5012 			vlen = btf_type_vlen(next_type);
5013 		} else {
5014 			/* next_type should be a function */
5015 			next_type = btf_type_by_id(btf, next_type->type);
5016 			vlen = btf_type_vlen(next_type);
5017 		}
5018 
5019 		if ((u32)component_idx >= vlen) {
5020 			btf_verifier_log_type(env, v->t, "Invalid component_idx");
5021 			return -EINVAL;
5022 		}
5023 	}
5024 
5025 	env_stack_pop_resolved(env, next_type_id, 0);
5026 
5027 	return 0;
5028 }
5029 
5030 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
5031 {
5032 	btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5033 			 btf_type_decl_tag(t)->component_idx);
5034 }
5035 
5036 static const struct btf_kind_operations decl_tag_ops = {
5037 	.check_meta = btf_decl_tag_check_meta,
5038 	.resolve = btf_decl_tag_resolve,
5039 	.check_member = btf_df_check_member,
5040 	.check_kflag_member = btf_df_check_kflag_member,
5041 	.log_details = btf_decl_tag_log,
5042 	.show = btf_df_show,
5043 };
5044 
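/* A FUNC_PROTO describes a function signature.  For illustration, a
 * prototype like:
 *
 *	int blah(int a, ...);
 *
 * is encoded with two parameters, the last one having type_id == 0 and
 * no name to mark the vararg, which is why the check below drops a
 * trailing anonymous parameter before validating the rest.
 */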
5045 static int btf_func_proto_check(struct btf_verifier_env *env,
5046 				const struct btf_type *t)
5047 {
5048 	const struct btf_type *ret_type;
5049 	const struct btf_param *args;
5050 	const struct btf *btf;
5051 	u16 nr_args, i;
5052 	int err;
5053 
5054 	btf = env->btf;
5055 	args = (const struct btf_param *)(t + 1);
5056 	nr_args = btf_type_vlen(t);
5057 
5058 	/* Check func return type which could be "void" (t->type == 0) */
5059 	if (t->type) {
5060 		u32 ret_type_id = t->type;
5061 
5062 		ret_type = btf_type_by_id(btf, ret_type_id);
5063 		if (!ret_type) {
5064 			btf_verifier_log_type(env, t, "Invalid return type");
5065 			return -EINVAL;
5066 		}
5067 
5068 		if (btf_type_is_resolve_source_only(ret_type)) {
5069 			btf_verifier_log_type(env, t, "Invalid return type");
5070 			return -EINVAL;
5071 		}
5072 
5073 		if (btf_type_needs_resolve(ret_type) &&
5074 		    !env_type_is_resolved(env, ret_type_id)) {
5075 			err = btf_resolve(env, ret_type, ret_type_id);
5076 			if (err)
5077 				return err;
5078 		}
5079 
5080 		/* Ensure the return type is a type that has a size */
5081 		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
5082 			btf_verifier_log_type(env, t, "Invalid return type");
5083 			return -EINVAL;
5084 		}
5085 	}
5086 
5087 	if (!nr_args)
5088 		return 0;
5089 
5090 	/* Last func arg type_id could be 0 if it is a vararg */
5091 	if (!args[nr_args - 1].type) {
5092 		if (args[nr_args - 1].name_off) {
5093 			btf_verifier_log_type(env, t, "Invalid arg#%u",
5094 					      nr_args);
5095 			return -EINVAL;
5096 		}
5097 		nr_args--;
5098 	}
5099 
5100 	for (i = 0; i < nr_args; i++) {
5101 		const struct btf_type *arg_type;
5102 		u32 arg_type_id;
5103 
5104 		arg_type_id = args[i].type;
5105 		arg_type = btf_type_by_id(btf, arg_type_id);
5106 		if (!arg_type) {
5107 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5108 			return -EINVAL;
5109 		}
5110 
5111 		if (btf_type_is_resolve_source_only(arg_type)) {
5112 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5113 			return -EINVAL;
5114 		}
5115 
5116 		if (args[i].name_off &&
5117 		    (!btf_name_offset_valid(btf, args[i].name_off) ||
5118 		     !btf_name_valid_identifier(btf, args[i].name_off))) {
5119 			btf_verifier_log_type(env, t,
5120 					      "Invalid arg#%u", i + 1);
5121 			return -EINVAL;
5122 		}
5123 
5124 		if (btf_type_needs_resolve(arg_type) &&
5125 		    !env_type_is_resolved(env, arg_type_id)) {
5126 			err = btf_resolve(env, arg_type, arg_type_id);
5127 			if (err)
5128 				return err;
5129 		}
5130 
5131 		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
5132 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5133 			return -EINVAL;
5134 		}
5135 	}
5136 
5137 	return 0;
5138 }
5139 
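/* A FUNC ('type' points at a FUNC_PROTO) is only valid if every typed
 * parameter of that prototype is named; e.g. "int blah(int)" is fine as
 * a bare prototype but not as the signature of a defined BPF function.
 */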
5140 static int btf_func_check(struct btf_verifier_env *env,
5141 			  const struct btf_type *t)
5142 {
5143 	const struct btf_type *proto_type;
5144 	const struct btf_param *args;
5145 	const struct btf *btf;
5146 	u16 nr_args, i;
5147 
5148 	btf = env->btf;
5149 	proto_type = btf_type_by_id(btf, t->type);
5150 
5151 	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
5152 		btf_verifier_log_type(env, t, "Invalid type_id");
5153 		return -EINVAL;
5154 	}
5155 
5156 	args = (const struct btf_param *)(proto_type + 1);
5157 	nr_args = btf_type_vlen(proto_type);
5158 	for (i = 0; i < nr_args; i++) {
5159 		if (!args[i].name_off && args[i].type) {
5160 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5161 			return -EINVAL;
5162 		}
5163 	}
5164 
5165 	return 0;
5166 }
5167 
5168 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
5169 	[BTF_KIND_INT] = &int_ops,
5170 	[BTF_KIND_PTR] = &ptr_ops,
5171 	[BTF_KIND_ARRAY] = &array_ops,
5172 	[BTF_KIND_STRUCT] = &struct_ops,
5173 	[BTF_KIND_UNION] = &struct_ops,
5174 	[BTF_KIND_ENUM] = &enum_ops,
5175 	[BTF_KIND_FWD] = &fwd_ops,
5176 	[BTF_KIND_TYPEDEF] = &modifier_ops,
5177 	[BTF_KIND_VOLATILE] = &modifier_ops,
5178 	[BTF_KIND_CONST] = &modifier_ops,
5179 	[BTF_KIND_RESTRICT] = &modifier_ops,
5180 	[BTF_KIND_FUNC] = &func_ops,
5181 	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
5182 	[BTF_KIND_VAR] = &var_ops,
5183 	[BTF_KIND_DATASEC] = &datasec_ops,
5184 	[BTF_KIND_FLOAT] = &float_ops,
5185 	[BTF_KIND_DECL_TAG] = &decl_tag_ops,
5186 	[BTF_KIND_TYPE_TAG] = &modifier_ops,
5187 	[BTF_KIND_ENUM64] = &enum64_ops,
5188 };
5189 
5190 static s32 btf_check_meta(struct btf_verifier_env *env,
5191 			  const struct btf_type *t,
5192 			  u32 meta_left)
5193 {
5194 	u32 saved_meta_left = meta_left;
5195 	s32 var_meta_size;
5196 
5197 	if (meta_left < sizeof(*t)) {
5198 		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
5199 				 env->log_type_id, meta_left, sizeof(*t));
5200 		return -EINVAL;
5201 	}
5202 	meta_left -= sizeof(*t);
5203 
5204 	if (t->info & ~BTF_INFO_MASK) {
5205 		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
5206 				 env->log_type_id, t->info);
5207 		return -EINVAL;
5208 	}
5209 
5210 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
5211 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
5212 		btf_verifier_log(env, "[%u] Invalid kind:%u",
5213 				 env->log_type_id, BTF_INFO_KIND(t->info));
5214 		return -EINVAL;
5215 	}
5216 
5217 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
5218 		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
5219 				 env->log_type_id, t->name_off);
5220 		return -EINVAL;
5221 	}
5222 
5223 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
5224 	if (var_meta_size < 0)
5225 		return var_meta_size;
5226 
5227 	meta_left -= var_meta_size;
5228 
5229 	return saved_meta_left - meta_left;
5230 }
5231 
5232 static int btf_check_all_metas(struct btf_verifier_env *env)
5233 {
5234 	struct btf *btf = env->btf;
5235 	struct btf_header *hdr;
5236 	void *cur, *end;
5237 
5238 	hdr = &btf->hdr;
5239 	cur = btf->nohdr_data + hdr->type_off;
5240 	end = cur + hdr->type_len;
5241 
5242 	env->log_type_id = btf->base_btf ? btf->start_id : 1;
5243 	while (cur < end) {
5244 		struct btf_type *t = cur;
5245 		s32 meta_size;
5246 
5247 		meta_size = btf_check_meta(env, t, end - cur);
5248 		if (meta_size < 0)
5249 			return meta_size;
5250 
5251 		btf_add_type(env, t);
5252 		cur += meta_size;
5253 		env->log_type_id++;
5254 	}
5255 
5256 	return 0;
5257 }
5258 
5259 static bool btf_resolve_valid(struct btf_verifier_env *env,
5260 			      const struct btf_type *t,
5261 			      u32 type_id)
5262 {
5263 	struct btf *btf = env->btf;
5264 
5265 	if (!env_type_is_resolved(env, type_id))
5266 		return false;
5267 
5268 	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
5269 		return !btf_resolved_type_id(btf, type_id) &&
5270 		       !btf_resolved_type_size(btf, type_id);
5271 
5272 	if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
5273 		return btf_resolved_type_id(btf, type_id) &&
5274 		       !btf_resolved_type_size(btf, type_id);
5275 
5276 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
5277 	    btf_type_is_var(t)) {
5278 		t = btf_type_id_resolve(btf, &type_id);
5279 		return t &&
5280 		       !btf_type_is_modifier(t) &&
5281 		       !btf_type_is_var(t) &&
5282 		       !btf_type_is_datasec(t);
5283 	}
5284 
5285 	if (btf_type_is_array(t)) {
5286 		const struct btf_array *array = btf_type_array(t);
5287 		const struct btf_type *elem_type;
5288 		u32 elem_type_id = array->type;
5289 		u32 elem_size;
5290 
5291 		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
5292 		return elem_type && !btf_type_is_modifier(elem_type) &&
5293 			(array->nelems * elem_size ==
5294 			 btf_resolved_type_size(btf, type_id));
5295 	}
5296 
5297 	return false;
5298 }
5299 
5300 static int btf_resolve(struct btf_verifier_env *env,
5301 		       const struct btf_type *t, u32 type_id)
5302 {
5303 	u32 save_log_type_id = env->log_type_id;
5304 	const struct resolve_vertex *v;
5305 	int err = 0;
5306 
5307 	env->resolve_mode = RESOLVE_TBD;
5308 	env_stack_push(env, t, type_id);
5309 	while (!err && (v = env_stack_peak(env))) {
5310 		env->log_type_id = v->type_id;
5311 		err = btf_type_ops(v->t)->resolve(env, v);
5312 	}
5313 
5314 	env->log_type_id = type_id;
5315 	if (err == -E2BIG) {
5316 		btf_verifier_log_type(env, t,
5317 				      "Exceeded max resolving depth:%u",
5318 				      MAX_RESOLVE_DEPTH);
5319 	} else if (err == -EEXIST) {
5320 		btf_verifier_log_type(env, t, "Loop detected");
5321 	}
5322 
5323 	/* Final sanity check */
5324 	if (!err && !btf_resolve_valid(env, t, type_id)) {
5325 		btf_verifier_log_type(env, t, "Invalid resolve state");
5326 		err = -EINVAL;
5327 	}
5328 
5329 	env->log_type_id = save_log_type_id;
5330 	return err;
5331 }
5332 
5333 static int btf_check_all_types(struct btf_verifier_env *env)
5334 {
5335 	struct btf *btf = env->btf;
5336 	const struct btf_type *t;
5337 	u32 type_id, i;
5338 	int err;
5339 
5340 	err = env_resolve_init(env);
5341 	if (err)
5342 		return err;
5343 
5344 	env->phase++;
5345 	for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5346 		type_id = btf->start_id + i;
5347 		t = btf_type_by_id(btf, type_id);
5348 
5349 		env->log_type_id = type_id;
5350 		if (btf_type_needs_resolve(t) &&
5351 		    !env_type_is_resolved(env, type_id)) {
5352 			err = btf_resolve(env, t, type_id);
5353 			if (err)
5354 				return err;
5355 		}
5356 
5357 		if (btf_type_is_func_proto(t)) {
5358 			err = btf_func_proto_check(env, t);
5359 			if (err)
5360 				return err;
5361 		}
5362 	}
5363 
5364 	return 0;
5365 }
5366 
5367 static int btf_parse_type_sec(struct btf_verifier_env *env)
5368 {
5369 	const struct btf_header *hdr = &env->btf->hdr;
5370 	int err;
5371 
5372 	/* Type section must align to 4 bytes */
5373 	if (hdr->type_off & (sizeof(u32) - 1)) {
5374 		btf_verifier_log(env, "Unaligned type_off");
5375 		return -EINVAL;
5376 	}
5377 
5378 	if (!env->btf->base_btf && !hdr->type_len) {
5379 		btf_verifier_log(env, "No type found");
5380 		return -EINVAL;
5381 	}
5382 
5383 	err = btf_check_all_metas(env);
5384 	if (err)
5385 		return err;
5386 
5387 	return btf_check_all_types(env);
5388 }
5389 
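/* The string section is a concatenation of NUL-terminated strings that
 * (for base BTF) always starts with an empty string, e.g.:
 *
 *	\0int\0char\0foo\0...
 *
 * so name_off 0 means "no name".  The checks below enforce that the
 * section is placed last in the blob, stays within BTF_MAX_NAME_OFFSET
 * and is properly NUL-terminated.
 */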
5390 static int btf_parse_str_sec(struct btf_verifier_env *env)
5391 {
5392 	const struct btf_header *hdr;
5393 	struct btf *btf = env->btf;
5394 	const char *start, *end;
5395 
5396 	hdr = &btf->hdr;
5397 	start = btf->nohdr_data + hdr->str_off;
5398 	end = start + hdr->str_len;
5399 
5400 	if (end != btf->data + btf->data_size) {
5401 		btf_verifier_log(env, "String section is not at the end");
5402 		return -EINVAL;
5403 	}
5404 
5405 	btf->strings = start;
5406 
5407 	if (btf->base_btf && !hdr->str_len)
5408 		return 0;
5409 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5410 		btf_verifier_log(env, "Invalid string section");
5411 		return -EINVAL;
5412 	}
5413 	if (!btf->base_btf && start[0]) {
5414 		btf_verifier_log(env, "Invalid string section");
5415 		return -EINVAL;
5416 	}
5417 
5418 	return 0;
5419 }
5420 
5421 static const size_t btf_sec_info_offset[] = {
5422 	offsetof(struct btf_header, type_off),
5423 	offsetof(struct btf_header, str_off),
5424 };
5425 
5426 static int btf_sec_info_cmp(const void *a, const void *b)
5427 {
5428 	const struct btf_sec_info *x = a;
5429 	const struct btf_sec_info *y = b;
5430 
5431 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5432 }
5433 
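/* The section offsets in the header are relative to the end of the
 * header.  A well-formed blob therefore tiles the remaining data with
 * no gaps or overlaps, e.g. (assuming hdr_len == sizeof(struct btf_header)):
 *
 *	+------------+--------------+----------------+
 *	| btf_header | type section | string section |
 *	+------------+--------------+----------------+
 *	0         hdr_len    hdr_len+type_len     data_size
 *
 * i.e. type_off == 0, str_off == type_len and
 * hdr_len + str_off + str_len == data_size.
 */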
5434 static int btf_check_sec_info(struct btf_verifier_env *env,
5435 			      u32 btf_data_size)
5436 {
5437 	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
5438 	u32 total, expected_total, i;
5439 	const struct btf_header *hdr;
5440 	const struct btf *btf;
5441 
5442 	btf = env->btf;
5443 	hdr = &btf->hdr;
5444 
5445 	/* Populate the secs from hdr */
5446 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
5447 		secs[i] = *(struct btf_sec_info *)((void *)hdr +
5448 						   btf_sec_info_offset[i]);
5449 
5450 	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
5451 	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5452 
5453 	/* Check for gaps and overlap among sections */
5454 	total = 0;
5455 	expected_total = btf_data_size - hdr->hdr_len;
5456 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
5457 		if (expected_total < secs[i].off) {
5458 			btf_verifier_log(env, "Invalid section offset");
5459 			return -EINVAL;
5460 		}
5461 		if (total < secs[i].off) {
5462 			/* gap */
5463 			btf_verifier_log(env, "Unsupported section found");
5464 			return -EINVAL;
5465 		}
5466 		if (total > secs[i].off) {
5467 			btf_verifier_log(env, "Section overlap found");
5468 			return -EINVAL;
5469 		}
5470 		if (expected_total - total < secs[i].len) {
5471 			btf_verifier_log(env,
5472 					 "Total section length too long");
5473 			return -EINVAL;
5474 		}
5475 		total += secs[i].len;
5476 	}
5477 
5478 	/* There is data other than hdr and known sections */
5479 	if (expected_total != total) {
5480 		btf_verifier_log(env, "Unsupported section found");
5481 		return -EINVAL;
5482 	}
5483 
5484 	return 0;
5485 }
5486 
5487 static int btf_parse_hdr(struct btf_verifier_env *env)
5488 {
5489 	u32 hdr_len, hdr_copy, btf_data_size;
5490 	const struct btf_header *hdr;
5491 	struct btf *btf;
5492 
5493 	btf = env->btf;
5494 	btf_data_size = btf->data_size;
5495 
5496 	if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
5497 		btf_verifier_log(env, "hdr_len not found");
5498 		return -EINVAL;
5499 	}
5500 
5501 	hdr = btf->data;
5502 	hdr_len = hdr->hdr_len;
5503 	if (btf_data_size < hdr_len) {
5504 		btf_verifier_log(env, "btf_header not found");
5505 		return -EINVAL;
5506 	}
5507 
5508 	/* Ensure the unsupported header fields are zero */
5509 	if (hdr_len > sizeof(btf->hdr)) {
5510 		u8 *expected_zero = btf->data + sizeof(btf->hdr);
5511 		u8 *end = btf->data + hdr_len;
5512 
5513 		for (; expected_zero < end; expected_zero++) {
5514 			if (*expected_zero) {
5515 				btf_verifier_log(env, "Unsupported btf_header");
5516 				return -E2BIG;
5517 			}
5518 		}
5519 	}
5520 
5521 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5522 	memcpy(&btf->hdr, btf->data, hdr_copy);
5523 
5524 	hdr = &btf->hdr;
5525 
5526 	btf_verifier_log_hdr(env, btf_data_size);
5527 
5528 	if (hdr->magic != BTF_MAGIC) {
5529 		btf_verifier_log(env, "Invalid magic");
5530 		return -EINVAL;
5531 	}
5532 
5533 	if (hdr->version != BTF_VERSION) {
5534 		btf_verifier_log(env, "Unsupported version");
5535 		return -ENOTSUPP;
5536 	}
5537 
5538 	if (hdr->flags) {
5539 		btf_verifier_log(env, "Unsupported flags");
5540 		return -ENOTSUPP;
5541 	}
5542 
5543 	if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5544 		btf_verifier_log(env, "No data");
5545 		return -EINVAL;
5546 	}
5547 
5548 	return btf_check_sec_info(env, btf_data_size);
5549 }
5550 
5551 static const char *alloc_obj_fields[] = {
5552 	"bpf_spin_lock",
5553 	"bpf_list_head",
5554 	"bpf_list_node",
5555 	"bpf_rb_root",
5556 	"bpf_rb_node",
5557 	"bpf_refcount",
5558 };
5559 
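/* btf_parse_struct_metas() records, for every program-defined struct
 * that embeds one of the special objects listed above (or a kptr), a
 * btf_record describing where those fields live.  For illustration, a
 * struct such as:
 *
 *	struct elem {
 *		struct bpf_spin_lock lock;
 *		long data;
 *	};
 *
 * would get a struct_meta entry so the verifier and allocator can later
 * find the special fields by btf_id and offset.
 */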
5560 static struct btf_struct_metas *
5561 btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
5562 {
5563 	struct btf_struct_metas *tab = NULL;
5564 	struct btf_id_set *aof;
5565 	int i, n, id, ret;
5566 
5567 	BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
5568 	BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
5569 
5570 	aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
5571 	if (!aof)
5572 		return ERR_PTR(-ENOMEM);
5573 	aof->cnt = 0;
5574 
5575 	for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
5576 		/* Try to find whether this special type exists in user BTF, and
5577 		 * if so remember its ID so we can easily find it among members
5578 		 * of structs that we iterate in the next loop.
5579 		 */
5580 		struct btf_id_set *new_aof;
5581 
5582 		id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5583 		if (id < 0)
5584 			continue;
5585 
5586 		new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
5587 				   GFP_KERNEL | __GFP_NOWARN);
5588 		if (!new_aof) {
5589 			ret = -ENOMEM;
5590 			goto free_aof;
5591 		}
5592 		aof = new_aof;
5593 		aof->ids[aof->cnt++] = id;
5594 	}
5595 
5596 	n = btf_nr_types(btf);
5597 	for (i = 1; i < n; i++) {
5598 		/* Try to find if there are kptrs in user BTF and remember their ID */
5599 		struct btf_id_set *new_aof;
5600 		struct btf_field_info tmp;
5601 		const struct btf_type *t;
5602 
5603 		t = btf_type_by_id(btf, i);
5604 		if (!t) {
5605 			ret = -EINVAL;
5606 			goto free_aof;
5607 		}
5608 
5609 		ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR);
5610 		if (ret != BTF_FIELD_FOUND)
5611 			continue;
5612 
5613 		new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
5614 				   GFP_KERNEL | __GFP_NOWARN);
5615 		if (!new_aof) {
5616 			ret = -ENOMEM;
5617 			goto free_aof;
5618 		}
5619 		aof = new_aof;
5620 		aof->ids[aof->cnt++] = i;
5621 	}
5622 
5623 	if (!aof->cnt) {
5624 		kfree(aof);
5625 		return NULL;
5626 	}
5627 	sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5628 
5629 	for (i = 1; i < n; i++) {
5630 		struct btf_struct_metas *new_tab;
5631 		const struct btf_member *member;
5632 		struct btf_struct_meta *type;
5633 		struct btf_record *record;
5634 		const struct btf_type *t;
5635 		int j, tab_cnt;
5636 
5637 		t = btf_type_by_id(btf, i);
5638 		if (!__btf_type_is_struct(t))
5639 			continue;
5640 
5641 		cond_resched();
5642 
5643 		for_each_member(j, t, member) {
5644 			if (btf_id_set_contains(aof, member->type))
5645 				goto parse;
5646 		}
5647 		continue;
5648 	parse:
5649 		tab_cnt = tab ? tab->cnt : 0;
5650 		new_tab = krealloc(tab, offsetof(struct btf_struct_metas, types[tab_cnt + 1]),
5651 				   GFP_KERNEL | __GFP_NOWARN);
5652 		if (!new_tab) {
5653 			ret = -ENOMEM;
5654 			goto free;
5655 		}
5656 		if (!tab)
5657 			new_tab->cnt = 0;
5658 		tab = new_tab;
5659 
5660 		type = &tab->types[tab->cnt];
5661 		type->btf_id = i;
5662 		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
5663 						  BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
5664 						  BPF_KPTR, t->size);
5665 		/* The record cannot be unset, treat it as an error if so */
5666 		if (IS_ERR_OR_NULL(record)) {
5667 			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5668 			goto free;
5669 		}
5670 		type->record = record;
5671 		tab->cnt++;
5672 	}
5673 	kfree(aof);
5674 	return tab;
5675 free:
5676 	btf_struct_metas_free(tab);
5677 free_aof:
5678 	kfree(aof);
5679 	return ERR_PTR(ret);
5680 }
5681 
5682 struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
5683 {
5684 	struct btf_struct_metas *tab;
5685 
5686 	BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0);
5687 	tab = btf->struct_meta_tab;
5688 	if (!tab)
5689 		return NULL;
5690 	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5691 }
5692 
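/* Within one chain of modifiers all type tags must come first, e.g.
 *
 *	valid:   TYPE_TAG -> TYPE_TAG -> CONST -> INT
 *	invalid: CONST -> TYPE_TAG -> INT
 *
 * which mirrors how clang emits __attribute__((btf_type_tag("..."))).
 * The chain walk below also bounds the modifier chain length to catch
 * cycles in malformed BTF.
 */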
5693 static int btf_check_type_tags(struct btf_verifier_env *env,
5694 			       struct btf *btf, int start_id)
5695 {
5696 	int i, n, good_id = start_id - 1;
5697 	bool in_tags;
5698 
5699 	n = btf_nr_types(btf);
5700 	for (i = start_id; i < n; i++) {
5701 		const struct btf_type *t;
5702 		int chain_limit = 32;
5703 		u32 cur_id = i;
5704 
5705 		t = btf_type_by_id(btf, i);
5706 		if (!t)
5707 			return -EINVAL;
5708 		if (!btf_type_is_modifier(t))
5709 			continue;
5710 
5711 		cond_resched();
5712 
5713 		in_tags = btf_type_is_type_tag(t);
5714 		while (btf_type_is_modifier(t)) {
5715 			if (!chain_limit--) {
5716 				btf_verifier_log(env, "Max chain length or cycle detected");
5717 				return -ELOOP;
5718 			}
5719 			if (btf_type_is_type_tag(t)) {
5720 				if (!in_tags) {
5721 					btf_verifier_log(env, "Type tags don't precede modifiers");
5722 					return -EINVAL;
5723 				}
5724 			} else if (in_tags) {
5725 				in_tags = false;
5726 			}
5727 			if (cur_id <= good_id)
5728 				break;
5729 			/* Move to next type */
5730 			cur_id = t->type;
5731 			t = btf_type_by_id(btf, cur_id);
5732 			if (!t)
5733 				return -EINVAL;
5734 		}
5735 		good_id = i;
5736 	}
5737 	return 0;
5738 }
5739 
5740 static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
5741 {
5742 	u32 log_true_size;
5743 	int err;
5744 
5745 	err = bpf_vlog_finalize(log, &log_true_size);
5746 
5747 	if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
5748 	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
5749 				  &log_true_size, sizeof(log_true_size)))
5750 		err = -EFAULT;
5751 
5752 	return err;
5753 }
5754 
5755 static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
5756 {
5757 	bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5758 	char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
5759 	struct btf_struct_metas *struct_meta_tab;
5760 	struct btf_verifier_env *env = NULL;
5761 	struct btf *btf = NULL;
5762 	u8 *data;
5763 	int err, ret;
5764 
5765 	if (attr->btf_size > BTF_MAX_SIZE)
5766 		return ERR_PTR(-E2BIG);
5767 
5768 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5769 	if (!env)
5770 		return ERR_PTR(-ENOMEM);
5771 
5772 	/* user could have requested verbose verifier output
5773 	 * and supplied buffer to store the verification trace
5774 	 */
5775 	err = bpf_vlog_init(&env->log, attr->btf_log_level,
5776 			    log_ubuf, attr->btf_log_size);
5777 	if (err)
5778 		goto errout_free;
5779 
5780 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5781 	if (!btf) {
5782 		err = -ENOMEM;
5783 		goto errout;
5784 	}
5785 	env->btf = btf;
5786 
5787 	data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5788 	if (!data) {
5789 		err = -ENOMEM;
5790 		goto errout;
5791 	}
5792 
5793 	btf->data = data;
5794 	btf->data_size = attr->btf_size;
5795 
5796 	if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5797 		err = -EFAULT;
5798 		goto errout;
5799 	}
5800 
5801 	err = btf_parse_hdr(env);
5802 	if (err)
5803 		goto errout;
5804 
5805 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5806 
5807 	err = btf_parse_str_sec(env);
5808 	if (err)
5809 		goto errout;
5810 
5811 	err = btf_parse_type_sec(env);
5812 	if (err)
5813 		goto errout;
5814 
5815 	err = btf_check_type_tags(env, btf, 1);
5816 	if (err)
5817 		goto errout;
5818 
5819 	struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
5820 	if (IS_ERR(struct_meta_tab)) {
5821 		err = PTR_ERR(struct_meta_tab);
5822 		goto errout;
5823 	}
5824 	btf->struct_meta_tab = struct_meta_tab;
5825 
5826 	if (struct_meta_tab) {
5827 		int i;
5828 
5829 		for (i = 0; i < struct_meta_tab->cnt; i++) {
5830 			err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5831 			if (err < 0)
5832 				goto errout_meta;
5833 		}
5834 	}
5835 
5836 	err = finalize_log(&env->log, uattr, uattr_size);
5837 	if (err)
5838 		goto errout_free;
5839 
5840 	btf_verifier_env_free(env);
5841 	refcount_set(&btf->refcnt, 1);
5842 	return btf;
5843 
5844 errout_meta:
5845 	btf_free_struct_meta_tab(btf);
5846 errout:
5847 	/* overwrite err with -ENOSPC or -EFAULT */
5848 	ret = finalize_log(&env->log, uattr, uattr_size);
5849 	if (ret)
5850 		err = ret;
5851 errout_free:
5852 	btf_verifier_env_free(env);
5853 	if (btf)
5854 		btf_free(btf);
5855 	return ERR_PTR(err);
5856 }
5857 
5858 extern char __start_BTF[];
5859 extern char __stop_BTF[];
5860 extern struct btf *btf_vmlinux;
5861 
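/* The anonymous union below pulls in, for every program type listed in
 * bpf_types.h, a pair of members: the UAPI context type the program
 * sees and the kernel type it is remapped to, e.g. for socket filters
 * the pair is (struct __sk_buff, struct sk_buff).  The empty
 * BPF_MAP_TYPE/BPF_LINK_TYPE defines just silence the other entries of
 * that header.  bpf_ctx_convert_map[] then maps a bpf_prog_type to the
 * index of its pair, which is why find_canonical_prog_ctx_type() and
 * find_kern_ctx_type_id() below pick members 2*i and 2*i + 1.
 */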
5862 #define BPF_MAP_TYPE(_id, _ops)
5863 #define BPF_LINK_TYPE(_id, _name)
5864 static union {
5865 	struct bpf_ctx_convert {
5866 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5867 	prog_ctx_type _id##_prog; \
5868 	kern_ctx_type _id##_kern;
5869 #include <linux/bpf_types.h>
5870 #undef BPF_PROG_TYPE
5871 	} *__t;
5872 	/* 't' is written once under lock. Read many times. */
5873 	const struct btf_type *t;
5874 } bpf_ctx_convert;
5875 enum {
5876 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5877 	__ctx_convert##_id,
5878 #include <linux/bpf_types.h>
5879 #undef BPF_PROG_TYPE
5880 	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
5881 };
5882 static u8 bpf_ctx_convert_map[] = {
5883 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5884 	[_id] = __ctx_convert##_id,
5885 #include <linux/bpf_types.h>
5886 #undef BPF_PROG_TYPE
5887 	0, /* avoid empty array */
5888 };
5889 #undef BPF_MAP_TYPE
5890 #undef BPF_LINK_TYPE
5891 
5892 static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
5893 {
5894 	const struct btf_type *conv_struct;
5895 	const struct btf_member *ctx_type;
5896 
5897 	conv_struct = bpf_ctx_convert.t;
5898 	if (!conv_struct)
5899 		return NULL;
5900 	/* prog_type is valid bpf program type. No need for bounds check. */
5901 	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5902 	/* ctx_type is a pointer to prog_ctx_type in vmlinux.
5903 	 * Like 'struct __sk_buff'
5904 	 */
5905 	return btf_type_by_id(btf_vmlinux, ctx_type->type);
5906 }
5907 
5908 static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
5909 {
5910 	const struct btf_type *conv_struct;
5911 	const struct btf_member *ctx_type;
5912 
5913 	conv_struct = bpf_ctx_convert.t;
5914 	if (!conv_struct)
5915 		return -EFAULT;
5916 	/* prog_type is valid bpf program type. No need for bounds check. */
5917 	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
5918 	/* ctx_type is a pointer to prog_ctx_type in vmlinux.
5919 	 * Like 'struct sk_buff'
5920 	 */
5921 	return ctx_type->type;
5922 }
5923 
5924 bool btf_is_projection_of(const char *pname, const char *tname)
5925 {
5926 	if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
5927 		return true;
5928 	if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
5929 		return true;
5930 	return false;
5931 }
5932 
5933 bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
5934 			  const struct btf_type *t, enum bpf_prog_type prog_type,
5935 			  int arg)
5936 {
5937 	const struct btf_type *ctx_type;
5938 	const char *tname, *ctx_tname;
5939 
5940 	t = btf_type_by_id(btf, t->type);
5941 
5942 	/* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to
5943 	 * check before we skip all the typedef below.
5944 	 */
5945 	if (prog_type == BPF_PROG_TYPE_KPROBE) {
5946 		while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
5947 			t = btf_type_by_id(btf, t->type);
5948 
5949 		if (btf_type_is_typedef(t)) {
5950 			tname = btf_name_by_offset(btf, t->name_off);
5951 			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
5952 				return true;
5953 		}
5954 	}
5955 
5956 	while (btf_type_is_modifier(t))
5957 		t = btf_type_by_id(btf, t->type);
5958 	if (!btf_type_is_struct(t)) {
5959 		/* Only pointer to struct is supported for now.
5960 		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
5961 		 * is not supported yet.
5962 		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
5963 		 */
5964 		return false;
5965 	}
5966 	tname = btf_name_by_offset(btf, t->name_off);
5967 	if (!tname) {
5968 		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5969 		return false;
5970 	}
5971 
5972 	ctx_type = find_canonical_prog_ctx_type(prog_type);
5973 	if (!ctx_type) {
5974 		bpf_log(log, "btf_vmlinux is malformed\n");
5975 		/* should not happen */
5976 		return false;
5977 	}
5978 again:
5979 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
5980 	if (!ctx_tname) {
5981 		/* should not happen */
5982 		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5983 		return false;
5984 	}
5985 	/* program types without named context types work only with arg:ctx tag */
5986 	if (ctx_tname[0] == '\0')
5987 		return false;
5988 	/* only compare that prog's ctx type name is the same as
5989 	 * kernel expects. No need to compare field by field.
5990 	 * It's ok for bpf prog to do:
5991 	 * struct __sk_buff {};
5992 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
5993 	 * { // no fields of skb are ever used }
5994 	 */
5995 	if (btf_is_projection_of(ctx_tname, tname))
5996 		return true;
5997 	if (strcmp(ctx_tname, tname)) {
5998 		/* bpf_user_pt_regs_t is a typedef, so resolve it to
5999 		 * underlying struct and check name again
6000 		 */
6001 		if (!btf_type_is_modifier(ctx_type))
6002 			return false;
6003 		while (btf_type_is_modifier(ctx_type))
6004 			ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6005 		goto again;
6006 	}
6007 	return true;
6008 }
6009 
6010 /* forward declarations for arch-specific underlying types of
6011  * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
6012  * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still
6013  * works correctly with __builtin_types_compatible_p() on respective
6014  * architectures
6015  */
6016 struct user_regs_struct;
6017 struct user_pt_regs;
6018 
6019 static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
6020 				      const struct btf_type *t, int arg,
6021 				      enum bpf_prog_type prog_type,
6022 				      enum bpf_attach_type attach_type)
6023 {
6024 	const struct btf_type *ctx_type;
6025 	const char *tname, *ctx_tname;
6026 
6027 	if (!btf_is_ptr(t)) {
6028 		bpf_log(log, "arg#%d type isn't a pointer\n", arg);
6029 		return -EINVAL;
6030 	}
6031 	t = btf_type_by_id(btf, t->type);
6032 
6033 	/* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */
6034 	if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) {
6035 		while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
6036 			t = btf_type_by_id(btf, t->type);
6037 
6038 		if (btf_type_is_typedef(t)) {
6039 			tname = btf_name_by_offset(btf, t->name_off);
6040 			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
6041 				return 0;
6042 		}
6043 	}
6044 
6045 	/* all other program types don't use typedefs for context type */
6046 	while (btf_type_is_modifier(t))
6047 		t = btf_type_by_id(btf, t->type);
6048 
6049 	/* `void *ctx __arg_ctx` is always valid */
6050 	if (btf_type_is_void(t))
6051 		return 0;
6052 
6053 	tname = btf_name_by_offset(btf, t->name_off);
6054 	if (str_is_empty(tname)) {
6055 		bpf_log(log, "arg#%d type doesn't have a name\n", arg);
6056 		return -EINVAL;
6057 	}
6058 
6059 	/* special cases */
6060 	switch (prog_type) {
6061 	case BPF_PROG_TYPE_KPROBE:
6062 		if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6063 			return 0;
6064 		break;
6065 	case BPF_PROG_TYPE_PERF_EVENT:
6066 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6067 		    __btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6068 			return 0;
6069 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6070 		    __btf_type_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6071 			return 0;
6072 		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6073 		    __btf_type_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6074 			return 0;
6075 		break;
6076 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
6077 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6078 		/* allow u64* as ctx */
6079 		if (btf_is_int(t) && t->size == 8)
6080 			return 0;
6081 		break;
6082 	case BPF_PROG_TYPE_TRACING:
6083 		switch (attach_type) {
6084 		case BPF_TRACE_RAW_TP:
6085 			/* tp_btf program is TRACING, so need special case here */
6086 			if (__btf_type_is_struct(t) &&
6087 			    strcmp(tname, "bpf_raw_tracepoint_args") == 0)
6088 				return 0;
6089 			/* allow u64* as ctx */
6090 			if (btf_is_int(t) && t->size == 8)
6091 				return 0;
6092 			break;
6093 		case BPF_TRACE_ITER:
6094 			/* allow struct bpf_iter__xxx types only */
6095 			if (__btf_type_is_struct(t) &&
6096 			    strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6097 				return 0;
6098 			break;
6099 		case BPF_TRACE_FENTRY:
6100 		case BPF_TRACE_FEXIT:
6101 		case BPF_MODIFY_RETURN:
6102 			/* allow u64* as ctx */
6103 			if (btf_is_int(t) && t->size == 8)
6104 				return 0;
6105 			break;
6106 		default:
6107 			break;
6108 		}
6109 		break;
6110 	case BPF_PROG_TYPE_LSM:
6111 	case BPF_PROG_TYPE_STRUCT_OPS:
6112 		/* allow u64* as ctx */
6113 		if (btf_is_int(t) && t->size == 8)
6114 			return 0;
6115 		break;
6116 	case BPF_PROG_TYPE_TRACEPOINT:
6117 	case BPF_PROG_TYPE_SYSCALL:
6118 	case BPF_PROG_TYPE_EXT:
6119 		return 0; /* anything goes */
6120 	default:
6121 		break;
6122 	}
6123 
6124 	ctx_type = find_canonical_prog_ctx_type(prog_type);
6125 	if (!ctx_type) {
6126 		/* should not happen */
6127 		bpf_log(log, "btf_vmlinux is malformed\n");
6128 		return -EINVAL;
6129 	}
6130 
6131 	/* resolve typedefs and check that underlying structs are matching as well */
6132 	while (btf_type_is_modifier(ctx_type))
6133 		ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6134 
6135 	/* if program type doesn't have distinctly named struct type for
6136 	 * context, then __arg_ctx argument can only be `void *`, which we
6137 	 * already checked above
6138 	 */
6139 	if (!__btf_type_is_struct(ctx_type)) {
6140 		bpf_log(log, "arg#%d should be void pointer\n", arg);
6141 		return -EINVAL;
6142 	}
6143 
6144 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6145 	if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
6146 		bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
6147 		return -EINVAL;
6148 	}
6149 
6150 	return 0;
6151 }
6152 
6153 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
6154 				     struct btf *btf,
6155 				     const struct btf_type *t,
6156 				     enum bpf_prog_type prog_type,
6157 				     int arg)
6158 {
6159 	if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg))
6160 		return -ENOENT;
6161 	return find_kern_ctx_type_id(prog_type);
6162 }
6163 
6164 int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
6165 {
6166 	const struct btf_member *kctx_member;
6167 	const struct btf_type *conv_struct;
6168 	const struct btf_type *kctx_type;
6169 	u32 kctx_type_id;
6170 
6171 	conv_struct = bpf_ctx_convert.t;
6172 	/* get member for kernel ctx type */
6173 	kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
6174 	kctx_type_id = kctx_member->type;
6175 	kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id);
6176 	if (!btf_type_is_struct(kctx_type)) {
6177 		bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
6178 		return -EINVAL;
6179 	}
6180 
6181 	return kctx_type_id;
6182 }
6183 
6184 BTF_ID_LIST(bpf_ctx_convert_btf_id)
6185 BTF_ID(struct, bpf_ctx_convert)
6186 
6187 static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
6188 				  void *data, unsigned int data_size)
6189 {
6190 	struct btf *btf = NULL;
6191 	int err;
6192 
6193 	if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF))
6194 		return ERR_PTR(-ENOENT);
6195 
6196 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6197 	if (!btf) {
6198 		err = -ENOMEM;
6199 		goto errout;
6200 	}
6201 	env->btf = btf;
6202 
6203 	btf->data = data;
6204 	btf->data_size = data_size;
6205 	btf->kernel_btf = true;
6206 	snprintf(btf->name, sizeof(btf->name), "%s", name);
6207 
6208 	err = btf_parse_hdr(env);
6209 	if (err)
6210 		goto errout;
6211 
6212 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6213 
6214 	err = btf_parse_str_sec(env);
6215 	if (err)
6216 		goto errout;
6217 
6218 	err = btf_check_all_metas(env);
6219 	if (err)
6220 		goto errout;
6221 
6222 	err = btf_check_type_tags(env, btf, 1);
6223 	if (err)
6224 		goto errout;
6225 
6226 	refcount_set(&btf->refcnt, 1);
6227 
6228 	return btf;
6229 
6230 errout:
6231 	if (btf) {
6232 		kvfree(btf->types);
6233 		kfree(btf);
6234 	}
6235 	return ERR_PTR(err);
6236 }
6237 
6238 struct btf *btf_parse_vmlinux(void)
6239 {
6240 	struct btf_verifier_env *env = NULL;
6241 	struct bpf_verifier_log *log;
6242 	struct btf *btf;
6243 	int err;
6244 
6245 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6246 	if (!env)
6247 		return ERR_PTR(-ENOMEM);
6248 
6249 	log = &env->log;
6250 	log->level = BPF_LOG_KERNEL;
6251 	btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
6252 	if (IS_ERR(btf))
6253 		goto err_out;
6254 
6255 	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
6256 	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
6257 	err = btf_alloc_id(btf);
6258 	if (err) {
6259 		btf_free(btf);
6260 		btf = ERR_PTR(err);
6261 	}
6262 err_out:
6263 	btf_verifier_env_free(env);
6264 	return btf;
6265 }
6266 
6267 /* If .BTF_ids section was created with distilled base BTF, both base and
6268  * split BTF ids will need to be mapped to actual base/split ids for
6269  * BTF now that it has been relocated.
6270  */
6271 static __u32 btf_relocate_id(const struct btf *btf, __u32 id)
6272 {
6273 	if (!btf->base_btf || !btf->base_id_map)
6274 		return id;
6275 	return btf->base_id_map[id];
6276 }
6277 
6278 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6279 
6280 static struct btf *btf_parse_module(const char *module_name, const void *data,
6281 				    unsigned int data_size, void *base_data,
6282 				    unsigned int base_data_size)
6283 {
6284 	struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL;
6285 	struct btf_verifier_env *env = NULL;
6286 	struct bpf_verifier_log *log;
6287 	int err = 0;
6288 
6289 	vmlinux_btf = bpf_get_btf_vmlinux();
6290 	if (IS_ERR(vmlinux_btf))
6291 		return vmlinux_btf;
6292 	if (!vmlinux_btf)
6293 		return ERR_PTR(-EINVAL);
6294 
6295 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6296 	if (!env)
6297 		return ERR_PTR(-ENOMEM);
6298 
6299 	log = &env->log;
6300 	log->level = BPF_LOG_KERNEL;
6301 
6302 	if (base_data) {
6303 		base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size);
6304 		if (IS_ERR(base_btf)) {
6305 			err = PTR_ERR(base_btf);
6306 			goto errout;
6307 		}
6308 	} else {
6309 		base_btf = vmlinux_btf;
6310 	}
6311 
6312 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6313 	if (!btf) {
6314 		err = -ENOMEM;
6315 		goto errout;
6316 	}
6317 	env->btf = btf;
6318 
6319 	btf->base_btf = base_btf;
6320 	btf->start_id = base_btf->nr_types;
6321 	btf->start_str_off = base_btf->hdr.str_len;
6322 	btf->kernel_btf = true;
6323 	snprintf(btf->name, sizeof(btf->name), "%s", module_name);
6324 
6325 	btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
6326 	if (!btf->data) {
6327 		err = -ENOMEM;
6328 		goto errout;
6329 	}
6330 	btf->data_size = data_size;
6331 
6332 	err = btf_parse_hdr(env);
6333 	if (err)
6334 		goto errout;
6335 
6336 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6337 
6338 	err = btf_parse_str_sec(env);
6339 	if (err)
6340 		goto errout;
6341 
6342 	err = btf_check_all_metas(env);
6343 	if (err)
6344 		goto errout;
6345 
6346 	err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
6347 	if (err)
6348 		goto errout;
6349 
6350 	if (base_btf != vmlinux_btf) {
6351 		err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
6352 		if (err)
6353 			goto errout;
6354 		btf_free(base_btf);
6355 		base_btf = vmlinux_btf;
6356 	}
6357 
6358 	btf_verifier_env_free(env);
6359 	refcount_set(&btf->refcnt, 1);
6360 	return btf;
6361 
6362 errout:
6363 	btf_verifier_env_free(env);
6364 	if (!IS_ERR(base_btf) && base_btf != vmlinux_btf)
6365 		btf_free(base_btf);
6366 	if (btf) {
6367 		kvfree(btf->data);
6368 		kvfree(btf->types);
6369 		kfree(btf);
6370 	}
6371 	return ERR_PTR(err);
6372 }
6373 
6374 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
6375 
6376 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
6377 {
6378 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6379 
6380 	if (tgt_prog)
6381 		return tgt_prog->aux->btf;
6382 	else
6383 		return prog->aux->attach_btf;
6384 }
6385 
6386 static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
6387 {
6388 	/* skip modifiers */
6389 	t = btf_type_skip_modifiers(btf, t->type, NULL);
6390 
6391 	return btf_type_is_int(t);
6392 }
6393 
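/* Map a byte offset into the raw tracing context onto an argument
 * index.  Each argument occupies at least one 8-byte slot; e.g. for
 *
 *	int f(struct sock *sk, u64 flags);
 *
 * offsets 0..7 resolve to arg 0, 8..15 to arg 1, 16..23 to the return
 * value (index nr_args) and anything beyond that to nr_args + 1, which
 * the caller then rejects as out of range.
 */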
6394 static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
6395 			   int off)
6396 {
6397 	const struct btf_param *args;
6398 	const struct btf_type *t;
6399 	u32 offset = 0, nr_args;
6400 	int i;
6401 
6402 	if (!func_proto)
6403 		return off / 8;
6404 
6405 	nr_args = btf_type_vlen(func_proto);
6406 	args = (const struct btf_param *)(func_proto + 1);
6407 	for (i = 0; i < nr_args; i++) {
6408 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6409 		offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6410 		if (off < offset)
6411 			return i;
6412 	}
6413 
6414 	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6415 	offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6416 	if (off < offset)
6417 		return nr_args;
6418 
6419 	return nr_args + 1;
6420 }
6421 
6422 static bool prog_args_trusted(const struct bpf_prog *prog)
6423 {
6424 	enum bpf_attach_type atype = prog->expected_attach_type;
6425 
6426 	switch (prog->type) {
6427 	case BPF_PROG_TYPE_TRACING:
6428 		return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER;
6429 	case BPF_PROG_TYPE_LSM:
6430 		return bpf_lsm_is_trusted(prog);
6431 	case BPF_PROG_TYPE_STRUCT_OPS:
6432 		return true;
6433 	default:
6434 		return false;
6435 	}
6436 }
6437 
6438 int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
6439 		       u32 arg_no)
6440 {
6441 	const struct btf_param *args;
6442 	const struct btf_type *t;
6443 	int off = 0, i;
6444 	u32 sz;
6445 
6446 	args = btf_params(func_proto);
6447 	for (i = 0; i < arg_no; i++) {
6448 		t = btf_type_by_id(btf, args[i].type);
6449 		t = btf_resolve_size(btf, t, &sz);
6450 		if (IS_ERR(t))
6451 			return PTR_ERR(t);
6452 		off += roundup(sz, 8);
6453 	}
6454 
6455 	return off;
6456 }
6457 
6458 struct bpf_raw_tp_null_args {
6459 	const char *func;
6460 	u64 mask;
6461 };
6462 
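/* Each entry reserves a 4-bit nibble per tracepoint argument: bit 0x1
 * in a nibble marks that argument as possibly NULL, bit 0x2 marks it as
 * possibly an ERR_PTR.  E.g. { "sched_pi_setprio", 0x10 } says arg #1
 * (the second argument) of the sched_pi_setprio tracepoint may be NULL,
 * and { "tcp_send_reset", 0x11 } says both arg #0 and arg #1 may be
 * NULL.
 */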
6463 static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
6464 	/* sched */
6465 	{ "sched_pi_setprio", 0x10 },
6466 	/* ... from sched_numa_pair_template event class */
6467 	{ "sched_stick_numa", 0x100 },
6468 	{ "sched_swap_numa", 0x100 },
6469 	/* afs */
6470 	{ "afs_make_fs_call", 0x10 },
6471 	{ "afs_make_fs_calli", 0x10 },
6472 	{ "afs_make_fs_call1", 0x10 },
6473 	{ "afs_make_fs_call2", 0x10 },
6474 	{ "afs_protocol_error", 0x1 },
6475 	{ "afs_flock_ev", 0x10 },
6476 	/* cachefiles */
6477 	{ "cachefiles_lookup", 0x1 | 0x200 },
6478 	{ "cachefiles_unlink", 0x1 },
6479 	{ "cachefiles_rename", 0x1 },
6480 	{ "cachefiles_prep_read", 0x1 },
6481 	{ "cachefiles_mark_active", 0x1 },
6482 	{ "cachefiles_mark_failed", 0x1 },
6483 	{ "cachefiles_mark_inactive", 0x1 },
6484 	{ "cachefiles_vfs_error", 0x1 },
6485 	{ "cachefiles_io_error", 0x1 },
6486 	{ "cachefiles_ondemand_open", 0x1 },
6487 	{ "cachefiles_ondemand_copen", 0x1 },
6488 	{ "cachefiles_ondemand_close", 0x1 },
6489 	{ "cachefiles_ondemand_read", 0x1 },
6490 	{ "cachefiles_ondemand_cread", 0x1 },
6491 	{ "cachefiles_ondemand_fd_write", 0x1 },
6492 	{ "cachefiles_ondemand_fd_release", 0x1 },
6493 	/* ext4, from ext4__mballoc event class */
6494 	{ "ext4_mballoc_discard", 0x10 },
6495 	{ "ext4_mballoc_free", 0x10 },
6496 	/* fib */
6497 	{ "fib_table_lookup", 0x100 },
6498 	/* filelock */
6499 	/* ... from filelock_lock event class */
6500 	{ "posix_lock_inode", 0x10 },
6501 	{ "fcntl_setlk", 0x10 },
6502 	{ "locks_remove_posix", 0x10 },
6503 	{ "flock_lock_inode", 0x10 },
6504 	/* ... from filelock_lease event class */
6505 	{ "break_lease_noblock", 0x10 },
6506 	{ "break_lease_block", 0x10 },
6507 	{ "break_lease_unblock", 0x10 },
6508 	{ "generic_delete_lease", 0x10 },
6509 	{ "time_out_leases", 0x10 },
6510 	/* host1x */
6511 	{ "host1x_cdma_push_gather", 0x10000 },
6512 	/* huge_memory */
6513 	{ "mm_khugepaged_scan_pmd", 0x10 },
6514 	{ "mm_collapse_huge_page_isolate", 0x1 },
6515 	{ "mm_khugepaged_scan_file", 0x10 },
6516 	{ "mm_khugepaged_collapse_file", 0x10 },
6517 	/* kmem */
6518 	{ "mm_page_alloc", 0x1 },
6519 	{ "mm_page_pcpu_drain", 0x1 },
6520 	/* .. from mm_page event class */
6521 	{ "mm_page_alloc_zone_locked", 0x1 },
6522 	/* netfs */
6523 	{ "netfs_failure", 0x10 },
6524 	/* power */
6525 	{ "device_pm_callback_start", 0x10 },
6526 	/* qdisc */
6527 	{ "qdisc_dequeue", 0x1000 },
6528 	/* rxrpc */
6529 	{ "rxrpc_recvdata", 0x1 },
6530 	{ "rxrpc_resend", 0x10 },
6531 	{ "rxrpc_tq", 0x10 },
6532 	{ "rxrpc_client", 0x1 },
6533 	/* skb */
6534 	{ "kfree_skb", 0x1000 },
6535 	/* sunrpc */
6536 	{ "xs_stream_read_data", 0x1 },
6537 	/* ... from xprt_cong_event event class */
6538 	{ "xprt_reserve_cong", 0x10 },
6539 	{ "xprt_release_cong", 0x10 },
6540 	{ "xprt_get_cong", 0x10 },
6541 	{ "xprt_put_cong", 0x10 },
6542 	/* tcp */
6543 	{ "tcp_send_reset", 0x11 },
6544 	/* tegra_apb_dma */
6545 	{ "tegra_dma_tx_status", 0x100 },
6546 	/* timer_migration */
6547 	{ "tmigr_update_events", 0x1 },
6548 	/* writeback, from writeback_folio_template event class */
6549 	{ "writeback_dirty_folio", 0x10 },
6550 	{ "folio_wait_writeback", 0x10 },
6551 	/* rdma */
6552 	{ "mr_integ_alloc", 0x2000 },
6553 	/* bpf_testmod */
6554 	{ "bpf_testmod_test_read", 0x0 },
6555 	/* amdgpu */
6556 	{ "amdgpu_vm_bo_map", 0x1 },
6557 	{ "amdgpu_vm_bo_unmap", 0x1 },
6558 	/* netfs */
6559 	{ "netfs_folioq", 0x1 },
6560 	/* xfs from xfs_defer_pending_class */
6561 	{ "xfs_defer_create_intent", 0x1 },
6562 	{ "xfs_defer_cancel_list", 0x1 },
6563 	{ "xfs_defer_pending_finish", 0x1 },
6564 	{ "xfs_defer_pending_abort", 0x1 },
6565 	{ "xfs_defer_relog_intent", 0x1 },
6566 	{ "xfs_defer_isolate_paused", 0x1 },
6567 	{ "xfs_defer_item_pause", 0x1 },
6568 	{ "xfs_defer_item_unpause", 0x1 },
6569 	/* xfs from xfs_defer_pending_item_class */
6570 	{ "xfs_defer_add_item", 0x1 },
6571 	{ "xfs_defer_cancel_item", 0x1 },
6572 	{ "xfs_defer_finish_item", 0x1 },
6573 	/* xfs from xfs_icwalk_class */
6574 	{ "xfs_ioc_free_eofblocks", 0x10 },
6575 	{ "xfs_blockgc_free_space", 0x10 },
6576 	/* xfs from xfs_btree_cur_class */
6577 	{ "xfs_btree_updkeys", 0x100 },
6578 	{ "xfs_btree_overlapped_query_range", 0x100 },
6579 	/* xfs from xfs_imap_class*/
6580 	{ "xfs_map_blocks_found", 0x10000 },
6581 	{ "xfs_map_blocks_alloc", 0x10000 },
6582 	{ "xfs_iomap_alloc", 0x1000 },
6583 	{ "xfs_iomap_found", 0x1000 },
6584 	/* xfs from xfs_fs_class */
6585 	{ "xfs_inodegc_flush", 0x1 },
6586 	{ "xfs_inodegc_push", 0x1 },
6587 	{ "xfs_inodegc_start", 0x1 },
6588 	{ "xfs_inodegc_stop", 0x1 },
6589 	{ "xfs_inodegc_queue", 0x1 },
6590 	{ "xfs_inodegc_throttle", 0x1 },
6591 	{ "xfs_fs_sync_fs", 0x1 },
6592 	{ "xfs_blockgc_start", 0x1 },
6593 	{ "xfs_blockgc_stop", 0x1 },
6594 	{ "xfs_blockgc_worker", 0x1 },
6595 	{ "xfs_blockgc_flush_all", 0x1 },
6596 	/* xfs_scrub */
6597 	{ "xchk_nlinks_live_update", 0x10 },
6598 	/* xfs_scrub from xchk_metapath_class */
6599 	{ "xchk_metapath_lookup", 0x100 },
6600 	/* nfsd */
6601 	{ "nfsd_dirent", 0x1 },
6602 	{ "nfsd_file_acquire", 0x1001 },
6603 	{ "nfsd_file_insert_err", 0x1 },
6604 	{ "nfsd_file_cons_err", 0x1 },
6605 	/* nfs4 */
6606 	{ "nfs4_setup_sequence", 0x1 },
6607 	{ "pnfs_update_layout", 0x10000 },
6608 	{ "nfs4_inode_callback_event", 0x200 },
6609 	{ "nfs4_inode_stateid_callback_event", 0x200 },
6610 	/* nfs from pnfs_layout_event */
6611 	{ "pnfs_mds_fallback_pg_init_read", 0x10000 },
6612 	{ "pnfs_mds_fallback_pg_init_write", 0x10000 },
6613 	{ "pnfs_mds_fallback_pg_get_mirror_count", 0x10000 },
6614 	{ "pnfs_mds_fallback_read_done", 0x10000 },
6615 	{ "pnfs_mds_fallback_write_done", 0x10000 },
6616 	{ "pnfs_mds_fallback_read_pagelist", 0x10000 },
6617 	{ "pnfs_mds_fallback_write_pagelist", 0x10000 },
6618 	/* coda */
6619 	{ "coda_dec_pic_run", 0x10 },
6620 	{ "coda_dec_pic_done", 0x10 },
6621 	/* cfg80211 */
6622 	{ "cfg80211_scan_done", 0x11 },
6623 	{ "rdev_set_coalesce", 0x10 },
6624 	{ "cfg80211_report_wowlan_wakeup", 0x100 },
6625 	{ "cfg80211_inform_bss_frame", 0x100 },
6626 	{ "cfg80211_michael_mic_failure", 0x10000 },
6627 	/* cfg80211 from wiphy_work_event */
6628 	{ "wiphy_work_queue", 0x10 },
6629 	{ "wiphy_work_run", 0x10 },
6630 	{ "wiphy_work_cancel", 0x10 },
6631 	{ "wiphy_work_flush", 0x10 },
6632 	/* hugetlbfs */
6633 	{ "hugetlbfs_alloc_inode", 0x10 },
6634 	/* spufs */
6635 	{ "spufs_context", 0x10 },
6636 	/* kvm_hv */
6637 	{ "kvm_page_fault_enter", 0x100 },
6638 	/* dpu */
6639 	{ "dpu_crtc_setup_mixer", 0x100 },
6640 	/* binder */
6641 	{ "binder_transaction", 0x100 },
6642 	/* bcachefs */
6643 	{ "btree_path_free", 0x100 },
6644 	/* hfi1_tx */
6645 	{ "hfi1_sdma_progress", 0x1000 },
6646 	/* iptfs */
6647 	{ "iptfs_ingress_postq_event", 0x1000 },
6648 	/* neigh */
6649 	{ "neigh_update", 0x10 },
6650 	/* snd_firewire_lib */
6651 	{ "amdtp_packet", 0x100 },
6652 };
6653 
6654 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
6655 		    const struct bpf_prog *prog,
6656 		    struct bpf_insn_access_aux *info)
6657 {
6658 	const struct btf_type *t = prog->aux->attach_func_proto;
6659 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6660 	struct btf *btf = bpf_prog_get_target_btf(prog);
6661 	const char *tname = prog->aux->attach_func_name;
6662 	struct bpf_verifier_log *log = info->log;
6663 	const struct btf_param *args;
6664 	bool ptr_err_raw_tp = false;
6665 	const char *tag_value;
6666 	u32 nr_args, arg;
6667 	int i, ret;
6668 
6669 	if (off % 8) {
6670 		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
6671 			tname, off);
6672 		return false;
6673 	}
6674 	arg = get_ctx_arg_idx(btf, t, off);
6675 	args = (const struct btf_param *)(t + 1);
6676 	/* if (t == NULL) Fall back to default BPF prog with
6677 	 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
6678 	 */
6679 	nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
6680 	if (prog->aux->attach_btf_trace) {
6681 		/* skip first 'void *__data' argument in btf_trace_##name typedef */
6682 		args++;
6683 		nr_args--;
6684 	}
6685 
6686 	if (arg > nr_args) {
6687 		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6688 			tname, arg + 1);
6689 		return false;
6690 	}
6691 
6692 	if (arg == nr_args) {
6693 		switch (prog->expected_attach_type) {
6694 		case BPF_LSM_MAC:
6695 			/* mark we are accessing the return value */
6696 			info->is_retval = true;
6697 			fallthrough;
6698 		case BPF_LSM_CGROUP:
6699 		case BPF_TRACE_FEXIT:
6700 			/* When LSM programs are attached to void LSM hooks
6701 			 * they use FEXIT trampolines and when attached to
6702 			 * int LSM hooks, they use MODIFY_RETURN trampolines.
6703 			 *
6704 			 * While the LSM programs are BPF_MODIFY_RETURN-like
6705 			 * the check:
6706 			 *
6707 			 *	if (ret_type != 'int')
6708 			 *		return -EINVAL;
6709 			 *
6710 			 * is _not_ done here. This is still safe as LSM hooks
6711 			 * have only void and int return types.
6712 			 */
6713 			if (!t)
6714 				return true;
6715 			t = btf_type_by_id(btf, t->type);
6716 			break;
6717 		case BPF_MODIFY_RETURN:
6718 			/* For now the BPF_MODIFY_RETURN can only be attached to
6719 			 * functions that return an int.
6720 			 */
6721 			if (!t)
6722 				return false;
6723 
6724 			t = btf_type_skip_modifiers(btf, t->type, NULL);
6725 			if (!btf_type_is_small_int(t)) {
6726 				bpf_log(log,
6727 					"ret type %s not allowed for fmod_ret\n",
6728 					btf_type_str(t));
6729 				return false;
6730 			}
6731 			break;
6732 		default:
6733 			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6734 				tname, arg + 1);
6735 			return false;
6736 		}
6737 	} else {
6738 		if (!t)
6739 			/* Default prog with MAX_BPF_FUNC_REG_ARGS args */
6740 			return true;
6741 		t = btf_type_by_id(btf, args[arg].type);
6742 	}
6743 
6744 	/* skip modifiers */
6745 	while (btf_type_is_modifier(t))
6746 		t = btf_type_by_id(btf, t->type);
6747 	if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
6748 		/* accessing a scalar */
6749 		return true;
6750 	if (!btf_type_is_ptr(t)) {
6751 		bpf_log(log,
6752 			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
6753 			tname, arg,
6754 			__btf_name_by_offset(btf, t->name_off),
6755 			btf_type_str(t));
6756 		return false;
6757 	}
6758 
6759 	if (size != sizeof(u64)) {
6760 		bpf_log(log, "func '%s' size %d must be 8\n",
6761 			tname, size);
6762 		return false;
6763 	}
6764 
6765 	/* check for PTR_TO_BUF with PTR_MAYBE_NULL (rdonly or rdwr buffer) ctx args */
6766 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6767 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6768 		u32 type, flag;
6769 
6770 		type = base_type(ctx_arg_info->reg_type);
6771 		flag = type_flag(ctx_arg_info->reg_type);
6772 		if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6773 		    (flag & PTR_MAYBE_NULL)) {
6774 			info->reg_type = ctx_arg_info->reg_type;
6775 			return true;
6776 		}
6777 	}
6778 
6779 	if (t->type == 0)
6780 		/* This is a pointer to void.
6781 		 * It is the same as scalar from the verifier safety pov.
6782 		 * No further pointer walking is allowed.
6783 		 */
6784 		return true;
6785 
6786 	if (is_int_ptr(btf, t))
6787 		return true;
6788 
6789 	/* this is a pointer to another type */
6790 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6791 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6792 
6793 		if (ctx_arg_info->offset == off) {
6794 			if (!ctx_arg_info->btf_id) {
6795 				bpf_log(log, "invalid btf_id for context argument offset %u\n", off);
6796 				return false;
6797 			}
6798 
6799 			info->reg_type = ctx_arg_info->reg_type;
6800 			info->btf = ctx_arg_info->btf ? : btf_vmlinux;
6801 			info->btf_id = ctx_arg_info->btf_id;
6802 			info->ref_obj_id = ctx_arg_info->ref_obj_id;
6803 			return true;
6804 		}
6805 	}
6806 
6807 	info->reg_type = PTR_TO_BTF_ID;
6808 	if (prog_args_trusted(prog))
6809 		info->reg_type |= PTR_TRUSTED;
6810 
6811 	if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
6812 		info->reg_type |= PTR_MAYBE_NULL;
6813 
6814 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6815 		struct btf *btf = prog->aux->attach_btf;
6816 		const struct btf_type *t;
6817 		const char *tname;
6818 
6819 		/* These BTF lookups should never fail; return false if they somehow do */
6820 		t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6821 		if (!t)
6822 			return false;
6823 		tname = btf_name_by_offset(btf, t->name_off);
6824 		if (!tname)
6825 			return false;
6826 		/* Checked by bpf_check_attach_target */
6827 		tname += sizeof("btf_trace_") - 1;
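		/* Each argument gets a 4-bit nibble in ->mask: bit 0 set means
		 * the argument may be NULL, bit 1 set means it may carry an
		 * IS_ERR()-encoded error pointer.  E.g. a mask of 0x100 marks
		 * argument 2 as possibly NULL.
		 */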
6828 		for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) {
6829 			/* Is this a func with potential NULL args? */
6830 			if (strcmp(tname, raw_tp_null_args[i].func))
6831 				continue;
6832 			if (raw_tp_null_args[i].mask & (0x1 << (arg * 4)))
6833 				info->reg_type |= PTR_MAYBE_NULL;
6834 			/* Is the current arg IS_ERR? */
6835 			if (raw_tp_null_args[i].mask & (0x2 << (arg * 4)))
6836 				ptr_err_raw_tp = true;
6837 			break;
6838 		}
6839 		/* If we don't know the NULL-ness specification and the tracepoint
6840 		 * is coming from a loadable module, be conservative and mark
6841 		 * the argument as PTR_MAYBE_NULL.
6842 		 */
6843 		if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf))
6844 			info->reg_type |= PTR_MAYBE_NULL;
6845 	}
6846 
6847 	if (tgt_prog) {
6848 		enum bpf_prog_type tgt_type;
6849 
6850 		if (tgt_prog->type == BPF_PROG_TYPE_EXT)
6851 			tgt_type = tgt_prog->aux->saved_dst_prog_type;
6852 		else
6853 			tgt_type = tgt_prog->type;
6854 
6855 		ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
6856 		if (ret > 0) {
6857 			info->btf = btf_vmlinux;
6858 			info->btf_id = ret;
6859 			return true;
6860 		} else {
6861 			return false;
6862 		}
6863 	}
6864 
6865 	info->btf = btf;
6866 	info->btf_id = t->type;
6867 	t = btf_type_by_id(btf, t->type);
6868 
6869 	if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
6870 		tag_value = __btf_name_by_offset(btf, t->name_off);
6871 		if (strcmp(tag_value, "user") == 0)
6872 			info->reg_type |= MEM_USER;
6873 		if (strcmp(tag_value, "percpu") == 0)
6874 			info->reg_type |= MEM_PERCPU;
6875 	}
6876 
6877 	/* skip modifiers */
6878 	while (btf_type_is_modifier(t)) {
6879 		info->btf_id = t->type;
6880 		t = btf_type_by_id(btf, t->type);
6881 	}
6882 	if (!btf_type_is_struct(t)) {
6883 		bpf_log(log,
6884 			"func '%s' arg%d type %s is not a struct\n",
6885 			tname, arg, btf_type_str(t));
6886 		return false;
6887 	}
6888 	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
6889 		tname, arg, info->btf_id, btf_type_str(t),
6890 		__btf_name_by_offset(btf, t->name_off));
6891 
6892 	/* Perform all checks on the validity of type for this argument, but if
6893 	 * we know it can be IS_ERR at runtime, scrub pointer type and mark as
6894 	 * scalar.
6895 	 */
6896 	if (ptr_err_raw_tp) {
6897 		bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg);
6898 		info->reg_type = SCALAR_VALUE;
6899 	}
6900 	return true;
6901 }
6902 EXPORT_SYMBOL_GPL(btf_ctx_access);
6903 
6904 enum bpf_struct_walk_result {
6905 	/* < 0 error */
6906 	WALK_SCALAR = 0,
6907 	WALK_PTR,
6908 	WALK_STRUCT,
6909 };
6910 
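/* Walk the members of struct/union type 't' looking for the field at byte
 * offset 'off'.  For example (illustrative), walking 'struct task_struct'
 * at the offset of its 'mm' member returns WALK_PTR with *next_btf_id set
 * to the id of 'struct mm_struct', while reading its 'pid' member (a plain
 * integer) returns WALK_SCALAR.
 */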
6911 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
6912 			   const struct btf_type *t, int off, int size,
6913 			   u32 *next_btf_id, enum bpf_type_flag *flag,
6914 			   const char **field_name)
6915 {
6916 	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
6917 	const struct btf_type *mtype, *elem_type = NULL;
6918 	const struct btf_member *member;
6919 	const char *tname, *mname, *tag_value;
6920 	u32 vlen, elem_id, mid;
6921 
6922 again:
6923 	if (btf_type_is_modifier(t))
6924 		t = btf_type_skip_modifiers(btf, t->type, NULL);
6925 	tname = __btf_name_by_offset(btf, t->name_off);
6926 	if (!btf_type_is_struct(t)) {
6927 		bpf_log(log, "Type '%s' is not a struct\n", tname);
6928 		return -EINVAL;
6929 	}
6930 
6931 	vlen = btf_type_vlen(t);
6932 	if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
6933 		/*
6934 		 * walking unions yields untrusted pointers
6935 		 * with the exception of __bpf_md_ptr and other
6936 		 * unions with a single member
6937 		 */
6938 		*flag |= PTR_UNTRUSTED;
6939 
6940 	if (off + size > t->size) {
6941 		/* If the last element is a variable size array, we may
6942 		 * need to relax the rule.
6943 		 */
6944 		struct btf_array *array_elem;
6945 
6946 		if (vlen == 0)
6947 			goto error;
6948 
6949 		member = btf_type_member(t) + vlen - 1;
6950 		mtype = btf_type_skip_modifiers(btf, member->type,
6951 						NULL);
6952 		if (!btf_type_is_array(mtype))
6953 			goto error;
6954 
6955 		array_elem = (struct btf_array *)(mtype + 1);
6956 		if (array_elem->nelems != 0)
6957 			goto error;
6958 
6959 		moff = __btf_member_bit_offset(t, member) / 8;
6960 		if (off < moff)
6961 			goto error;
6962 
6963 		/* allow structure and integer */
6964 		t = btf_type_skip_modifiers(btf, array_elem->type,
6965 					    NULL);
6966 
6967 		if (btf_type_is_int(t))
6968 			return WALK_SCALAR;
6969 
6970 		if (!btf_type_is_struct(t))
6971 			goto error;
6972 
6973 		off = (off - moff) % t->size;
6974 		goto again;
6975 
6976 error:
6977 		bpf_log(log, "access beyond struct %s at off %u size %u\n",
6978 			tname, off, size);
6979 		return -EACCES;
6980 	}
6981 
6982 	for_each_member(i, t, member) {
6983 		/* offset of the field in bytes */
6984 		moff = __btf_member_bit_offset(t, member) / 8;
6985 		if (off + size <= moff)
6986 			/* won't find anything, field is already too far */
6987 			break;
6988 
6989 		if (__btf_member_bitfield_size(t, member)) {
6990 			u32 end_bit = __btf_member_bit_offset(t, member) +
6991 				__btf_member_bitfield_size(t, member);
6992 
6993 			/* off <= moff instead of off == moff because clang
6994 			 * does not generate a BTF member for anonymous
6995 			 * bitfield like the ":16" here:
6996 			 * struct {
6997 			 *	int :16;
6998 			 *	int x:8;
6999 			 * };
7000 			 */
7001 			if (off <= moff &&
7002 			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
7003 				return WALK_SCALAR;
7004 
7005 			/* off may be accessing a following member,
7006 			 *
7007 			 * or
7008 			 *
7009 			 * the access only partially overlaps either end
7010 			 * of this bitfield.  Continue in this case as
7011 			 * well, treating it as not accessing this
7012 			 * bitfield, and eventually error out with
7013 			 * "field not found" to keep it simple.
7014 			 * This could be relaxed if a legitimate
7015 			 * partial-access case shows up later.
7016 			 */
7017 			continue;
7018 		}
7019 
7020 		/* In case "off" is pointing into a hole of the struct */
7021 		if (off < moff)
7022 			break;
7023 
7024 		/* type of the field */
7025 		mid = member->type;
7026 		mtype = btf_type_by_id(btf, member->type);
7027 		mname = __btf_name_by_offset(btf, member->name_off);
7028 
7029 		mtype = __btf_resolve_size(btf, mtype, &msize,
7030 					   &elem_type, &elem_id, &total_nelems,
7031 					   &mid);
7032 		if (IS_ERR(mtype)) {
7033 			bpf_log(log, "field %s doesn't have size\n", mname);
7034 			return -EFAULT;
7035 		}
7036 
7037 		mtrue_end = moff + msize;
7038 		if (off >= mtrue_end)
7039 			/* no overlap with member, keep iterating */
7040 			continue;
7041 
7042 		if (btf_type_is_array(mtype)) {
7043 			u32 elem_idx;
7044 
7045 			/* __btf_resolve_size() above helps to
7046 			 * linearize a multi-dimensional array.
7047 			 *
7048 			 * The logic here treats an array
7049 			 * in a struct in the following way:
7050 			 *
7051 			 * struct outer {
7052 			 *	struct inner array[2][2];
7053 			 * };
7054 			 *
7055 			 * looks like:
7056 			 *
7057 			 * struct outer {
7058 			 *	struct inner array_elem0;
7059 			 *	struct inner array_elem1;
7060 			 *	struct inner array_elem2;
7061 			 *	struct inner array_elem3;
7062 			 * };
7063 			 *
7064 			 * When accessing outer->array[1][0], it moves
7065 			 * moff to "array_elem2", set mtype to
7066 			 * "struct inner", and msize also becomes
7067 			 * sizeof(struct inner).  Then most of the
7068 			 * remaining logic will fall through without
7069 			 * caring whether the current member is an
7070 			 * array or not.
7071 			 *
7072 			 * Unlike mtype/msize/moff, mtrue_end does not
7073 			 * change.  The naming difference ("_true") tells
7074 			 * that it does not always correspond to
7075 			 * the current mtype/msize/moff.
7076 			 * It is the true end of the current
7077 			 * member (i.e. array in this case).  That
7078 			 * will allow an int array to be accessed like
7079 			 * a scratch space,
7080 			 * i.e. allow access beyond the size of
7081 			 *      the array's element as long as it is
7082 			 *      within the mtrue_end boundary.
7083 			 */
7084 
7085 			/* skip empty array */
7086 			if (moff == mtrue_end)
7087 				continue;
7088 
7089 			msize /= total_nelems;
7090 			elem_idx = (off - moff) / msize;
7091 			moff += elem_idx * msize;
7092 			mtype = elem_type;
7093 			mid = elem_id;
7094 		}
7095 
7096 		/* the 'off' we're looking for is either equal to start
7097 		 * of this field or inside of this struct
7098 		 */
7099 		if (btf_type_is_struct(mtype)) {
7100 			/* our field must be inside that union or struct */
7101 			t = mtype;
7102 
7103 			/* return if the offset matches the member offset */
7104 			if (off == moff) {
7105 				*next_btf_id = mid;
7106 				return WALK_STRUCT;
7107 			}
7108 
7109 			/* adjust offset we're looking for */
7110 			off -= moff;
7111 			goto again;
7112 		}
7113 
7114 		if (btf_type_is_ptr(mtype)) {
7115 			const struct btf_type *stype, *t;
7116 			enum bpf_type_flag tmp_flag = 0;
7117 			u32 id;
7118 
7119 			if (msize != size || off != moff) {
7120 				bpf_log(log,
7121 					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
7122 					mname, moff, tname, off, size);
7123 				return -EACCES;
7124 			}
7125 
7126 			/* check type tag */
7127 			t = btf_type_by_id(btf, mtype->type);
7128 			if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
7129 				tag_value = __btf_name_by_offset(btf, t->name_off);
7130 				/* check __user tag */
7131 				if (strcmp(tag_value, "user") == 0)
7132 					tmp_flag = MEM_USER;
7133 				/* check __percpu tag */
7134 				if (strcmp(tag_value, "percpu") == 0)
7135 					tmp_flag = MEM_PERCPU;
7136 				/* check __rcu tag */
7137 				if (strcmp(tag_value, "rcu") == 0)
7138 					tmp_flag = MEM_RCU;
7139 			}
7140 
7141 			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
7142 			if (btf_type_is_struct(stype)) {
7143 				*next_btf_id = id;
7144 				*flag |= tmp_flag;
7145 				if (field_name)
7146 					*field_name = mname;
7147 				return WALK_PTR;
7148 			}
7149 		}
7150 
7151 		/* Allow more flexible access within an int as long as
7152 		 * it is within mtrue_end.
7153 		 * Since mtrue_end could be the end of an array,
7154 		 * that also allows using an array of int as a scratch
7155 		 * space. e.g. skb->cb[].
7156 		 */
7157 		if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
7158 			bpf_log(log,
7159 				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
7160 				mname, mtrue_end, tname, off, size);
7161 			return -EACCES;
7162 		}
7163 
7164 		return WALK_SCALAR;
7165 	}
7166 	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
7167 	return -EINVAL;
7168 }
7169 
7170 int btf_struct_access(struct bpf_verifier_log *log,
7171 		      const struct bpf_reg_state *reg,
7172 		      int off, int size, enum bpf_access_type atype __maybe_unused,
7173 		      u32 *next_btf_id, enum bpf_type_flag *flag,
7174 		      const char **field_name)
7175 {
7176 	const struct btf *btf = reg->btf;
7177 	enum bpf_type_flag tmp_flag = 0;
7178 	const struct btf_type *t;
7179 	u32 id = reg->btf_id;
7180 	int err;
7181 
7182 	while (type_is_alloc(reg->type)) {
7183 		struct btf_struct_meta *meta;
7184 		struct btf_record *rec;
7185 		int i;
7186 
7187 		meta = btf_find_struct_meta(btf, id);
7188 		if (!meta)
7189 			break;
7190 		rec = meta->record;
7191 		for (i = 0; i < rec->cnt; i++) {
7192 			struct btf_field *field = &rec->fields[i];
7193 			u32 offset = field->offset;
7194 			if (off < offset + field->size && offset < off + size) {
7195 				bpf_log(log,
7196 					"direct access to %s is disallowed\n",
7197 					btf_field_type_name(field->type));
7198 				return -EACCES;
7199 			}
7200 		}
7201 		break;
7202 	}
7203 
7204 	t = btf_type_by_id(btf, id);
7205 	do {
7206 		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
7207 
7208 		switch (err) {
7209 		case WALK_PTR:
7210 			/* For local types, the destination register cannot
7211 			 * become a pointer again.
7212 			 */
7213 			if (type_is_alloc(reg->type))
7214 				return SCALAR_VALUE;
7215 			/* If we found the pointer or scalar on t+off,
7216 			 * we're done.
7217 			 */
7218 			*next_btf_id = id;
7219 			*flag = tmp_flag;
7220 			return PTR_TO_BTF_ID;
7221 		case WALK_SCALAR:
7222 			return SCALAR_VALUE;
7223 		case WALK_STRUCT:
7224 			/* We found nested struct, so continue the search
7225 			 * by diving in it. At this point the offset is
7226 			 * aligned with the new type, so set it to 0.
7227 			 */
7228 			t = btf_type_by_id(btf, id);
7229 			off = 0;
7230 			break;
7231 		default:
7232 			/* It's either an error or an unknown return value.
7233 			 * Scream and leave.
7234 			 */
7235 			if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
7236 				return -EINVAL;
7237 			return err;
7238 		}
7239 	} while (t);
7240 
7241 	return -EINVAL;
7242 }
7243 
7244 /* Check that two BTF types, each specified as a BTF object + id, are exactly
7245  * the same. Trivial ID check is not enough due to module BTFs, because we can
7246  * end up with two different module BTFs, but IDs point to the common type in
7247  * vmlinux BTF.
7248  */
7249 bool btf_types_are_same(const struct btf *btf1, u32 id1,
7250 			const struct btf *btf2, u32 id2)
7251 {
7252 	if (id1 != id2)
7253 		return false;
7254 	if (btf1 == btf2)
7255 		return true;
7256 	return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
7257 }
7258 
7259 bool btf_struct_ids_match(struct bpf_verifier_log *log,
7260 			  const struct btf *btf, u32 id, int off,
7261 			  const struct btf *need_btf, u32 need_type_id,
7262 			  bool strict)
7263 {
7264 	const struct btf_type *type;
7265 	enum bpf_type_flag flag = 0;
7266 	int err;
7267 
7268 	/* Are we already done? */
7269 	if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
7270 		return true;
7271 	/* In case of strict type match, we do not walk struct, the top level
7272 	 * type match must succeed. When strict is true, off should have already
7273 	 * been 0.
7274 	 */
7275 	if (strict)
7276 		return false;
7277 again:
7278 	type = btf_type_by_id(btf, id);
7279 	if (!type)
7280 		return false;
7281 	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
7282 	if (err != WALK_STRUCT)
7283 		return false;
7284 
7285 	/* We found nested struct object. If it matches
7286 	 * the requested ID, we're done. Otherwise let's
7287 	 * continue the search with offset 0 in the new
7288 	 * type.
7289 	 */
7290 	if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
7291 		off = 0;
7292 		goto again;
7293 	}
7294 
7295 	return true;
7296 }
7297 
7298 static int __get_type_size(struct btf *btf, u32 btf_id,
7299 			   const struct btf_type **ret_type)
7300 {
7301 	const struct btf_type *t;
7302 
7303 	*ret_type = btf_type_by_id(btf, 0);
7304 	if (!btf_id)
7305 		/* void */
7306 		return 0;
7307 	t = btf_type_by_id(btf, btf_id);
7308 	while (t && btf_type_is_modifier(t))
7309 		t = btf_type_by_id(btf, t->type);
7310 	if (!t)
7311 		return -EINVAL;
7312 	*ret_type = t;
7313 	if (btf_type_is_ptr(t))
7314 		/* kernel size of pointer. Not BPF's size of pointer */
7315 		return sizeof(void *);
7316 	if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
7317 		return t->size;
7318 	return -EINVAL;
7319 }
7320 
7321 static u8 __get_type_fmodel_flags(const struct btf_type *t)
7322 {
7323 	u8 flags = 0;
7324 
7325 	if (__btf_type_is_struct(t))
7326 		flags |= BTF_FMODEL_STRUCT_ARG;
7327 	if (btf_type_is_signed_int(t))
7328 		flags |= BTF_FMODEL_SIGNED_ARG;
7329 
7330 	return flags;
7331 }
7332 
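/* Illustrative example (not taken from these sources): distilling
 *
 *	int foo(struct sk_buff *skb, u32 len);
 *
 * yields m->nr_args = 2, m->arg_size = { 8, 4 } (pointer, u32),
 * m->ret_size = 4, and BTF_FMODEL_SIGNED_ARG set in m->ret_flags since
 * the return type is a signed int.
 */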
7333 int btf_distill_func_proto(struct bpf_verifier_log *log,
7334 			   struct btf *btf,
7335 			   const struct btf_type *func,
7336 			   const char *tname,
7337 			   struct btf_func_model *m)
7338 {
7339 	const struct btf_param *args;
7340 	const struct btf_type *t;
7341 	u32 i, nargs;
7342 	int ret;
7343 
7344 	if (!func) {
7345 		/* BTF function prototype doesn't match the verifier types.
7346 		 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
7347 		 */
7348 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7349 			m->arg_size[i] = 8;
7350 			m->arg_flags[i] = 0;
7351 		}
7352 		m->ret_size = 8;
7353 		m->ret_flags = 0;
7354 		m->nr_args = MAX_BPF_FUNC_REG_ARGS;
7355 		return 0;
7356 	}
7357 	args = (const struct btf_param *)(func + 1);
7358 	nargs = btf_type_vlen(func);
7359 	if (nargs > MAX_BPF_FUNC_ARGS) {
7360 		bpf_log(log,
7361 			"The function %s has %d arguments. Too many.\n",
7362 			tname, nargs);
7363 		return -EINVAL;
7364 	}
7365 	ret = __get_type_size(btf, func->type, &t);
7366 	if (ret < 0 || __btf_type_is_struct(t)) {
7367 		bpf_log(log,
7368 			"The function %s return type %s is unsupported.\n",
7369 			tname, btf_type_str(t));
7370 		return -EINVAL;
7371 	}
7372 	m->ret_size = ret;
7373 	m->ret_flags = __get_type_fmodel_flags(t);
7374 
7375 	for (i = 0; i < nargs; i++) {
7376 		if (i == nargs - 1 && args[i].type == 0) {
7377 			bpf_log(log,
7378 				"The function %s with variable args is unsupported.\n",
7379 				tname);
7380 			return -EINVAL;
7381 		}
7382 		ret = __get_type_size(btf, args[i].type, &t);
7383 
7384 		/* No support of struct argument size greater than 16 bytes */
7385 		if (ret < 0 || ret > 16) {
7386 			bpf_log(log,
7387 				"The function %s arg%d type %s is unsupported.\n",
7388 				tname, i, btf_type_str(t));
7389 			return -EINVAL;
7390 		}
7391 		if (ret == 0) {
7392 			bpf_log(log,
7393 				"The function %s has malformed void argument.\n",
7394 				tname);
7395 			return -EINVAL;
7396 		}
7397 		m->arg_size[i] = ret;
7398 		m->arg_flags[i] = __get_type_fmodel_flags(t);
7399 	}
7400 	m->nr_args = nargs;
7401 	return 0;
7402 }
7403 
7404 /* Compare BTFs of two functions assuming only scalars and pointers to context.
7405  * t1 points to BTF_KIND_FUNC in btf1
7406  * t2 points to BTF_KIND_FUNC in btf2
7407  * Returns:
7408  * EINVAL - function prototype mismatch
7409  * EFAULT - verifier bug
7410  * 0 - 99% match. The last 1% is validated by the verifier.
7411  */
7412 static int btf_check_func_type_match(struct bpf_verifier_log *log,
7413 				     struct btf *btf1, const struct btf_type *t1,
7414 				     struct btf *btf2, const struct btf_type *t2)
7415 {
7416 	const struct btf_param *args1, *args2;
7417 	const char *fn1, *fn2, *s1, *s2;
7418 	u32 nargs1, nargs2, i;
7419 
7420 	fn1 = btf_name_by_offset(btf1, t1->name_off);
7421 	fn2 = btf_name_by_offset(btf2, t2->name_off);
7422 
7423 	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
7424 		bpf_log(log, "%s() is not a global function\n", fn1);
7425 		return -EINVAL;
7426 	}
7427 	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
7428 		bpf_log(log, "%s() is not a global function\n", fn2);
7429 		return -EINVAL;
7430 	}
7431 
7432 	t1 = btf_type_by_id(btf1, t1->type);
7433 	if (!t1 || !btf_type_is_func_proto(t1))
7434 		return -EFAULT;
7435 	t2 = btf_type_by_id(btf2, t2->type);
7436 	if (!t2 || !btf_type_is_func_proto(t2))
7437 		return -EFAULT;
7438 
7439 	args1 = (const struct btf_param *)(t1 + 1);
7440 	nargs1 = btf_type_vlen(t1);
7441 	args2 = (const struct btf_param *)(t2 + 1);
7442 	nargs2 = btf_type_vlen(t2);
7443 
7444 	if (nargs1 != nargs2) {
7445 		bpf_log(log, "%s() has %d args while %s() has %d args\n",
7446 			fn1, nargs1, fn2, nargs2);
7447 		return -EINVAL;
7448 	}
7449 
7450 	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7451 	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7452 	if (t1->info != t2->info) {
7453 		bpf_log(log,
7454 			"Return type %s of %s() doesn't match type %s of %s()\n",
7455 			btf_type_str(t1), fn1,
7456 			btf_type_str(t2), fn2);
7457 		return -EINVAL;
7458 	}
7459 
7460 	for (i = 0; i < nargs1; i++) {
7461 		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
7462 		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
7463 
7464 		if (t1->info != t2->info) {
7465 			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
7466 				i, fn1, btf_type_str(t1),
7467 				fn2, btf_type_str(t2));
7468 			return -EINVAL;
7469 		}
7470 		if (btf_type_has_size(t1) && t1->size != t2->size) {
7471 			bpf_log(log,
7472 				"arg%d in %s() has size %d while %s() has %d\n",
7473 				i, fn1, t1->size,
7474 				fn2, t2->size);
7475 			return -EINVAL;
7476 		}
7477 
7478 		/* global functions are validated with scalars and pointers
7479 		 * to context only. And only global functions can be replaced.
7480 		 * Hence type check only those types.
7481 		 */
7482 		if (btf_type_is_int(t1) || btf_is_any_enum(t1))
7483 			continue;
7484 		if (!btf_type_is_ptr(t1)) {
7485 			bpf_log(log,
7486 				"arg%d in %s() has unrecognized type\n",
7487 				i, fn1);
7488 			return -EINVAL;
7489 		}
7490 		t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7491 		t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7492 		if (!btf_type_is_struct(t1)) {
7493 			bpf_log(log,
7494 				"arg%d in %s() is not a pointer to context\n",
7495 				i, fn1);
7496 			return -EINVAL;
7497 		}
7498 		if (!btf_type_is_struct(t2)) {
7499 			bpf_log(log,
7500 				"arg%d in %s() is not a pointer to context\n",
7501 				i, fn2);
7502 			return -EINVAL;
7503 		}
7504 		/* This is an optional check to make program writing easier.
7505 		 * Compare names of structs and report an error to the user.
7506 		 * btf_prepare_func_args() already checked that t2 struct
7507 		 * is a context type. btf_prepare_func_args() will check
7508 		 * later that t1 struct is a context type as well.
7509 		 */
7510 		s1 = btf_name_by_offset(btf1, t1->name_off);
7511 		s2 = btf_name_by_offset(btf2, t2->name_off);
7512 		if (strcmp(s1, s2)) {
7513 			bpf_log(log,
7514 				"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
7515 				i, fn1, s1, fn2, s2);
7516 			return -EINVAL;
7517 		}
7518 	}
7519 	return 0;
7520 }
7521 
7522 /* Compare BTF of the given program with BTF of the target program */
7523 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
7524 			 struct btf *btf2, const struct btf_type *t2)
7525 {
7526 	struct btf *btf1 = prog->aux->btf;
7527 	const struct btf_type *t1;
7528 	u32 btf_id = 0;
7529 
7530 	if (!prog->aux->func_info) {
7531 		bpf_log(log, "Program extension requires BTF\n");
7532 		return -EINVAL;
7533 	}
7534 
7535 	btf_id = prog->aux->func_info[0].type_id;
7536 	if (!btf_id)
7537 		return -EFAULT;
7538 
7539 	t1 = btf_type_by_id(btf1, btf_id);
7540 	if (!t1 || !btf_type_is_func(t1))
7541 		return -EFAULT;
7542 
7543 	return btf_check_func_type_match(log, btf1, t1, btf2, t2);
7544 }
7545 
7546 static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
7547 {
7548 	const char *name;
7549 
7550 	t = btf_type_by_id(btf, t->type); /* skip PTR */
7551 
7552 	while (btf_type_is_modifier(t))
7553 		t = btf_type_by_id(btf, t->type);
7554 
7555 	/* allow either struct or struct forward declaration */
7556 	if (btf_type_is_struct(t) ||
7557 	    (btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) {
7558 		name = btf_str_by_offset(btf, t->name_off);
7559 		return name && strcmp(name, "bpf_dynptr") == 0;
7560 	}
7561 
7562 	return false;
7563 }
7564 
7565 struct bpf_cand_cache {
7566 	const char *name;
7567 	u32 name_len;
7568 	u16 kind;
7569 	u16 cnt;
7570 	struct {
7571 		const struct btf *btf;
7572 		u32 id;
7573 	} cands[];
7574 };
7575 
7576 static DEFINE_MUTEX(cand_cache_mutex);
7577 
7578 static struct bpf_cand_cache *
7579 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);
7580 
7581 static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
7582 				 const struct btf *btf, const struct btf_type *t)
7583 {
7584 	struct bpf_cand_cache *cc;
7585 	struct bpf_core_ctx ctx = {
7586 		.btf = btf,
7587 		.log = log,
7588 	};
7589 	u32 kern_type_id, type_id;
7590 	int err = 0;
7591 
7592 	/* skip PTR and modifiers */
7593 	type_id = t->type;
7594 	t = btf_type_by_id(btf, t->type);
7595 	while (btf_type_is_modifier(t)) {
7596 		type_id = t->type;
7597 		t = btf_type_by_id(btf, t->type);
7598 	}
7599 
7600 	mutex_lock(&cand_cache_mutex);
7601 	cc = bpf_core_find_cands(&ctx, type_id);
7602 	if (IS_ERR(cc)) {
7603 		err = PTR_ERR(cc);
7604 		bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
7605 			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7606 			err);
7607 		goto cand_cache_unlock;
7608 	}
7609 	if (cc->cnt != 1) {
7610 		bpf_log(log, "arg#%d reference type('%s %s') %s\n",
7611 			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7612 			cc->cnt == 0 ? "has no matches" : "is ambiguous");
7613 		err = cc->cnt == 0 ? -ENOENT : -ESRCH;
7614 		goto cand_cache_unlock;
7615 	}
7616 	if (btf_is_module(cc->cands[0].btf)) {
7617 		bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
7618 			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7619 		err = -EOPNOTSUPP;
7620 		goto cand_cache_unlock;
7621 	}
7622 	kern_type_id = cc->cands[0].id;
7623 
7624 cand_cache_unlock:
7625 	mutex_unlock(&cand_cache_mutex);
7626 	if (err)
7627 		return err;
7628 
7629 	return kern_type_id;
7630 }
7631 
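/* The "arg:<tag>" decl tags recognized below are typically emitted from BPF C
 * with libbpf-style annotations (a sketch, assuming libbpf's __arg_* helper
 * macros), e.g.:
 *
 *	int my_subprog(struct task_struct *task __arg_trusted __arg_nullable);
 *
 * where __arg_trusted expands to __attribute__((btf_decl_tag("arg:trusted"))).
 */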
7632 enum btf_arg_tag {
7633 	ARG_TAG_CTX	 = BIT_ULL(0),
7634 	ARG_TAG_NONNULL  = BIT_ULL(1),
7635 	ARG_TAG_TRUSTED  = BIT_ULL(2),
7636 	ARG_TAG_NULLABLE = BIT_ULL(3),
7637 	ARG_TAG_ARENA	 = BIT_ULL(4),
7638 };
7639 
7640 /* Process BTF of a function to produce high-level expectation of function
7641  * arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
7642  * is cached in subprog info for reuse.
7643  * Returns:
7644  * EFAULT - there is a verifier bug. Abort verification.
7645  * EINVAL - cannot convert BTF.
7646  * 0 - Successfully processed BTF and constructed argument expectations.
7647  */
7648 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
7649 {
7650 	bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
7651 	struct bpf_subprog_info *sub = subprog_info(env, subprog);
7652 	struct bpf_verifier_log *log = &env->log;
7653 	struct bpf_prog *prog = env->prog;
7654 	enum bpf_prog_type prog_type = prog->type;
7655 	struct btf *btf = prog->aux->btf;
7656 	const struct btf_param *args;
7657 	const struct btf_type *t, *ref_t, *fn_t;
7658 	u32 i, nargs, btf_id;
7659 	const char *tname;
7660 
7661 	if (sub->args_cached)
7662 		return 0;
7663 
7664 	if (!prog->aux->func_info) {
7665 		bpf_log(log, "Verifier bug\n");
7666 		return -EFAULT;
7667 	}
7668 
7669 	btf_id = prog->aux->func_info[subprog].type_id;
7670 	if (!btf_id) {
7671 		if (!is_global) /* not fatal for static funcs */
7672 			return -EINVAL;
7673 		bpf_log(log, "Global functions need valid BTF\n");
7674 		return -EFAULT;
7675 	}
7676 
7677 	fn_t = btf_type_by_id(btf, btf_id);
7678 	if (!fn_t || !btf_type_is_func(fn_t)) {
7679 		/* These checks were already done by the verifier while loading
7680 		 * struct bpf_func_info
7681 		 */
7682 		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
7683 			subprog);
7684 		return -EFAULT;
7685 	}
7686 	tname = btf_name_by_offset(btf, fn_t->name_off);
7687 
7688 	if (prog->aux->func_info_aux[subprog].unreliable) {
7689 		bpf_log(log, "Verifier bug in function %s()\n", tname);
7690 		return -EFAULT;
7691 	}
7692 	if (prog_type == BPF_PROG_TYPE_EXT)
7693 		prog_type = prog->aux->dst_prog->type;
7694 
7695 	t = btf_type_by_id(btf, fn_t->type);
7696 	if (!t || !btf_type_is_func_proto(t)) {
7697 		bpf_log(log, "Invalid type of function %s()\n", tname);
7698 		return -EFAULT;
7699 	}
7700 	args = (const struct btf_param *)(t + 1);
7701 	nargs = btf_type_vlen(t);
7702 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
7703 		if (!is_global)
7704 			return -EINVAL;
7705 		bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
7706 			tname, nargs, MAX_BPF_FUNC_REG_ARGS);
7707 		return -EINVAL;
7708 	}
7709 	/* check that function returns int, exception cb also requires this */
7710 	t = btf_type_by_id(btf, t->type);
7711 	while (btf_type_is_modifier(t))
7712 		t = btf_type_by_id(btf, t->type);
7713 	if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
7714 		if (!is_global)
7715 			return -EINVAL;
7716 		bpf_log(log,
7717 			"Global function %s() doesn't return scalar. Only those are supported.\n",
7718 			tname);
7719 		return -EINVAL;
7720 	}
7721 	/* Convert BTF function arguments into verifier types.
7722 	 * Only PTR_TO_CTX and SCALAR are supported atm.
7723 	 */
7724 	for (i = 0; i < nargs; i++) {
7725 		u32 tags = 0;
7726 		int id = 0;
7727 
7728 		/* 'arg:<tag>' decl_tag takes precedence over derivation of
7729 		 * register type from BTF type itself
7730 		 */
7731 		while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
7732 			const struct btf_type *tag_t = btf_type_by_id(btf, id);
7733 			const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7734 
7735 			/* disallow arg tags in static subprogs */
7736 			if (!is_global) {
7737 				bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
7738 				return -EOPNOTSUPP;
7739 			}
7740 
7741 			if (strcmp(tag, "ctx") == 0) {
7742 				tags |= ARG_TAG_CTX;
7743 			} else if (strcmp(tag, "trusted") == 0) {
7744 				tags |= ARG_TAG_TRUSTED;
7745 			} else if (strcmp(tag, "nonnull") == 0) {
7746 				tags |= ARG_TAG_NONNULL;
7747 			} else if (strcmp(tag, "nullable") == 0) {
7748 				tags |= ARG_TAG_NULLABLE;
7749 			} else if (strcmp(tag, "arena") == 0) {
7750 				tags |= ARG_TAG_ARENA;
7751 			} else {
7752 				bpf_log(log, "arg#%d has unsupported set of tags\n", i);
7753 				return -EOPNOTSUPP;
7754 			}
7755 		}
7756 		if (id != -ENOENT) {
7757 			bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
7758 			return id;
7759 		}
7760 
7761 		t = btf_type_by_id(btf, args[i].type);
7762 		while (btf_type_is_modifier(t))
7763 			t = btf_type_by_id(btf, t->type);
7764 		if (!btf_type_is_ptr(t))
7765 			goto skip_pointer;
7766 
7767 		if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
7768 			if (tags & ~ARG_TAG_CTX) {
7769 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7770 				return -EINVAL;
7771 			}
7772 			if ((tags & ARG_TAG_CTX) &&
7773 			    btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
7774 						       prog->expected_attach_type))
7775 				return -EINVAL;
7776 			sub->args[i].arg_type = ARG_PTR_TO_CTX;
7777 			continue;
7778 		}
7779 		if (btf_is_dynptr_ptr(btf, t)) {
7780 			if (tags) {
7781 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7782 				return -EINVAL;
7783 			}
7784 			sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
7785 			continue;
7786 		}
7787 		if (tags & ARG_TAG_TRUSTED) {
7788 			int kern_type_id;
7789 
7790 			if (tags & ARG_TAG_NONNULL) {
7791 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7792 				return -EINVAL;
7793 			}
7794 
7795 			kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
7796 			if (kern_type_id < 0)
7797 				return kern_type_id;
7798 
7799 			sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
7800 			if (tags & ARG_TAG_NULLABLE)
7801 				sub->args[i].arg_type |= PTR_MAYBE_NULL;
7802 			sub->args[i].btf_id = kern_type_id;
7803 			continue;
7804 		}
7805 		if (tags & ARG_TAG_ARENA) {
7806 			if (tags & ~ARG_TAG_ARENA) {
7807 				bpf_log(log, "arg#%d arena cannot be combined with any other tags\n", i);
7808 				return -EINVAL;
7809 			}
7810 			sub->args[i].arg_type = ARG_PTR_TO_ARENA;
7811 			continue;
7812 		}
7813 		if (is_global) { /* generic user data pointer */
7814 			u32 mem_size;
7815 
7816 			if (tags & ARG_TAG_NULLABLE) {
7817 				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7818 				return -EINVAL;
7819 			}
7820 
7821 			t = btf_type_skip_modifiers(btf, t->type, NULL);
7822 			ref_t = btf_resolve_size(btf, t, &mem_size);
7823 			if (IS_ERR(ref_t)) {
7824 				bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
7825 					i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7826 					PTR_ERR(ref_t));
7827 				return -EINVAL;
7828 			}
7829 
7830 			sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
7831 			if (tags & ARG_TAG_NONNULL)
7832 				sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
7833 			sub->args[i].mem_size = mem_size;
7834 			continue;
7835 		}
7836 
7837 skip_pointer:
7838 		if (tags) {
7839 			bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
7840 			return -EINVAL;
7841 		}
7842 		if (btf_type_is_int(t) || btf_is_any_enum(t)) {
7843 			sub->args[i].arg_type = ARG_ANYTHING;
7844 			continue;
7845 		}
7846 		if (!is_global)
7847 			return -EINVAL;
7848 		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
7849 			i, btf_type_str(t), tname);
7850 		return -EINVAL;
7851 	}
7852 
7853 	sub->arg_cnt = nargs;
7854 	sub->args_cached = true;
7855 
7856 	return 0;
7857 }
7858 
7859 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
7860 			  struct btf_show *show)
7861 {
7862 	const struct btf_type *t = btf_type_by_id(btf, type_id);
7863 
7864 	show->btf = btf;
7865 	memset(&show->state, 0, sizeof(show->state));
7866 	memset(&show->obj, 0, sizeof(show->obj));
7867 
7868 	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
7869 }
7870 
7871 __printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
7872 					va_list args)
7873 {
7874 	seq_vprintf((struct seq_file *)show->target, fmt, args);
7875 }
7876 
7877 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
7878 			    void *obj, struct seq_file *m, u64 flags)
7879 {
7880 	struct btf_show sseq;
7881 
7882 	sseq.target = m;
7883 	sseq.showfn = btf_seq_show;
7884 	sseq.flags = flags;
7885 
7886 	btf_type_show(btf, type_id, obj, &sseq);
7887 
7888 	return sseq.state.status;
7889 }
7890 
7891 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
7892 		       struct seq_file *m)
7893 {
7894 	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
7895 				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
7896 				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
7897 }
7898 
7899 struct btf_show_snprintf {
7900 	struct btf_show show;
7901 	int len_left;		/* space left in string */
7902 	int len;		/* length we would have written */
7903 };
7904 
7905 __printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
7906 					     va_list args)
7907 {
7908 	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
7909 	int len;
7910 
7911 	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
7912 
7913 	if (len < 0) {
7914 		ssnprintf->len_left = 0;
7915 		ssnprintf->len = len;
7916 	} else if (len >= ssnprintf->len_left) {
7917 		/* no space, drive on to get length we would have written */
7918 		ssnprintf->len_left = 0;
7919 		ssnprintf->len += len;
7920 	} else {
7921 		ssnprintf->len_left -= len;
7922 		ssnprintf->len += len;
7923 		show->target += len;
7924 	}
7925 }
7926 
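/* Like snprintf(), the length returned on success reflects what would have
 * been written even if 'buf' was too small, so a hypothetical caller can
 * detect truncation, e.g.:
 *
 *	char buf[64];
 *	int need = btf_type_snprintf_show(btf, id, obj, buf, sizeof(buf), 0);
 *
 * and retry with a larger buffer when need >= sizeof(buf).
 */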
7927 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
7928 			   char *buf, int len, u64 flags)
7929 {
7930 	struct btf_show_snprintf ssnprintf;
7931 
7932 	ssnprintf.show.target = buf;
7933 	ssnprintf.show.flags = flags;
7934 	ssnprintf.show.showfn = btf_snprintf_show;
7935 	ssnprintf.len_left = len;
7936 	ssnprintf.len = 0;
7937 
7938 	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
7939 
7940 	/* If we encountered an error, return it. */
7941 	if (ssnprintf.show.state.status)
7942 		return ssnprintf.show.state.status;
7943 
7944 	/* Otherwise return length we would have written */
7945 	return ssnprintf.len;
7946 }
7947 
7948 #ifdef CONFIG_PROC_FS
7949 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
7950 {
7951 	const struct btf *btf = filp->private_data;
7952 
7953 	seq_printf(m, "btf_id:\t%u\n", btf->id);
7954 }
7955 #endif
7956 
7957 static int btf_release(struct inode *inode, struct file *filp)
7958 {
7959 	btf_put(filp->private_data);
7960 	return 0;
7961 }
7962 
7963 const struct file_operations btf_fops = {
7964 #ifdef CONFIG_PROC_FS
7965 	.show_fdinfo	= bpf_btf_show_fdinfo,
7966 #endif
7967 	.release	= btf_release,
7968 };
7969 
7970 static int __btf_new_fd(struct btf *btf)
7971 {
7972 	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
7973 }
7974 
7975 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
7976 {
7977 	struct btf *btf;
7978 	int ret;
7979 
7980 	btf = btf_parse(attr, uattr, uattr_size);
7981 	if (IS_ERR(btf))
7982 		return PTR_ERR(btf);
7983 
7984 	ret = btf_alloc_id(btf);
7985 	if (ret) {
7986 		btf_free(btf);
7987 		return ret;
7988 	}
7989 
7990 	/*
7991 	 * The BTF ID is published to userspace.
7992 	 * All BTF frees must go through call_rcu() from
7993 	 * now on (i.e. free by calling btf_put()).
7994 	 */
7995 
7996 	ret = __btf_new_fd(btf);
7997 	if (ret < 0)
7998 		btf_put(btf);
7999 
8000 	return ret;
8001 }
8002 
8003 struct btf *btf_get_by_fd(int fd)
8004 {
8005 	struct btf *btf;
8006 	CLASS(fd, f)(fd);
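	/* scope-based fd guard: the file reference is put automatically on return */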
8007 
8008 	btf = __btf_get_by_fd(f);
8009 	if (!IS_ERR(btf))
8010 		refcount_inc(&btf->refcnt);
8011 
8012 	return btf;
8013 }
8014 
8015 int btf_get_info_by_fd(const struct btf *btf,
8016 		       const union bpf_attr *attr,
8017 		       union bpf_attr __user *uattr)
8018 {
8019 	struct bpf_btf_info __user *uinfo;
8020 	struct bpf_btf_info info;
8021 	u32 info_copy, btf_copy;
8022 	void __user *ubtf;
8023 	char __user *uname;
8024 	u32 uinfo_len, uname_len, name_len;
8025 	int ret = 0;
8026 
8027 	uinfo = u64_to_user_ptr(attr->info.info);
8028 	uinfo_len = attr->info.info_len;
8029 
8030 	info_copy = min_t(u32, uinfo_len, sizeof(info));
8031 	memset(&info, 0, sizeof(info));
8032 	if (copy_from_user(&info, uinfo, info_copy))
8033 		return -EFAULT;
8034 
8035 	info.id = btf->id;
8036 	ubtf = u64_to_user_ptr(info.btf);
8037 	btf_copy = min_t(u32, btf->data_size, info.btf_size);
8038 	if (copy_to_user(ubtf, btf->data, btf_copy))
8039 		return -EFAULT;
8040 	info.btf_size = btf->data_size;
8041 
8042 	info.kernel_btf = btf->kernel_btf;
8043 
8044 	uname = u64_to_user_ptr(info.name);
8045 	uname_len = info.name_len;
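	/* the name pointer and the name length must either both be set or both be zero */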
8046 	if (!uname ^ !uname_len)
8047 		return -EINVAL;
8048 
8049 	name_len = strlen(btf->name);
8050 	info.name_len = name_len;
8051 
8052 	if (uname) {
8053 		if (uname_len >= name_len + 1) {
8054 			if (copy_to_user(uname, btf->name, name_len + 1))
8055 				return -EFAULT;
8056 		} else {
8057 			char zero = '\0';
8058 
8059 			if (copy_to_user(uname, btf->name, uname_len - 1))
8060 				return -EFAULT;
8061 			if (put_user(zero, uname + uname_len - 1))
8062 				return -EFAULT;
8063 			/* let user-space know the buffer was too short */
8064 			ret = -ENOSPC;
8065 		}
8066 	}
8067 
8068 	if (copy_to_user(uinfo, &info, info_copy) ||
8069 	    put_user(info_copy, &uattr->info.info_len))
8070 		return -EFAULT;
8071 
8072 	return ret;
8073 }
8074 
8075 int btf_get_fd_by_id(u32 id)
8076 {
8077 	struct btf *btf;
8078 	int fd;
8079 
8080 	rcu_read_lock();
8081 	btf = idr_find(&btf_idr, id);
8082 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
8083 		btf = ERR_PTR(-ENOENT);
8084 	rcu_read_unlock();
8085 
8086 	if (IS_ERR(btf))
8087 		return PTR_ERR(btf);
8088 
8089 	fd = __btf_new_fd(btf);
8090 	if (fd < 0)
8091 		btf_put(btf);
8092 
8093 	return fd;
8094 }
8095 
8096 u32 btf_obj_id(const struct btf *btf)
8097 {
8098 	return btf->id;
8099 }
8100 
8101 bool btf_is_kernel(const struct btf *btf)
8102 {
8103 	return btf->kernel_btf;
8104 }
8105 
8106 bool btf_is_module(const struct btf *btf)
8107 {
8108 	return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
8109 }
8110 
8111 enum {
8112 	BTF_MODULE_F_LIVE = (1 << 0),
8113 };
8114 
8115 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8116 struct btf_module {
8117 	struct list_head list;
8118 	struct module *module;
8119 	struct btf *btf;
8120 	struct bin_attribute *sysfs_attr;
8121 	int flags;
8122 };
8123 
8124 static LIST_HEAD(btf_modules);
8125 static DEFINE_MUTEX(btf_module_mutex);
8126 
8127 static void purge_cand_cache(struct btf *btf);
8128 
8129 static int btf_module_notify(struct notifier_block *nb, unsigned long op,
8130 			     void *module)
8131 {
8132 	struct btf_module *btf_mod, *tmp;
8133 	struct module *mod = module;
8134 	struct btf *btf;
8135 	int err = 0;
8136 
8137 	if (mod->btf_data_size == 0 ||
8138 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
8139 	     op != MODULE_STATE_GOING))
8140 		goto out;
8141 
8142 	switch (op) {
8143 	case MODULE_STATE_COMING:
8144 		btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
8145 		if (!btf_mod) {
8146 			err = -ENOMEM;
8147 			goto out;
8148 		}
8149 		btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
8150 				       mod->btf_base_data, mod->btf_base_data_size);
8151 		if (IS_ERR(btf)) {
8152 			kfree(btf_mod);
8153 			if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
8154 				pr_warn("failed to validate module [%s] BTF: %ld\n",
8155 					mod->name, PTR_ERR(btf));
8156 				err = PTR_ERR(btf);
8157 			} else {
8158 				pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
8159 			}
8160 			goto out;
8161 		}
8162 		err = btf_alloc_id(btf);
8163 		if (err) {
8164 			btf_free(btf);
8165 			kfree(btf_mod);
8166 			goto out;
8167 		}
8168 
8169 		purge_cand_cache(NULL);
8170 		mutex_lock(&btf_module_mutex);
8171 		btf_mod->module = module;
8172 		btf_mod->btf = btf;
8173 		list_add(&btf_mod->list, &btf_modules);
8174 		mutex_unlock(&btf_module_mutex);
8175 
8176 		if (IS_ENABLED(CONFIG_SYSFS)) {
8177 			struct bin_attribute *attr;
8178 
8179 			attr = kzalloc(sizeof(*attr), GFP_KERNEL);
8180 			if (!attr)
8181 				goto out;
8182 
8183 			sysfs_bin_attr_init(attr);
8184 			attr->attr.name = btf->name;
8185 			attr->attr.mode = 0444;
8186 			attr->size = btf->data_size;
8187 			attr->private = btf->data;
8188 			attr->read_new = sysfs_bin_attr_simple_read;
8189 
8190 			err = sysfs_create_bin_file(btf_kobj, attr);
8191 			if (err) {
8192 				pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
8193 					mod->name, err);
8194 				kfree(attr);
8195 				err = 0;
8196 				goto out;
8197 			}
8198 
8199 			btf_mod->sysfs_attr = attr;
8200 		}
8201 
8202 		break;
8203 	case MODULE_STATE_LIVE:
8204 		mutex_lock(&btf_module_mutex);
8205 		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8206 			if (btf_mod->module != module)
8207 				continue;
8208 
8209 			btf_mod->flags |= BTF_MODULE_F_LIVE;
8210 			break;
8211 		}
8212 		mutex_unlock(&btf_module_mutex);
8213 		break;
8214 	case MODULE_STATE_GOING:
8215 		mutex_lock(&btf_module_mutex);
8216 		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8217 			if (btf_mod->module != module)
8218 				continue;
8219 
8220 			list_del(&btf_mod->list);
8221 			if (btf_mod->sysfs_attr)
8222 				sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
8223 			purge_cand_cache(btf_mod->btf);
8224 			btf_put(btf_mod->btf);
8225 			kfree(btf_mod->sysfs_attr);
8226 			kfree(btf_mod);
8227 			break;
8228 		}
8229 		mutex_unlock(&btf_module_mutex);
8230 		break;
8231 	}
8232 out:
8233 	return notifier_from_errno(err);
8234 }
8235 
8236 static struct notifier_block btf_module_nb = {
8237 	.notifier_call = btf_module_notify,
8238 };
8239 
8240 static int __init btf_module_init(void)
8241 {
8242 	register_module_notifier(&btf_module_nb);
8243 	return 0;
8244 }
8245 
8246 fs_initcall(btf_module_init);
8247 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
8248 
8249 struct module *btf_try_get_module(const struct btf *btf)
8250 {
8251 	struct module *res = NULL;
8252 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8253 	struct btf_module *btf_mod, *tmp;
8254 
8255 	mutex_lock(&btf_module_mutex);
8256 	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8257 		if (btf_mod->btf != btf)
8258 			continue;
8259 
8260 		/* We must only consider modules whose __init routine has
8261 		 * finished, hence we must check for the BTF_MODULE_F_LIVE flag,
8262 		 * which is set from the notifier callback for
8263 		 * MODULE_STATE_LIVE.
8264 		 */
8265 		if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
8266 			res = btf_mod->module;
8267 
8268 		break;
8269 	}
8270 	mutex_unlock(&btf_module_mutex);
8271 #endif
8272 
8273 	return res;
8274 }
8275 
8276 /* Returns struct btf corresponding to the struct module.
8277  * This function can return NULL or ERR_PTR.
8278  */
8279 static struct btf *btf_get_module_btf(const struct module *module)
8280 {
8281 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8282 	struct btf_module *btf_mod, *tmp;
8283 #endif
8284 	struct btf *btf = NULL;
8285 
8286 	if (!module) {
8287 		btf = bpf_get_btf_vmlinux();
8288 		if (!IS_ERR_OR_NULL(btf))
8289 			btf_get(btf);
8290 		return btf;
8291 	}
8292 
8293 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8294 	mutex_lock(&btf_module_mutex);
8295 	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8296 		if (btf_mod->module != module)
8297 			continue;
8298 
8299 		btf_get(btf_mod->btf);
8300 		btf = btf_mod->btf;
8301 		break;
8302 	}
8303 	mutex_unlock(&btf_module_mutex);
8304 #endif
8305 
8306 	return btf;
8307 }
8308 
8309 static int check_btf_kconfigs(const struct module *module, const char *feature)
8310 {
8311 	if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
8312 		pr_err("missing vmlinux BTF, cannot register %s\n", feature);
8313 		return -ENOENT;
8314 	}
8315 	if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
8316 		pr_warn("missing module BTF, cannot register %s\n", feature);
8317 	return 0;
8318 }
8319 
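
/* On success, the lower 32 bits of the return value hold the BTF type id;
 * if the type was found in a module's BTF, the upper 32 bits hold a newly
 * created fd referencing that module's BTF object.
 */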
8320 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
8321 {
8322 	struct btf *btf = NULL;
8323 	int btf_obj_fd = 0;
8324 	long ret;
8325 
8326 	if (flags)
8327 		return -EINVAL;
8328 
8329 	if (name_sz <= 1 || name[name_sz - 1])
8330 		return -EINVAL;
8331 
8332 	ret = bpf_find_btf_id(name, kind, &btf);
8333 	if (ret > 0 && btf_is_module(btf)) {
8334 		btf_obj_fd = __btf_new_fd(btf);
8335 		if (btf_obj_fd < 0) {
8336 			btf_put(btf);
8337 			return btf_obj_fd;
8338 		}
8339 		return ret | (((u64)btf_obj_fd) << 32);
8340 	}
8341 	if (ret > 0)
8342 		btf_put(btf);
8343 	return ret;
8344 }
8345 
8346 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
8347 	.func		= bpf_btf_find_by_name_kind,
8348 	.gpl_only	= false,
8349 	.ret_type	= RET_INTEGER,
8350 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
8351 	.arg2_type	= ARG_CONST_SIZE,
8352 	.arg3_type	= ARG_ANYTHING,
8353 	.arg4_type	= ARG_ANYTHING,
8354 };
8355 
8356 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
8357 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
8358 BTF_TRACING_TYPE_xxx
8359 #undef BTF_TRACING_TYPE
8360 
8361 /* Validate well-formedness of iter argument type.
8362  * On success, return positive BTF ID of iter state's STRUCT type.
8363  * On error, negative error is returned.
8364  */
8365 int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
8366 {
8367 	const struct btf_param *arg;
8368 	const struct btf_type *t;
8369 	const char *name;
8370 	int btf_id;
8371 
8372 	if (btf_type_vlen(func) <= arg_idx)
8373 		return -EINVAL;
8374 
8375 	arg = &btf_params(func)[arg_idx];
8376 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8377 	if (!t || !btf_type_is_ptr(t))
8378 		return -EINVAL;
8379 	t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8380 	if (!t || !__btf_type_is_struct(t))
8381 		return -EINVAL;
8382 
8383 	name = btf_name_by_offset(btf, t->name_off);
8384 	if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
8385 		return -EINVAL;
8386 
8387 	return btf_id;
8388 }
8389 
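/* Sanity-check iterator kfunc prototypes.  For example, the numbers iterator
 * kfuncs follow the expected pattern:
 *
 *	bpf_iter_num_new(struct bpf_iter_num *it, int start, int end)
 *	bpf_iter_num_next(struct bpf_iter_num *it)
 *	bpf_iter_num_destroy(struct bpf_iter_num *it)
 *
 * where only the constructor takes extra arguments, _next() returns a
 * pointer (NULL once exhausted) and _destroy() returns void.
 */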
8390 static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
8391 				 const struct btf_type *func, u32 func_flags)
8392 {
8393 	u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
8394 	const char *sfx, *iter_name;
8395 	const struct btf_type *t;
8396 	char exp_name[128];
8397 	u32 nr_args;
8398 	int btf_id;
8399 
8400 	/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
8401 	if (!flags || (flags & (flags - 1)))
8402 		return -EINVAL;
8403 
8404 	/* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
8405 	nr_args = btf_type_vlen(func);
8406 	if (nr_args < 1)
8407 		return -EINVAL;
8408 
8409 	btf_id = btf_check_iter_arg(btf, func, 0);
8410 	if (btf_id < 0)
8411 		return btf_id;
8412 
8413 	/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
8414 	 * fit nicely in stack slots
8415 	 */
8416 	t = btf_type_by_id(btf, btf_id);
8417 	if (t->size == 0 || (t->size % 8))
8418 		return -EINVAL;
8419 
8420 	/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
8421 	 * naming pattern
8422 	 */
8423 	iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8424 	if (flags & KF_ITER_NEW)
8425 		sfx = "new";
8426 	else if (flags & KF_ITER_NEXT)
8427 		sfx = "next";
8428 	else /* (flags & KF_ITER_DESTROY) */
8429 		sfx = "destroy";
8430 
8431 	snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
8432 	if (strcmp(func_name, exp_name))
8433 		return -EINVAL;
8434 
8435 	/* only iter constructor should have extra arguments */
8436 	if (!(flags & KF_ITER_NEW) && nr_args != 1)
8437 		return -EINVAL;
8438 
8439 	if (flags & KF_ITER_NEXT) {
8440 		/* bpf_iter_<type>_next() should return pointer */
8441 		t = btf_type_skip_modifiers(btf, func->type, NULL);
8442 		if (!t || !btf_type_is_ptr(t))
8443 			return -EINVAL;
8444 	}
8445 
8446 	if (flags & KF_ITER_DESTROY) {
8447 		/* bpf_iter_<type>_destroy() should return void */
8448 		t = btf_type_by_id(btf, func->type);
8449 		if (!t || !btf_type_is_void(t))
8450 			return -EINVAL;
8451 	}
8452 
8453 	return 0;
8454 }
8455 
8456 static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
8457 {
8458 	const struct btf_type *func;
8459 	const char *func_name;
8460 	int err;
8461 
8462 	/* any kfunc should be FUNC -> FUNC_PROTO */
8463 	func = btf_type_by_id(btf, func_id);
8464 	if (!func || !btf_type_is_func(func))
8465 		return -EINVAL;
8466 
8467 	/* sanity check kfunc name */
8468 	func_name = btf_name_by_offset(btf, func->name_off);
8469 	if (!func_name || !func_name[0])
8470 		return -EINVAL;
8471 
8472 	func = btf_type_by_id(btf, func->type);
8473 	if (!func || !btf_type_is_func_proto(func))
8474 		return -EINVAL;
8475 
8476 	if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
8477 		err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
8478 		if (err)
8479 			return err;
8480 	}
8481 
8482 	return 0;
8483 }
8484 
8485 /* Kernel Function (kfunc) BTF ID set registration API */
8486 
8487 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
8488 				  const struct btf_kfunc_id_set *kset)
8489 {
8490 	struct btf_kfunc_hook_filter *hook_filter;
8491 	struct btf_id_set8 *add_set = kset->set;
8492 	bool vmlinux_set = !btf_is_module(btf);
8493 	bool add_filter = !!kset->filter;
8494 	struct btf_kfunc_set_tab *tab;
8495 	struct btf_id_set8 *set;
8496 	u32 set_cnt, i;
8497 	int ret;
8498 
8499 	if (hook >= BTF_KFUNC_HOOK_MAX) {
8500 		ret = -EINVAL;
8501 		goto end;
8502 	}
8503 
8504 	if (!add_set->cnt)
8505 		return 0;
8506 
8507 	tab = btf->kfunc_set_tab;
8508 
8509 	if (tab && add_filter) {
8510 		u32 i;
8511 
8512 		hook_filter = &tab->hook_filters[hook];
8513 		for (i = 0; i < hook_filter->nr_filters; i++) {
8514 			if (hook_filter->filters[i] == kset->filter) {
8515 				add_filter = false;
8516 				break;
8517 			}
8518 		}
8519 
8520 		if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
8521 			ret = -E2BIG;
8522 			goto end;
8523 		}
8524 	}
8525 
8526 	if (!tab) {
8527 		tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
8528 		if (!tab)
8529 			return -ENOMEM;
8530 		btf->kfunc_set_tab = tab;
8531 	}
8532 
8533 	set = tab->sets[hook];
8534 	/* Warn when register_btf_kfunc_id_set is called twice for the same hook
8535 	 * for module sets.
8536 	 */
8537 	if (WARN_ON_ONCE(set && !vmlinux_set)) {
8538 		ret = -EINVAL;
8539 		goto end;
8540 	}
8541 
8542 	/* In case of vmlinux sets, there may be more than one set being
8543 	 * registered per hook. To create a unified set, we allocate a new set
8544 	 * and concatenate all individual sets being registered. While each set
8545 	 * is individually sorted, they may become unsorted when concatenated,
8546 	 * hence re-sorting the final set is required to make binary
8547 	 * searching the set with the btf_id_set8_contains() function work.
8548 	 *
8549 	 * For module sets, we need to allocate as we may need to relocate
8550 	 * BTF ids.
8551 	 */
8552 	set_cnt = set ? set->cnt : 0;
8553 
8554 	if (set_cnt > U32_MAX - add_set->cnt) {
8555 		ret = -EOVERFLOW;
8556 		goto end;
8557 	}
8558 
8559 	if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
8560 		ret = -E2BIG;
8561 		goto end;
8562 	}
8563 
8564 	/* Grow set */
8565 	set = krealloc(tab->sets[hook],
8566 		       offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]),
8567 		       GFP_KERNEL | __GFP_NOWARN);
8568 	if (!set) {
8569 		ret = -ENOMEM;
8570 		goto end;
8571 	}
8572 
8573 	/* For newly allocated set, initialize set->cnt to 0 */
8574 	if (!tab->sets[hook])
8575 		set->cnt = 0;
8576 	tab->sets[hook] = set;
8577 
8578 	/* Concatenate the two sets */
8579 	memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
8580 	/* Now that the set is copied, update with relocated BTF ids */
8581 	for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
8582 		set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8583 
8584 	set->cnt += add_set->cnt;
8585 
8586 	sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8587 
8588 	if (add_filter) {
8589 		hook_filter = &tab->hook_filters[hook];
8590 		hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
8591 	}
8592 	return 0;
8593 end:
8594 	btf_free_kfunc_set_tab(btf);
8595 	return ret;
8596 }
8597 
8598 static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
8599 					enum btf_kfunc_hook hook,
8600 					u32 kfunc_btf_id,
8601 					const struct bpf_prog *prog)
8602 {
8603 	struct btf_kfunc_hook_filter *hook_filter;
8604 	struct btf_id_set8 *set;
8605 	u32 *id, i;
8606 
8607 	if (hook >= BTF_KFUNC_HOOK_MAX)
8608 		return NULL;
8609 	if (!btf->kfunc_set_tab)
8610 		return NULL;
8611 	hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
8612 	for (i = 0; i < hook_filter->nr_filters; i++) {
8613 		if (hook_filter->filters[i](prog, kfunc_btf_id))
8614 			return NULL;
8615 	}
8616 	set = btf->kfunc_set_tab->sets[hook];
8617 	if (!set)
8618 		return NULL;
8619 	id = btf_id_set8_contains(set, kfunc_btf_id);
8620 	if (!id)
8621 		return NULL;
8622 	/* The flags for BTF ID are located next to it */
8623 	return id + 1;
8624 }
8625 
8626 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
8627 {
8628 	switch (prog_type) {
8629 	case BPF_PROG_TYPE_UNSPEC:
8630 		return BTF_KFUNC_HOOK_COMMON;
8631 	case BPF_PROG_TYPE_XDP:
8632 		return BTF_KFUNC_HOOK_XDP;
8633 	case BPF_PROG_TYPE_SCHED_CLS:
8634 		return BTF_KFUNC_HOOK_TC;
8635 	case BPF_PROG_TYPE_STRUCT_OPS:
8636 		return BTF_KFUNC_HOOK_STRUCT_OPS;
8637 	case BPF_PROG_TYPE_TRACING:
8638 	case BPF_PROG_TYPE_TRACEPOINT:
8639 	case BPF_PROG_TYPE_PERF_EVENT:
8640 	case BPF_PROG_TYPE_LSM:
8641 		return BTF_KFUNC_HOOK_TRACING;
8642 	case BPF_PROG_TYPE_SYSCALL:
8643 		return BTF_KFUNC_HOOK_SYSCALL;
8644 	case BPF_PROG_TYPE_CGROUP_SKB:
8645 	case BPF_PROG_TYPE_CGROUP_SOCK:
8646 	case BPF_PROG_TYPE_CGROUP_DEVICE:
8647 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
8648 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
8649 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
8650 	case BPF_PROG_TYPE_SOCK_OPS:
8651 		return BTF_KFUNC_HOOK_CGROUP;
8652 	case BPF_PROG_TYPE_SCHED_ACT:
8653 		return BTF_KFUNC_HOOK_SCHED_ACT;
8654 	case BPF_PROG_TYPE_SK_SKB:
8655 		return BTF_KFUNC_HOOK_SK_SKB;
8656 	case BPF_PROG_TYPE_SOCKET_FILTER:
8657 		return BTF_KFUNC_HOOK_SOCKET_FILTER;
8658 	case BPF_PROG_TYPE_LWT_OUT:
8659 	case BPF_PROG_TYPE_LWT_IN:
8660 	case BPF_PROG_TYPE_LWT_XMIT:
8661 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
8662 		return BTF_KFUNC_HOOK_LWT;
8663 	case BPF_PROG_TYPE_NETFILTER:
8664 		return BTF_KFUNC_HOOK_NETFILTER;
8665 	case BPF_PROG_TYPE_KPROBE:
8666 		return BTF_KFUNC_HOOK_KPROBE;
8667 	default:
8668 		return BTF_KFUNC_HOOK_MAX;
8669 	}
8670 }
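
/* Program types not handled above map to BTF_KFUNC_HOOK_MAX, which both
 * btf_populate_kfunc_set() and __btf_kfunc_id_set_contains() reject, so such
 * programs can only use kfuncs registered for BTF_KFUNC_HOOK_COMMON.
 */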
8671 
8672 /* Caution:
8673  * Reference to the module (obtained using btf_try_get_module) corresponding to
8674  * the struct btf *MUST* be held when calling this function from verifier
8675  * context. This is usually true as we stash references in prog's kfunc_btf_tab;
8676  * keeping the reference for the duration of the call provides the necessary
8677  * protection for looking up a well-formed btf->kfunc_set_tab.
8678  */
8679 u32 *btf_kfunc_id_set_contains(const struct btf *btf,
8680 			       u32 kfunc_btf_id,
8681 			       const struct bpf_prog *prog)
8682 {
8683 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
8684 	enum btf_kfunc_hook hook;
8685 	u32 *kfunc_flags;
8686 
8687 	kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
8688 	if (kfunc_flags)
8689 		return kfunc_flags;
8690 
8691 	hook = bpf_prog_type_to_kfunc_hook(prog_type);
8692 	return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog);
8693 }
8694 
8695 u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
8696 				const struct bpf_prog *prog)
8697 {
8698 	return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
8699 }
8700 
8701 static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
8702 				       const struct btf_kfunc_id_set *kset)
8703 {
8704 	struct btf *btf;
8705 	int ret, i;
8706 
8707 	btf = btf_get_module_btf(kset->owner);
8708 	if (!btf)
8709 		return check_btf_kconfigs(kset->owner, "kfunc");
8710 	if (IS_ERR(btf))
8711 		return PTR_ERR(btf);
8712 
8713 	for (i = 0; i < kset->set->cnt; i++) {
8714 		ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8715 					     kset->set->pairs[i].flags);
8716 		if (ret)
8717 			goto err_out;
8718 	}
8719 
8720 	ret = btf_populate_kfunc_set(btf, hook, kset);
8721 
8722 err_out:
8723 	btf_put(btf);
8724 	return ret;
8725 }
8726 
8727 /* This function must be invoked only from initcalls/module init functions */
8728 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
8729 			      const struct btf_kfunc_id_set *kset)
8730 {
8731 	enum btf_kfunc_hook hook;
8732 
8733 	/* All kfuncs need to be tagged as such in BTF.
8734 	 * WARN() for initcall registrations that do not check errors.
8735 	 */
8736 	if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
8737 		WARN_ON(!kset->owner);
8738 		return -EINVAL;
8739 	}
8740 
8741 	hook = bpf_prog_type_to_kfunc_hook(prog_type);
8742 	return __register_btf_kfunc_id_set(hook, kset);
8743 }
8744 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
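
/* Illustrative registration pattern (a sketch; the "foo" names are
 * hypothetical):
 *
 *	__bpf_kfunc int bpf_foo_do_something(...);
 *
 *	BTF_KFUNCS_START(foo_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_foo_do_something, KF_TRUSTED_ARGS)
 *	BTF_KFUNCS_END(foo_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set foo_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &foo_kfunc_ids,
 *	};
 *
 *	// from an initcall or module_init function:
 *	register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &foo_kfunc_set);
 *
 * BTF_KFUNCS_START()/BTF_KFUNCS_END() mark the set with BTF_SET8_KFUNCS,
 * which the flag check above requires.
 */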
8745 
8746 /* This function must be invoked only from initcalls/module init functions */
8747 int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
8748 {
8749 	return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
8750 }
8751 EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set);
8752 
8753 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
8754 {
8755 	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
8756 	struct btf_id_dtor_kfunc *dtor;
8757 
8758 	if (!tab)
8759 		return -ENOENT;
8760 	/* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
8761 	 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
8762 	 */
8763 	BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
8764 	dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8765 	if (!dtor)
8766 		return -ENOENT;
8767 	return dtor->kfunc_btf_id;
8768 }
8769 
8770 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
8771 {
8772 	const struct btf_type *dtor_func, *dtor_func_proto, *t;
8773 	const struct btf_param *args;
8774 	s32 dtor_btf_id;
8775 	u32 nr_args, i;
8776 
8777 	for (i = 0; i < cnt; i++) {
8778 		dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id);
8779 
8780 		dtor_func = btf_type_by_id(btf, dtor_btf_id);
8781 		if (!dtor_func || !btf_type_is_func(dtor_func))
8782 			return -EINVAL;
8783 
8784 		dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
8785 		if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
8786 			return -EINVAL;
8787 
8788 		/* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
8789 		t = btf_type_by_id(btf, dtor_func_proto->type);
8790 		if (!t || !btf_type_is_void(t))
8791 			return -EINVAL;
8792 
8793 		nr_args = btf_type_vlen(dtor_func_proto);
8794 		if (nr_args != 1)
8795 			return -EINVAL;
8796 		args = btf_params(dtor_func_proto);
8797 		t = btf_type_by_id(btf, args[0].type);
8798 		/* Allow any pointer type, as the pointer width on targets Linux
8799 		 * supports is the same for all pointer types (i.e. sizeof(void *))
8800 		 */
8801 		if (!t || !btf_type_is_ptr(t))
8802 			return -EINVAL;
8803 	}
8804 	return 0;
8805 }
8806 
8807 /* This function must be invoked only from initcalls/module init functions */
8808 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
8809 				struct module *owner)
8810 {
8811 	struct btf_id_dtor_kfunc_tab *tab;
8812 	struct btf *btf;
8813 	u32 tab_cnt, i;
8814 	int ret;
8815 
8816 	btf = btf_get_module_btf(owner);
8817 	if (!btf)
8818 		return check_btf_kconfigs(owner, "dtor kfuncs");
8819 	if (IS_ERR(btf))
8820 		return PTR_ERR(btf);
8821 
8822 	if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8823 		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8824 		ret = -E2BIG;
8825 		goto end;
8826 	}
8827 
8828 	/* Ensure that the prototype of dtor kfuncs being registered is sane */
8829 	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
8830 	if (ret < 0)
8831 		goto end;
8832 
8833 	tab = btf->dtor_kfunc_tab;
8834 	/* Only one call allowed for modules */
8835 	if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
8836 		ret = -EINVAL;
8837 		goto end;
8838 	}
8839 
8840 	tab_cnt = tab ? tab->cnt : 0;
8841 	if (tab_cnt > U32_MAX - add_cnt) {
8842 		ret = -EOVERFLOW;
8843 		goto end;
8844 	}
8845 	if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8846 		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8847 		ret = -E2BIG;
8848 		goto end;
8849 	}
8850 
8851 	tab = krealloc(btf->dtor_kfunc_tab,
8852 		       offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
8853 		       GFP_KERNEL | __GFP_NOWARN);
8854 	if (!tab) {
8855 		ret = -ENOMEM;
8856 		goto end;
8857 	}
8858 
8859 	if (!btf->dtor_kfunc_tab)
8860 		tab->cnt = 0;
8861 	btf->dtor_kfunc_tab = tab;
8862 
8863 	memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
8864 
8865 	/* remap BTF ids based on BTF relocation (if any) */
8866 	for (i = tab_cnt; i < tab_cnt + add_cnt; i++) {
8867 		tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
8868 		tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
8869 	}
8870 
8871 	tab->cnt += add_cnt;
8872 
8873 	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
8874 
8875 end:
8876 	if (ret)
8877 		btf_free_dtor_kfunc_tab(btf);
8878 	btf_put(btf);
8879 	return ret;
8880 }
8881 EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
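
/* Illustrative registration of a destructor kfunc (a sketch; the "foo" names
 * are hypothetical). The struct/kfunc BTF ids are typically resolved with the
 * BTF_ID_LIST()/BTF_ID() macros:
 *
 *	BTF_ID_LIST(foo_dtor_ids)
 *	BTF_ID(struct, foo)
 *	BTF_ID(func, bpf_foo_release)
 *
 *	static const struct btf_id_dtor_kfunc foo_dtors[] = {
 *		{
 *			.btf_id       = foo_dtor_ids[0],
 *			.kfunc_btf_id = foo_dtor_ids[1],
 *		},
 *	};
 *
 *	register_btf_id_dtor_kfuncs(foo_dtors, ARRAY_SIZE(foo_dtors), THIS_MODULE);
 */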
8882 
8883 #define MAX_TYPES_ARE_COMPAT_DEPTH 2
8884 
8885 /* Check local and target types for compatibility. This check is used for
8886  * type-based CO-RE relocations and follows slightly different rules than
8887  * field-based relocations. This function assumes that root types were already
8888  * checked for name match. Beyond that initial root-level name check, names
8889  * are completely ignored. Compatibility rules are as follows:
8890  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
8891  *     kind should match for local and target types (i.e., STRUCT is not
8892  *     compatible with UNION);
8893  *   - for ENUMs/ENUM64s, the size is ignored;
8894  *   - for INT, size and signedness are ignored;
8895  *   - for ARRAY, dimensionality is ignored, element types are checked for
8896  *     compatibility recursively;
8897  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
8898  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
8899  *   - FUNC_PROTOs are compatible if they have compatible signature: same
8900  *     number of input args and compatible return and argument types.
8901  * These rules are not set in stone and probably will be adjusted as we get
8902  * more experience with using BPF CO-RE relocations.
8903  */
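/* For example, under these rules a local "struct foo { int x; }" and a target
 * "struct foo { long x; long y; }" are compatible (same kind; members are not
 * compared for type-based relocations), while a local "struct foo" and a
 * target "union foo" are not.
 */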
8904 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
8905 			      const struct btf *targ_btf, __u32 targ_id)
8906 {
8907 	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
8908 					   MAX_TYPES_ARE_COMPAT_DEPTH);
8909 }
8910 
8911 #define MAX_TYPES_MATCH_DEPTH 2
8912 
8913 int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
8914 			 const struct btf *targ_btf, u32 targ_id)
8915 {
8916 	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
8917 				      MAX_TYPES_MATCH_DEPTH);
8918 }
8919 
8920 static bool bpf_core_is_flavor_sep(const char *s)
8921 {
8922 	/* check X___Y name pattern, where X and Y are not underscores */
8923 	return s[0] != '_' &&				      /* X */
8924 	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
8925 	       s[4] != '_';				      /* Y */
8926 }
8927 
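/* Return the length of the "essential" part of a possibly "flavored" type
 * name: for a name following the X___Y pattern checked above (e.g.
 * "task_struct___flavor") this is strlen("task_struct"); names without a
 * flavor separator are returned in full.
 */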
8928 size_t bpf_core_essential_name_len(const char *name)
8929 {
8930 	size_t n = strlen(name);
8931 	int i;
8932 
8933 	for (i = n - 5; i >= 0; i--) {
8934 		if (bpf_core_is_flavor_sep(name + i))
8935 			return i + 1;
8936 	}
8937 	return n;
8938 }
8939 
8940 static void bpf_free_cands(struct bpf_cand_cache *cands)
8941 {
8942 	if (!cands->cnt)
8943 		/* empty candidate array was allocated on stack */
8944 		return;
8945 	kfree(cands);
8946 }
8947 
8948 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
8949 {
8950 	kfree(cands->name);
8951 	kfree(cands);
8952 }
8953 
8954 #define VMLINUX_CAND_CACHE_SIZE 31
8955 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
8956 
8957 #define MODULE_CAND_CACHE_SIZE 31
8958 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
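
/* Both caches are simple direct-mapped tables: an entry lives in slot
 * jhash(name) % cache_size (see hash_cands()), and inserting a colliding
 * entry replaces whatever was there (see populate_cand_cache()).
 */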
8959 
8960 static void __print_cand_cache(struct bpf_verifier_log *log,
8961 			       struct bpf_cand_cache **cache,
8962 			       int cache_size)
8963 {
8964 	struct bpf_cand_cache *cc;
8965 	int i, j;
8966 
8967 	for (i = 0; i < cache_size; i++) {
8968 		cc = cache[i];
8969 		if (!cc)
8970 			continue;
8971 		bpf_log(log, "[%d]%s(", i, cc->name);
8972 		for (j = 0; j < cc->cnt; j++) {
8973 			bpf_log(log, "%d", cc->cands[j].id);
8974 			if (j < cc->cnt - 1)
8975 				bpf_log(log, " ");
8976 		}
8977 		bpf_log(log, "), ");
8978 	}
8979 }
8980 
8981 static void print_cand_cache(struct bpf_verifier_log *log)
8982 {
8983 	mutex_lock(&cand_cache_mutex);
8984 	bpf_log(log, "vmlinux_cand_cache:");
8985 	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8986 	bpf_log(log, "\nmodule_cand_cache:");
8987 	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8988 	bpf_log(log, "\n");
8989 	mutex_unlock(&cand_cache_mutex);
8990 }
8991 
8992 static u32 hash_cands(struct bpf_cand_cache *cands)
8993 {
8994 	return jhash(cands->name, cands->name_len, 0);
8995 }
8996 
8997 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
8998 					       struct bpf_cand_cache **cache,
8999 					       int cache_size)
9000 {
9001 	struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
9002 
9003 	if (cc && cc->name_len == cands->name_len &&
9004 	    !strncmp(cc->name, cands->name, cands->name_len))
9005 		return cc;
9006 	return NULL;
9007 }
9008 
9009 static size_t sizeof_cands(int cnt)
9010 {
9011 	return offsetof(struct bpf_cand_cache, cands[cnt]);
9012 }
9013 
9014 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
9015 						  struct bpf_cand_cache **cache,
9016 						  int cache_size)
9017 {
9018 	struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
9019 
9020 	if (*cc) {
9021 		bpf_free_cands_from_cache(*cc);
9022 		*cc = NULL;
9023 	}
9024 	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
9025 	if (!new_cands) {
9026 		bpf_free_cands(cands);
9027 		return ERR_PTR(-ENOMEM);
9028 	}
9029 	/* strdup the name, since it will stay in the cache.
9030 	 * cands->name points to strings in the prog's BTF, and the prog can be unloaded.
9031 	 */
9032 	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
9033 	bpf_free_cands(cands);
9034 	if (!new_cands->name) {
9035 		kfree(new_cands);
9036 		return ERR_PTR(-ENOMEM);
9037 	}
9038 	*cc = new_cands;
9039 	return new_cands;
9040 }
9041 
9042 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
9043 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
9044 			       int cache_size)
9045 {
9046 	struct bpf_cand_cache *cc;
9047 	int i, j;
9048 
9049 	for (i = 0; i < cache_size; i++) {
9050 		cc = cache[i];
9051 		if (!cc)
9052 			continue;
9053 		if (!btf) {
9054 			/* when a new module is loaded, purge all of module_cand_cache,
9055 			 * since the new module might have candidates whose names
9056 			 * match cached cands.
9057 			 */
9058 			bpf_free_cands_from_cache(cc);
9059 			cache[i] = NULL;
9060 			continue;
9061 		}
9062 		/* when a module is unloaded, purge cache entries
9063 		 * that match the module's btf
9064 		 */
9065 		for (j = 0; j < cc->cnt; j++)
9066 			if (cc->cands[j].btf == btf) {
9067 				bpf_free_cands_from_cache(cc);
9068 				cache[i] = NULL;
9069 				break;
9070 			}
9071 	}
9072 
9073 }
9074 
9075 static void purge_cand_cache(struct btf *btf)
9076 {
9077 	mutex_lock(&cand_cache_mutex);
9078 	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9079 	mutex_unlock(&cand_cache_mutex);
9080 }
9081 #endif
9082 
9083 static struct bpf_cand_cache *
9084 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
9085 		   int targ_start_id)
9086 {
9087 	struct bpf_cand_cache *new_cands;
9088 	const struct btf_type *t;
9089 	const char *targ_name;
9090 	size_t targ_essent_len;
9091 	int n, i;
9092 
9093 	n = btf_nr_types(targ_btf);
9094 	for (i = targ_start_id; i < n; i++) {
9095 		t = btf_type_by_id(targ_btf, i);
9096 		if (btf_kind(t) != cands->kind)
9097 			continue;
9098 
9099 		targ_name = btf_name_by_offset(targ_btf, t->name_off);
9100 		if (!targ_name)
9101 			continue;
9102 
9103 		/* the resched point is before strncmp to make sure that a search
9104 		 * for a non-existing name will have a chance to schedule().
9105 		 */
9106 		cond_resched();
9107 
9108 		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
9109 			continue;
9110 
9111 		targ_essent_len = bpf_core_essential_name_len(targ_name);
9112 		if (targ_essent_len != cands->name_len)
9113 			continue;
9114 
9115 		/* most of the time there is only one candidate for a given kind+name pair */
9116 		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
9117 		if (!new_cands) {
9118 			bpf_free_cands(cands);
9119 			return ERR_PTR(-ENOMEM);
9120 		}
9121 
9122 		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
9123 		bpf_free_cands(cands);
9124 		cands = new_cands;
9125 		cands->cands[cands->cnt].btf = targ_btf;
9126 		cands->cands[cands->cnt].id = i;
9127 		cands->cnt++;
9128 	}
9129 	return cands;
9130 }
9131 
9132 static struct bpf_cand_cache *
9133 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
9134 {
9135 	struct bpf_cand_cache *cands, *cc, local_cand = {};
9136 	const struct btf *local_btf = ctx->btf;
9137 	const struct btf_type *local_type;
9138 	const struct btf *main_btf;
9139 	size_t local_essent_len;
9140 	struct btf *mod_btf;
9141 	const char *name;
9142 	int id;
9143 
9144 	main_btf = bpf_get_btf_vmlinux();
9145 	if (IS_ERR(main_btf))
9146 		return ERR_CAST(main_btf);
9147 	if (!main_btf)
9148 		return ERR_PTR(-EINVAL);
9149 
9150 	local_type = btf_type_by_id(local_btf, local_type_id);
9151 	if (!local_type)
9152 		return ERR_PTR(-EINVAL);
9153 
9154 	name = btf_name_by_offset(local_btf, local_type->name_off);
9155 	if (str_is_empty(name))
9156 		return ERR_PTR(-EINVAL);
9157 	local_essent_len = bpf_core_essential_name_len(name);
9158 
9159 	cands = &local_cand;
9160 	cands->name = name;
9161 	cands->kind = btf_kind(local_type);
9162 	cands->name_len = local_essent_len;
9163 
9164 	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9165 	/* cands is a pointer to stack here */
9166 	if (cc) {
9167 		if (cc->cnt)
9168 			return cc;
9169 		goto check_modules;
9170 	}
9171 
9172 	/* Attempt to find target candidates in vmlinux BTF first */
9173 	cands = bpf_core_add_cands(cands, main_btf, 1);
9174 	if (IS_ERR(cands))
9175 		return ERR_CAST(cands);
9176 
9177 	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
9178 
9179 	/* populate cache even when cands->cnt == 0 */
9180 	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9181 	if (IS_ERR(cc))
9182 		return ERR_CAST(cc);
9183 
9184 	/* if vmlinux BTF has any candidate, don't go for module BTFs */
9185 	if (cc->cnt)
9186 		return cc;
9187 
9188 check_modules:
9189 	/* cands is a pointer to stack here and cands->cnt == 0 */
9190 	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9191 	if (cc)
9192 		/* if cache has it return it even if cc->cnt == 0 */
9193 		return cc;
9194 
9195 	/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
9196 	spin_lock_bh(&btf_idr_lock);
9197 	idr_for_each_entry(&btf_idr, mod_btf, id) {
9198 		if (!btf_is_module(mod_btf))
9199 			continue;
9200 		/* linear search could be slow hence unlock/lock
9201 		 * the IDR to avoid holding it for too long
9202 		 */
9203 		btf_get(mod_btf);
9204 		spin_unlock_bh(&btf_idr_lock);
9205 		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
9206 		btf_put(mod_btf);
9207 		if (IS_ERR(cands))
9208 			return ERR_CAST(cands);
9209 		spin_lock_bh(&btf_idr_lock);
9210 	}
9211 	spin_unlock_bh(&btf_idr_lock);
9212 	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
9213 	 * or a pointer to the stack if cands->cnt == 0.
9214 	 * Copy it into the cache even when cands->cnt == 0 and
9215 	 * return the result.
9216 	 */
9217 	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9218 }
9219 
9220 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
9221 		   int relo_idx, void *insn)
9222 {
9223 	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
9224 	struct bpf_core_cand_list cands = {};
9225 	struct bpf_core_relo_res targ_res;
9226 	struct bpf_core_spec *specs;
9227 	const struct btf_type *type;
9228 	int err;
9229 
9230 	/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
9231 	 * into arrays of btf_ids of struct fields and array indices.
9232 	 */
9233 	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
9234 	if (!specs)
9235 		return -ENOMEM;
9236 
9237 	type = btf_type_by_id(ctx->btf, relo->type_id);
9238 	if (!type) {
9239 		bpf_log(ctx->log, "relo #%u: bad type id %u\n",
9240 			relo_idx, relo->type_id);
9241 		kfree(specs);
9242 		return -EINVAL;
9243 	}
9244 
9245 	if (need_cands) {
9246 		struct bpf_cand_cache *cc;
9247 		int i;
9248 
9249 		mutex_lock(&cand_cache_mutex);
9250 		cc = bpf_core_find_cands(ctx, relo->type_id);
9251 		if (IS_ERR(cc)) {
9252 			bpf_log(ctx->log, "target candidate search failed for %d\n",
9253 				relo->type_id);
9254 			err = PTR_ERR(cc);
9255 			goto out;
9256 		}
9257 		if (cc->cnt) {
9258 			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
9259 			if (!cands.cands) {
9260 				err = -ENOMEM;
9261 				goto out;
9262 			}
9263 		}
9264 		for (i = 0; i < cc->cnt; i++) {
9265 			bpf_log(ctx->log,
9266 				"CO-RE relocating %s %s: found target candidate [%d]\n",
9267 				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
9268 			cands.cands[i].btf = cc->cands[i].btf;
9269 			cands.cands[i].id = cc->cands[i].id;
9270 		}
9271 		cands.len = cc->cnt;
9272 		/* cand_cache_mutex needs to span the cache lookup and
9273 		 * copy of btf pointer into bpf_core_cand_list,
9274 		 * since module can be unloaded while bpf_core_calc_relo_insn
9275 		 * is working with module's btf.
9276 		 */
9277 	}
9278 
9279 	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9280 				      &targ_res);
9281 	if (err)
9282 		goto out;
9283 
9284 	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
9285 				  &targ_res);
9286 
9287 out:
9288 	kfree(specs);
9289 	if (need_cands) {
9290 		kfree(cands.cands);
9291 		mutex_unlock(&cand_cache_mutex);
9292 		if (ctx->log->level & BPF_LOG_LEVEL2)
9293 			print_cand_cache(ctx->log);
9294 	}
9295 	return err;
9296 }
9297 
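/* Check whether @field_name at @btf_id inside the register's type is listed
 * in a companion "<type><suffix>" struct (e.g. with suffix "__safe_rcu" the
 * companion of struct task_struct would be struct task_struct__safe_rcu);
 * a member with a matching name and pointee type marks the field as trusted.
 */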
9298 bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
9299 				const struct bpf_reg_state *reg,
9300 				const char *field_name, u32 btf_id, const char *suffix)
9301 {
9302 	struct btf *btf = reg->btf;
9303 	const struct btf_type *walk_type, *safe_type;
9304 	const char *tname;
9305 	char safe_tname[64];
9306 	long ret, safe_id;
9307 	const struct btf_member *member;
9308 	u32 i;
9309 
9310 	walk_type = btf_type_by_id(btf, reg->btf_id);
9311 	if (!walk_type)
9312 		return false;
9313 
9314 	tname = btf_name_by_offset(btf, walk_type->name_off);
9315 
9316 	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
9317 	if (ret >= sizeof(safe_tname))
9318 		return false;
9319 
9320 	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
9321 	if (safe_id < 0)
9322 		return false;
9323 
9324 	safe_type = btf_type_by_id(btf, safe_id);
9325 	if (!safe_type)
9326 		return false;
9327 
9328 	for_each_member(i, safe_type, member) {
9329 		const char *m_name = __btf_name_by_offset(btf, member->name_off);
9330 		const struct btf_type *mtype = btf_type_by_id(btf, member->type);
9331 		u32 id;
9332 
9333 		if (!btf_type_is_ptr(mtype))
9334 			continue;
9335 
9336 		btf_type_skip_modifiers(btf, mtype->type, &id);
9337 		/* If we match on both type and name, the field is considered trusted. */
9338 		if (btf_id == id && !strcmp(field_name, m_name))
9339 			return true;
9340 	}
9341 
9342 	return false;
9343 }
9344 
9345 bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
9346 			       const struct btf *reg_btf, u32 reg_id,
9347 			       const struct btf *arg_btf, u32 arg_id)
9348 {
9349 	const char *reg_name, *arg_name, *search_needle;
9350 	const struct btf_type *reg_type, *arg_type;
9351 	int reg_len, arg_len, cmp_len;
9352 	size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);
9353 
9354 	reg_type = btf_type_by_id(reg_btf, reg_id);
9355 	if (!reg_type)
9356 		return false;
9357 
9358 	arg_type = btf_type_by_id(arg_btf, arg_id);
9359 	if (!arg_type)
9360 		return false;
9361 
9362 	reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
9363 	arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
9364 
9365 	reg_len = strlen(reg_name);
9366 	arg_len = strlen(arg_name);
9367 
9368 	/* Exactly one of the two type names may be suffixed with ___init, so
9369 	 * if the strings are the same size, they can't possibly be no-cast
9370 	 * aliases of one another. If you have two of the same type names, e.g.
9371 	 * they're both nf_conn___init, it would be improper to return true
9372 	 * because they are _not_ no-cast aliases, they are the same type.
9373 	 */
9374 	if (reg_len == arg_len)
9375 		return false;
9376 
9377 	/* Either of the two names must be the other name, suffixed with ___init. */
9378 	if ((reg_len != arg_len + pattern_len) &&
9379 	    (arg_len != reg_len + pattern_len))
9380 		return false;
9381 
9382 	if (reg_len < arg_len) {
9383 		search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
9384 		cmp_len = reg_len;
9385 	} else {
9386 		search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
9387 		cmp_len = arg_len;
9388 	}
9389 
9390 	if (!search_needle)
9391 		return false;
9392 
9393 	/* ___init suffix must come at the end of the name */
9394 	if (*(search_needle + pattern_len) != '\0')
9395 		return false;
9396 
9397 	return !strncmp(reg_name, arg_name, cmp_len);
9398 }
9399 
9400 #ifdef CONFIG_BPF_JIT
9401 static int
9402 btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
9403 		   struct bpf_verifier_log *log)
9404 {
9405 	struct btf_struct_ops_tab *tab, *new_tab;
9406 	int i, err;
9407 
9408 	tab = btf->struct_ops_tab;
9409 	if (!tab) {
9410 		tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]),
9411 			      GFP_KERNEL);
9412 		if (!tab)
9413 			return -ENOMEM;
9414 		tab->capacity = 4;
9415 		btf->struct_ops_tab = tab;
9416 	}
9417 
9418 	for (i = 0; i < tab->cnt; i++)
9419 		if (tab->ops[i].st_ops == st_ops)
9420 			return -EEXIST;
9421 
9422 	if (tab->cnt == tab->capacity) {
9423 		new_tab = krealloc(tab,
9424 				   offsetof(struct btf_struct_ops_tab,
9425 					    ops[tab->capacity * 2]),
9426 				   GFP_KERNEL);
9427 		if (!new_tab)
9428 			return -ENOMEM;
9429 		tab = new_tab;
9430 		tab->capacity *= 2;
9431 		btf->struct_ops_tab = tab;
9432 	}
9433 
9434 	tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops;
9435 
9436 	err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
9437 	if (err)
9438 		return err;
9439 
9440 	btf->struct_ops_tab->cnt++;
9441 
9442 	return 0;
9443 }
9444 
9445 const struct bpf_struct_ops_desc *
9446 bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
9447 {
9448 	const struct bpf_struct_ops_desc *st_ops_list;
9449 	unsigned int i;
9450 	u32 cnt;
9451 
9452 	if (!value_id)
9453 		return NULL;
9454 	if (!btf->struct_ops_tab)
9455 		return NULL;
9456 
9457 	cnt = btf->struct_ops_tab->cnt;
9458 	st_ops_list = btf->struct_ops_tab->ops;
9459 	for (i = 0; i < cnt; i++) {
9460 		if (st_ops_list[i].value_id == value_id)
9461 			return &st_ops_list[i];
9462 	}
9463 
9464 	return NULL;
9465 }
9466 
9467 const struct bpf_struct_ops_desc *
9468 bpf_struct_ops_find(struct btf *btf, u32 type_id)
9469 {
9470 	const struct bpf_struct_ops_desc *st_ops_list;
9471 	unsigned int i;
9472 	u32 cnt;
9473 
9474 	if (!type_id)
9475 		return NULL;
9476 	if (!btf->struct_ops_tab)
9477 		return NULL;
9478 
9479 	cnt = btf->struct_ops_tab->cnt;
9480 	st_ops_list = btf->struct_ops_tab->ops;
9481 	for (i = 0; i < cnt; i++) {
9482 		if (st_ops_list[i].type_id == type_id)
9483 			return &st_ops_list[i];
9484 	}
9485 
9486 	return NULL;
9487 }
9488 
9489 int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
9490 {
9491 	struct bpf_verifier_log *log;
9492 	struct btf *btf;
9493 	int err = 0;
9494 
9495 	btf = btf_get_module_btf(st_ops->owner);
9496 	if (!btf)
9497 		return check_btf_kconfigs(st_ops->owner, "struct_ops");
9498 	if (IS_ERR(btf))
9499 		return PTR_ERR(btf);
9500 
9501 	log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
9502 	if (!log) {
9503 		err = -ENOMEM;
9504 		goto errout;
9505 	}
9506 
9507 	log->level = BPF_LOG_KERNEL;
9508 
9509 	err = btf_add_struct_ops(btf, st_ops, log);
9510 
9511 errout:
9512 	kfree(log);
9513 	btf_put(btf);
9514 
9515 	return err;
9516 }
9517 EXPORT_SYMBOL_GPL(__register_bpf_struct_ops);
9518 #endif
9519 
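/* True when the BTF parameter's name ends in @suffix; the verifier uses this
 * to recognize specially named kfunc arguments (e.g. an argument named
 * "data__sz" matches the "__sz" size-annotation suffix).
 */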
9520 bool btf_param_match_suffix(const struct btf *btf,
9521 			    const struct btf_param *arg,
9522 			    const char *suffix)
9523 {
9524 	int suffix_len = strlen(suffix), len;
9525 	const char *param_name;
9526 
9527 	/* In the future, this can be ported to use BTF tagging */
9528 	param_name = btf_name_by_offset(btf, arg->name_off);
9529 	if (str_is_empty(param_name))
9530 		return false;
9531 	len = strlen(param_name);
9532 	if (len <= suffix_len)
9533 		return false;
9534 	param_name += len - suffix_len;
9535 	return !strncmp(param_name, suffix, suffix_len);
9536 }
9537