xref: /linux/kernel/bpf/btf.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1  // SPDX-License-Identifier: GPL-2.0
2  /* Copyright (c) 2018 Facebook */
3  
4  #include <uapi/linux/btf.h>
5  #include <uapi/linux/bpf.h>
6  #include <uapi/linux/bpf_perf_event.h>
7  #include <uapi/linux/types.h>
8  #include <linux/seq_file.h>
9  #include <linux/compiler.h>
10  #include <linux/ctype.h>
11  #include <linux/errno.h>
12  #include <linux/slab.h>
13  #include <linux/anon_inodes.h>
14  #include <linux/file.h>
15  #include <linux/uaccess.h>
16  #include <linux/kernel.h>
17  #include <linux/idr.h>
18  #include <linux/sort.h>
19  #include <linux/bpf_verifier.h>
20  #include <linux/btf.h>
21  #include <linux/btf_ids.h>
22  #include <linux/bpf.h>
23  #include <linux/bpf_lsm.h>
24  #include <linux/skmsg.h>
25  #include <linux/perf_event.h>
26  #include <linux/bsearch.h>
27  #include <linux/kobject.h>
28  #include <linux/sysfs.h>
29  
30  #include <net/netfilter/nf_bpf_link.h>
31  
32  #include <net/sock.h>
33  #include <net/xdp.h>
34  #include "../tools/lib/bpf/relo_core.h"
35  
36  /* BTF (BPF Type Format) is the metadata format that describes
37   * the data types of BPF programs/maps.  Hence, it basically focuses
38   * on the C programming language, which modern BPF primarily
39   * uses.
40   *
41   * ELF Section:
42   * ~~~~~~~~~~~
43   * The BTF data is stored under the ".BTF" ELF section
44   *
45   * struct btf_type:
46   * ~~~~~~~~~~~~~~~
47   * Each 'struct btf_type' object describes a C data type.
48   * Depending on the type it is describing, a 'struct btf_type'
49   * object may be followed by more data.  F.e.
50   * To describe an array, 'struct btf_type' is followed by
51   * 'struct btf_array'.
52   *
53   * 'struct btf_type' and any extra data following it are
54   * 4 bytes aligned.
55   *
56   * Type section:
57   * ~~~~~~~~~~~~~
58   * The BTF type section contains a list of 'struct btf_type' objects.
59   * Each one describes a C type.  Recall from the above section
60   * that a 'struct btf_type' object could be immediately followed by extra
61   * data in order to describe some particular C types.
62   *
63   * type_id:
64   * ~~~~~~~
65   * Each btf_type object is identified by a type_id.  The type_id
66   * is implied by the location of the btf_type object in
67   * the BTF type section.  The first one has type_id 1.  The second
68   * one has type_id 2, etc.  Hence, an earlier btf_type has
69   * a smaller type_id.
70   *
71   * A btf_type object may refer to another btf_type object by using
72   * type_id (i.e. the "type" in the "struct btf_type").
73   *
74   * NOTE that we cannot assume any reference-order.
75   * A btf_type object can refer to an earlier btf_type object
76   * but it can also refer to a later btf_type object.
77   *
78   * For example, to describe "const void *", a btf_type
79   * object describing "const" may refer to another btf_type
80   * object describing "void *".  This type-reference is done
81   * by specifying type_id:
82   *
83   * [1] CONST (anon) type_id=2
84   * [2] PTR (anon) type_id=0
85   *
86   * The above is the btf_verifier debug log:
87   *   - Each line starting with "[?]" describes a btf_type object
88   *   - [?] is the type_id of the btf_type object.
89   *   - CONST/PTR is the BTF_KIND_XXX
90   *   - "(anon)" is the name of the type.  It just
91   *     happens that CONST and PTR have no name.
92   *   - type_id=XXX is the 'u32 type' in btf_type
93   *
94   * NOTE: "void" has type_id 0
95   *
96   * String section:
97   * ~~~~~~~~~~~~~~
98   * The BTF string section contains the names used by the type section.
99   * Each string is referred to by an "offset" from the beginning of the
100   * string section.
101   *
102   * Each string is '\0' terminated.
103   *
104   * The first character in the string section must be '\0'
105   * which is used to mean 'anonymous'. Some btf_type may not
106   * have a name.
107   */
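/* To make the layout above concrete, here is an illustration (not from
 * the original sources) of how "const int *p" could be encoded,
 * following the rules above and using the same debug-log notation:
 *
 *   [1] INT "int" size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *   [2] CONST (anon) type_id=1
 *   [3] PTR (anon) type_id=2
 *
 * The INT entry is a 'struct btf_type' followed by a u32 of int
 * metadata, while CONST and PTR are bare 'struct btf_type' objects
 * whose 'type' member holds the type_id they refer to.
 */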
108  
109  /* BTF verification:
110   *
111   * To verify BTF data, two passes are needed.
112   *
113   * Pass #1
114   * ~~~~~~~
115   * The first pass is to collect all btf_type objects to
116   * an array: "btf->types".
117   *
118   * Depending on the C type that a btf_type is describing,
119   * a btf_type may be followed by extra data.  We don't know
120   * how many btf_type objects there are, and more importantly we don't
121   * know where each btf_type is located in the type section.
122   *
123   * Without knowing the location of each type_id, most verifications
124   * cannot be done.  e.g. an earlier btf_type may refer to a later
125   * btf_type (recall the "const void *" above), so we cannot
126   * check this type-reference in the first pass.
127   *
128   * The first pass still does some verifications (e.g.
129   * checking that the name is a valid offset into the string section).
130   *
131   * Pass #2
132   * ~~~~~~~
133   * The main focus is to resolve a btf_type that is referring
134   * to another type.
135   *
136   * We have to ensure the referred-to type:
137   * 1) does exist in the BTF (i.e. in btf->types[])
138   * 2) does not cause a loop:
139   *	struct A {
140   *		struct B b;
141   *	};
142   *
143   *	struct B {
144   *		struct A a;
145   *	};
146   *
147   * btf_type_needs_resolve() decides if a btf_type needs
148   * to be resolved.
149   *
150   * A type that needs resolving implements the "resolve()" ops, which
151   * essentially does a DFS and detects backedges.
152   *
153   * During resolve (or DFS), different C types have different
154   * "RESOLVED" conditions.
155   *
156   * When resolving a BTF_KIND_STRUCT, we need to resolve all its
157   * members because a member is always referring to another
158   * type.  A struct's member can be treated as "RESOLVED" if
159   * it is referring to a BTF_KIND_PTR.  Otherwise, the
160   * following valid C struct would be rejected:
161   *
162   *	struct A {
163   *		int m;
164   *		struct A *a;
165   *	};
166   *
167   * When resolving a BTF_KIND_PTR, we need to keep resolving if
168   * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
169   * detect a pointer loop, e.g.:
170   * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
171   *                        ^                                         |
172   *                        +-----------------------------------------+
173   *
174   */
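/* A condensed, recursive sketch (illustrative only; the errno value and
 * helper shape are invented) of the DFS/backedge detection described
 * above, in terms of the visit states defined later in this file:
 *
 *	resolve(type_id):
 *		if (visit_states[type_id] == RESOLVED)
 *			return 0;		(already fully resolved)
 *		if (visit_states[type_id] == VISITED)
 *			return -ELOOP;		(backedge: a type loop)
 *		visit_states[type_id] = VISITED;
 *		for each type_id "ref" referred to by this type:
 *			if (resolve(ref))
 *				return -ELOOP;
 *		visit_states[type_id] = RESOLVED;
 *		return 0;
 *
 * The real implementation is iterative, pushing resolve_vertex entries
 * onto env->stack[] (at most MAX_RESOLVE_DEPTH deep) instead of
 * recursing.
 */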
175  
176  #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
177  #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
178  #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
179  #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
180  #define BITS_ROUNDUP_BYTES(bits) \
181  	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
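/* Worked example: for a 12-bit bitfield, BITS_ROUNDDOWN_BYTES(12) = 1
 * and BITS_PER_BYTE_MASKED(12) = 4 (non-zero), so
 * BITS_ROUNDUP_BYTES(12) = 1 + 1 = 2 bytes.  For 32 bits the masked
 * remainder is zero and the result is exactly 4 bytes.
 */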
182  
183  #define BTF_INFO_MASK 0x9f00ffff
184  #define BTF_INT_MASK 0x0fffffff
185  #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
186  #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
187  
188  /* 16MB for 64k structs, each with 16 members, and
189   * a few MB of space for the string section.
190   * The hard limit is S32_MAX.
191   */
192  #define BTF_MAX_SIZE (16 * 1024 * 1024)
193  
194  #define for_each_member_from(i, from, struct_type, member)		\
195  	for (i = from, member = btf_type_member(struct_type) + from;	\
196  	     i < btf_type_vlen(struct_type);				\
197  	     i++, member++)
198  
199  #define for_each_vsi_from(i, from, struct_type, member)				\
200  	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
201  	     i < btf_type_vlen(struct_type);					\
202  	     i++, member++)
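/* Hypothetical usage sketch (the pr_debug() line is invented for
 * illustration): walk every member of a struct/union type starting
 * from index 0:
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member_from(i, 0, struct_type, member)
 *		pr_debug("member %u: name_off=%u type=%u\n",
 *			 i, member->name_off, member->type);
 */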
203  
204  DEFINE_IDR(btf_idr);
205  DEFINE_SPINLOCK(btf_idr_lock);
206  
207  enum btf_kfunc_hook {
208  	BTF_KFUNC_HOOK_COMMON,
209  	BTF_KFUNC_HOOK_XDP,
210  	BTF_KFUNC_HOOK_TC,
211  	BTF_KFUNC_HOOK_STRUCT_OPS,
212  	BTF_KFUNC_HOOK_TRACING,
213  	BTF_KFUNC_HOOK_SYSCALL,
214  	BTF_KFUNC_HOOK_FMODRET,
215  	BTF_KFUNC_HOOK_CGROUP,
216  	BTF_KFUNC_HOOK_SCHED_ACT,
217  	BTF_KFUNC_HOOK_SK_SKB,
218  	BTF_KFUNC_HOOK_SOCKET_FILTER,
219  	BTF_KFUNC_HOOK_LWT,
220  	BTF_KFUNC_HOOK_NETFILTER,
221  	BTF_KFUNC_HOOK_KPROBE,
222  	BTF_KFUNC_HOOK_MAX,
223  };
224  
225  enum {
226  	BTF_KFUNC_SET_MAX_CNT = 256,
227  	BTF_DTOR_KFUNC_MAX_CNT = 256,
228  	BTF_KFUNC_FILTER_MAX_CNT = 16,
229  };
230  
231  struct btf_kfunc_hook_filter {
232  	btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
233  	u32 nr_filters;
234  };
235  
236  struct btf_kfunc_set_tab {
237  	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
238  	struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
239  };
240  
241  struct btf_id_dtor_kfunc_tab {
242  	u32 cnt;
243  	struct btf_id_dtor_kfunc dtors[];
244  };
245  
246  struct btf_struct_ops_tab {
247  	u32 cnt;
248  	u32 capacity;
249  	struct bpf_struct_ops_desc ops[];
250  };
251  
252  struct btf {
253  	void *data;
254  	struct btf_type **types;
255  	u32 *resolved_ids;
256  	u32 *resolved_sizes;
257  	const char *strings;
258  	void *nohdr_data;
259  	struct btf_header hdr;
260  	u32 nr_types; /* includes VOID for base BTF */
261  	u32 types_size;
262  	u32 data_size;
263  	refcount_t refcnt;
264  	u32 id;
265  	struct rcu_head rcu;
266  	struct btf_kfunc_set_tab *kfunc_set_tab;
267  	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
268  	struct btf_struct_metas *struct_meta_tab;
269  	struct btf_struct_ops_tab *struct_ops_tab;
270  
271  	/* split BTF support */
272  	struct btf *base_btf;
273  	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
274  	u32 start_str_off; /* first string offset (0 for base BTF) */
275  	char name[MODULE_NAME_LEN];
276  	bool kernel_btf;
277  	__u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
278  };
279  
280  enum verifier_phase {
281  	CHECK_META,
282  	CHECK_TYPE,
283  };
284  
285  struct resolve_vertex {
286  	const struct btf_type *t;
287  	u32 type_id;
288  	u16 next_member;
289  };
290  
291  enum visit_state {
292  	NOT_VISITED,
293  	VISITED,
294  	RESOLVED,
295  };
296  
297  enum resolve_mode {
298  	RESOLVE_TBD,	/* To Be Determined */
299  	RESOLVE_PTR,	/* Resolving for Pointer */
300  	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
301  					 * or array
302  					 */
303  };
304  
305  #define MAX_RESOLVE_DEPTH 32
306  
307  struct btf_sec_info {
308  	u32 off;
309  	u32 len;
310  };
311  
312  struct btf_verifier_env {
313  	struct btf *btf;
314  	u8 *visit_states;
315  	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
316  	struct bpf_verifier_log log;
317  	u32 log_type_id;
318  	u32 top_stack;
319  	enum verifier_phase phase;
320  	enum resolve_mode resolve_mode;
321  };
322  
323  static const char * const btf_kind_str[NR_BTF_KINDS] = {
324  	[BTF_KIND_UNKN]		= "UNKNOWN",
325  	[BTF_KIND_INT]		= "INT",
326  	[BTF_KIND_PTR]		= "PTR",
327  	[BTF_KIND_ARRAY]	= "ARRAY",
328  	[BTF_KIND_STRUCT]	= "STRUCT",
329  	[BTF_KIND_UNION]	= "UNION",
330  	[BTF_KIND_ENUM]		= "ENUM",
331  	[BTF_KIND_FWD]		= "FWD",
332  	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
333  	[BTF_KIND_VOLATILE]	= "VOLATILE",
334  	[BTF_KIND_CONST]	= "CONST",
335  	[BTF_KIND_RESTRICT]	= "RESTRICT",
336  	[BTF_KIND_FUNC]		= "FUNC",
337  	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
338  	[BTF_KIND_VAR]		= "VAR",
339  	[BTF_KIND_DATASEC]	= "DATASEC",
340  	[BTF_KIND_FLOAT]	= "FLOAT",
341  	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
342  	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
343  	[BTF_KIND_ENUM64]	= "ENUM64",
344  };
345  
346  const char *btf_type_str(const struct btf_type *t)
347  {
348  	return btf_kind_str[BTF_INFO_KIND(t->info)];
349  }
350  
351  /* Chunk size we use in safe copy of data to be shown. */
352  #define BTF_SHOW_OBJ_SAFE_SIZE		32
353  
354  /*
355   * This is the maximum size of a base type value (equivalent to a
356   * 128-bit int); if we are at the end of our safe buffer and have
357   * less than 16 bytes of space we can't be assured of being able
358   * to copy the next type safely, so in such cases we will initiate
359   * a new copy.
360   */
361  #define BTF_SHOW_OBJ_BASE_TYPE_SIZE	16
362  
363  /* Type name size */
364  #define BTF_SHOW_NAME_SIZE		80
365  
366  /*
367   * The suffix of a type that indicates it cannot alias another type when
368   * comparing BTF IDs for kfunc invocations.
369   */
370  #define NOCAST_ALIAS_SUFFIX		"___init"
371  
372  /*
373   * Common data to all BTF show operations. Private show functions can add
374   * their own data to a structure containing a struct btf_show and consult it
375   * in the show callback.  See btf_type_show() below.
376   *
377   * One challenge with showing nested data is we want to skip 0-valued
378   * data, but in order to figure out whether a nested object is all zeros
379   * we need to walk through it.  As a result, we need to make two passes
380   * when handling structs, unions and arrays; the first pass simply looks
381   * for nonzero data, while the second actually does the display.  The first
382   * pass is signalled by show->state.depth_check being set, and if we
383   * encounter a non-zero value we set show->state.depth_to_show to
384   * the depth at which we encountered it.  When we have completed the
385   * first pass, we will know if anything needs to be displayed if
386   * depth_to_show > depth.  See btf_[struct,array]_show() for the
387   * implementation of this.
388   *
389   * Another problem is we want to ensure the data for display is safe to
390   * access.  To support this, the anonymous "struct {} obj" tracks the data
391   * object and our safe copy of it.  We copy portions of the data needed
392   * to the object "copy" buffer, but because its size is limited to
393   * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
394   * traverse larger objects for display.
395   *
396   * The various data type show functions all start with a call to
397   * btf_show_start_type() which returns a pointer to the safe copy
398   * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
399   * raw data itself).  btf_show_obj_safe() is responsible for
400   * using copy_from_kernel_nofault() to update the safe data if necessary
401   * as we traverse the object's data.  skbuff-like semantics are
402   * used:
403   *
404   * - obj.head points to the start of the toplevel object for display
405   * - obj.size is the size of the toplevel object
406   * - obj.data points to the current point in the original data at
407   *   which our safe data starts.  obj.data will advance as we copy
408   *   portions of the data.
409   *
410   * In most cases a single copy will suffice, but larger data structures
411   * such as "struct task_struct" will require many copies.  The logic in
412   * btf_show_obj_safe() determines whether a new
413   * copy_from_kernel_nofault() is needed.
414   */
415  struct btf_show {
416  	u64 flags;
417  	void *target;	/* target of show operation (seq file, buffer) */
418  	__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
419  	const struct btf *btf;
420  	/* below are used during iteration */
421  	struct {
422  		u8 depth;
423  		u8 depth_to_show;
424  		u8 depth_check;
425  		u8 array_member:1,
426  		   array_terminated:1;
427  		u16 array_encoding;
428  		u32 type_id;
429  		int status;			/* non-zero for error */
430  		const struct btf_type *type;
431  		const struct btf_member *member;
432  		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
433  	} state;
434  	struct {
435  		u32 size;
436  		void *head;
437  		void *data;
438  		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
439  	} obj;
440  };
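/* A sketch of the two-pass scheme described above struct btf_show,
 * assuming zero-valued data is being skipped (BTF_SHOW_ZERO not set).
 * For a value such as:
 *
 *	struct { int a; struct { int b; } inner; } v = { .a = 1 };
 *
 * pass 1 (state.depth_check set) walks v without printing and records
 * the deepest non-zero member in state.depth_to_show; pass 2 then only
 * descends into a nested aggregate if depth_to_show indicates something
 * inside it is non-zero, so the all-zero 'inner' is omitted entirely.
 */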
441  
442  struct btf_kind_operations {
443  	s32 (*check_meta)(struct btf_verifier_env *env,
444  			  const struct btf_type *t,
445  			  u32 meta_left);
446  	int (*resolve)(struct btf_verifier_env *env,
447  		       const struct resolve_vertex *v);
448  	int (*check_member)(struct btf_verifier_env *env,
449  			    const struct btf_type *struct_type,
450  			    const struct btf_member *member,
451  			    const struct btf_type *member_type);
452  	int (*check_kflag_member)(struct btf_verifier_env *env,
453  				  const struct btf_type *struct_type,
454  				  const struct btf_member *member,
455  				  const struct btf_type *member_type);
456  	void (*log_details)(struct btf_verifier_env *env,
457  			    const struct btf_type *t);
458  	void (*show)(const struct btf *btf, const struct btf_type *t,
459  			 u32 type_id, void *data, u8 bits_offsets,
460  			 struct btf_show *show);
461  };
462  
463  static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
464  static struct btf_type btf_void;
465  
466  static int btf_resolve(struct btf_verifier_env *env,
467  		       const struct btf_type *t, u32 type_id);
468  
469  static int btf_func_check(struct btf_verifier_env *env,
470  			  const struct btf_type *t);
471  
472  static bool btf_type_is_modifier(const struct btf_type *t)
473  {
474  	/* Some of them are not strictly C modifiers,
475  	 * but they are grouped into the same bucket
476  	 * as far as BTF is concerned:
477  	 *   A type (t) that refers to another
478  	 *   type through t->type AND its size cannot
479  	 *   be determined without following the t->type.
480  	 *
481  	 * ptr does not fall into this bucket
482  	 * because its size is always sizeof(void *).
483  	 */
484  	switch (BTF_INFO_KIND(t->info)) {
485  	case BTF_KIND_TYPEDEF:
486  	case BTF_KIND_VOLATILE:
487  	case BTF_KIND_CONST:
488  	case BTF_KIND_RESTRICT:
489  	case BTF_KIND_TYPE_TAG:
490  		return true;
491  	}
492  
493  	return false;
494  }
495  
496  bool btf_type_is_void(const struct btf_type *t)
497  {
498  	return t == &btf_void;
499  }
500  
501  static bool btf_type_is_datasec(const struct btf_type *t)
502  {
503  	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
504  }
505  
506  static bool btf_type_is_decl_tag(const struct btf_type *t)
507  {
508  	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
509  }
510  
511  static bool btf_type_nosize(const struct btf_type *t)
512  {
513  	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
514  	       btf_type_is_func(t) || btf_type_is_func_proto(t) ||
515  	       btf_type_is_decl_tag(t);
516  }
517  
518  static bool btf_type_nosize_or_null(const struct btf_type *t)
519  {
520  	return !t || btf_type_nosize(t);
521  }
522  
523  static bool btf_type_is_decl_tag_target(const struct btf_type *t)
524  {
525  	return btf_type_is_func(t) || btf_type_is_struct(t) ||
526  	       btf_type_is_var(t) || btf_type_is_typedef(t);
527  }
528  
529  bool btf_is_vmlinux(const struct btf *btf)
530  {
531  	return btf->kernel_btf && !btf->base_btf;
532  }
533  
534  u32 btf_nr_types(const struct btf *btf)
535  {
536  	u32 total = 0;
537  
538  	while (btf) {
539  		total += btf->nr_types;
540  		btf = btf->base_btf;
541  	}
542  
543  	return total;
544  }
545  
546  s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
547  {
548  	const struct btf_type *t;
549  	const char *tname;
550  	u32 i, total;
551  
552  	total = btf_nr_types(btf);
553  	for (i = 1; i < total; i++) {
554  		t = btf_type_by_id(btf, i);
555  		if (BTF_INFO_KIND(t->info) != kind)
556  			continue;
557  
558  		tname = btf_name_by_offset(btf, t->name_off);
559  		if (!strcmp(tname, name))
560  			return i;
561  	}
562  
563  	return -ENOENT;
564  }
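/* Hypothetical usage sketch: look up the type ID of "task_struct"
 * within a given btf object:
 *
 *	s32 id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	if (id < 0)
 *		return id;	(-ENOENT if no such STRUCT exists)
 */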
565  
566  s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
567  {
568  	struct btf *btf;
569  	s32 ret;
570  	int id;
571  
572  	btf = bpf_get_btf_vmlinux();
573  	if (IS_ERR(btf))
574  		return PTR_ERR(btf);
575  	if (!btf)
576  		return -EINVAL;
577  
578  	ret = btf_find_by_name_kind(btf, name, kind);
579  	/* ret is never zero, since btf_find_by_name_kind returns
580  	 * positive btf_id or negative error.
581  	 */
582  	if (ret > 0) {
583  		btf_get(btf);
584  		*btf_p = btf;
585  		return ret;
586  	}
587  
588  	/* If name is not found in vmlinux's BTF then search in module's BTFs */
589  	spin_lock_bh(&btf_idr_lock);
590  	idr_for_each_entry(&btf_idr, btf, id) {
591  		if (!btf_is_module(btf))
592  			continue;
593  		/* linear search could be slow hence unlock/lock
594  		 * the IDR to avoid holding it for too long
595  		 */
596  		btf_get(btf);
597  		spin_unlock_bh(&btf_idr_lock);
598  		ret = btf_find_by_name_kind(btf, name, kind);
599  		if (ret > 0) {
600  			*btf_p = btf;
601  			return ret;
602  		}
603  		btf_put(btf);
604  		spin_lock_bh(&btf_idr_lock);
605  	}
606  	spin_unlock_bh(&btf_idr_lock);
607  	return ret;
608  }
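/* On success the caller owns a reference on *btf_p (taken via btf_get()
 * above) and must drop it with btf_put().  A hypothetical usage sketch:
 *
 *	struct btf *btf;
 *	s32 id = bpf_find_btf_id("tcp_sock", BTF_KIND_STRUCT, &btf);
 *
 *	if (id > 0) {
 *		... use btf and id ...
 *		btf_put(btf);
 *	}
 */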
609  
610  const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
611  					       u32 id, u32 *res_id)
612  {
613  	const struct btf_type *t = btf_type_by_id(btf, id);
614  
615  	while (btf_type_is_modifier(t)) {
616  		id = t->type;
617  		t = btf_type_by_id(btf, t->type);
618  	}
619  
620  	if (res_id)
621  		*res_id = id;
622  
623  	return t;
624  }
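/* Example: for a chain such as "const volatile int", calling
 * btf_type_skip_modifiers() on the CONST's type_id walks
 * CONST -> VOLATILE -> INT, returns the INT's btf_type and, if res_id
 * is non-NULL, stores the INT's type_id there.
 */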
625  
626  const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
627  					    u32 id, u32 *res_id)
628  {
629  	const struct btf_type *t;
630  
631  	t = btf_type_skip_modifiers(btf, id, NULL);
632  	if (!btf_type_is_ptr(t))
633  		return NULL;
634  
635  	return btf_type_skip_modifiers(btf, t->type, res_id);
636  }
637  
638  const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
639  						 u32 id, u32 *res_id)
640  {
641  	const struct btf_type *ptype;
642  
643  	ptype = btf_type_resolve_ptr(btf, id, res_id);
644  	if (ptype && btf_type_is_func_proto(ptype))
645  		return ptype;
646  
647  	return NULL;
648  }
649  
650  /* Types that act only as a source, not as a sink or intermediate
651   * type when resolving.
652   */
653  static bool btf_type_is_resolve_source_only(const struct btf_type *t)
654  {
655  	return btf_type_is_var(t) ||
656  	       btf_type_is_decl_tag(t) ||
657  	       btf_type_is_datasec(t);
658  }
659  
660  /* What types need to be resolved?
661   *
662   * btf_type_is_modifier() is an obvious one.
663   *
664   * btf_type_is_struct() because its member refers to
665   * another type (through member->type).
666   *
667   * btf_type_is_var() because the variable refers to
668   * another type. btf_type_is_datasec() holds multiple
669   * btf_type_is_var() types that need resolving.
670   *
671   * btf_type_is_array() because its element (array->type)
672   * refers to another type.  An array can be thought of as a
673   * special case of a struct whose member type is
674   * repeated array->nelems times.
675   */
676  static bool btf_type_needs_resolve(const struct btf_type *t)
677  {
678  	return btf_type_is_modifier(t) ||
679  	       btf_type_is_ptr(t) ||
680  	       btf_type_is_struct(t) ||
681  	       btf_type_is_array(t) ||
682  	       btf_type_is_var(t) ||
683  	       btf_type_is_func(t) ||
684  	       btf_type_is_decl_tag(t) ||
685  	       btf_type_is_datasec(t);
686  }
687  
688  /* t->size can be used */
689  static bool btf_type_has_size(const struct btf_type *t)
690  {
691  	switch (BTF_INFO_KIND(t->info)) {
692  	case BTF_KIND_INT:
693  	case BTF_KIND_STRUCT:
694  	case BTF_KIND_UNION:
695  	case BTF_KIND_ENUM:
696  	case BTF_KIND_DATASEC:
697  	case BTF_KIND_FLOAT:
698  	case BTF_KIND_ENUM64:
699  		return true;
700  	}
701  
702  	return false;
703  }
704  
705  static const char *btf_int_encoding_str(u8 encoding)
706  {
707  	if (encoding == 0)
708  		return "(none)";
709  	else if (encoding == BTF_INT_SIGNED)
710  		return "SIGNED";
711  	else if (encoding == BTF_INT_CHAR)
712  		return "CHAR";
713  	else if (encoding == BTF_INT_BOOL)
714  		return "BOOL";
715  	else
716  		return "UNKN";
717  }
718  
719  static u32 btf_type_int(const struct btf_type *t)
720  {
721  	return *(u32 *)(t + 1);
722  }
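/* The u32 returned above packs the BTF_INT_* fields from
 * uapi/linux/btf.h: bits 24-27 hold the encoding, bits 16-23 the bit
 * offset, and bits 0-7 the number of bits.  For example, a plain signed
 * 32-bit "int" is encoded as 0x01000020: BTF_INT_ENCODING() yields
 * BTF_INT_SIGNED, BTF_INT_OFFSET() yields 0 and BTF_INT_BITS()
 * yields 32.
 */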
723  
724  static const struct btf_array *btf_type_array(const struct btf_type *t)
725  {
726  	return (const struct btf_array *)(t + 1);
727  }
728  
729  static const struct btf_enum *btf_type_enum(const struct btf_type *t)
730  {
731  	return (const struct btf_enum *)(t + 1);
732  }
733  
734  static const struct btf_var *btf_type_var(const struct btf_type *t)
735  {
736  	return (const struct btf_var *)(t + 1);
737  }
738  
739  static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
740  {
741  	return (const struct btf_decl_tag *)(t + 1);
742  }
743  
744  static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
745  {
746  	return (const struct btf_enum64 *)(t + 1);
747  }
748  
749  static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
750  {
751  	return kind_ops[BTF_INFO_KIND(t->info)];
752  }
753  
754  static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
755  {
756  	if (!BTF_STR_OFFSET_VALID(offset))
757  		return false;
758  
759  	while (offset < btf->start_str_off)
760  		btf = btf->base_btf;
761  
762  	offset -= btf->start_str_off;
763  	return offset < btf->hdr.str_len;
764  }
765  
766  static bool __btf_name_char_ok(char c, bool first)
767  {
768  	if ((first ? !isalpha(c) :
769  		     !isalnum(c)) &&
770  	    c != '_' &&
771  	    c != '.')
772  		return false;
773  	return true;
774  }
775  
776  const char *btf_str_by_offset(const struct btf *btf, u32 offset)
777  {
778  	while (offset < btf->start_str_off)
779  		btf = btf->base_btf;
780  
781  	offset -= btf->start_str_off;
782  	if (offset < btf->hdr.str_len)
783  		return &btf->strings[offset];
784  
785  	return NULL;
786  }
787  
788  static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
789  {
790  	/* offset must be valid */
791  	const char *src = btf_str_by_offset(btf, offset);
792  	const char *src_limit;
793  
794  	if (!__btf_name_char_ok(*src, true))
795  		return false;
796  
797  	/* set a limit on identifier length */
798  	src_limit = src + KSYM_NAME_LEN;
799  	src++;
800  	while (*src && src < src_limit) {
801  		if (!__btf_name_char_ok(*src, false))
802  			return false;
803  		src++;
804  	}
805  
806  	return !*src;
807  }
808  
809  /* Allow any printable character in DATASEC names */
810  static bool btf_name_valid_section(const struct btf *btf, u32 offset)
811  {
812  	/* offset must be valid */
813  	const char *src = btf_str_by_offset(btf, offset);
814  	const char *src_limit;
815  
816  	if (!*src)
817  		return false;
818  
819  	/* set a limit on identifier length */
820  	src_limit = src + KSYM_NAME_LEN;
821  	while (*src && src < src_limit) {
822  		if (!isprint(*src))
823  			return false;
824  		src++;
825  	}
826  
827  	return !*src;
828  }
829  
830  static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
831  {
832  	const char *name;
833  
834  	if (!offset)
835  		return "(anon)";
836  
837  	name = btf_str_by_offset(btf, offset);
838  	return name ?: "(invalid-name-offset)";
839  }
840  
841  const char *btf_name_by_offset(const struct btf *btf, u32 offset)
842  {
843  	return btf_str_by_offset(btf, offset);
844  }
845  
846  const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
847  {
848  	while (type_id < btf->start_id)
849  		btf = btf->base_btf;
850  
851  	type_id -= btf->start_id;
852  	if (type_id >= btf->nr_types)
853  		return NULL;
854  	return btf->types[type_id];
855  }
856  EXPORT_SYMBOL_GPL(btf_type_by_id);
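/* Worked example of the split-BTF walk above (IDs illustrative): if
 * vmlinux BTF (start_id 0) holds 100001 types and a module's BTF was
 * loaded with start_id = 100001, then looking up type_id 42 through the
 * module's btf first follows base_btf down to vmlinux (42 < start_id)
 * and indexes its types[42], while module-local IDs >= 100001 are
 * served from the module's own types[] array at [type_id - start_id].
 */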
857  
858  /*
859   * A regular int is not a bitfield and it must be either
860   * u8/u16/u32/u64 or __int128.
861   */
862  static bool btf_type_int_is_regular(const struct btf_type *t)
863  {
864  	u8 nr_bits, nr_bytes;
865  	u32 int_data;
866  
867  	int_data = btf_type_int(t);
868  	nr_bits = BTF_INT_BITS(int_data);
869  	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
870  	if (BITS_PER_BYTE_MASKED(nr_bits) ||
871  	    BTF_INT_OFFSET(int_data) ||
872  	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
873  	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
874  	     nr_bytes != (2 * sizeof(u64)))) {
875  		return false;
876  	}
877  
878  	return true;
879  }
880  
881  /*
882   * Check that given struct member is a regular int with expected
883   * offset and size.
884   */
885  bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
886  			   const struct btf_member *m,
887  			   u32 expected_offset, u32 expected_size)
888  {
889  	const struct btf_type *t;
890  	u32 id, int_data;
891  	u8 nr_bits;
892  
893  	id = m->type;
894  	t = btf_type_id_size(btf, &id, NULL);
895  	if (!t || !btf_type_is_int(t))
896  		return false;
897  
898  	int_data = btf_type_int(t);
899  	nr_bits = BTF_INT_BITS(int_data);
900  	if (btf_type_kflag(s)) {
901  		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
902  		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
903  
904  		/* if kflag set, int should be a regular int and
905  		 * bit offset should be at byte boundary.
906  		 */
907  		return !bitfield_size &&
908  		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
909  		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
910  	}
911  
912  	if (BTF_INT_OFFSET(int_data) ||
913  	    BITS_PER_BYTE_MASKED(m->offset) ||
914  	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
915  	    BITS_PER_BYTE_MASKED(nr_bits) ||
916  	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
917  		return false;
918  
919  	return true;
920  }
921  
922  /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
923  static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
924  						       u32 id)
925  {
926  	const struct btf_type *t = btf_type_by_id(btf, id);
927  
928  	while (btf_type_is_modifier(t) &&
929  	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
930  		t = btf_type_by_id(btf, t->type);
931  	}
932  
933  	return t;
934  }
935  
936  #define BTF_SHOW_MAX_ITER	10
937  
938  #define BTF_KIND_BIT(kind)	(1ULL << kind)
939  
940  /*
941   * Populate show->state.name with type name information.
942   * Format of type name is
943   *
944   * [.member_name = ] (type_name)
945   */
946  static const char *btf_show_name(struct btf_show *show)
947  {
948  	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
949  	const char *array_suffixes = "[][][][][][][][][][]";
950  	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
951  	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
952  	const char *ptr_suffixes = "**********";
953  	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
954  	const char *name = NULL, *prefix = "", *parens = "";
955  	const struct btf_member *m = show->state.member;
956  	const struct btf_type *t;
957  	const struct btf_array *array;
958  	u32 id = show->state.type_id;
959  	const char *member = NULL;
960  	bool show_member = false;
961  	u64 kinds = 0;
962  	int i;
963  
964  	show->state.name[0] = '\0';
965  
966  	/*
967  	 * Don't show type name if we're showing an array member;
968  	 * in that case we show the array type so we don't need to repeat
969  	 * ourselves for each member.
970  	 */
971  	if (show->state.array_member)
972  		return "";
973  
974  	/* Retrieve member name, if any. */
975  	if (m) {
976  		member = btf_name_by_offset(show->btf, m->name_off);
977  		show_member = strlen(member) > 0;
978  		id = m->type;
979  	}
980  
981  	/*
982  	 * Start with type_id, as we have resolved the struct btf_type *
983  	 * via btf_modifier_show() past the parent typedef to the child
984  	 * struct, int etc it is defined as.  In such cases, the type_id
985  	 * still represents the starting type while the struct btf_type *
986  	 * in our show->state points at the resolved type of the typedef.
987  	 */
988  	t = btf_type_by_id(show->btf, id);
989  	if (!t)
990  		return "";
991  
992  	/*
993  	 * The goal here is to build up the right number of pointer and
994  	 * array suffixes while ensuring the type name for a typedef
995  	 * is represented.  Along the way we accumulate a list of
996  	 * BTF kinds we have encountered, since these will inform later
997  	 * display; for example, pointer types will not require an
998  	 * opening "{" for struct, we will just display the pointer value.
999  	 *
1000  	 * We also want to accumulate the right number of pointer or array
1001  	 * indices in the format string while iterating until we get to
1002  	 * the typedef/pointee/array member target type.
1003  	 *
1004  	 * We start by pointing at the end of pointer and array suffix
1005  	 * strings; as we accumulate pointers and arrays we move the pointer
1006  	 * or array string backwards so it will show the expected number of
1007  	 * '*' or '[]' for the type.  Up to BTF_SHOW_MAX_ITER levels of nesting
1008  	 * of pointers and/or arrays and typedefs are supported as a precaution.
1009  	 *
1010  	 * We also want to get the typedef name while proceeding to resolve
1011  	 * the type it points to so that we can add parentheses if it is a
1012  	 * "typedef struct" etc.
1013  	 */
1014  	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
1015  
1016  		switch (BTF_INFO_KIND(t->info)) {
1017  		case BTF_KIND_TYPEDEF:
1018  			if (!name)
1019  				name = btf_name_by_offset(show->btf,
1020  							       t->name_off);
1021  			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
1022  			id = t->type;
1023  			break;
1024  		case BTF_KIND_ARRAY:
1025  			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
1026  			parens = "[";
1027  			if (!t)
1028  				return "";
1029  			array = btf_type_array(t);
1030  			if (array_suffix > array_suffixes)
1031  				array_suffix -= 2;
1032  			id = array->type;
1033  			break;
1034  		case BTF_KIND_PTR:
1035  			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
1036  			if (ptr_suffix > ptr_suffixes)
1037  				ptr_suffix -= 1;
1038  			id = t->type;
1039  			break;
1040  		default:
1041  			id = 0;
1042  			break;
1043  		}
1044  		if (!id)
1045  			break;
1046  		t = btf_type_skip_qualifiers(show->btf, id);
1047  	}
1048  	/* We may not be able to represent this type; bail to be safe */
1049  	if (i == BTF_SHOW_MAX_ITER)
1050  		return "";
1051  
1052  	if (!name)
1053  		name = btf_name_by_offset(show->btf, t->name_off);
1054  
1055  	switch (BTF_INFO_KIND(t->info)) {
1056  	case BTF_KIND_STRUCT:
1057  	case BTF_KIND_UNION:
1058  		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1059  			 "struct" : "union";
1060  		/* if it's an array of struct/union, parens is already set */
1061  		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
1062  			parens = "{";
1063  		break;
1064  	case BTF_KIND_ENUM:
1065  	case BTF_KIND_ENUM64:
1066  		prefix = "enum";
1067  		break;
1068  	default:
1069  		break;
1070  	}
1071  
1072  	/* pointer does not require parens */
1073  	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
1074  		parens = "";
1075  	/* typedef does not require struct/union/enum prefix */
1076  	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
1077  		prefix = "";
1078  
1079  	if (!name)
1080  		name = "";
1081  
1082  	/* Even if we don't want type name info, we want parentheses etc */
1083  	if (show->flags & BTF_SHOW_NONAME)
1084  		snprintf(show->state.name, sizeof(show->state.name), "%s",
1085  			 parens);
1086  	else
1087  		snprintf(show->state.name, sizeof(show->state.name),
1088  			 "%s%s%s(%s%s%s%s%s%s)%s",
1089  			 /* first 3 strings comprise ".member = " */
1090  			 show_member ? "." : "",
1091  			 show_member ? member : "",
1092  			 show_member ? " = " : "",
1093  			 /* ...next is our prefix (struct, enum, etc) */
1094  			 prefix,
1095  			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1096  			 /* ...this is the type name itself */
1097  			 name,
1098  			 /* ...suffixed by the appropriate '*', '[]' suffixes */
1099  			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
1100  			 array_suffix, parens);
1101  
1102  	return show->state.name;
1103  }
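/* Examples of names produced by the format string above (assuming
 * BTF_SHOW_NONAME is unset): a member "next" pointing at a
 * struct list_head renders as ".next = (struct list_head *)", while an
 * array-of-struct member picks up "[]" suffixes and "[" as the opening
 * parens, e.g. ".node = (struct hlist_node[])[".
 */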
1104  
1105  static const char *__btf_show_indent(struct btf_show *show)
1106  {
1107  	const char *indents = "                                ";
1108  	const char *indent = &indents[strlen(indents)];
1109  
1110  	if ((indent - show->state.depth) >= indents)
1111  		return indent - show->state.depth;
1112  	return indents;
1113  }
1114  
1115  static const char *btf_show_indent(struct btf_show *show)
1116  {
1117  	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1118  }
1119  
1120  static const char *btf_show_newline(struct btf_show *show)
1121  {
1122  	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1123  }
1124  
1125  static const char *btf_show_delim(struct btf_show *show)
1126  {
1127  	if (show->state.depth == 0)
1128  		return "";
1129  
1130  	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1131  		BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1132  		return "|";
1133  
1134  	return ",";
1135  }
1136  
1137  __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1138  {
1139  	va_list args;
1140  
1141  	if (!show->state.depth_check) {
1142  		va_start(args, fmt);
1143  		show->showfn(show, fmt, args);
1144  		va_end(args);
1145  	}
1146  }
1147  
1148  /* Macros are used here as btf_show_type_value[s]() prepends and appends
1149   * format specifiers to the format specifier passed in; these do the work of
1150   * adding indentation, delimiters etc while the caller simply has to specify
1151   * the type value(s) in the format specifier + value(s).
1152   */
1153  #define btf_show_type_value(show, fmt, value)				       \
1154  	do {								       \
1155  		if ((value) != (__typeof__(value))0 ||			       \
1156  		    (show->flags & BTF_SHOW_ZERO) ||			       \
1157  		    show->state.depth == 0) {				       \
1158  			btf_show(show, "%s%s" fmt "%s%s",		       \
1159  				 btf_show_indent(show),			       \
1160  				 btf_show_name(show),			       \
1161  				 value, btf_show_delim(show),		       \
1162  				 btf_show_newline(show));		       \
1163  			if (show->state.depth > show->state.depth_to_show)     \
1164  				show->state.depth_to_show = show->state.depth; \
1165  		}							       \
1166  	} while (0)
1167  
1168  #define btf_show_type_values(show, fmt, ...)				       \
1169  	do {								       \
1170  		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
1171  			 btf_show_name(show),				       \
1172  			 __VA_ARGS__, btf_show_delim(show),		       \
1173  			 btf_show_newline(show));			       \
1174  		if (show->state.depth > show->state.depth_to_show)	       \
1175  			show->state.depth_to_show = show->state.depth;	       \
1176  	} while (0)
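/* Hypothetical usage sketch, mirroring how scalar show paths use these
 * macros: print a 32-bit value, with indentation, member name and
 * delimiter all supplied by the macro:
 *
 *	btf_show_type_value(show, "%d", *(s32 *)safe_data);
 *
 * A zero value is skipped unless BTF_SHOW_ZERO is set or we are at
 * depth 0 (a toplevel scalar is always shown).
 */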
1177  
1178  /* How much is left to copy to safe buffer after @data? */
1179  static int btf_show_obj_size_left(struct btf_show *show, void *data)
1180  {
1181  	return show->obj.head + show->obj.size - data;
1182  }
1183  
1184  /* Is object pointed to by @data of @size already copied to our safe buffer? */
1185  static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1186  {
1187  	return data >= show->obj.data &&
1188  	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1189  }
1190  
1191  /*
1192   * If object pointed to by @data of @size falls within our safe buffer, return
1193   * the equivalent pointer to the same safe data.  Assumes
1194   * copy_from_kernel_nofault() has already happened and our safe buffer is
1195   * populated.
1196   */
1197  static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1198  {
1199  	if (btf_show_obj_is_safe(show, data, size))
1200  		return show->obj.safe + (data - show->obj.data);
1201  	return NULL;
1202  }
1203  
1204  /*
1205   * Return a safe-to-access version of data pointed to by @data.
1206   * We do this by copying the relevant amount of information
1207   * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1208   *
1209   * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1210   * safe copy is needed.
1211   *
1212   * Otherwise we need to determine if we have the required amount
1213   * of data (determined by the @data pointer and the size of the
1214   * largest base type we can encounter (represented by
1215   * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
1216   * that we will be able to print some of the current object,
1217   * and if more is needed a copy will be triggered.
1218   * Some objects such as structs will not fit into the buffer;
1219   * in such cases additional copies when we iterate over their
1220   * members may be needed.
1221   *
1222   * btf_show_obj_safe() is used to return a safe buffer for
1223   * btf_show_start_type(); this ensures that as we recurse into
1224   * nested types we always have safe data for the given type.
1225   * This approach is somewhat wasteful; it's possible for example
1226   * that when iterating over a large union we'll end up copying the
1227   * same data repeatedly, but the goal is safety not performance.
1228   * We use stack data as opposed to per-CPU buffers because the
1229   * iteration over a type can take some time, and preemption handling
1230   * would greatly complicate use of the safe buffer.
1231   */
1232  static void *btf_show_obj_safe(struct btf_show *show,
1233  			       const struct btf_type *t,
1234  			       void *data)
1235  {
1236  	const struct btf_type *rt;
1237  	int size_left, size;
1238  	void *safe = NULL;
1239  
1240  	if (show->flags & BTF_SHOW_UNSAFE)
1241  		return data;
1242  
1243  	rt = btf_resolve_size(show->btf, t, &size);
1244  	if (IS_ERR(rt)) {
1245  		show->state.status = PTR_ERR(rt);
1246  		return NULL;
1247  	}
1248  
1249  	/*
1250  	 * Is this toplevel object? If so, set total object size and
1251  	 * initialize pointers.  Otherwise check if we still fall within
1252  	 * our safe object data.
1253  	 */
1254  	if (show->state.depth == 0) {
1255  		show->obj.size = size;
1256  		show->obj.head = data;
1257  	} else {
1258  		/*
1259  		 * If the size of the current object is > our remaining
1260  		 * safe buffer we _may_ need to do a new copy.  However
1261  		 * consider the case of a nested struct; its size pushes
1262  		 * us over the safe buffer limit, but showing any individual
1263  		 * struct members does not.  In such cases, we don't need
1264  		 * to initiate a fresh copy yet; however we definitely need
1265  		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1266  		 * in our buffer, regardless of the current object size.
1267  		 * The logic here is that as we resolve types we will
1268  		 * hit a base type at some point, and we need to be sure
1269  		 * the next chunk of data is safely available to display
1270  		 * that type info safely.  We cannot rely on the size of
1271  		 * the current object here because it may be much larger
1272  		 * than our current buffer (e.g. task_struct is 8k).
1273  		 * All we want to do here is ensure that we can print the
1274  		 * next basic type, which we can if either
1275  		 * - the current type size is within the safe buffer; or
1276  		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1277  		 *   the safe buffer.
1278  		 */
1279  		safe = __btf_show_obj_safe(show, data,
1280  					   min(size,
1281  					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1282  	}
1283  
1284  	/*
1285  	 * We need a new copy to our safe object, either because we haven't
1286  	 * yet copied and are initializing safe data, or because the data
1287  	 * we want falls outside the boundaries of the safe object.
1288  	 */
1289  	if (!safe) {
1290  		size_left = btf_show_obj_size_left(show, data);
1291  		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1292  			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1293  		show->state.status = copy_from_kernel_nofault(show->obj.safe,
1294  							      data, size_left);
1295  		if (!show->state.status) {
1296  			show->obj.data = data;
1297  			safe = show->obj.safe;
1298  		}
1299  	}
1300  
1301  	return safe;
1302  }
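/* Worked example of the windowing above (sizes illustrative): showing
 * an 8k object at depth 0 sets obj.head = obj.data = data and
 * obj.size = 8192, then copies the first 32 bytes
 * (BTF_SHOW_OBJ_SAFE_SIZE) into obj.safe.  A later member at byte
 * offset 40 falls outside that window, __btf_show_obj_safe() returns
 * NULL, and a fresh copy_from_kernel_nofault() re-anchors obj.data at
 * the member's address.
 */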
1303  
1304  /*
1305   * Set the type we are starting to show and return a safe data pointer
1306   * to be used for showing the associated data.
1307   */
1308  static void *btf_show_start_type(struct btf_show *show,
1309  				 const struct btf_type *t,
1310  				 u32 type_id, void *data)
1311  {
1312  	show->state.type = t;
1313  	show->state.type_id = type_id;
1314  	show->state.name[0] = '\0';
1315  
1316  	return btf_show_obj_safe(show, t, data);
1317  }
1318  
1319  static void btf_show_end_type(struct btf_show *show)
1320  {
1321  	show->state.type = NULL;
1322  	show->state.type_id = 0;
1323  	show->state.name[0] = '\0';
1324  }
1325  
1326  static void *btf_show_start_aggr_type(struct btf_show *show,
1327  				      const struct btf_type *t,
1328  				      u32 type_id, void *data)
1329  {
1330  	void *safe_data = btf_show_start_type(show, t, type_id, data);
1331  
1332  	if (!safe_data)
1333  		return safe_data;
1334  
1335  	btf_show(show, "%s%s%s", btf_show_indent(show),
1336  		 btf_show_name(show),
1337  		 btf_show_newline(show));
1338  	show->state.depth++;
1339  	return safe_data;
1340  }
1341  
1342  static void btf_show_end_aggr_type(struct btf_show *show,
1343  				   const char *suffix)
1344  {
1345  	show->state.depth--;
1346  	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1347  		 btf_show_delim(show), btf_show_newline(show));
1348  	btf_show_end_type(show);
1349  }
1350  
1351  static void btf_show_start_member(struct btf_show *show,
1352  				  const struct btf_member *m)
1353  {
1354  	show->state.member = m;
1355  }
1356  
1357  static void btf_show_start_array_member(struct btf_show *show)
1358  {
1359  	show->state.array_member = 1;
1360  	btf_show_start_member(show, NULL);
1361  }
1362  
1363  static void btf_show_end_member(struct btf_show *show)
1364  {
1365  	show->state.member = NULL;
1366  }
1367  
1368  static void btf_show_end_array_member(struct btf_show *show)
1369  {
1370  	show->state.array_member = 0;
1371  	btf_show_end_member(show);
1372  }
1373  
1374  static void *btf_show_start_array_type(struct btf_show *show,
1375  				       const struct btf_type *t,
1376  				       u32 type_id,
1377  				       u16 array_encoding,
1378  				       void *data)
1379  {
1380  	show->state.array_encoding = array_encoding;
1381  	show->state.array_terminated = 0;
1382  	return btf_show_start_aggr_type(show, t, type_id, data);
1383  }
1384  
1385  static void btf_show_end_array_type(struct btf_show *show)
1386  {
1387  	show->state.array_encoding = 0;
1388  	show->state.array_terminated = 0;
1389  	btf_show_end_aggr_type(show, "]");
1390  }
1391  
1392  static void *btf_show_start_struct_type(struct btf_show *show,
1393  					const struct btf_type *t,
1394  					u32 type_id,
1395  					void *data)
1396  {
1397  	return btf_show_start_aggr_type(show, t, type_id, data);
1398  }
1399  
1400  static void btf_show_end_struct_type(struct btf_show *show)
1401  {
1402  	btf_show_end_aggr_type(show, "}");
1403  }
1404  
1405  __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1406  					      const char *fmt, ...)
1407  {
1408  	va_list args;
1409  
1410  	va_start(args, fmt);
1411  	bpf_verifier_vlog(log, fmt, args);
1412  	va_end(args);
1413  }
1414  
1415  __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1416  					    const char *fmt, ...)
1417  {
1418  	struct bpf_verifier_log *log = &env->log;
1419  	va_list args;
1420  
1421  	if (!bpf_verifier_log_needed(log))
1422  		return;
1423  
1424  	va_start(args, fmt);
1425  	bpf_verifier_vlog(log, fmt, args);
1426  	va_end(args);
1427  }
1428  
1429  __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1430  						   const struct btf_type *t,
1431  						   bool log_details,
1432  						   const char *fmt, ...)
1433  {
1434  	struct bpf_verifier_log *log = &env->log;
1435  	struct btf *btf = env->btf;
1436  	va_list args;
1437  
1438  	if (!bpf_verifier_log_needed(log))
1439  		return;
1440  
1441  	if (log->level == BPF_LOG_KERNEL) {
1442  		/* btf verifier prints all types it is processing via
1443  		 * btf_verifier_log_type(..., fmt = NULL).
1444  		 * Skip those prints for in-kernel BTF verification.
1445  		 */
1446  		if (!fmt)
1447  			return;
1448  
1449  		/* Skip logging when loading module BTF with mismatches permitted */
1450  		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1451  			return;
1452  	}
1453  
1454  	__btf_verifier_log(log, "[%u] %s %s%s",
1455  			   env->log_type_id,
1456  			   btf_type_str(t),
1457  			   __btf_name_by_offset(btf, t->name_off),
1458  			   log_details ? " " : "");
1459  
1460  	if (log_details)
1461  		btf_type_ops(t)->log_details(env, t);
1462  
1463  	if (fmt && *fmt) {
1464  		__btf_verifier_log(log, " ");
1465  		va_start(args, fmt);
1466  		bpf_verifier_vlog(log, fmt, args);
1467  		va_end(args);
1468  	}
1469  
1470  	__btf_verifier_log(log, "\n");
1471  }
1472  
1473  #define btf_verifier_log_type(env, t, ...) \
1474  	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1475  #define btf_verifier_log_basic(env, t, ...) \
1476  	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1477  
1478  __printf(4, 5)
1479  static void btf_verifier_log_member(struct btf_verifier_env *env,
1480  				    const struct btf_type *struct_type,
1481  				    const struct btf_member *member,
1482  				    const char *fmt, ...)
1483  {
1484  	struct bpf_verifier_log *log = &env->log;
1485  	struct btf *btf = env->btf;
1486  	va_list args;
1487  
1488  	if (!bpf_verifier_log_needed(log))
1489  		return;
1490  
1491  	if (log->level == BPF_LOG_KERNEL) {
1492  		if (!fmt)
1493  			return;
1494  
1495  		/* Skip logging when loading module BTF with mismatches permitted */
1496  		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1497  			return;
1498  	}
1499  
1500  	/* The CHECK_META phase already did a btf dump.
1501  	 *
1502  	 * If a member is logged again, it must have hit an error in
1503  	 * parsing this member.  It is useful to print out which
1504  	 * struct this member belongs to.
1505  	 */
1506  	if (env->phase != CHECK_META)
1507  		btf_verifier_log_type(env, struct_type, NULL);
1508  
1509  	if (btf_type_kflag(struct_type))
1510  		__btf_verifier_log(log,
1511  				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1512  				   __btf_name_by_offset(btf, member->name_off),
1513  				   member->type,
1514  				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
1515  				   BTF_MEMBER_BIT_OFFSET(member->offset));
1516  	else
1517  		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1518  				   __btf_name_by_offset(btf, member->name_off),
1519  				   member->type, member->offset);
1520  
1521  	if (fmt && *fmt) {
1522  		__btf_verifier_log(log, " ");
1523  		va_start(args, fmt);
1524  		bpf_verifier_vlog(log, fmt, args);
1525  		va_end(args);
1526  	}
1527  
1528  	__btf_verifier_log(log, "\n");
1529  }
1530  
1531  __printf(4, 5)
1532  static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1533  				 const struct btf_type *datasec_type,
1534  				 const struct btf_var_secinfo *vsi,
1535  				 const char *fmt, ...)
1536  {
1537  	struct bpf_verifier_log *log = &env->log;
1538  	va_list args;
1539  
1540  	if (!bpf_verifier_log_needed(log))
1541  		return;
1542  	if (log->level == BPF_LOG_KERNEL && !fmt)
1543  		return;
1544  	if (env->phase != CHECK_META)
1545  		btf_verifier_log_type(env, datasec_type, NULL);
1546  
1547  	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1548  			   vsi->type, vsi->offset, vsi->size);
1549  	if (fmt && *fmt) {
1550  		__btf_verifier_log(log, " ");
1551  		va_start(args, fmt);
1552  		bpf_verifier_vlog(log, fmt, args);
1553  		va_end(args);
1554  	}
1555  
1556  	__btf_verifier_log(log, "\n");
1557  }
1558  
1559  static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1560  				 u32 btf_data_size)
1561  {
1562  	struct bpf_verifier_log *log = &env->log;
1563  	const struct btf *btf = env->btf;
1564  	const struct btf_header *hdr;
1565  
1566  	if (!bpf_verifier_log_needed(log))
1567  		return;
1568  
1569  	if (log->level == BPF_LOG_KERNEL)
1570  		return;
1571  	hdr = &btf->hdr;
1572  	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1573  	__btf_verifier_log(log, "version: %u\n", hdr->version);
1574  	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1575  	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1576  	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1577  	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1578  	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1579  	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1580  	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1581  }
1582  
1583  static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1584  {
1585  	struct btf *btf = env->btf;
1586  
1587  	if (btf->types_size == btf->nr_types) {
1588  		/* Expand 'types' array */
1589  
1590  		struct btf_type **new_types;
1591  		u32 expand_by, new_size;
1592  
1593  		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1594  			btf_verifier_log(env, "Exceeded max num of types");
1595  			return -E2BIG;
1596  		}
1597  
1598  		expand_by = max_t(u32, btf->types_size >> 2, 16);
1599  		new_size = min_t(u32, BTF_MAX_TYPE,
1600  				 btf->types_size + expand_by);
1601  
1602  		new_types = kvcalloc(new_size, sizeof(*new_types),
1603  				     GFP_KERNEL | __GFP_NOWARN);
1604  		if (!new_types)
1605  			return -ENOMEM;
1606  
1607  		if (btf->nr_types == 0) {
1608  			if (!btf->base_btf) {
1609  				/* lazily init VOID type */
1610  				new_types[0] = &btf_void;
1611  				btf->nr_types++;
1612  			}
1613  		} else {
1614  			memcpy(new_types, btf->types,
1615  			       sizeof(*btf->types) * btf->nr_types);
1616  		}
1617  
1618  		kvfree(btf->types);
1619  		btf->types = new_types;
1620  		btf->types_size = new_size;
1621  	}
1622  
1623  	btf->types[btf->nr_types++] = t;
1624  
1625  	return 0;
1626  }
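/* Illustration (editor-added, not in the original file): with the growth
 * policy above, expand_by = max(types_size / 4, 16), so the 'types'
 * array grows 0 -> 16 -> 32 -> 48 -> 64 -> 80 entries, and then by
 * roughly 25% per expansion once it is larger than 64 entries, always
 * capped at BTF_MAX_TYPE.
 */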
1627  
1628  static int btf_alloc_id(struct btf *btf)
1629  {
1630  	int id;
1631  
1632  	idr_preload(GFP_KERNEL);
1633  	spin_lock_bh(&btf_idr_lock);
1634  	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1635  	if (id > 0)
1636  		btf->id = id;
1637  	spin_unlock_bh(&btf_idr_lock);
1638  	idr_preload_end();
1639  
1640  	if (WARN_ON_ONCE(!id))
1641  		return -ENOSPC;
1642  
1643  	return id > 0 ? 0 : id;
1644  }
1645  
1646  static void btf_free_id(struct btf *btf)
1647  {
1648  	unsigned long flags;
1649  
1650  	/*
1651  	 * In map-in-map, calling map_delete_elem() on the outer
1652  	 * map will call bpf_map_put() on the inner map.
1653  	 * It will then eventually call btf_free_id()
1654  	 * on the inner map.  Some map_delete_elem()
1655  	 * implementations may run with IRQs disabled, so
1656  	 * we need to use the _irqsave() version instead
1657  	 * of the _bh() version.
1658  	 */
1659  	spin_lock_irqsave(&btf_idr_lock, flags);
1660  	idr_remove(&btf_idr, btf->id);
1661  	spin_unlock_irqrestore(&btf_idr_lock, flags);
1662  }
1663  
1664  static void btf_free_kfunc_set_tab(struct btf *btf)
1665  {
1666  	struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1667  	int hook;
1668  
1669  	if (!tab)
1670  		return;
1671  	for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1672  		kfree(tab->sets[hook]);
1673  	kfree(tab);
1674  	btf->kfunc_set_tab = NULL;
1675  }
1676  
1677  static void btf_free_dtor_kfunc_tab(struct btf *btf)
1678  {
1679  	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1680  
1681  	if (!tab)
1682  		return;
1683  	kfree(tab);
1684  	btf->dtor_kfunc_tab = NULL;
1685  }
1686  
1687  static void btf_struct_metas_free(struct btf_struct_metas *tab)
1688  {
1689  	int i;
1690  
1691  	if (!tab)
1692  		return;
1693  	for (i = 0; i < tab->cnt; i++)
1694  		btf_record_free(tab->types[i].record);
1695  	kfree(tab);
1696  }
1697  
1698  static void btf_free_struct_meta_tab(struct btf *btf)
1699  {
1700  	struct btf_struct_metas *tab = btf->struct_meta_tab;
1701  
1702  	btf_struct_metas_free(tab);
1703  	btf->struct_meta_tab = NULL;
1704  }
1705  
1706  static void btf_free_struct_ops_tab(struct btf *btf)
1707  {
1708  	struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
1709  	u32 i;
1710  
1711  	if (!tab)
1712  		return;
1713  
1714  	for (i = 0; i < tab->cnt; i++)
1715  		bpf_struct_ops_desc_release(&tab->ops[i]);
1716  
1717  	kfree(tab);
1718  	btf->struct_ops_tab = NULL;
1719  }
1720  
1721  static void btf_free(struct btf *btf)
1722  {
1723  	btf_free_struct_meta_tab(btf);
1724  	btf_free_dtor_kfunc_tab(btf);
1725  	btf_free_kfunc_set_tab(btf);
1726  	btf_free_struct_ops_tab(btf);
1727  	kvfree(btf->types);
1728  	kvfree(btf->resolved_sizes);
1729  	kvfree(btf->resolved_ids);
1730  	/* vmlinux does not allocate btf->data; it simply points it at
1731  	 * __start_BTF.
1732  	 */
1733  	if (!btf_is_vmlinux(btf))
1734  		kvfree(btf->data);
1735  	kvfree(btf->base_id_map);
1736  	kfree(btf);
1737  }
1738  
1739  static void btf_free_rcu(struct rcu_head *rcu)
1740  {
1741  	struct btf *btf = container_of(rcu, struct btf, rcu);
1742  
1743  	btf_free(btf);
1744  }
1745  
1746  const char *btf_get_name(const struct btf *btf)
1747  {
1748  	return btf->name;
1749  }
1750  
1751  void btf_get(struct btf *btf)
1752  {
1753  	refcount_inc(&btf->refcnt);
1754  }
1755  
1756  void btf_put(struct btf *btf)
1757  {
1758  	if (btf && refcount_dec_and_test(&btf->refcnt)) {
1759  		btf_free_id(btf);
1760  		call_rcu(&btf->rcu, btf_free_rcu);
1761  	}
1762  }
1763  
1764  struct btf *btf_base_btf(const struct btf *btf)
1765  {
1766  	return btf->base_btf;
1767  }
1768  
1769  const struct btf_header *btf_header(const struct btf *btf)
1770  {
1771  	return &btf->hdr;
1772  }
1773  
1774  void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
1775  {
1776  	btf->base_btf = (struct btf *)base_btf;
1777  	btf->start_id = btf_nr_types(base_btf);
1778  	btf->start_str_off = base_btf->hdr.str_len;
1779  }
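/* Illustration (editor-added, not in the original file): for split BTF,
 * e.g. a kernel module's BTF sitting on top of vmlinux BTF, a base BTF
 * with N types and an S-byte string section makes the split BTF's own
 * type IDs start at N and its string offsets start at S; IDs and
 * offsets below those bounds are looked up in the base BTF instead.
 */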
1780  
1781  static int env_resolve_init(struct btf_verifier_env *env)
1782  {
1783  	struct btf *btf = env->btf;
1784  	u32 nr_types = btf->nr_types;
1785  	u32 *resolved_sizes = NULL;
1786  	u32 *resolved_ids = NULL;
1787  	u8 *visit_states = NULL;
1788  
1789  	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1790  				  GFP_KERNEL | __GFP_NOWARN);
1791  	if (!resolved_sizes)
1792  		goto nomem;
1793  
1794  	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1795  				GFP_KERNEL | __GFP_NOWARN);
1796  	if (!resolved_ids)
1797  		goto nomem;
1798  
1799  	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1800  				GFP_KERNEL | __GFP_NOWARN);
1801  	if (!visit_states)
1802  		goto nomem;
1803  
1804  	btf->resolved_sizes = resolved_sizes;
1805  	btf->resolved_ids = resolved_ids;
1806  	env->visit_states = visit_states;
1807  
1808  	return 0;
1809  
1810  nomem:
1811  	kvfree(resolved_sizes);
1812  	kvfree(resolved_ids);
1813  	kvfree(visit_states);
1814  	return -ENOMEM;
1815  }
1816  
1817  static void btf_verifier_env_free(struct btf_verifier_env *env)
1818  {
1819  	kvfree(env->visit_states);
1820  	kfree(env);
1821  }
1822  
1823  static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1824  				     const struct btf_type *next_type)
1825  {
1826  	switch (env->resolve_mode) {
1827  	case RESOLVE_TBD:
1828  		/* int, enum or void is a sink */
1829  		return !btf_type_needs_resolve(next_type);
1830  	case RESOLVE_PTR:
1831  		/* int, enum, void, struct, array, func or func_proto is a sink
1832  		 * for ptr
1833  		 */
1834  		return !btf_type_is_modifier(next_type) &&
1835  			!btf_type_is_ptr(next_type);
1836  	case RESOLVE_STRUCT_OR_ARRAY:
1837  		/* int, enum, void, ptr, func or func_proto is a sink
1838  		 * for struct and array
1839  		 */
1840  		return !btf_type_is_modifier(next_type) &&
1841  			!btf_type_is_array(next_type) &&
1842  			!btf_type_is_struct(next_type);
1843  	default:
1844  		BUG();
1845  	}
1846  }
1847  
1848  static bool env_type_is_resolved(const struct btf_verifier_env *env,
1849  				 u32 type_id)
1850  {
1851  	/* base BTF types should be resolved by now */
1852  	if (type_id < env->btf->start_id)
1853  		return true;
1854  
1855  	return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1856  }
1857  
1858  static int env_stack_push(struct btf_verifier_env *env,
1859  			  const struct btf_type *t, u32 type_id)
1860  {
1861  	const struct btf *btf = env->btf;
1862  	struct resolve_vertex *v;
1863  
1864  	if (env->top_stack == MAX_RESOLVE_DEPTH)
1865  		return -E2BIG;
1866  
1867  	if (type_id < btf->start_id
1868  	    || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1869  		return -EEXIST;
1870  
1871  	env->visit_states[type_id - btf->start_id] = VISITED;
1872  
1873  	v = &env->stack[env->top_stack++];
1874  	v->t = t;
1875  	v->type_id = type_id;
1876  	v->next_member = 0;
1877  
1878  	if (env->resolve_mode == RESOLVE_TBD) {
1879  		if (btf_type_is_ptr(t))
1880  			env->resolve_mode = RESOLVE_PTR;
1881  		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1882  			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1883  	}
1884  
1885  	return 0;
1886  }
1887  
1888  static void env_stack_set_next_member(struct btf_verifier_env *env,
1889  				      u16 next_member)
1890  {
1891  	env->stack[env->top_stack - 1].next_member = next_member;
1892  }
1893  
1894  static void env_stack_pop_resolved(struct btf_verifier_env *env,
1895  				   u32 resolved_type_id,
1896  				   u32 resolved_size)
1897  {
1898  	u32 type_id = env->stack[--(env->top_stack)].type_id;
1899  	struct btf *btf = env->btf;
1900  
1901  	type_id -= btf->start_id; /* adjust to local type id */
1902  	btf->resolved_sizes[type_id] = resolved_size;
1903  	btf->resolved_ids[type_id] = resolved_type_id;
1904  	env->visit_states[type_id] = RESOLVED;
1905  }
1906  
1907  static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1908  {
1909  	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1910  }
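/* Illustration (editor-added, not in the original file) of the resolve
 * stack in action: for "const int x" described as
 *   [1] CONST -> [2] INT
 * resolving [1] starts in RESOLVE_TBD mode and pushes [1]; since [2]
 * (an int) is a sink in that mode, [1] is popped RESOLVED with its
 * resolved_ids entry pointing at [2].  A chain deeper than
 * MAX_RESOLVE_DEPTH fails with -E2BIG, and pushing a type that is
 * already VISITED fails with -EEXIST, which is how reference cycles
 * are rejected.
 */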
1911  
1912  /* Resolve the size of a passed-in "type"
1913   *
1914   * type: is an array (e.g. u32 array[x][y])
1915   * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1916   * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
1917   *             corresponds to the return type.
1918   * *elem_type: u32
1919   * *elem_id: id of u32
1920   * *total_nelems: (x * y).  Hence, individual elem size is
1921   *                (*type_size / *total_nelems)
1922   * *type_id: id of type if it's changed within the function, 0 if not
1923   *
1924   * type: is not an array (e.g. const struct X)
1925   * return type: type "struct X"
1926   * *type_size: sizeof(struct X)
1927   * *elem_type: same as return type ("struct X")
1928   * *elem_id: 0
1929   * *total_nelems: 1
1930   * *type_id: id of type if it's changed within the function, 0 if not
1931   */
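/* Illustrative walk-through (editor-added, not in the original file):
 * for "u32 array[2][3]", i.e. ARRAY(nelems=2) -> ARRAY(nelems=3) -> u32,
 * each ARRAY level multiplies nelems (2 * 3 = 6) and the walk stops at
 * the u32 INT type, giving:
 *   return type:   the outermost BTF_KIND_ARRAY ("u32[2][3]")
 *   *type_size:    6 * sizeof(u32) = 24
 *   *elem_type:    u32, *elem_id: id of u32, *total_nelems: 6
 */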
1932  static const struct btf_type *
1933  __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1934  		   u32 *type_size, const struct btf_type **elem_type,
1935  		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
1936  {
1937  	const struct btf_type *array_type = NULL;
1938  	const struct btf_array *array = NULL;
1939  	u32 i, size, nelems = 1, id = 0;
1940  
1941  	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1942  		switch (BTF_INFO_KIND(type->info)) {
1943  		/* type->size can be used */
1944  		case BTF_KIND_INT:
1945  		case BTF_KIND_STRUCT:
1946  		case BTF_KIND_UNION:
1947  		case BTF_KIND_ENUM:
1948  		case BTF_KIND_FLOAT:
1949  		case BTF_KIND_ENUM64:
1950  			size = type->size;
1951  			goto resolved;
1952  
1953  		case BTF_KIND_PTR:
1954  			size = sizeof(void *);
1955  			goto resolved;
1956  
1957  		/* Modifiers */
1958  		case BTF_KIND_TYPEDEF:
1959  		case BTF_KIND_VOLATILE:
1960  		case BTF_KIND_CONST:
1961  		case BTF_KIND_RESTRICT:
1962  		case BTF_KIND_TYPE_TAG:
1963  			id = type->type;
1964  			type = btf_type_by_id(btf, type->type);
1965  			break;
1966  
1967  		case BTF_KIND_ARRAY:
1968  			if (!array_type)
1969  				array_type = type;
1970  			array = btf_type_array(type);
1971  			if (nelems && array->nelems > U32_MAX / nelems)
1972  				return ERR_PTR(-EINVAL);
1973  			nelems *= array->nelems;
1974  			type = btf_type_by_id(btf, array->type);
1975  			break;
1976  
1977  		/* type without size */
1978  		default:
1979  			return ERR_PTR(-EINVAL);
1980  		}
1981  	}
1982  
1983  	return ERR_PTR(-EINVAL);
1984  
1985  resolved:
1986  	if (nelems && size > U32_MAX / nelems)
1987  		return ERR_PTR(-EINVAL);
1988  
1989  	*type_size = nelems * size;
1990  	if (total_nelems)
1991  		*total_nelems = nelems;
1992  	if (elem_type)
1993  		*elem_type = type;
1994  	if (elem_id)
1995  		*elem_id = array ? array->type : 0;
1996  	if (type_id && id)
1997  		*type_id = id;
1998  
1999  	return array_type ? : type;
2000  }
2001  
2002  const struct btf_type *
2003  btf_resolve_size(const struct btf *btf, const struct btf_type *type,
2004  		 u32 *type_size)
2005  {
2006  	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
2007  }
2008  
2009  static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
2010  {
2011  	while (type_id < btf->start_id)
2012  		btf = btf->base_btf;
2013  
2014  	return btf->resolved_ids[type_id - btf->start_id];
2015  }
2016  
2017  /* The input param "type_id" must point to a needs_resolve type */
2018  static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
2019  						  u32 *type_id)
2020  {
2021  	*type_id = btf_resolved_type_id(btf, *type_id);
2022  	return btf_type_by_id(btf, *type_id);
2023  }
2024  
2025  static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
2026  {
2027  	while (type_id < btf->start_id)
2028  		btf = btf->base_btf;
2029  
2030  	return btf->resolved_sizes[type_id - btf->start_id];
2031  }
2032  
2033  const struct btf_type *btf_type_id_size(const struct btf *btf,
2034  					u32 *type_id, u32 *ret_size)
2035  {
2036  	const struct btf_type *size_type;
2037  	u32 size_type_id = *type_id;
2038  	u32 size = 0;
2039  
2040  	size_type = btf_type_by_id(btf, size_type_id);
2041  	if (btf_type_nosize_or_null(size_type))
2042  		return NULL;
2043  
2044  	if (btf_type_has_size(size_type)) {
2045  		size = size_type->size;
2046  	} else if (btf_type_is_array(size_type)) {
2047  		size = btf_resolved_type_size(btf, size_type_id);
2048  	} else if (btf_type_is_ptr(size_type)) {
2049  		size = sizeof(void *);
2050  	} else {
2051  		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
2052  				 !btf_type_is_var(size_type)))
2053  			return NULL;
2054  
2055  		size_type_id = btf_resolved_type_id(btf, size_type_id);
2056  		size_type = btf_type_by_id(btf, size_type_id);
2057  		if (btf_type_nosize_or_null(size_type))
2058  			return NULL;
2059  		else if (btf_type_has_size(size_type))
2060  			size = size_type->size;
2061  		else if (btf_type_is_array(size_type))
2062  			size = btf_resolved_type_size(btf, size_type_id);
2063  		else if (btf_type_is_ptr(size_type))
2064  			size = sizeof(void *);
2065  		else
2066  			return NULL;
2067  	}
2068  
2069  	*type_id = size_type_id;
2070  	if (ret_size)
2071  		*ret_size = size;
2072  
2073  	return size_type;
2074  }
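/* Illustrative usage (editor-added, not in the original file): for
 * "typedef int arr_t[4]", calling btf_type_id_size() with *type_id
 * pointing at the TYPEDEF follows the precomputed resolved_ids to the
 * ARRAY type, updates *type_id to the ARRAY's id, and sets *ret_size
 * to the resolved 4 * sizeof(int) = 16 bytes.
 */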
2075  
2076  static int btf_df_check_member(struct btf_verifier_env *env,
2077  			       const struct btf_type *struct_type,
2078  			       const struct btf_member *member,
2079  			       const struct btf_type *member_type)
2080  {
2081  	btf_verifier_log_basic(env, struct_type,
2082  			       "Unsupported check_member");
2083  	return -EINVAL;
2084  }
2085  
2086  static int btf_df_check_kflag_member(struct btf_verifier_env *env,
2087  				     const struct btf_type *struct_type,
2088  				     const struct btf_member *member,
2089  				     const struct btf_type *member_type)
2090  {
2091  	btf_verifier_log_basic(env, struct_type,
2092  			       "Unsupported check_kflag_member");
2093  	return -EINVAL;
2094  }
2095  
2096  /* Used for ptr, array struct/union and float type members.
2097   * int, enum and modifier types have their specific callback functions.
2098   */
2099  static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
2100  					  const struct btf_type *struct_type,
2101  					  const struct btf_member *member,
2102  					  const struct btf_type *member_type)
2103  {
2104  	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2105  		btf_verifier_log_member(env, struct_type, member,
2106  					"Invalid member bitfield_size");
2107  		return -EINVAL;
2108  	}
2109  
2110  	/* bitfield size is 0, so member->offset represents bit offset only.
2111  	 * It is safe to call the non-kflag check_member variants.
2112  	 */
2113  	return btf_type_ops(member_type)->check_member(env, struct_type,
2114  						       member,
2115  						       member_type);
2116  }
2117  
2118  static int btf_df_resolve(struct btf_verifier_env *env,
2119  			  const struct resolve_vertex *v)
2120  {
2121  	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2122  	return -EINVAL;
2123  }
2124  
2125  static void btf_df_show(const struct btf *btf, const struct btf_type *t,
2126  			u32 type_id, void *data, u8 bits_offsets,
2127  			struct btf_show *show)
2128  {
2129  	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2130  }
2131  
2132  static int btf_int_check_member(struct btf_verifier_env *env,
2133  				const struct btf_type *struct_type,
2134  				const struct btf_member *member,
2135  				const struct btf_type *member_type)
2136  {
2137  	u32 int_data = btf_type_int(member_type);
2138  	u32 struct_bits_off = member->offset;
2139  	u32 struct_size = struct_type->size;
2140  	u32 nr_copy_bits;
2141  	u32 bytes_offset;
2142  
2143  	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2144  		btf_verifier_log_member(env, struct_type, member,
2145  					"bits_offset exceeds U32_MAX");
2146  		return -EINVAL;
2147  	}
2148  
2149  	struct_bits_off += BTF_INT_OFFSET(int_data);
2150  	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2151  	nr_copy_bits = BTF_INT_BITS(int_data) +
2152  		BITS_PER_BYTE_MASKED(struct_bits_off);
2153  
2154  	if (nr_copy_bits > BITS_PER_U128) {
2155  		btf_verifier_log_member(env, struct_type, member,
2156  					"nr_copy_bits exceeds 128");
2157  		return -EINVAL;
2158  	}
2159  
2160  	if (struct_size < bytes_offset ||
2161  	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2162  		btf_verifier_log_member(env, struct_type, member,
2163  					"Member exceeds struct_size");
2164  		return -EINVAL;
2165  	}
2166  
2167  	return 0;
2168  }
2169  
2170  static int btf_int_check_kflag_member(struct btf_verifier_env *env,
2171  				      const struct btf_type *struct_type,
2172  				      const struct btf_member *member,
2173  				      const struct btf_type *member_type)
2174  {
2175  	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
2176  	u32 int_data = btf_type_int(member_type);
2177  	u32 struct_size = struct_type->size;
2178  	u32 nr_copy_bits;
2179  
2180  	/* a regular int type is required for the kflag int member */
2181  	if (!btf_type_int_is_regular(member_type)) {
2182  		btf_verifier_log_member(env, struct_type, member,
2183  					"Invalid member base type");
2184  		return -EINVAL;
2185  	}
2186  
2187  	/* check sanity of bitfield size */
2188  	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2189  	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2190  	nr_int_data_bits = BTF_INT_BITS(int_data);
2191  	if (!nr_bits) {
2192  		/* Not a bitfield member, member offset must be at byte
2193  		 * boundary.
2194  		 */
2195  		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2196  			btf_verifier_log_member(env, struct_type, member,
2197  						"Invalid member offset");
2198  			return -EINVAL;
2199  		}
2200  
2201  		nr_bits = nr_int_data_bits;
2202  	} else if (nr_bits > nr_int_data_bits) {
2203  		btf_verifier_log_member(env, struct_type, member,
2204  					"Invalid member bitfield_size");
2205  		return -EINVAL;
2206  	}
2207  
2208  	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2209  	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2210  	if (nr_copy_bits > BITS_PER_U128) {
2211  		btf_verifier_log_member(env, struct_type, member,
2212  					"nr_copy_bits exceeds 128");
2213  		return -EINVAL;
2214  	}
2215  
2216  	if (struct_size < bytes_offset ||
2217  	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2218  		btf_verifier_log_member(env, struct_type, member,
2219  					"Member exceeds struct_size");
2220  		return -EINVAL;
2221  	}
2222  
2223  	return 0;
2224  }
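/* Worked example (editor-added, not in the original file): for a
 * bitfield member "u32 x : 5" placed at bit offset 3, nr_bits = 5 is
 * within the 32 int data bits, bytes_offset = BITS_ROUNDDOWN_BYTES(3)
 * = 0, and nr_copy_bits = 5 + 3 = 8, so the member fits as long as the
 * struct extends at least BITS_ROUNDUP_BYTES(8) = 1 byte past
 * bytes_offset.
 */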
2225  
2226  static s32 btf_int_check_meta(struct btf_verifier_env *env,
2227  			      const struct btf_type *t,
2228  			      u32 meta_left)
2229  {
2230  	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2231  	u16 encoding;
2232  
2233  	if (meta_left < meta_needed) {
2234  		btf_verifier_log_basic(env, t,
2235  				       "meta_left:%u meta_needed:%u",
2236  				       meta_left, meta_needed);
2237  		return -EINVAL;
2238  	}
2239  
2240  	if (btf_type_vlen(t)) {
2241  		btf_verifier_log_type(env, t, "vlen != 0");
2242  		return -EINVAL;
2243  	}
2244  
2245  	if (btf_type_kflag(t)) {
2246  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2247  		return -EINVAL;
2248  	}
2249  
2250  	int_data = btf_type_int(t);
2251  	if (int_data & ~BTF_INT_MASK) {
2252  		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2253  				       int_data);
2254  		return -EINVAL;
2255  	}
2256  
2257  	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2258  
2259  	if (nr_bits > BITS_PER_U128) {
2260  		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2261  				      BITS_PER_U128);
2262  		return -EINVAL;
2263  	}
2264  
2265  	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2266  		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2267  		return -EINVAL;
2268  	}
2269  
2270  	/*
2271  	 * Only one of the encoding bits is allowed, and it
2272  	 * should be sufficient for pretty-printing purposes (i.e. decoding).
2273  	 * Multiple bits can be allowed later if this is found
2274  	 * to be insufficient.
2275  	 */
2276  	encoding = BTF_INT_ENCODING(int_data);
2277  	if (encoding &&
2278  	    encoding != BTF_INT_SIGNED &&
2279  	    encoding != BTF_INT_CHAR &&
2280  	    encoding != BTF_INT_BOOL) {
2281  		btf_verifier_log_type(env, t, "Unsupported encoding");
2282  		return -ENOTSUPP;
2283  	}
2284  
2285  	btf_verifier_log_type(env, t, NULL);
2286  
2287  	return meta_needed;
2288  }
2289  
2290  static void btf_int_log(struct btf_verifier_env *env,
2291  			const struct btf_type *t)
2292  {
2293  	int int_data = btf_type_int(t);
2294  
2295  	btf_verifier_log(env,
2296  			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2297  			 t->size, BTF_INT_OFFSET(int_data),
2298  			 BTF_INT_BITS(int_data),
2299  			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2300  }
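/* Illustration (editor-added, not in the original file) of the int_data
 * layout, as defined in uapi/linux/btf.h: bits 24-27 hold the encoding,
 * bits 16-23 the bit offset, and bits 0-7 the number of bits.  A plain
 * signed 32-bit "int" therefore has t->size = 4 and
 * int_data = 0x01000020 (encoding=SIGNED, bits_offset=0, nr_bits=32).
 */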
2301  
2302  static void btf_int128_print(struct btf_show *show, void *data)
2303  {
2304  	/* data points to a __int128 number.
2305  	 * Suppose
2306  	 *     int128_num = *(__int128 *)data;
2307  	 * The formulas below show what upper_num and lower_num represent:
2308  	 *     upper_num = int128_num >> 64;
2309  	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2310  	 */
2311  	u64 upper_num, lower_num;
2312  
2313  #ifdef __BIG_ENDIAN_BITFIELD
2314  	upper_num = *(u64 *)data;
2315  	lower_num = *(u64 *)(data + 8);
2316  #else
2317  	upper_num = *(u64 *)(data + 8);
2318  	lower_num = *(u64 *)data;
2319  #endif
2320  	if (upper_num == 0)
2321  		btf_show_type_value(show, "0x%llx", lower_num);
2322  	else
2323  		btf_show_type_values(show, "0x%llx%016llx", upper_num,
2324  				     lower_num);
2325  }
2326  
2327  static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2328  			     u16 right_shift_bits)
2329  {
2330  	u64 upper_num, lower_num;
2331  
2332  #ifdef __BIG_ENDIAN_BITFIELD
2333  	upper_num = print_num[0];
2334  	lower_num = print_num[1];
2335  #else
2336  	upper_num = print_num[1];
2337  	lower_num = print_num[0];
2338  #endif
2339  
2340  	/* shake out unneeded bits by shift/or operations */
2341  	if (left_shift_bits >= 64) {
2342  		upper_num = lower_num << (left_shift_bits - 64);
2343  		lower_num = 0;
2344  	} else {
2345  		upper_num = (upper_num << left_shift_bits) |
2346  			    (lower_num >> (64 - left_shift_bits));
2347  		lower_num = lower_num << left_shift_bits;
2348  	}
2349  
2350  	if (right_shift_bits >= 64) {
2351  		lower_num = upper_num >> (right_shift_bits - 64);
2352  		upper_num = 0;
2353  	} else {
2354  		lower_num = (lower_num >> right_shift_bits) |
2355  			    (upper_num << (64 - right_shift_bits));
2356  		upper_num = upper_num >> right_shift_bits;
2357  	}
2358  
2359  #ifdef __BIG_ENDIAN_BITFIELD
2360  	print_num[0] = upper_num;
2361  	print_num[1] = lower_num;
2362  #else
2363  	print_num[0] = lower_num;
2364  	print_num[1] = upper_num;
2365  #endif
2366  }
2367  
2368  static void btf_bitfield_show(void *data, u8 bits_offset,
2369  			      u8 nr_bits, struct btf_show *show)
2370  {
2371  	u16 left_shift_bits, right_shift_bits;
2372  	u8 nr_copy_bytes;
2373  	u8 nr_copy_bits;
2374  	u64 print_num[2] = {};
2375  
2376  	nr_copy_bits = nr_bits + bits_offset;
2377  	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2378  
2379  	memcpy(print_num, data, nr_copy_bytes);
2380  
2381  #ifdef __BIG_ENDIAN_BITFIELD
2382  	left_shift_bits = bits_offset;
2383  #else
2384  	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2385  #endif
2386  	right_shift_bits = BITS_PER_U128 - nr_bits;
2387  
2388  	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2389  	btf_int128_print(show, print_num);
2390  }
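/* Worked example (editor-added, not in the original file): printing a
 * 5-bit field at bit offset 3 on a little-endian machine copies
 * BITS_ROUNDUP_BYTES(5 + 3) = 1 byte into print_num, then
 * btf_int128_shift() uses
 *   left_shift_bits  = 128 - (5 + 3) = 120
 *   right_shift_bits = 128 - 5       = 123
 * so shifting left by 120 drops everything above the field and
 * shifting right by 123 drops the 3 low bits, leaving just the field
 * value in the low 5 bits of print_num.
 */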
2391  
2392  
2393  static void btf_int_bits_show(const struct btf *btf,
2394  			      const struct btf_type *t,
2395  			      void *data, u8 bits_offset,
2396  			      struct btf_show *show)
2397  {
2398  	u32 int_data = btf_type_int(t);
2399  	u8 nr_bits = BTF_INT_BITS(int_data);
2400  	u8 total_bits_offset;
2401  
2402  	/*
2403  	 * bits_offset is at most 7.
2404  	 * BTF_INT_OFFSET() cannot exceed 128 bits.
2405  	 */
2406  	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2407  	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2408  	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2409  	btf_bitfield_show(data, bits_offset, nr_bits, show);
2410  }
2411  
2412  static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2413  			 u32 type_id, void *data, u8 bits_offset,
2414  			 struct btf_show *show)
2415  {
2416  	u32 int_data = btf_type_int(t);
2417  	u8 encoding = BTF_INT_ENCODING(int_data);
2418  	bool sign = encoding & BTF_INT_SIGNED;
2419  	u8 nr_bits = BTF_INT_BITS(int_data);
2420  	void *safe_data;
2421  
2422  	safe_data = btf_show_start_type(show, t, type_id, data);
2423  	if (!safe_data)
2424  		return;
2425  
2426  	if (bits_offset || BTF_INT_OFFSET(int_data) ||
2427  	    BITS_PER_BYTE_MASKED(nr_bits)) {
2428  		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2429  		goto out;
2430  	}
2431  
2432  	switch (nr_bits) {
2433  	case 128:
2434  		btf_int128_print(show, safe_data);
2435  		break;
2436  	case 64:
2437  		if (sign)
2438  			btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2439  		else
2440  			btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2441  		break;
2442  	case 32:
2443  		if (sign)
2444  			btf_show_type_value(show, "%d", *(s32 *)safe_data);
2445  		else
2446  			btf_show_type_value(show, "%u", *(u32 *)safe_data);
2447  		break;
2448  	case 16:
2449  		if (sign)
2450  			btf_show_type_value(show, "%d", *(s16 *)safe_data);
2451  		else
2452  			btf_show_type_value(show, "%u", *(u16 *)safe_data);
2453  		break;
2454  	case 8:
2455  		if (show->state.array_encoding == BTF_INT_CHAR) {
2456  			/* check for null terminator */
2457  			if (show->state.array_terminated)
2458  				break;
2459  			if (*(char *)data == '\0') {
2460  				show->state.array_terminated = 1;
2461  				break;
2462  			}
2463  			if (isprint(*(char *)data)) {
2464  				btf_show_type_value(show, "'%c'",
2465  						    *(char *)safe_data);
2466  				break;
2467  			}
2468  		}
2469  		if (sign)
2470  			btf_show_type_value(show, "%d", *(s8 *)safe_data);
2471  		else
2472  			btf_show_type_value(show, "%u", *(u8 *)safe_data);
2473  		break;
2474  	default:
2475  		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2476  		break;
2477  	}
2478  out:
2479  	btf_show_end_type(show);
2480  }
2481  
2482  static const struct btf_kind_operations int_ops = {
2483  	.check_meta = btf_int_check_meta,
2484  	.resolve = btf_df_resolve,
2485  	.check_member = btf_int_check_member,
2486  	.check_kflag_member = btf_int_check_kflag_member,
2487  	.log_details = btf_int_log,
2488  	.show = btf_int_show,
2489  };
2490  
2491  static int btf_modifier_check_member(struct btf_verifier_env *env,
2492  				     const struct btf_type *struct_type,
2493  				     const struct btf_member *member,
2494  				     const struct btf_type *member_type)
2495  {
2496  	const struct btf_type *resolved_type;
2497  	u32 resolved_type_id = member->type;
2498  	struct btf_member resolved_member;
2499  	struct btf *btf = env->btf;
2500  
2501  	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2502  	if (!resolved_type) {
2503  		btf_verifier_log_member(env, struct_type, member,
2504  					"Invalid member");
2505  		return -EINVAL;
2506  	}
2507  
2508  	resolved_member = *member;
2509  	resolved_member.type = resolved_type_id;
2510  
2511  	return btf_type_ops(resolved_type)->check_member(env, struct_type,
2512  							 &resolved_member,
2513  							 resolved_type);
2514  }
2515  
2516  static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2517  					   const struct btf_type *struct_type,
2518  					   const struct btf_member *member,
2519  					   const struct btf_type *member_type)
2520  {
2521  	const struct btf_type *resolved_type;
2522  	u32 resolved_type_id = member->type;
2523  	struct btf_member resolved_member;
2524  	struct btf *btf = env->btf;
2525  
2526  	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2527  	if (!resolved_type) {
2528  		btf_verifier_log_member(env, struct_type, member,
2529  					"Invalid member");
2530  		return -EINVAL;
2531  	}
2532  
2533  	resolved_member = *member;
2534  	resolved_member.type = resolved_type_id;
2535  
2536  	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2537  							       &resolved_member,
2538  							       resolved_type);
2539  }
2540  
2541  static int btf_ptr_check_member(struct btf_verifier_env *env,
2542  				const struct btf_type *struct_type,
2543  				const struct btf_member *member,
2544  				const struct btf_type *member_type)
2545  {
2546  	u32 struct_size, struct_bits_off, bytes_offset;
2547  
2548  	struct_size = struct_type->size;
2549  	struct_bits_off = member->offset;
2550  	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2551  
2552  	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2553  		btf_verifier_log_member(env, struct_type, member,
2554  					"Member is not byte aligned");
2555  		return -EINVAL;
2556  	}
2557  
2558  	if (struct_size - bytes_offset < sizeof(void *)) {
2559  		btf_verifier_log_member(env, struct_type, member,
2560  					"Member exceeds struct_size");
2561  		return -EINVAL;
2562  	}
2563  
2564  	return 0;
2565  }
2566  
2567  static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2568  				   const struct btf_type *t,
2569  				   u32 meta_left)
2570  {
2571  	const char *value;
2572  
2573  	if (btf_type_vlen(t)) {
2574  		btf_verifier_log_type(env, t, "vlen != 0");
2575  		return -EINVAL;
2576  	}
2577  
2578  	if (btf_type_kflag(t)) {
2579  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2580  		return -EINVAL;
2581  	}
2582  
2583  	if (!BTF_TYPE_ID_VALID(t->type)) {
2584  		btf_verifier_log_type(env, t, "Invalid type_id");
2585  		return -EINVAL;
2586  	}
2587  
2588  	/* A typedef/type_tag type must have a valid name, while the other
2589  	 * ref types (volatile, const, restrict) must have a null name.
2590  	 */
2591  	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2592  		if (!t->name_off ||
2593  		    !btf_name_valid_identifier(env->btf, t->name_off)) {
2594  			btf_verifier_log_type(env, t, "Invalid name");
2595  			return -EINVAL;
2596  		}
2597  	} else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2598  		value = btf_name_by_offset(env->btf, t->name_off);
2599  		if (!value || !value[0]) {
2600  			btf_verifier_log_type(env, t, "Invalid name");
2601  			return -EINVAL;
2602  		}
2603  	} else {
2604  		if (t->name_off) {
2605  			btf_verifier_log_type(env, t, "Invalid name");
2606  			return -EINVAL;
2607  		}
2608  	}
2609  
2610  	btf_verifier_log_type(env, t, NULL);
2611  
2612  	return 0;
2613  }
2614  
2615  static int btf_modifier_resolve(struct btf_verifier_env *env,
2616  				const struct resolve_vertex *v)
2617  {
2618  	const struct btf_type *t = v->t;
2619  	const struct btf_type *next_type;
2620  	u32 next_type_id = t->type;
2621  	struct btf *btf = env->btf;
2622  
2623  	next_type = btf_type_by_id(btf, next_type_id);
2624  	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2625  		btf_verifier_log_type(env, v->t, "Invalid type_id");
2626  		return -EINVAL;
2627  	}
2628  
2629  	if (!env_type_is_resolve_sink(env, next_type) &&
2630  	    !env_type_is_resolved(env, next_type_id))
2631  		return env_stack_push(env, next_type, next_type_id);
2632  
2633  	/* Figure out the resolved next_type_id with size.
2634  	 * They will be stored in the current modifier's
2635  	 * resolved_ids and resolved_sizes so that they can
2636  	 * save us a few type-following hops when used later (e.g. in
2637  	 * pretty print).
2638  	 */
2639  	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2640  		if (env_type_is_resolved(env, next_type_id))
2641  			next_type = btf_type_id_resolve(btf, &next_type_id);
2642  
2643  		/* "typedef void new_void", "const void"...etc */
2644  		if (!btf_type_is_void(next_type) &&
2645  		    !btf_type_is_fwd(next_type) &&
2646  		    !btf_type_is_func_proto(next_type)) {
2647  			btf_verifier_log_type(env, v->t, "Invalid type_id");
2648  			return -EINVAL;
2649  		}
2650  	}
2651  
2652  	env_stack_pop_resolved(env, next_type_id, 0);
2653  
2654  	return 0;
2655  }
2656  
2657  static int btf_var_resolve(struct btf_verifier_env *env,
2658  			   const struct resolve_vertex *v)
2659  {
2660  	const struct btf_type *next_type;
2661  	const struct btf_type *t = v->t;
2662  	u32 next_type_id = t->type;
2663  	struct btf *btf = env->btf;
2664  
2665  	next_type = btf_type_by_id(btf, next_type_id);
2666  	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2667  		btf_verifier_log_type(env, v->t, "Invalid type_id");
2668  		return -EINVAL;
2669  	}
2670  
2671  	if (!env_type_is_resolve_sink(env, next_type) &&
2672  	    !env_type_is_resolved(env, next_type_id))
2673  		return env_stack_push(env, next_type, next_type_id);
2674  
2675  	if (btf_type_is_modifier(next_type)) {
2676  		const struct btf_type *resolved_type;
2677  		u32 resolved_type_id;
2678  
2679  		resolved_type_id = next_type_id;
2680  		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2681  
2682  		if (btf_type_is_ptr(resolved_type) &&
2683  		    !env_type_is_resolve_sink(env, resolved_type) &&
2684  		    !env_type_is_resolved(env, resolved_type_id))
2685  			return env_stack_push(env, resolved_type,
2686  					      resolved_type_id);
2687  	}
2688  
2689  	/* We must resolve to something concrete at this point; no
2690  	 * forward types or anything similar that would resolve to a
2691  	 * size of zero are allowed.
2692  	 */
2693  	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2694  		btf_verifier_log_type(env, v->t, "Invalid type_id");
2695  		return -EINVAL;
2696  	}
2697  
2698  	env_stack_pop_resolved(env, next_type_id, 0);
2699  
2700  	return 0;
2701  }
2702  
2703  static int btf_ptr_resolve(struct btf_verifier_env *env,
2704  			   const struct resolve_vertex *v)
2705  {
2706  	const struct btf_type *next_type;
2707  	const struct btf_type *t = v->t;
2708  	u32 next_type_id = t->type;
2709  	struct btf *btf = env->btf;
2710  
2711  	next_type = btf_type_by_id(btf, next_type_id);
2712  	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2713  		btf_verifier_log_type(env, v->t, "Invalid type_id");
2714  		return -EINVAL;
2715  	}
2716  
2717  	if (!env_type_is_resolve_sink(env, next_type) &&
2718  	    !env_type_is_resolved(env, next_type_id))
2719  		return env_stack_push(env, next_type, next_type_id);
2720  
2721  	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2722  	 * the modifier may have stopped resolving when it was resolved
2723  	 * to a ptr (last-resolved-ptr).
2724  	 *
2725  	 * We now need to continue from the last-resolved-ptr to
2726  	 * ensure that the last-resolved-ptr does not refer back to
2727  	 * the current ptr (t).
2728  	 */
2729  	if (btf_type_is_modifier(next_type)) {
2730  		const struct btf_type *resolved_type;
2731  		u32 resolved_type_id;
2732  
2733  		resolved_type_id = next_type_id;
2734  		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2735  
2736  		if (btf_type_is_ptr(resolved_type) &&
2737  		    !env_type_is_resolve_sink(env, resolved_type) &&
2738  		    !env_type_is_resolved(env, resolved_type_id))
2739  			return env_stack_push(env, resolved_type,
2740  					      resolved_type_id);
2741  	}
2742  
2743  	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2744  		if (env_type_is_resolved(env, next_type_id))
2745  			next_type = btf_type_id_resolve(btf, &next_type_id);
2746  
2747  		if (!btf_type_is_void(next_type) &&
2748  		    !btf_type_is_fwd(next_type) &&
2749  		    !btf_type_is_func_proto(next_type)) {
2750  			btf_verifier_log_type(env, v->t, "Invalid type_id");
2751  			return -EINVAL;
2752  		}
2753  	}
2754  
2755  	env_stack_pop_resolved(env, next_type_id, 0);
2756  
2757  	return 0;
2758  }
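/* Illustration (editor-added, not in the original file) of the
 * last-resolved-ptr case above: given
 *   [1] PTR -> [2] CONST -> [3] PTR -> [1]
 * the CONST at [2] may already be RESOLVED to the ptr at [3] from an
 * earlier RESOLVE_STRUCT_OR_ARRAY walk, where a ptr is a sink.
 * Resolving [1] then pushes [3], so the cycle back to [1] is caught
 * (env_stack_push() returns -EEXIST) instead of being silently
 * accepted.
 */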
2759  
2760  static void btf_modifier_show(const struct btf *btf,
2761  			      const struct btf_type *t,
2762  			      u32 type_id, void *data,
2763  			      u8 bits_offset, struct btf_show *show)
2764  {
2765  	if (btf->resolved_ids)
2766  		t = btf_type_id_resolve(btf, &type_id);
2767  	else
2768  		t = btf_type_skip_modifiers(btf, type_id, NULL);
2769  
2770  	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2771  }
2772  
2773  static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2774  			 u32 type_id, void *data, u8 bits_offset,
2775  			 struct btf_show *show)
2776  {
2777  	t = btf_type_id_resolve(btf, &type_id);
2778  
2779  	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2780  }
2781  
2782  static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2783  			 u32 type_id, void *data, u8 bits_offset,
2784  			 struct btf_show *show)
2785  {
2786  	void *safe_data;
2787  
2788  	safe_data = btf_show_start_type(show, t, type_id, data);
2789  	if (!safe_data)
2790  		return;
2791  
2792  	/* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2793  	if (show->flags & BTF_SHOW_PTR_RAW)
2794  		btf_show_type_value(show, "0x%px", *(void **)safe_data);
2795  	else
2796  		btf_show_type_value(show, "0x%p", *(void **)safe_data);
2797  	btf_show_end_type(show);
2798  }
2799  
2800  static void btf_ref_type_log(struct btf_verifier_env *env,
2801  			     const struct btf_type *t)
2802  {
2803  	btf_verifier_log(env, "type_id=%u", t->type);
2804  }
2805  
2806  static const struct btf_kind_operations modifier_ops = {
2807  	.check_meta = btf_ref_type_check_meta,
2808  	.resolve = btf_modifier_resolve,
2809  	.check_member = btf_modifier_check_member,
2810  	.check_kflag_member = btf_modifier_check_kflag_member,
2811  	.log_details = btf_ref_type_log,
2812  	.show = btf_modifier_show,
2813  };
2814  
2815  static const struct btf_kind_operations ptr_ops = {
2816  	.check_meta = btf_ref_type_check_meta,
2817  	.resolve = btf_ptr_resolve,
2818  	.check_member = btf_ptr_check_member,
2819  	.check_kflag_member = btf_generic_check_kflag_member,
2820  	.log_details = btf_ref_type_log,
2821  	.show = btf_ptr_show,
2822  };
2823  
2824  static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2825  			      const struct btf_type *t,
2826  			      u32 meta_left)
2827  {
2828  	if (btf_type_vlen(t)) {
2829  		btf_verifier_log_type(env, t, "vlen != 0");
2830  		return -EINVAL;
2831  	}
2832  
2833  	if (t->type) {
2834  		btf_verifier_log_type(env, t, "type != 0");
2835  		return -EINVAL;
2836  	}
2837  
2838  	/* fwd type must have a valid name */
2839  	if (!t->name_off ||
2840  	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2841  		btf_verifier_log_type(env, t, "Invalid name");
2842  		return -EINVAL;
2843  	}
2844  
2845  	btf_verifier_log_type(env, t, NULL);
2846  
2847  	return 0;
2848  }
2849  
2850  static void btf_fwd_type_log(struct btf_verifier_env *env,
2851  			     const struct btf_type *t)
2852  {
2853  	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2854  }
2855  
2856  static const struct btf_kind_operations fwd_ops = {
2857  	.check_meta = btf_fwd_check_meta,
2858  	.resolve = btf_df_resolve,
2859  	.check_member = btf_df_check_member,
2860  	.check_kflag_member = btf_df_check_kflag_member,
2861  	.log_details = btf_fwd_type_log,
2862  	.show = btf_df_show,
2863  };
2864  
2865  static int btf_array_check_member(struct btf_verifier_env *env,
2866  				  const struct btf_type *struct_type,
2867  				  const struct btf_member *member,
2868  				  const struct btf_type *member_type)
2869  {
2870  	u32 struct_bits_off = member->offset;
2871  	u32 struct_size, bytes_offset;
2872  	u32 array_type_id, array_size;
2873  	struct btf *btf = env->btf;
2874  
2875  	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2876  		btf_verifier_log_member(env, struct_type, member,
2877  					"Member is not byte aligned");
2878  		return -EINVAL;
2879  	}
2880  
2881  	array_type_id = member->type;
2882  	btf_type_id_size(btf, &array_type_id, &array_size);
2883  	struct_size = struct_type->size;
2884  	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2885  	if (struct_size - bytes_offset < array_size) {
2886  		btf_verifier_log_member(env, struct_type, member,
2887  					"Member exceeds struct_size");
2888  		return -EINVAL;
2889  	}
2890  
2891  	return 0;
2892  }
2893  
2894  static s32 btf_array_check_meta(struct btf_verifier_env *env,
2895  				const struct btf_type *t,
2896  				u32 meta_left)
2897  {
2898  	const struct btf_array *array = btf_type_array(t);
2899  	u32 meta_needed = sizeof(*array);
2900  
2901  	if (meta_left < meta_needed) {
2902  		btf_verifier_log_basic(env, t,
2903  				       "meta_left:%u meta_needed:%u",
2904  				       meta_left, meta_needed);
2905  		return -EINVAL;
2906  	}
2907  
2908  	/* array type should not have a name */
2909  	if (t->name_off) {
2910  		btf_verifier_log_type(env, t, "Invalid name");
2911  		return -EINVAL;
2912  	}
2913  
2914  	if (btf_type_vlen(t)) {
2915  		btf_verifier_log_type(env, t, "vlen != 0");
2916  		return -EINVAL;
2917  	}
2918  
2919  	if (btf_type_kflag(t)) {
2920  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2921  		return -EINVAL;
2922  	}
2923  
2924  	if (t->size) {
2925  		btf_verifier_log_type(env, t, "size != 0");
2926  		return -EINVAL;
2927  	}
2928  
2929  	/* Array elem type and index type cannot be in type void,
2930  	 * so !array->type and !array->index_type are not allowed.
2931  	 */
2932  	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2933  		btf_verifier_log_type(env, t, "Invalid elem");
2934  		return -EINVAL;
2935  	}
2936  
2937  	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2938  		btf_verifier_log_type(env, t, "Invalid index");
2939  		return -EINVAL;
2940  	}
2941  
2942  	btf_verifier_log_type(env, t, NULL);
2943  
2944  	return meta_needed;
2945  }
2946  
2947  static int btf_array_resolve(struct btf_verifier_env *env,
2948  			     const struct resolve_vertex *v)
2949  {
2950  	const struct btf_array *array = btf_type_array(v->t);
2951  	const struct btf_type *elem_type, *index_type;
2952  	u32 elem_type_id, index_type_id;
2953  	struct btf *btf = env->btf;
2954  	u32 elem_size;
2955  
2956  	/* Check array->index_type */
2957  	index_type_id = array->index_type;
2958  	index_type = btf_type_by_id(btf, index_type_id);
2959  	if (btf_type_nosize_or_null(index_type) ||
2960  	    btf_type_is_resolve_source_only(index_type)) {
2961  		btf_verifier_log_type(env, v->t, "Invalid index");
2962  		return -EINVAL;
2963  	}
2964  
2965  	if (!env_type_is_resolve_sink(env, index_type) &&
2966  	    !env_type_is_resolved(env, index_type_id))
2967  		return env_stack_push(env, index_type, index_type_id);
2968  
2969  	index_type = btf_type_id_size(btf, &index_type_id, NULL);
2970  	if (!index_type || !btf_type_is_int(index_type) ||
2971  	    !btf_type_int_is_regular(index_type)) {
2972  		btf_verifier_log_type(env, v->t, "Invalid index");
2973  		return -EINVAL;
2974  	}
2975  
2976  	/* Check array->type */
2977  	elem_type_id = array->type;
2978  	elem_type = btf_type_by_id(btf, elem_type_id);
2979  	if (btf_type_nosize_or_null(elem_type) ||
2980  	    btf_type_is_resolve_source_only(elem_type)) {
2981  		btf_verifier_log_type(env, v->t,
2982  				      "Invalid elem");
2983  		return -EINVAL;
2984  	}
2985  
2986  	if (!env_type_is_resolve_sink(env, elem_type) &&
2987  	    !env_type_is_resolved(env, elem_type_id))
2988  		return env_stack_push(env, elem_type, elem_type_id);
2989  
2990  	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2991  	if (!elem_type) {
2992  		btf_verifier_log_type(env, v->t, "Invalid elem");
2993  		return -EINVAL;
2994  	}
2995  
2996  	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2997  		btf_verifier_log_type(env, v->t, "Invalid array of int");
2998  		return -EINVAL;
2999  	}
3000  
3001  	if (array->nelems && elem_size > U32_MAX / array->nelems) {
3002  		btf_verifier_log_type(env, v->t,
3003  				      "Array size overflows U32_MAX");
3004  		return -EINVAL;
3005  	}
3006  
3007  	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
3008  
3009  	return 0;
3010  }
3011  
3012  static void btf_array_log(struct btf_verifier_env *env,
3013  			  const struct btf_type *t)
3014  {
3015  	const struct btf_array *array = btf_type_array(t);
3016  
3017  	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
3018  			 array->type, array->index_type, array->nelems);
3019  }
3020  
3021  static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
3022  			     u32 type_id, void *data, u8 bits_offset,
3023  			     struct btf_show *show)
3024  {
3025  	const struct btf_array *array = btf_type_array(t);
3026  	const struct btf_kind_operations *elem_ops;
3027  	const struct btf_type *elem_type;
3028  	u32 i, elem_size = 0, elem_type_id;
3029  	u16 encoding = 0;
3030  
3031  	elem_type_id = array->type;
3032  	elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
3033  	if (elem_type && btf_type_has_size(elem_type))
3034  		elem_size = elem_type->size;
3035  
3036  	if (elem_type && btf_type_is_int(elem_type)) {
3037  		u32 int_type = btf_type_int(elem_type);
3038  
3039  		encoding = BTF_INT_ENCODING(int_type);
3040  
3041  		/*
3042  		 * BTF_INT_CHAR encoding never seems to be set for
3043  		 * char arrays, so if size is 1 and element is
3044  		 * printable as a char, we'll do that.
3045  		 */
3046  		if (elem_size == 1)
3047  			encoding = BTF_INT_CHAR;
3048  	}
3049  
3050  	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
3051  		return;
3052  
3053  	if (!elem_type)
3054  		goto out;
3055  	elem_ops = btf_type_ops(elem_type);
3056  
3057  	for (i = 0; i < array->nelems; i++) {
3058  
3059  		btf_show_start_array_member(show);
3060  
3061  		elem_ops->show(btf, elem_type, elem_type_id, data,
3062  			       bits_offset, show);
3063  		data += elem_size;
3064  
3065  		btf_show_end_array_member(show);
3066  
3067  		if (show->state.array_terminated)
3068  			break;
3069  	}
3070  out:
3071  	btf_show_end_array_type(show);
3072  }
3073  
3074  static void btf_array_show(const struct btf *btf, const struct btf_type *t,
3075  			   u32 type_id, void *data, u8 bits_offset,
3076  			   struct btf_show *show)
3077  {
3078  	const struct btf_member *m = show->state.member;
3079  
3080  	/*
3081  	 * First check if any members would be shown (are non-zero).
3082  	 * See comments above "struct btf_show" definition for more
3083  	 * details on how this works at a high level.
3084  	 */
3085  	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3086  		if (!show->state.depth_check) {
3087  			show->state.depth_check = show->state.depth + 1;
3088  			show->state.depth_to_show = 0;
3089  		}
3090  		__btf_array_show(btf, t, type_id, data, bits_offset, show);
3091  		show->state.member = m;
3092  
3093  		if (show->state.depth_check != show->state.depth + 1)
3094  			return;
3095  		show->state.depth_check = 0;
3096  
3097  		if (show->state.depth_to_show <= show->state.depth)
3098  			return;
3099  		/*
3100  		 * Reaching here indicates we have recursed and found
3101  		 * non-zero array member(s).
3102  		 */
3103  	}
3104  	__btf_array_show(btf, t, type_id, data, bits_offset, show);
3105  }
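/* Note (editor-added, not in the original file): this is effectively a
 * two-pass scheme.  The first __btf_array_show() call runs with
 * state.depth_check set, which should suppress output and only record
 * in depth_to_show whether any non-zero member exists; the second call
 * actually prints.  Zero-valued arrays are thereby skipped unless
 * BTF_SHOW_ZERO is set.
 */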
3106  
3107  static const struct btf_kind_operations array_ops = {
3108  	.check_meta = btf_array_check_meta,
3109  	.resolve = btf_array_resolve,
3110  	.check_member = btf_array_check_member,
3111  	.check_kflag_member = btf_generic_check_kflag_member,
3112  	.log_details = btf_array_log,
3113  	.show = btf_array_show,
3114  };
3115  
3116  static int btf_struct_check_member(struct btf_verifier_env *env,
3117  				   const struct btf_type *struct_type,
3118  				   const struct btf_member *member,
3119  				   const struct btf_type *member_type)
3120  {
3121  	u32 struct_bits_off = member->offset;
3122  	u32 struct_size, bytes_offset;
3123  
3124  	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3125  		btf_verifier_log_member(env, struct_type, member,
3126  					"Member is not byte aligned");
3127  		return -EINVAL;
3128  	}
3129  
3130  	struct_size = struct_type->size;
3131  	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3132  	if (struct_size - bytes_offset < member_type->size) {
3133  		btf_verifier_log_member(env, struct_type, member,
3134  					"Member exceeds struct_size");
3135  		return -EINVAL;
3136  	}
3137  
3138  	return 0;
3139  }
3140  
3141  static s32 btf_struct_check_meta(struct btf_verifier_env *env,
3142  				 const struct btf_type *t,
3143  				 u32 meta_left)
3144  {
3145  	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3146  	const struct btf_member *member;
3147  	u32 meta_needed, last_offset;
3148  	struct btf *btf = env->btf;
3149  	u32 struct_size = t->size;
3150  	u32 offset;
3151  	u16 i;
3152  
3153  	meta_needed = btf_type_vlen(t) * sizeof(*member);
3154  	if (meta_left < meta_needed) {
3155  		btf_verifier_log_basic(env, t,
3156  				       "meta_left:%u meta_needed:%u",
3157  				       meta_left, meta_needed);
3158  		return -EINVAL;
3159  	}
3160  
3161  	/* a struct type has either no name or a valid one */
3162  	if (t->name_off &&
3163  	    !btf_name_valid_identifier(env->btf, t->name_off)) {
3164  		btf_verifier_log_type(env, t, "Invalid name");
3165  		return -EINVAL;
3166  	}
3167  
3168  	btf_verifier_log_type(env, t, NULL);
3169  
3170  	last_offset = 0;
3171  	for_each_member(i, t, member) {
3172  		if (!btf_name_offset_valid(btf, member->name_off)) {
3173  			btf_verifier_log_member(env, t, member,
3174  						"Invalid member name_offset:%u",
3175  						member->name_off);
3176  			return -EINVAL;
3177  		}
3178  
3179  		/* a struct member has either no name or a valid one */
3180  		if (member->name_off &&
3181  		    !btf_name_valid_identifier(btf, member->name_off)) {
3182  			btf_verifier_log_member(env, t, member, "Invalid name");
3183  			return -EINVAL;
3184  		}
3185  		/* A member cannot be in type void */
3186  		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3187  			btf_verifier_log_member(env, t, member,
3188  						"Invalid type_id");
3189  			return -EINVAL;
3190  		}
3191  
3192  		offset = __btf_member_bit_offset(t, member);
3193  		if (is_union && offset) {
3194  			btf_verifier_log_member(env, t, member,
3195  						"Invalid member bits_offset");
3196  			return -EINVAL;
3197  		}
3198  
3199  		/*
3200  		 * ">" instead of ">=" because the last member could be
3201  		 * "char a[0];"
3202  		 */
3203  		if (last_offset > offset) {
3204  			btf_verifier_log_member(env, t, member,
3205  						"Invalid member bits_offset");
3206  			return -EINVAL;
3207  		}
3208  
3209  		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3210  			btf_verifier_log_member(env, t, member,
3211  						"Member bits_offset exceeds its struct size");
3212  			return -EINVAL;
3213  		}
3214  
3215  		btf_verifier_log_member(env, t, member, NULL);
3216  		last_offset = offset;
3217  	}
3218  
3219  	return meta_needed;
3220  }
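
/* Illustrative example (not from this file): for a source-level
 *
 *	struct foo {
 *		int a;
 *		short b;
 *	};
 *
 * the checks above accept bit offsets 0 and 32 within size=8, and the
 * verifier log would contain something like:
 *
 *	[5] STRUCT 'foo' size=8 vlen=2
 *		'a' type_id=1 bits_offset=0
 *		'b' type_id=2 bits_offset=32
 *
 * The type_ids here are hypothetical; they depend on the BTF at hand.
 */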
3221  
3222  static int btf_struct_resolve(struct btf_verifier_env *env,
3223  			      const struct resolve_vertex *v)
3224  {
3225  	const struct btf_member *member;
3226  	int err;
3227  	u16 i;
3228  
3229  	/* Before continuing to resolve the next_member,
3230  	 * ensure the last member is indeed resolved to a
3231  	 * type with size info.
3232  	 */
3233  	if (v->next_member) {
3234  		const struct btf_type *last_member_type;
3235  		const struct btf_member *last_member;
3236  		u32 last_member_type_id;
3237  
3238  		last_member = btf_type_member(v->t) + v->next_member - 1;
3239  		last_member_type_id = last_member->type;
3240  		if (WARN_ON_ONCE(!env_type_is_resolved(env,
3241  						       last_member_type_id)))
3242  			return -EINVAL;
3243  
3244  		last_member_type = btf_type_by_id(env->btf,
3245  						  last_member_type_id);
3246  		if (btf_type_kflag(v->t))
3247  			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3248  								last_member,
3249  								last_member_type);
3250  		else
3251  			err = btf_type_ops(last_member_type)->check_member(env, v->t,
3252  								last_member,
3253  								last_member_type);
3254  		if (err)
3255  			return err;
3256  	}
3257  
3258  	for_each_member_from(i, v->next_member, v->t, member) {
3259  		u32 member_type_id = member->type;
3260  		const struct btf_type *member_type = btf_type_by_id(env->btf,
3261  								member_type_id);
3262  
3263  		if (btf_type_nosize_or_null(member_type) ||
3264  		    btf_type_is_resolve_source_only(member_type)) {
3265  			btf_verifier_log_member(env, v->t, member,
3266  						"Invalid member");
3267  			return -EINVAL;
3268  		}
3269  
3270  		if (!env_type_is_resolve_sink(env, member_type) &&
3271  		    !env_type_is_resolved(env, member_type_id)) {
3272  			env_stack_set_next_member(env, i + 1);
3273  			return env_stack_push(env, member_type, member_type_id);
3274  		}
3275  
3276  		if (btf_type_kflag(v->t))
3277  			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3278  									    member,
3279  									    member_type);
3280  		else
3281  			err = btf_type_ops(member_type)->check_member(env, v->t,
3282  								      member,
3283  								      member_type);
3284  		if (err)
3285  			return err;
3286  	}
3287  
3288  	env_stack_pop_resolved(env, 0, 0);
3289  
3290  	return 0;
3291  }
3292  
3293  static void btf_struct_log(struct btf_verifier_env *env,
3294  			   const struct btf_type *t)
3295  {
3296  	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3297  }
3298  
3299  enum {
3300  	BTF_FIELD_IGNORE = 0,
3301  	BTF_FIELD_FOUND  = 1,
3302  };
3303  
3304  struct btf_field_info {
3305  	enum btf_field_type type;
3306  	u32 off;
3307  	union {
3308  		struct {
3309  			u32 type_id;
3310  		} kptr;
3311  		struct {
3312  			const char *node_name;
3313  			u32 value_btf_id;
3314  		} graph_root;
3315  	};
3316  };
3317  
3318  static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3319  			   u32 off, int sz, enum btf_field_type field_type,
3320  			   struct btf_field_info *info)
3321  {
3322  	if (!__btf_type_is_struct(t))
3323  		return BTF_FIELD_IGNORE;
3324  	if (t->size != sz)
3325  		return BTF_FIELD_IGNORE;
3326  	info->type = field_type;
3327  	info->off = off;
3328  	return BTF_FIELD_FOUND;
3329  }
3330  
3331  static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3332  			 u32 off, int sz, struct btf_field_info *info, u32 field_mask)
3333  {
3334  	enum btf_field_type type;
3335  	u32 res_id;
3336  
3337  	/* Permit modifiers on the pointer itself */
3338  	if (btf_type_is_volatile(t))
3339  		t = btf_type_by_id(btf, t->type);
3340  	/* For PTR, sz is always == 8 */
3341  	if (!btf_type_is_ptr(t))
3342  		return BTF_FIELD_IGNORE;
3343  	t = btf_type_by_id(btf, t->type);
3344  
3345  	if (!btf_type_is_type_tag(t))
3346  		return BTF_FIELD_IGNORE;
3347  	/* Reject extra tags */
3348  	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3349  		return -EINVAL;
3350  	if (!strcmp("kptr_untrusted", __btf_name_by_offset(btf, t->name_off)))
3351  		type = BPF_KPTR_UNREF;
3352  	else if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
3353  		type = BPF_KPTR_REF;
3354  	else if (!strcmp("percpu_kptr", __btf_name_by_offset(btf, t->name_off)))
3355  		type = BPF_KPTR_PERCPU;
3356  	else if (!strcmp("uptr", __btf_name_by_offset(btf, t->name_off)))
3357  		type = BPF_UPTR;
3358  	else
3359  		return -EINVAL;
3360  
3361  	if (!(type & field_mask))
3362  		return BTF_FIELD_IGNORE;
3363  
3364  	/* Get the base type */
3365  	t = btf_type_skip_modifiers(btf, t->type, &res_id);
3366  	/* Only pointer to struct is allowed */
3367  	if (!__btf_type_is_struct(t))
3368  		return -EINVAL;
3369  
3370  	info->type = type;
3371  	info->off = off;
3372  	info->kptr.type_id = res_id;
3373  	return BTF_FIELD_FOUND;
3374  }
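
/* Illustrative: in BPF program source, the type tags matched above are
 * usually attached with the __kptr family of macros from libbpf's
 * bpf_helpers.h, e.g. __kptr expands to
 * __attribute__((btf_type_tag("kptr"))):
 *
 *	struct map_value {
 *		struct task_struct __kptr *task;
 *	};
 *
 * For such a member, btf_find_kptr() returns BTF_FIELD_FOUND with
 * info->kptr.type_id resolving to struct task_struct.
 */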
3375  
3376  int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
3377  			   int comp_idx, const char *tag_key, int last_id)
3378  {
3379  	int len = strlen(tag_key);
3380  	int i, n;
3381  
3382  	for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) {
3383  		const struct btf_type *t = btf_type_by_id(btf, i);
3384  
3385  		if (!btf_type_is_decl_tag(t))
3386  			continue;
3387  		if (pt != btf_type_by_id(btf, t->type))
3388  			continue;
3389  		if (btf_type_decl_tag(t)->component_idx != comp_idx)
3390  			continue;
3391  		if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3392  			continue;
3393  		return i;
3394  	}
3395  	return -ENOENT;
3396  }
3397  
3398  const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
3399  				    int comp_idx, const char *tag_key)
3400  {
3401  	const char *value = NULL;
3402  	const struct btf_type *t;
3403  	int len, id;
3404  
3405  	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0);
3406  	if (id < 0)
3407  		return ERR_PTR(id);
3408  
3409  	t = btf_type_by_id(btf, id);
3410  	len = strlen(tag_key);
3411  	value = __btf_name_by_offset(btf, t->name_off) + len;
3412  
3413  	/* Prevent duplicate entries for the same type */
3414  	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
3415  	if (id >= 0)
3416  		return ERR_PTR(-EEXIST);
3417  
3418  	return value;
3419  }
3420  
3421  static int
3422  btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
3423  		    const struct btf_type *t, int comp_idx, u32 off,
3424  		    int sz, struct btf_field_info *info,
3425  		    enum btf_field_type head_type)
3426  {
3427  	const char *node_field_name;
3428  	const char *value_type;
3429  	s32 id;
3430  
3431  	if (!__btf_type_is_struct(t))
3432  		return BTF_FIELD_IGNORE;
3433  	if (t->size != sz)
3434  		return BTF_FIELD_IGNORE;
3435  	value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
3436  	if (IS_ERR(value_type))
3437  		return -EINVAL;
3438  	node_field_name = strstr(value_type, ":");
3439  	if (!node_field_name)
3440  		return -EINVAL;
3441  	value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
3442  	if (!value_type)
3443  		return -ENOMEM;
3444  	id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
3445  	kfree(value_type);
3446  	if (id < 0)
3447  		return id;
3448  	node_field_name++;
3449  	if (str_is_empty(node_field_name))
3450  		return -EINVAL;
3451  	info->type = head_type;
3452  	info->off = off;
3453  	info->graph_root.value_btf_id = id;
3454  	info->graph_root.node_name = node_field_name;
3455  	return BTF_FIELD_FOUND;
3456  }
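
/* Illustrative: the "contains:" decl tag parsed above is typically
 * produced by the __contains() macro from the selftests'
 * bpf_experimental.h, which expands to
 * __attribute__((btf_decl_tag("contains:" #name ":" #node))):
 *
 *	struct elem {
 *		long data;
 *		struct bpf_list_node node;
 *	};
 *	struct map_value {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(elem, node);
 *	};
 *
 * Here value_type parses as "elem" and node_field_name as "node".
 */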
3457  
3458  #define field_mask_test_name(field_type, field_type_str) \
3459  	if (field_mask & field_type && !strcmp(name, field_type_str)) { \
3460  		type = field_type;					\
3461  		goto end;						\
3462  	}
3463  
3464  static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
3465  			      u32 field_mask, u32 *seen_mask,
3466  			      int *align, int *sz)
3467  {
3468  	int type = 0;
3469  	const char *name = __btf_name_by_offset(btf, var_type->name_off);
3470  
3471  	if (field_mask & BPF_SPIN_LOCK) {
3472  		if (!strcmp(name, "bpf_spin_lock")) {
3473  			if (*seen_mask & BPF_SPIN_LOCK)
3474  				return -E2BIG;
3475  			*seen_mask |= BPF_SPIN_LOCK;
3476  			type = BPF_SPIN_LOCK;
3477  			goto end;
3478  		}
3479  	}
3480  	if (field_mask & BPF_TIMER) {
3481  		if (!strcmp(name, "bpf_timer")) {
3482  			if (*seen_mask & BPF_TIMER)
3483  				return -E2BIG;
3484  			*seen_mask |= BPF_TIMER;
3485  			type = BPF_TIMER;
3486  			goto end;
3487  		}
3488  	}
3489  	if (field_mask & BPF_WORKQUEUE) {
3490  		if (!strcmp(name, "bpf_wq")) {
3491  			if (*seen_mask & BPF_WORKQUEUE)
3492  				return -E2BIG;
3493  			*seen_mask |= BPF_WORKQUEUE;
3494  			type = BPF_WORKQUEUE;
3495  			goto end;
3496  		}
3497  	}
3498  	field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
3499  	field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
3500  	field_mask_test_name(BPF_RB_ROOT,   "bpf_rb_root");
3501  	field_mask_test_name(BPF_RB_NODE,   "bpf_rb_node");
3502  	field_mask_test_name(BPF_REFCOUNT,  "bpf_refcount");
3503  
3504  	/* Only return BPF_KPTR when all other types with matchable names fail */
3505  	if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
3506  		type = BPF_KPTR_REF;
3507  		goto end;
3508  	}
3509  	return 0;
3510  end:
3511  	*sz = btf_field_type_size(type);
3512  	*align = btf_field_type_align(type);
3513  	return type;
3514  }
3515  
3516  #undef field_mask_test_name
3517  
3518  /* Repeat a number of fields a specified number of times.
3519   *
3520   * Copy the fields starting from the first field and repeat them
3521   * repeat_cnt times. The fields are repeated by increasing the offset
3522   * of each field by
3523   *   (i + 1) * elem_size
3524   * where i is the repeat index and elem_size is the size of an element.
3525   */
3526  static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
3527  			     u32 field_cnt, u32 repeat_cnt, u32 elem_size)
3528  {
3529  	u32 i, j;
3530  	u32 cur;
3531  
3532  	/* Refuse to repeat field kinds that must not be repeated. */
3533  	for (i = 0; i < field_cnt; i++) {
3534  		switch (info[i].type) {
3535  		case BPF_KPTR_UNREF:
3536  		case BPF_KPTR_REF:
3537  		case BPF_KPTR_PERCPU:
3538  		case BPF_UPTR:
3539  		case BPF_LIST_HEAD:
3540  		case BPF_RB_ROOT:
3541  			break;
3542  		default:
3543  			return -EINVAL;
3544  		}
3545  	}
3546  
3547  	/* Both struct size and variable size have type u32, so
3548  	 * the multiplication will not overflow.
3549  	 */
3550  	if (field_cnt * (repeat_cnt + 1) > info_cnt)
3551  		return -E2BIG;
3552  
3553  	cur = field_cnt;
3554  	for (i = 0; i < repeat_cnt; i++) {
3555  		memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
3556  		for (j = 0; j < field_cnt; j++)
3557  			info[cur++].off += (i + 1) * elem_size;
3558  	}
3559  
3560  	return 0;
3561  }
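
/* Illustrative: for a map value containing an array such as
 *
 *	struct val { struct task_struct __kptr *t; } arr[3];
 *
 * the kptr found at offset 0 of arr[0] is copied to offsets
 * 1 * sizeof(struct val) and 2 * sizeof(struct val), one copy per
 * remaining element.  Field kinds absent from the switch above
 * (e.g. bpf_spin_lock) may not live in arrays and fail with -EINVAL.
 */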
3562  
3563  static int btf_find_struct_field(const struct btf *btf,
3564  				 const struct btf_type *t, u32 field_mask,
3565  				 struct btf_field_info *info, int info_cnt,
3566  				 u32 level);
3567  
3568  /* Find special fields in the struct type of a field.
3569   *
3570   * This function is used to find fields of special types that are not
3571   * global variables or direct fields of a struct type. It also handles
3572   * the repetition when the struct is the element type of an array.
3573   */
3574  static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
3575  				  u32 off, u32 nelems,
3576  				  u32 field_mask, struct btf_field_info *info,
3577  				  int info_cnt, u32 level)
3578  {
3579  	int ret, err, i;
3580  
3581  	level++;
3582  	if (level >= MAX_RESOLVE_DEPTH)
3583  		return -E2BIG;
3584  
3585  	ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level);
3586  
3587  	if (ret <= 0)
3588  		return ret;
3589  
3590  	/* Shift the offsets of the nested struct fields to the offsets
3591  	 * relative to the container.
3592  	 */
3593  	for (i = 0; i < ret; i++)
3594  		info[i].off += off;
3595  
3596  	if (nelems > 1) {
3597  		err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3598  		if (err == 0)
3599  			ret *= nelems;
3600  		else
3601  			ret = err;
3602  	}
3603  
3604  	return ret;
3605  }
3606  
3607  static int btf_find_field_one(const struct btf *btf,
3608  			      const struct btf_type *var,
3609  			      const struct btf_type *var_type,
3610  			      int var_idx,
3611  			      u32 off, u32 expected_size,
3612  			      u32 field_mask, u32 *seen_mask,
3613  			      struct btf_field_info *info, int info_cnt,
3614  			      u32 level)
3615  {
3616  	int ret, align, sz, field_type;
3617  	struct btf_field_info tmp;
3618  	const struct btf_array *array;
3619  	u32 i, nelems = 1;
3620  
3621  	/* Walk into array types to find the element type and the number of
3622  	 * elements in the (flattened) array.
3623  	 */
3624  	for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
3625  		array = btf_array(var_type);
3626  		nelems *= array->nelems;
3627  		var_type = btf_type_by_id(btf, array->type);
3628  	}
3629  	if (i == MAX_RESOLVE_DEPTH)
3630  		return -E2BIG;
3631  	if (nelems == 0)
3632  		return 0;
3633  
3634  	field_type = btf_get_field_type(btf, var_type,
3635  					field_mask, seen_mask, &align, &sz);
3636  	/* Look into variables of struct types */
3637  	if (!field_type && __btf_type_is_struct(var_type)) {
3638  		sz = var_type->size;
3639  		if (expected_size && expected_size != sz * nelems)
3640  			return 0;
3641  		ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
3642  					     &info[0], info_cnt, level);
3643  		return ret;
3644  	}
3645  
3646  	if (field_type == 0)
3647  		return 0;
3648  	if (field_type < 0)
3649  		return field_type;
3650  
3651  	if (expected_size && expected_size != sz * nelems)
3652  		return 0;
3653  	if (off % align)
3654  		return 0;
3655  
3656  	switch (field_type) {
3657  	case BPF_SPIN_LOCK:
3658  	case BPF_TIMER:
3659  	case BPF_WORKQUEUE:
3660  	case BPF_LIST_NODE:
3661  	case BPF_RB_NODE:
3662  	case BPF_REFCOUNT:
3663  		ret = btf_find_struct(btf, var_type, off, sz, field_type,
3664  				      info_cnt ? &info[0] : &tmp);
3665  		if (ret < 0)
3666  			return ret;
3667  		break;
3668  	case BPF_KPTR_UNREF:
3669  	case BPF_KPTR_REF:
3670  	case BPF_KPTR_PERCPU:
3671  	case BPF_UPTR:
3672  		ret = btf_find_kptr(btf, var_type, off, sz,
3673  				    info_cnt ? &info[0] : &tmp, field_mask);
3674  		if (ret < 0)
3675  			return ret;
3676  		break;
3677  	case BPF_LIST_HEAD:
3678  	case BPF_RB_ROOT:
3679  		ret = btf_find_graph_root(btf, var, var_type,
3680  					  var_idx, off, sz,
3681  					  info_cnt ? &info[0] : &tmp,
3682  					  field_type);
3683  		if (ret < 0)
3684  			return ret;
3685  		break;
3686  	default:
3687  		return -EFAULT;
3688  	}
3689  
3690  	if (ret == BTF_FIELD_IGNORE)
3691  		return 0;
3692  	if (!info_cnt)
3693  		return -E2BIG;
3694  	if (nelems > 1) {
3695  		ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3696  		if (ret < 0)
3697  			return ret;
3698  	}
3699  	return nelems;
3700  }
3701  
3702  static int btf_find_struct_field(const struct btf *btf,
3703  				 const struct btf_type *t, u32 field_mask,
3704  				 struct btf_field_info *info, int info_cnt,
3705  				 u32 level)
3706  {
3707  	int ret, idx = 0;
3708  	const struct btf_member *member;
3709  	u32 i, off, seen_mask = 0;
3710  
3711  	for_each_member(i, t, member) {
3712  		const struct btf_type *member_type = btf_type_by_id(btf,
3713  								    member->type);
3714  
3715  		off = __btf_member_bit_offset(t, member);
3716  		if (off % 8)
3717  			/* valid C code cannot generate such BTF */
3718  			return -EINVAL;
3719  		off /= 8;
3720  
3721  		ret = btf_find_field_one(btf, t, member_type, i,
3722  					 off, 0,
3723  					 field_mask, &seen_mask,
3724  					 &info[idx], info_cnt - idx, level);
3725  		if (ret < 0)
3726  			return ret;
3727  		idx += ret;
3728  	}
3729  	return idx;
3730  }
3731  
3732  static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3733  				u32 field_mask, struct btf_field_info *info,
3734  				int info_cnt, u32 level)
3735  {
3736  	int ret, idx = 0;
3737  	const struct btf_var_secinfo *vsi;
3738  	u32 i, off, seen_mask = 0;
3739  
3740  	for_each_vsi(i, t, vsi) {
3741  		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3742  		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3743  
3744  		off = vsi->offset;
3745  		ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
3746  					 field_mask, &seen_mask,
3747  					 &info[idx], info_cnt - idx,
3748  					 level);
3749  		if (ret < 0)
3750  			return ret;
3751  		idx += ret;
3752  	}
3753  	return idx;
3754  }
3755  
3756  static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3757  			  u32 field_mask, struct btf_field_info *info,
3758  			  int info_cnt)
3759  {
3760  	if (__btf_type_is_struct(t))
3761  		return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
3762  	else if (btf_type_is_datasec(t))
3763  		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
3764  	return -EINVAL;
3765  }
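
/* btf_find_field() is the common entry point for locating special
 * fields: a plain struct is a BTF-defined map's value type, while a
 * DATASEC covers global variables (the ".data"/".bss" style sections
 * that libbpf exposes as maps).
 */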
3766  
3767  /* Callers have to ensure the lifetime of btf if it is program BTF */
3768  static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
3769  			  struct btf_field_info *info)
3770  {
3771  	struct module *mod = NULL;
3772  	const struct btf_type *t;
3773  	/* If a matching btf type is found in kernel or module BTFs, kptr_btf
3774  	 * is that BTF, otherwise it's the program BTF.
3775  	 */
3776  	struct btf *kptr_btf;
3777  	int ret;
3778  	s32 id;
3779  
3780  	/* Find type in map BTF, and use it to look up the matching type
3781  	 * in vmlinux or module BTFs, by name and kind.
3782  	 */
3783  	t = btf_type_by_id(btf, info->kptr.type_id);
3784  	id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3785  			     &kptr_btf);
3786  	if (id == -ENOENT) {
3787  		/* btf_parse_kptr should only be called w/ btf = program BTF */
3788  		WARN_ON_ONCE(btf_is_kernel(btf));
3789  
3790  		/* Type exists only in program BTF. Assume that it's a MEM_ALLOC
3791  		 * kptr allocated via bpf_obj_new
3792  		 */
3793  		field->kptr.dtor = NULL;
3794  		id = info->kptr.type_id;
3795  		kptr_btf = (struct btf *)btf;
3796  		goto found_dtor;
3797  	}
3798  	if (id < 0)
3799  		return id;
3800  
3801  	/* Find and stash the function pointer for the destruction function that
3802  	 * needs to be eventually invoked from the map free path.
3803  	 */
3804  	if (info->type == BPF_KPTR_REF) {
3805  		const struct btf_type *dtor_func;
3806  		const char *dtor_func_name;
3807  		unsigned long addr;
3808  		s32 dtor_btf_id;
3809  
3810  		/* This call also serves as a whitelist of allowed objects that
3811  		 * can be used as a referenced pointer and be stored in a map at
3812  		 * the same time.
3813  		 */
3814  		dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
3815  		if (dtor_btf_id < 0) {
3816  			ret = dtor_btf_id;
3817  			goto end_btf;
3818  		}
3819  
3820  		dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id);
3821  		if (!dtor_func) {
3822  			ret = -ENOENT;
3823  			goto end_btf;
3824  		}
3825  
3826  		if (btf_is_module(kptr_btf)) {
3827  			mod = btf_try_get_module(kptr_btf);
3828  			if (!mod) {
3829  				ret = -ENXIO;
3830  				goto end_btf;
3831  			}
3832  		}
3833  
3834  		/* We already verified dtor_func to be btf_type_is_func
3835  		 * in register_btf_id_dtor_kfuncs.
3836  		 */
3837  		dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
3838  		addr = kallsyms_lookup_name(dtor_func_name);
3839  		if (!addr) {
3840  			ret = -EINVAL;
3841  			goto end_mod;
3842  		}
3843  		field->kptr.dtor = (void *)addr;
3844  	}
3845  
3846  found_dtor:
3847  	field->kptr.btf_id = id;
3848  	field->kptr.btf = kptr_btf;
3849  	field->kptr.module = mod;
3850  	return 0;
3851  end_mod:
3852  	module_put(mod);
3853  end_btf:
3854  	btf_put(kptr_btf);
3855  	return ret;
3856  }
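
/* Destructors become known through register_btf_id_dtor_kfuncs(),
 * which pairs a struct's BTF id with the BTF id of its release kfunc
 * (the kernel registers a small set in kernel/bpf/helpers.c).  Only
 * types present in that table can be stored as BPF_KPTR_REF.
 */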
3857  
3858  static int btf_parse_graph_root(const struct btf *btf,
3859  				struct btf_field *field,
3860  				struct btf_field_info *info,
3861  				const char *node_type_name,
3862  				size_t node_type_align)
3863  {
3864  	const struct btf_type *t, *n = NULL;
3865  	const struct btf_member *member;
3866  	u32 offset;
3867  	int i;
3868  
3869  	t = btf_type_by_id(btf, info->graph_root.value_btf_id);
3870  	/* We've already checked that value_btf_id is a struct type. We
3871  	 * just need to figure out the offset of the list_node, and
3872  	 * verify its type.
3873  	 */
3874  	for_each_member(i, t, member) {
3875  		if (strcmp(info->graph_root.node_name,
3876  			   __btf_name_by_offset(btf, member->name_off)))
3877  			continue;
3878  		/* Invalid BTF, two members with the same name */
3879  		if (n)
3880  			return -EINVAL;
3881  		n = btf_type_by_id(btf, member->type);
3882  		if (!__btf_type_is_struct(n))
3883  			return -EINVAL;
3884  		if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
3885  			return -EINVAL;
3886  		offset = __btf_member_bit_offset(n, member);
3887  		if (offset % 8)
3888  			return -EINVAL;
3889  		offset /= 8;
3890  		if (offset % node_type_align)
3891  			return -EINVAL;
3892  
3893  		field->graph_root.btf = (struct btf *)btf;
3894  		field->graph_root.value_btf_id = info->graph_root.value_btf_id;
3895  		field->graph_root.node_offset = offset;
3896  	}
3897  	if (!n)
3898  		return -ENOENT;
3899  	return 0;
3900  }
3901  
3902  static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
3903  			       struct btf_field_info *info)
3904  {
3905  	return btf_parse_graph_root(btf, field, info, "bpf_list_node",
3906  					    __alignof__(struct bpf_list_node));
3907  }
3908  
3909  static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
3910  			     struct btf_field_info *info)
3911  {
3912  	return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
3913  					    __alignof__(struct bpf_rb_node));
3914  }
3915  
3916  static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
3917  {
3918  	const struct btf_field *a = (const struct btf_field *)_a;
3919  	const struct btf_field *b = (const struct btf_field *)_b;
3920  
3921  	if (a->offset < b->offset)
3922  		return -1;
3923  	else if (a->offset > b->offset)
3924  		return 1;
3925  	return 0;
3926  }
3927  
3928  struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
3929  				    u32 field_mask, u32 value_size)
3930  {
3931  	struct btf_field_info info_arr[BTF_FIELDS_MAX];
3932  	u32 next_off = 0, field_type_size;
3933  	struct btf_record *rec;
3934  	int ret, i, cnt;
3935  
3936  	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
3937  	if (ret < 0)
3938  		return ERR_PTR(ret);
3939  	if (!ret)
3940  		return NULL;
3941  
3942  	cnt = ret;
3943  	/* This needs to be kzalloc to zero out padding and unused fields, see
3944  	 * comment in btf_record_equal.
3945  	 */
3946  	rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN);
3947  	if (!rec)
3948  		return ERR_PTR(-ENOMEM);
3949  
3950  	rec->spin_lock_off = -EINVAL;
3951  	rec->timer_off = -EINVAL;
3952  	rec->wq_off = -EINVAL;
3953  	rec->refcount_off = -EINVAL;
3954  	for (i = 0; i < cnt; i++) {
3955  		field_type_size = btf_field_type_size(info_arr[i].type);
3956  		if (info_arr[i].off + field_type_size > value_size) {
3957  			WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
3958  			ret = -EFAULT;
3959  			goto end;
3960  		}
3961  		if (info_arr[i].off < next_off) {
3962  			ret = -EEXIST;
3963  			goto end;
3964  		}
3965  		next_off = info_arr[i].off + field_type_size;
3966  
3967  		rec->field_mask |= info_arr[i].type;
3968  		rec->fields[i].offset = info_arr[i].off;
3969  		rec->fields[i].type = info_arr[i].type;
3970  		rec->fields[i].size = field_type_size;
3971  
3972  		switch (info_arr[i].type) {
3973  		case BPF_SPIN_LOCK:
3974  			WARN_ON_ONCE(rec->spin_lock_off >= 0);
3975  			/* Cache offset for faster lookup at runtime */
3976  			rec->spin_lock_off = rec->fields[i].offset;
3977  			break;
3978  		case BPF_TIMER:
3979  			WARN_ON_ONCE(rec->timer_off >= 0);
3980  			/* Cache offset for faster lookup at runtime */
3981  			rec->timer_off = rec->fields[i].offset;
3982  			break;
3983  		case BPF_WORKQUEUE:
3984  			WARN_ON_ONCE(rec->wq_off >= 0);
3985  			/* Cache offset for faster lookup at runtime */
3986  			rec->wq_off = rec->fields[i].offset;
3987  			break;
3988  		case BPF_REFCOUNT:
3989  			WARN_ON_ONCE(rec->refcount_off >= 0);
3990  			/* Cache offset for faster lookup at runtime */
3991  			rec->refcount_off = rec->fields[i].offset;
3992  			break;
3993  		case BPF_KPTR_UNREF:
3994  		case BPF_KPTR_REF:
3995  		case BPF_KPTR_PERCPU:
3996  		case BPF_UPTR:
3997  			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
3998  			if (ret < 0)
3999  				goto end;
4000  			break;
4001  		case BPF_LIST_HEAD:
4002  			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4003  			if (ret < 0)
4004  				goto end;
4005  			break;
4006  		case BPF_RB_ROOT:
4007  			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4008  			if (ret < 0)
4009  				goto end;
4010  			break;
4011  		case BPF_LIST_NODE:
4012  		case BPF_RB_NODE:
4013  			break;
4014  		default:
4015  			ret = -EFAULT;
4016  			goto end;
4017  		}
4018  		rec->cnt++;
4019  	}
4020  
4021  	/* bpf_{list_head, rb_root} require bpf_spin_lock */
4022  	if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
4023  	     btf_record_has_field(rec, BPF_RB_ROOT)) && rec->spin_lock_off < 0) {
4024  		ret = -EINVAL;
4025  		goto end;
4026  	}
4027  
4028  	if (rec->refcount_off < 0 &&
4029  	    btf_record_has_field(rec, BPF_LIST_NODE) &&
4030  	    btf_record_has_field(rec, BPF_RB_NODE)) {
4031  		ret = -EINVAL;
4032  		goto end;
4033  	}
4034  
4035  	sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
4036  	       NULL, rec);
4037  
4038  	return rec;
4039  end:
4040  	btf_record_free(rec);
4041  	return ERR_PTR(ret);
4042  }
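
/* Sketch of the typical caller flow (field names as in struct bpf_map,
 * error handling abbreviated):
 *
 *	map->record = btf_parse_fields(btf, value_type,
 *				       BPF_SPIN_LOCK | BPF_TIMER |
 *				       BPF_KPTR | BPF_LIST_HEAD,
 *				       map->value_size);
 *	if (IS_ERR(map->record))
 *		return PTR_ERR(map->record);
 *
 * A NULL return means "no special fields"; errors come back as
 * ERR_PTR() values.
 */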
4043  
4044  int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
4045  {
4046  	int i;
4047  
4048  	/* There are three types that signify ownership of some other type:
4049  	 *  kptr_ref, bpf_list_head, bpf_rb_root.
4050  	 * kptr_ref only supports storing kernel types, which can't store
4051  	 * references to program allocated local types.
4052  	 *
4053  	 * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
4054  	 * does not form cycles.
4055  	 */
4056  	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4057  		return 0;
4058  	for (i = 0; i < rec->cnt; i++) {
4059  		struct btf_struct_meta *meta;
4060  		const struct btf_type *t;
4061  		u32 btf_id;
4062  
4063  		if (rec->fields[i].type == BPF_UPTR) {
4064  			/* The uptr only supports pinning one page and cannot
4065  			 * point to a kernel struct
4066  			 */
4067  			if (btf_is_kernel(rec->fields[i].kptr.btf))
4068  				return -EINVAL;
4069  			t = btf_type_by_id(rec->fields[i].kptr.btf,
4070  					   rec->fields[i].kptr.btf_id);
4071  			if (!t->size)
4072  				return -EINVAL;
4073  			if (t->size > PAGE_SIZE)
4074  				return -E2BIG;
4075  			continue;
4076  		}
4077  
4078  		if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
4079  			continue;
4080  		btf_id = rec->fields[i].graph_root.value_btf_id;
4081  		meta = btf_find_struct_meta(btf, btf_id);
4082  		if (!meta)
4083  			return -EFAULT;
4084  		rec->fields[i].graph_root.value_rec = meta->record;
4085  
4086  		/* We need to set value_rec for all root types, but no need
4087  		 * to check ownership cycle for a type unless it's also a
4088  		 * node type.
4089  		 */
4090  		if (!(rec->field_mask & BPF_GRAPH_NODE))
4091  			continue;
4092  
4093  		/* We need to ensure ownership acyclicity among all types. The
4094  		 * proper way to do it would be to topologically sort all BTF
4095  		 * IDs based on the ownership edges, since there can be multiple
4096  		 * bpf_{list_head,rb_node} in a type. Instead, we use the
4097  		 * following reasoning:
4098  		 *
4099  		 * - A type can only be owned by another type in user BTF if it
4100  		 *   has a bpf_{list,rb}_node. Let's call these node types.
4101  		 * - A type can only _own_ another type in user BTF if it has a
4102  		 *   bpf_{list_head,rb_root}. Let's call these root types.
4103  		 *
4104  		 * We ensure that if a type is both a root and a node, its
4105  		 * element types cannot be root types.
4106  		 *
4107  		 * To ensure acyclicity:
4108  		 *
4109  		 * When A is a root type but not a node, its ownership
4110  		 * chain can be:
4111  		 *	A -> B -> C
4112  		 * Where:
4113  		 * - A is a root, e.g. it has bpf_rb_root.
4114  		 * - B is both a root and a node, e.g. it has bpf_rb_node and
4115  		 *   bpf_list_head.
4116  		 * - C is only a node, e.g. it has bpf_list_node.
4117  		 *
4118  		 * When A is both a root and node, some other type already
4119  		 * owns it in the BTF domain, hence it cannot own
4120  		 * another root type through any of the ownership edges.
4121  		 *	A -> B
4122  		 * Where:
4123  		 * - A is both a root and a node.
4124  		 * - B is only a node.
4125  		 */
4126  		if (meta->record->field_mask & BPF_GRAPH_ROOT)
4127  			return -ELOOP;
4128  	}
4129  	return 0;
4130  }
4131  
4132  static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
4133  			      u32 type_id, void *data, u8 bits_offset,
4134  			      struct btf_show *show)
4135  {
4136  	const struct btf_member *member;
4137  	void *safe_data;
4138  	u32 i;
4139  
4140  	safe_data = btf_show_start_struct_type(show, t, type_id, data);
4141  	if (!safe_data)
4142  		return;
4143  
4144  	for_each_member(i, t, member) {
4145  		const struct btf_type *member_type = btf_type_by_id(btf,
4146  								member->type);
4147  		const struct btf_kind_operations *ops;
4148  		u32 member_offset, bitfield_size;
4149  		u32 bytes_offset;
4150  		u8 bits8_offset;
4151  
4152  		btf_show_start_member(show, member);
4153  
4154  		member_offset = __btf_member_bit_offset(t, member);
4155  		bitfield_size = __btf_member_bitfield_size(t, member);
4156  		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
4157  		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
4158  		if (bitfield_size) {
4159  			safe_data = btf_show_start_type(show, member_type,
4160  							member->type,
4161  							data + bytes_offset);
4162  			if (safe_data)
4163  				btf_bitfield_show(safe_data,
4164  						  bits8_offset,
4165  						  bitfield_size, show);
4166  			btf_show_end_type(show);
4167  		} else {
4168  			ops = btf_type_ops(member_type);
4169  			ops->show(btf, member_type, member->type,
4170  				  data + bytes_offset, bits8_offset, show);
4171  		}
4172  
4173  		btf_show_end_member(show);
4174  	}
4175  
4176  	btf_show_end_struct_type(show);
4177  }
4178  
4179  static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
4180  			    u32 type_id, void *data, u8 bits_offset,
4181  			    struct btf_show *show)
4182  {
4183  	const struct btf_member *m = show->state.member;
4184  
4185  	/*
4186  	 * First check if any members would be shown (are non-zero).
4187  	 * See comments above "struct btf_show" definition for more
4188  	 * details on how this works at a high-level.
4189  	 */
4190  	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
4191  		if (!show->state.depth_check) {
4192  			show->state.depth_check = show->state.depth + 1;
4193  			show->state.depth_to_show = 0;
4194  		}
4195  		__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4196  		/* Restore saved member data here */
4197  		show->state.member = m;
4198  		if (show->state.depth_check != show->state.depth + 1)
4199  			return;
4200  		show->state.depth_check = 0;
4201  
4202  		if (show->state.depth_to_show <= show->state.depth)
4203  			return;
4204  		/*
4205  		 * Reaching here indicates we have recursed and found
4206  		 * non-zero child values.
4207  		 */
4208  	}
4209  
4210  	__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4211  }
4212  
4213  static const struct btf_kind_operations struct_ops = {
4214  	.check_meta = btf_struct_check_meta,
4215  	.resolve = btf_struct_resolve,
4216  	.check_member = btf_struct_check_member,
4217  	.check_kflag_member = btf_generic_check_kflag_member,
4218  	.log_details = btf_struct_log,
4219  	.show = btf_struct_show,
4220  };
4221  
4222  static int btf_enum_check_member(struct btf_verifier_env *env,
4223  				 const struct btf_type *struct_type,
4224  				 const struct btf_member *member,
4225  				 const struct btf_type *member_type)
4226  {
4227  	u32 struct_bits_off = member->offset;
4228  	u32 struct_size, bytes_offset;
4229  
4230  	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4231  		btf_verifier_log_member(env, struct_type, member,
4232  					"Member is not byte aligned");
4233  		return -EINVAL;
4234  	}
4235  
4236  	struct_size = struct_type->size;
4237  	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
4238  	if (struct_size - bytes_offset < member_type->size) {
4239  		btf_verifier_log_member(env, struct_type, member,
4240  					"Member exceeds struct_size");
4241  		return -EINVAL;
4242  	}
4243  
4244  	return 0;
4245  }
4246  
4247  static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
4248  				       const struct btf_type *struct_type,
4249  				       const struct btf_member *member,
4250  				       const struct btf_type *member_type)
4251  {
4252  	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
4253  	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
4254  
4255  	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
4256  	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
4257  	if (!nr_bits) {
4258  		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4259  			btf_verifier_log_member(env, struct_type, member,
4260  						"Member is not byte aligned");
4261  			return -EINVAL;
4262  		}
4263  
4264  		nr_bits = int_bitsize;
4265  	} else if (nr_bits > int_bitsize) {
4266  		btf_verifier_log_member(env, struct_type, member,
4267  					"Invalid member bitfield_size");
4268  		return -EINVAL;
4269  	}
4270  
4271  	struct_size = struct_type->size;
4272  	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
4273  	if (struct_size < bytes_end) {
4274  		btf_verifier_log_member(env, struct_type, member,
4275  					"Member exceeds struct_size");
4276  		return -EINVAL;
4277  	}
4278  
4279  	return 0;
4280  }
4281  
4282  static s32 btf_enum_check_meta(struct btf_verifier_env *env,
4283  			       const struct btf_type *t,
4284  			       u32 meta_left)
4285  {
4286  	const struct btf_enum *enums = btf_type_enum(t);
4287  	struct btf *btf = env->btf;
4288  	const char *fmt_str;
4289  	u16 i, nr_enums;
4290  	u32 meta_needed;
4291  
4292  	nr_enums = btf_type_vlen(t);
4293  	meta_needed = nr_enums * sizeof(*enums);
4294  
4295  	if (meta_left < meta_needed) {
4296  		btf_verifier_log_basic(env, t,
4297  				       "meta_left:%u meta_needed:%u",
4298  				       meta_left, meta_needed);
4299  		return -EINVAL;
4300  	}
4301  
4302  	if (t->size > 8 || !is_power_of_2(t->size)) {
4303  		btf_verifier_log_type(env, t, "Unexpected size");
4304  		return -EINVAL;
4305  	}
4306  
4307  	/* An enum type either has no name or a valid one */
4308  	if (t->name_off &&
4309  	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4310  		btf_verifier_log_type(env, t, "Invalid name");
4311  		return -EINVAL;
4312  	}
4313  
4314  	btf_verifier_log_type(env, t, NULL);
4315  
4316  	for (i = 0; i < nr_enums; i++) {
4317  		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4318  			btf_verifier_log(env, "\tInvalid name_offset:%u",
4319  					 enums[i].name_off);
4320  			return -EINVAL;
4321  		}
4322  
4323  		/* enum member must have a valid name */
4324  		if (!enums[i].name_off ||
4325  		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4326  			btf_verifier_log_type(env, t, "Invalid name");
4327  			return -EINVAL;
4328  		}
4329  
4330  		if (env->log.level == BPF_LOG_KERNEL)
4331  			continue;
4332  		fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
4333  		btf_verifier_log(env, fmt_str,
4334  				 __btf_name_by_offset(btf, enums[i].name_off),
4335  				 enums[i].val);
4336  	}
4337  
4338  	return meta_needed;
4339  }
4340  
4341  static void btf_enum_log(struct btf_verifier_env *env,
4342  			 const struct btf_type *t)
4343  {
4344  	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4345  }
4346  
4347  static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
4348  			  u32 type_id, void *data, u8 bits_offset,
4349  			  struct btf_show *show)
4350  {
4351  	const struct btf_enum *enums = btf_type_enum(t);
4352  	u32 i, nr_enums = btf_type_vlen(t);
4353  	void *safe_data;
4354  	int v;
4355  
4356  	safe_data = btf_show_start_type(show, t, type_id, data);
4357  	if (!safe_data)
4358  		return;
4359  
4360  	v = *(int *)safe_data;
4361  
4362  	for (i = 0; i < nr_enums; i++) {
4363  		if (v != enums[i].val)
4364  			continue;
4365  
4366  		btf_show_type_value(show, "%s",
4367  				    __btf_name_by_offset(btf,
4368  							 enums[i].name_off));
4369  
4370  		btf_show_end_type(show);
4371  		return;
4372  	}
4373  
4374  	if (btf_type_kflag(t))
4375  		btf_show_type_value(show, "%d", v);
4376  	else
4377  		btf_show_type_value(show, "%u", v);
4378  	btf_show_end_type(show);
4379  }
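
/* Illustrative: for enum { RUNNING = 0, SLEEPING = 1 }, a value of 1
 * is shown as "SLEEPING".  A value with no matching enumerator (say 7)
 * falls through to the numeric branch and is printed signed or
 * unsigned depending on the kind_flag.
 */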
4380  
4381  static const struct btf_kind_operations enum_ops = {
4382  	.check_meta = btf_enum_check_meta,
4383  	.resolve = btf_df_resolve,
4384  	.check_member = btf_enum_check_member,
4385  	.check_kflag_member = btf_enum_check_kflag_member,
4386  	.log_details = btf_enum_log,
4387  	.show = btf_enum_show,
4388  };
4389  
4390  static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
4391  				 const struct btf_type *t,
4392  				 u32 meta_left)
4393  {
4394  	const struct btf_enum64 *enums = btf_type_enum64(t);
4395  	struct btf *btf = env->btf;
4396  	const char *fmt_str;
4397  	u16 i, nr_enums;
4398  	u32 meta_needed;
4399  
4400  	nr_enums = btf_type_vlen(t);
4401  	meta_needed = nr_enums * sizeof(*enums);
4402  
4403  	if (meta_left < meta_needed) {
4404  		btf_verifier_log_basic(env, t,
4405  				       "meta_left:%u meta_needed:%u",
4406  				       meta_left, meta_needed);
4407  		return -EINVAL;
4408  	}
4409  
4410  	if (t->size > 8 || !is_power_of_2(t->size)) {
4411  		btf_verifier_log_type(env, t, "Unexpected size");
4412  		return -EINVAL;
4413  	}
4414  
4415  	/* An enum type either has no name or a valid one */
4416  	if (t->name_off &&
4417  	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4418  		btf_verifier_log_type(env, t, "Invalid name");
4419  		return -EINVAL;
4420  	}
4421  
4422  	btf_verifier_log_type(env, t, NULL);
4423  
4424  	for (i = 0; i < nr_enums; i++) {
4425  		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4426  			btf_verifier_log(env, "\tInvalid name_offset:%u",
4427  					 enums[i].name_off);
4428  			return -EINVAL;
4429  		}
4430  
4431  		/* enum member must have a valid name */
4432  		if (!enums[i].name_off ||
4433  		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4434  			btf_verifier_log_type(env, t, "Invalid name");
4435  			return -EINVAL;
4436  		}
4437  
4438  		if (env->log.level == BPF_LOG_KERNEL)
4439  			continue;
4440  
4441  		fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
4442  		btf_verifier_log(env, fmt_str,
4443  				 __btf_name_by_offset(btf, enums[i].name_off),
4444  				 btf_enum64_value(enums + i));
4445  	}
4446  
4447  	return meta_needed;
4448  }
4449  
4450  static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
4451  			    u32 type_id, void *data, u8 bits_offset,
4452  			    struct btf_show *show)
4453  {
4454  	const struct btf_enum64 *enums = btf_type_enum64(t);
4455  	u32 i, nr_enums = btf_type_vlen(t);
4456  	void *safe_data;
4457  	s64 v;
4458  
4459  	safe_data = btf_show_start_type(show, t, type_id, data);
4460  	if (!safe_data)
4461  		return;
4462  
4463  	v = *(u64 *)safe_data;
4464  
4465  	for (i = 0; i < nr_enums; i++) {
4466  		if (v != btf_enum64_value(enums + i))
4467  			continue;
4468  
4469  		btf_show_type_value(show, "%s",
4470  				    __btf_name_by_offset(btf,
4471  							 enums[i].name_off));
4472  
4473  		btf_show_end_type(show);
4474  		return;
4475  	}
4476  
4477  	if (btf_type_kflag(t))
4478  		btf_show_type_value(show, "%lld", v);
4479  	else
4480  		btf_show_type_value(show, "%llu", v);
4481  	btf_show_end_type(show);
4482  }
4483  
4484  static const struct btf_kind_operations enum64_ops = {
4485  	.check_meta = btf_enum64_check_meta,
4486  	.resolve = btf_df_resolve,
4487  	.check_member = btf_enum_check_member,
4488  	.check_kflag_member = btf_enum_check_kflag_member,
4489  	.log_details = btf_enum_log,
4490  	.show = btf_enum64_show,
4491  };
4492  
4493  static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
4494  				     const struct btf_type *t,
4495  				     u32 meta_left)
4496  {
4497  	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
4498  
4499  	if (meta_left < meta_needed) {
4500  		btf_verifier_log_basic(env, t,
4501  				       "meta_left:%u meta_needed:%u",
4502  				       meta_left, meta_needed);
4503  		return -EINVAL;
4504  	}
4505  
4506  	if (t->name_off) {
4507  		btf_verifier_log_type(env, t, "Invalid name");
4508  		return -EINVAL;
4509  	}
4510  
4511  	if (btf_type_kflag(t)) {
4512  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4513  		return -EINVAL;
4514  	}
4515  
4516  	btf_verifier_log_type(env, t, NULL);
4517  
4518  	return meta_needed;
4519  }
4520  
4521  static void btf_func_proto_log(struct btf_verifier_env *env,
4522  			       const struct btf_type *t)
4523  {
4524  	const struct btf_param *args = (const struct btf_param *)(t + 1);
4525  	u16 nr_args = btf_type_vlen(t), i;
4526  
4527  	btf_verifier_log(env, "return=%u args=(", t->type);
4528  	if (!nr_args) {
4529  		btf_verifier_log(env, "void");
4530  		goto done;
4531  	}
4532  
4533  	if (nr_args == 1 && !args[0].type) {
4534  		/* Only one vararg */
4535  		btf_verifier_log(env, "vararg");
4536  		goto done;
4537  	}
4538  
4539  	btf_verifier_log(env, "%u %s", args[0].type,
4540  			 __btf_name_by_offset(env->btf,
4541  					      args[0].name_off));
4542  	for (i = 1; i < nr_args - 1; i++)
4543  		btf_verifier_log(env, ", %u %s", args[i].type,
4544  				 __btf_name_by_offset(env->btf,
4545  						      args[i].name_off));
4546  
4547  	if (nr_args > 1) {
4548  		const struct btf_param *last_arg = &args[nr_args - 1];
4549  
4550  		if (last_arg->type)
4551  			btf_verifier_log(env, ", %u %s", last_arg->type,
4552  					 __btf_name_by_offset(env->btf,
4553  							      last_arg->name_off));
4554  		else
4555  			btf_verifier_log(env, ", vararg");
4556  	}
4557  
4558  done:
4559  	btf_verifier_log(env, ")");
4560  }
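
/* Illustrative log output, assuming type_id 1 is "int":
 *
 *	int (*)(void)	    -> "return=1 args=(void)"
 *	int (*)(int a, ...) -> "return=1 args=(1 a, vararg)"
 *
 * An unnamed parameter is printed as "(anon)".
 */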
4561  
4562  static const struct btf_kind_operations func_proto_ops = {
4563  	.check_meta = btf_func_proto_check_meta,
4564  	.resolve = btf_df_resolve,
4565  	/*
4566  	 * BTF_KIND_FUNC_PROTO cannot be directly referred to
4567  	 * by a struct's member.
4568  	 *
4569  	 * It should be a function pointer instead.
4570  	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4571  	 *
4572  	 * Hence, there is no btf_func_check_member().
4573  	 */
4574  	.check_member = btf_df_check_member,
4575  	.check_kflag_member = btf_df_check_kflag_member,
4576  	.log_details = btf_func_proto_log,
4577  	.show = btf_df_show,
4578  };
4579  
4580  static s32 btf_func_check_meta(struct btf_verifier_env *env,
4581  			       const struct btf_type *t,
4582  			       u32 meta_left)
4583  {
4584  	if (!t->name_off ||
4585  	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4586  		btf_verifier_log_type(env, t, "Invalid name");
4587  		return -EINVAL;
4588  	}
4589  
4590  	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
4591  		btf_verifier_log_type(env, t, "Invalid func linkage");
4592  		return -EINVAL;
4593  	}
4594  
4595  	if (btf_type_kflag(t)) {
4596  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4597  		return -EINVAL;
4598  	}
4599  
4600  	btf_verifier_log_type(env, t, NULL);
4601  
4602  	return 0;
4603  }
4604  
4605  static int btf_func_resolve(struct btf_verifier_env *env,
4606  			    const struct resolve_vertex *v)
4607  {
4608  	const struct btf_type *t = v->t;
4609  	u32 next_type_id = t->type;
4610  	int err;
4611  
4612  	err = btf_func_check(env, t);
4613  	if (err)
4614  		return err;
4615  
4616  	env_stack_pop_resolved(env, next_type_id, 0);
4617  	return 0;
4618  }
4619  
4620  static const struct btf_kind_operations func_ops = {
4621  	.check_meta = btf_func_check_meta,
4622  	.resolve = btf_func_resolve,
4623  	.check_member = btf_df_check_member,
4624  	.check_kflag_member = btf_df_check_kflag_member,
4625  	.log_details = btf_ref_type_log,
4626  	.show = btf_df_show,
4627  };
4628  
4629  static s32 btf_var_check_meta(struct btf_verifier_env *env,
4630  			      const struct btf_type *t,
4631  			      u32 meta_left)
4632  {
4633  	const struct btf_var *var;
4634  	u32 meta_needed = sizeof(*var);
4635  
4636  	if (meta_left < meta_needed) {
4637  		btf_verifier_log_basic(env, t,
4638  				       "meta_left:%u meta_needed:%u",
4639  				       meta_left, meta_needed);
4640  		return -EINVAL;
4641  	}
4642  
4643  	if (btf_type_vlen(t)) {
4644  		btf_verifier_log_type(env, t, "vlen != 0");
4645  		return -EINVAL;
4646  	}
4647  
4648  	if (btf_type_kflag(t)) {
4649  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4650  		return -EINVAL;
4651  	}
4652  
4653  	if (!t->name_off ||
4654  	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4655  		btf_verifier_log_type(env, t, "Invalid name");
4656  		return -EINVAL;
4657  	}
4658  
4659  	/* A var cannot be in type void */
4660  	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4661  		btf_verifier_log_type(env, t, "Invalid type_id");
4662  		return -EINVAL;
4663  	}
4664  
4665  	var = btf_type_var(t);
4666  	if (var->linkage != BTF_VAR_STATIC &&
4667  	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4668  		btf_verifier_log_type(env, t, "Linkage not supported");
4669  		return -EINVAL;
4670  	}
4671  
4672  	btf_verifier_log_type(env, t, NULL);
4673  
4674  	return meta_needed;
4675  }
4676  
4677  static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4678  {
4679  	const struct btf_var *var = btf_type_var(t);
4680  
4681  	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4682  }
4683  
4684  static const struct btf_kind_operations var_ops = {
4685  	.check_meta		= btf_var_check_meta,
4686  	.resolve		= btf_var_resolve,
4687  	.check_member		= btf_df_check_member,
4688  	.check_kflag_member	= btf_df_check_kflag_member,
4689  	.log_details		= btf_var_log,
4690  	.show			= btf_var_show,
4691  };
4692  
4693  static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
4694  				  const struct btf_type *t,
4695  				  u32 meta_left)
4696  {
4697  	const struct btf_var_secinfo *vsi;
4698  	u64 last_vsi_end_off = 0, sum = 0;
4699  	u32 i, meta_needed;
4700  
4701  	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
4702  	if (meta_left < meta_needed) {
4703  		btf_verifier_log_basic(env, t,
4704  				       "meta_left:%u meta_needed:%u",
4705  				       meta_left, meta_needed);
4706  		return -EINVAL;
4707  	}
4708  
4709  	if (!t->size) {
4710  		btf_verifier_log_type(env, t, "size == 0");
4711  		return -EINVAL;
4712  	}
4713  
4714  	if (btf_type_kflag(t)) {
4715  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4716  		return -EINVAL;
4717  	}
4718  
4719  	if (!t->name_off ||
4720  	    !btf_name_valid_section(env->btf, t->name_off)) {
4721  		btf_verifier_log_type(env, t, "Invalid name");
4722  		return -EINVAL;
4723  	}
4724  
4725  	btf_verifier_log_type(env, t, NULL);
4726  
4727  	for_each_vsi(i, t, vsi) {
4728  		/* A var cannot be in type void */
4729  		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4730  			btf_verifier_log_vsi(env, t, vsi,
4731  					     "Invalid type_id");
4732  			return -EINVAL;
4733  		}
4734  
4735  		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4736  			btf_verifier_log_vsi(env, t, vsi,
4737  					     "Invalid offset");
4738  			return -EINVAL;
4739  		}
4740  
4741  		if (!vsi->size || vsi->size > t->size) {
4742  			btf_verifier_log_vsi(env, t, vsi,
4743  					     "Invalid size");
4744  			return -EINVAL;
4745  		}
4746  
4747  		last_vsi_end_off = vsi->offset + vsi->size;
4748  		if (last_vsi_end_off > t->size) {
4749  			btf_verifier_log_vsi(env, t, vsi,
4750  					     "Invalid offset+size");
4751  			return -EINVAL;
4752  		}
4753  
4754  		btf_verifier_log_vsi(env, t, vsi, NULL);
4755  		sum += vsi->size;
4756  	}
4757  
4758  	if (t->size < sum) {
4759  		btf_verifier_log_type(env, t, "Invalid btf_info size");
4760  		return -EINVAL;
4761  	}
4762  
4763  	return meta_needed;
4764  }
4765  
4766  static int btf_datasec_resolve(struct btf_verifier_env *env,
4767  			       const struct resolve_vertex *v)
4768  {
4769  	const struct btf_var_secinfo *vsi;
4770  	struct btf *btf = env->btf;
4771  	u16 i;
4772  
4773  	env->resolve_mode = RESOLVE_TBD;
4774  	for_each_vsi_from(i, v->next_member, v->t, vsi) {
4775  		u32 var_type_id = vsi->type, type_id, type_size = 0;
4776  		const struct btf_type *var_type = btf_type_by_id(env->btf,
4777  								 var_type_id);
4778  		if (!var_type || !btf_type_is_var(var_type)) {
4779  			btf_verifier_log_vsi(env, v->t, vsi,
4780  					     "Not a VAR kind member");
4781  			return -EINVAL;
4782  		}
4783  
4784  		if (!env_type_is_resolve_sink(env, var_type) &&
4785  		    !env_type_is_resolved(env, var_type_id)) {
4786  			env_stack_set_next_member(env, i + 1);
4787  			return env_stack_push(env, var_type, var_type_id);
4788  		}
4789  
4790  		type_id = var_type->type;
4791  		if (!btf_type_id_size(btf, &type_id, &type_size)) {
4792  			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4793  			return -EINVAL;
4794  		}
4795  
4796  		if (vsi->size < type_size) {
4797  			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4798  			return -EINVAL;
4799  		}
4800  	}
4801  
4802  	env_stack_pop_resolved(env, 0, 0);
4803  	return 0;
4804  }
4805  
4806  static void btf_datasec_log(struct btf_verifier_env *env,
4807  			    const struct btf_type *t)
4808  {
4809  	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4810  }
4811  
4812  static void btf_datasec_show(const struct btf *btf,
4813  			     const struct btf_type *t, u32 type_id,
4814  			     void *data, u8 bits_offset,
4815  			     struct btf_show *show)
4816  {
4817  	const struct btf_var_secinfo *vsi;
4818  	const struct btf_type *var;
4819  	u32 i;
4820  
4821  	if (!btf_show_start_type(show, t, type_id, data))
4822  		return;
4823  
4824  	btf_show_type_value(show, "section (\"%s\") = {",
4825  			    __btf_name_by_offset(btf, t->name_off));
4826  	for_each_vsi(i, t, vsi) {
4827  		var = btf_type_by_id(btf, vsi->type);
4828  		if (i)
4829  			btf_show(show, ",");
4830  		btf_type_ops(var)->show(btf, var, vsi->type,
4831  					data + vsi->offset, bits_offset, show);
4832  	}
4833  	btf_show_end_type(show);
4834  }
4835  
4836  static const struct btf_kind_operations datasec_ops = {
4837  	.check_meta		= btf_datasec_check_meta,
4838  	.resolve		= btf_datasec_resolve,
4839  	.check_member		= btf_df_check_member,
4840  	.check_kflag_member	= btf_df_check_kflag_member,
4841  	.log_details		= btf_datasec_log,
4842  	.show			= btf_datasec_show,
4843  };
4844  
4845  static s32 btf_float_check_meta(struct btf_verifier_env *env,
4846  				const struct btf_type *t,
4847  				u32 meta_left)
4848  {
4849  	if (btf_type_vlen(t)) {
4850  		btf_verifier_log_type(env, t, "vlen != 0");
4851  		return -EINVAL;
4852  	}
4853  
4854  	if (btf_type_kflag(t)) {
4855  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4856  		return -EINVAL;
4857  	}
4858  
4859  	if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4860  	    t->size != 16) {
4861  		btf_verifier_log_type(env, t, "Invalid type_size");
4862  		return -EINVAL;
4863  	}
4864  
4865  	btf_verifier_log_type(env, t, NULL);
4866  
4867  	return 0;
4868  }
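/* A descriptive note, not derived from this file: the accepted sizes
 * (2, 4, 8, 12 and 16 bytes) match the floating-point types compilers
 * emit in practice: half precision, float, double and the long double
 * variants of the various ABIs.
 */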
4869  
4870  static int btf_float_check_member(struct btf_verifier_env *env,
4871  				  const struct btf_type *struct_type,
4872  				  const struct btf_member *member,
4873  				  const struct btf_type *member_type)
4874  {
4875  	u64 start_offset_bytes;
4876  	u64 end_offset_bytes;
4877  	u64 misalign_bits;
4878  	u64 align_bytes;
4879  	u64 align_bits;
4880  
4881  	/* Different architectures have different alignment requirements, so
4882  	 * here we check only for the reasonable minimum. This way we ensure
4883  	 * that types produced after CO-RE relocation can pass the kernel BTF verifier.
4884  	 */
4885  	align_bytes = min_t(u64, sizeof(void *), member_type->size);
4886  	align_bits = align_bytes * BITS_PER_BYTE;
4887  	div64_u64_rem(member->offset, align_bits, &misalign_bits);
4888  	if (misalign_bits) {
4889  		btf_verifier_log_member(env, struct_type, member,
4890  					"Member is not properly aligned");
4891  		return -EINVAL;
4892  	}
4893  
4894  	start_offset_bytes = member->offset / BITS_PER_BYTE;
4895  	end_offset_bytes = start_offset_bytes + member_type->size;
4896  	if (end_offset_bytes > struct_type->size) {
4897  		btf_verifier_log_member(env, struct_type, member,
4898  					"Member exceeds struct_size");
4899  		return -EINVAL;
4900  	}
4901  
4902  	return 0;
4903  }
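/* Worked example (illustrative): for a 16-byte long double member on a
 * 64-bit kernel, align_bytes = min(sizeof(void *), 16) = 8, so
 * member->offset must be a multiple of 64 bits; a member at bit offset 32
 * fails the div64_u64_rem() check above.
 */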
4904  
4905  static void btf_float_log(struct btf_verifier_env *env,
4906  			  const struct btf_type *t)
4907  {
4908  	btf_verifier_log(env, "size=%u", t->size);
4909  }
4910  
4911  static const struct btf_kind_operations float_ops = {
4912  	.check_meta = btf_float_check_meta,
4913  	.resolve = btf_df_resolve,
4914  	.check_member = btf_float_check_member,
4915  	.check_kflag_member = btf_generic_check_kflag_member,
4916  	.log_details = btf_float_log,
4917  	.show = btf_df_show,
4918  };
4919  
4920  static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
4921  			      const struct btf_type *t,
4922  			      u32 meta_left)
4923  {
4924  	const struct btf_decl_tag *tag;
4925  	u32 meta_needed = sizeof(*tag);
4926  	s32 component_idx;
4927  	const char *value;
4928  
4929  	if (meta_left < meta_needed) {
4930  		btf_verifier_log_basic(env, t,
4931  				       "meta_left:%u meta_needed:%u",
4932  				       meta_left, meta_needed);
4933  		return -EINVAL;
4934  	}
4935  
4936  	value = btf_name_by_offset(env->btf, t->name_off);
4937  	if (!value || !value[0]) {
4938  		btf_verifier_log_type(env, t, "Invalid value");
4939  		return -EINVAL;
4940  	}
4941  
4942  	if (btf_type_vlen(t)) {
4943  		btf_verifier_log_type(env, t, "vlen != 0");
4944  		return -EINVAL;
4945  	}
4946  
4947  	if (btf_type_kflag(t)) {
4948  		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4949  		return -EINVAL;
4950  	}
4951  
4952  	component_idx = btf_type_decl_tag(t)->component_idx;
4953  	if (component_idx < -1) {
4954  		btf_verifier_log_type(env, t, "Invalid component_idx");
4955  		return -EINVAL;
4956  	}
4957  
4958  	btf_verifier_log_type(env, t, NULL);
4959  
4960  	return meta_needed;
4961  }
4962  
4963  static int btf_decl_tag_resolve(struct btf_verifier_env *env,
4964  			   const struct resolve_vertex *v)
4965  {
4966  	const struct btf_type *next_type;
4967  	const struct btf_type *t = v->t;
4968  	u32 next_type_id = t->type;
4969  	struct btf *btf = env->btf;
4970  	s32 component_idx;
4971  	u32 vlen;
4972  
4973  	next_type = btf_type_by_id(btf, next_type_id);
4974  	if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
4975  		btf_verifier_log_type(env, v->t, "Invalid type_id");
4976  		return -EINVAL;
4977  	}
4978  
4979  	if (!env_type_is_resolve_sink(env, next_type) &&
4980  	    !env_type_is_resolved(env, next_type_id))
4981  		return env_stack_push(env, next_type, next_type_id);
4982  
4983  	component_idx = btf_type_decl_tag(t)->component_idx;
4984  	if (component_idx != -1) {
4985  		if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
4986  			btf_verifier_log_type(env, v->t, "Invalid component_idx");
4987  			return -EINVAL;
4988  		}
4989  
4990  		if (btf_type_is_struct(next_type)) {
4991  			vlen = btf_type_vlen(next_type);
4992  		} else {
4993  			/* next_type should be a function */
4994  			next_type = btf_type_by_id(btf, next_type->type);
4995  			vlen = btf_type_vlen(next_type);
4996  		}
4997  
4998  		if ((u32)component_idx >= vlen) {
4999  			btf_verifier_log_type(env, v->t, "Invalid component_idx");
5000  			return -EINVAL;
5001  		}
5002  	}
5003  
5004  	env_stack_pop_resolved(env, next_type_id, 0);
5005  
5006  	return 0;
5007  }
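/* Semantics recap: component_idx == -1 means the decl tag annotates the
 * target (struct/union, var, typedef or func) as a whole, while
 * component_idx >= 0 selects a struct/union member or a function
 * parameter. E.g. a tag on the second parameter of "int f(int a, int b)"
 * carries component_idx=1 and points at the FUNC for f, which is why the
 * resolver above walks one step further to the FUNC_PROTO to fetch vlen.
 */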
5008  
5009  static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
5010  {
5011  	btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5012  			 btf_type_decl_tag(t)->component_idx);
5013  }
5014  
5015  static const struct btf_kind_operations decl_tag_ops = {
5016  	.check_meta = btf_decl_tag_check_meta,
5017  	.resolve = btf_decl_tag_resolve,
5018  	.check_member = btf_df_check_member,
5019  	.check_kflag_member = btf_df_check_kflag_member,
5020  	.log_details = btf_decl_tag_log,
5021  	.show = btf_df_show,
5022  };
5023  
5024  static int btf_func_proto_check(struct btf_verifier_env *env,
5025  				const struct btf_type *t)
5026  {
5027  	const struct btf_type *ret_type;
5028  	const struct btf_param *args;
5029  	const struct btf *btf;
5030  	u16 nr_args, i;
5031  	int err;
5032  
5033  	btf = env->btf;
5034  	args = (const struct btf_param *)(t + 1);
5035  	nr_args = btf_type_vlen(t);
5036  
5037  	/* Check func return type, which could be "void" (t->type == 0) */
5038  	if (t->type) {
5039  		u32 ret_type_id = t->type;
5040  
5041  		ret_type = btf_type_by_id(btf, ret_type_id);
5042  		if (!ret_type) {
5043  			btf_verifier_log_type(env, t, "Invalid return type");
5044  			return -EINVAL;
5045  		}
5046  
5047  		if (btf_type_is_resolve_source_only(ret_type)) {
5048  			btf_verifier_log_type(env, t, "Invalid return type");
5049  			return -EINVAL;
5050  		}
5051  
5052  		if (btf_type_needs_resolve(ret_type) &&
5053  		    !env_type_is_resolved(env, ret_type_id)) {
5054  			err = btf_resolve(env, ret_type, ret_type_id);
5055  			if (err)
5056  				return err;
5057  		}
5058  
5059  		/* Ensure the return type is a type that has a size */
5060  		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
5061  			btf_verifier_log_type(env, t, "Invalid return type");
5062  			return -EINVAL;
5063  		}
5064  	}
5065  
5066  	if (!nr_args)
5067  		return 0;
5068  
5069  	/* Last func arg type_id could be 0 if it is a vararg */
5070  	if (!args[nr_args - 1].type) {
5071  		if (args[nr_args - 1].name_off) {
5072  			btf_verifier_log_type(env, t, "Invalid arg#%u",
5073  					      nr_args);
5074  			return -EINVAL;
5075  		}
5076  		nr_args--;
5077  	}
5078  
5079  	for (i = 0; i < nr_args; i++) {
5080  		const struct btf_type *arg_type;
5081  		u32 arg_type_id;
5082  
5083  		arg_type_id = args[i].type;
5084  		arg_type = btf_type_by_id(btf, arg_type_id);
5085  		if (!arg_type) {
5086  			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5087  			return -EINVAL;
5088  		}
5089  
5090  		if (btf_type_is_resolve_source_only(arg_type)) {
5091  			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5092  			return -EINVAL;
5093  		}
5094  
5095  		if (args[i].name_off &&
5096  		    (!btf_name_offset_valid(btf, args[i].name_off) ||
5097  		     !btf_name_valid_identifier(btf, args[i].name_off))) {
5098  			btf_verifier_log_type(env, t,
5099  					      "Invalid arg#%u", i + 1);
5100  			return -EINVAL;
5101  		}
5102  
5103  		if (btf_type_needs_resolve(arg_type) &&
5104  		    !env_type_is_resolved(env, arg_type_id)) {
5105  			err = btf_resolve(env, arg_type, arg_type_id);
5106  			if (err)
5107  				return err;
5108  		}
5109  
5110  		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
5111  			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5112  			return -EINVAL;
5113  		}
5114  	}
5115  
5116  	return 0;
5117  }
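/* Vararg encoding example (illustrative): "int printf(const char *fmt, ...)"
 * is a FUNC_PROTO with vlen=2 whose last btf_param has type==0 and
 * name_off==0. Only the trailing parameter may carry a zero type_id, and
 * then it must also be anonymous; a zero type_id anywhere else fails the
 * btf_type_id_size() check in the loop above.
 */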
5118  
5119  static int btf_func_check(struct btf_verifier_env *env,
5120  			  const struct btf_type *t)
5121  {
5122  	const struct btf_type *proto_type;
5123  	const struct btf_param *args;
5124  	const struct btf *btf;
5125  	u16 nr_args, i;
5126  
5127  	btf = env->btf;
5128  	proto_type = btf_type_by_id(btf, t->type);
5129  
5130  	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
5131  		btf_verifier_log_type(env, t, "Invalid type_id");
5132  		return -EINVAL;
5133  	}
5134  
5135  	args = (const struct btf_param *)(proto_type + 1);
5136  	nr_args = btf_type_vlen(proto_type);
5137  	for (i = 0; i < nr_args; i++) {
5138  		if (!args[i].name_off && args[i].type) {
5139  			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5140  			return -EINVAL;
5141  		}
5142  	}
5143  
5144  	return 0;
5145  }
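/* Note the extra constraint compared to a standalone FUNC_PROTO: once a
 * proto is referenced by a BTF_KIND_FUNC, every typed parameter must also
 * be named (args[i].name_off != 0), presumably so the verifier can refer
 * to the subprogram's arguments by name.
 */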
5146  
5147  static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
5148  	[BTF_KIND_INT] = &int_ops,
5149  	[BTF_KIND_PTR] = &ptr_ops,
5150  	[BTF_KIND_ARRAY] = &array_ops,
5151  	[BTF_KIND_STRUCT] = &struct_ops,
5152  	[BTF_KIND_UNION] = &struct_ops,
5153  	[BTF_KIND_ENUM] = &enum_ops,
5154  	[BTF_KIND_FWD] = &fwd_ops,
5155  	[BTF_KIND_TYPEDEF] = &modifier_ops,
5156  	[BTF_KIND_VOLATILE] = &modifier_ops,
5157  	[BTF_KIND_CONST] = &modifier_ops,
5158  	[BTF_KIND_RESTRICT] = &modifier_ops,
5159  	[BTF_KIND_FUNC] = &func_ops,
5160  	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
5161  	[BTF_KIND_VAR] = &var_ops,
5162  	[BTF_KIND_DATASEC] = &datasec_ops,
5163  	[BTF_KIND_FLOAT] = &float_ops,
5164  	[BTF_KIND_DECL_TAG] = &decl_tag_ops,
5165  	[BTF_KIND_TYPE_TAG] = &modifier_ops,
5166  	[BTF_KIND_ENUM64] = &enum64_ops,
5167  };
5168  
5169  static s32 btf_check_meta(struct btf_verifier_env *env,
5170  			  const struct btf_type *t,
5171  			  u32 meta_left)
5172  {
5173  	u32 saved_meta_left = meta_left;
5174  	s32 var_meta_size;
5175  
5176  	if (meta_left < sizeof(*t)) {
5177  		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
5178  				 env->log_type_id, meta_left, sizeof(*t));
5179  		return -EINVAL;
5180  	}
5181  	meta_left -= sizeof(*t);
5182  
5183  	if (t->info & ~BTF_INFO_MASK) {
5184  		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
5185  				 env->log_type_id, t->info);
5186  		return -EINVAL;
5187  	}
5188  
5189  	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
5190  	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
5191  		btf_verifier_log(env, "[%u] Invalid kind:%u",
5192  				 env->log_type_id, BTF_INFO_KIND(t->info));
5193  		return -EINVAL;
5194  	}
5195  
5196  	if (!btf_name_offset_valid(env->btf, t->name_off)) {
5197  		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
5198  				 env->log_type_id, t->name_off);
5199  		return -EINVAL;
5200  	}
5201  
5202  	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
5203  	if (var_meta_size < 0)
5204  		return var_meta_size;
5205  
5206  	meta_left -= var_meta_size;
5207  
5208  	return saved_meta_left - meta_left;
5209  }
5210  
5211  static int btf_check_all_metas(struct btf_verifier_env *env)
5212  {
5213  	struct btf *btf = env->btf;
5214  	struct btf_header *hdr;
5215  	void *cur, *end;
5216  
5217  	hdr = &btf->hdr;
5218  	cur = btf->nohdr_data + hdr->type_off;
5219  	end = cur + hdr->type_len;
5220  
5221  	env->log_type_id = btf->base_btf ? btf->start_id : 1;
5222  	while (cur < end) {
5223  		struct btf_type *t = cur;
5224  		s32 meta_size;
5225  
5226  		meta_size = btf_check_meta(env, t, end - cur);
5227  		if (meta_size < 0)
5228  			return meta_size;
5229  
5230  		btf_add_type(env, t);
5231  		cur += meta_size;
5232  		env->log_type_id++;
5233  	}
5234  
5235  	return 0;
5236  }
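/* Parsing is two-pass: btf_check_all_metas() above walks the type section
 * linearly, validating each type's own metadata and registering it via
 * btf_add_type(), while btf_check_all_types() below resolves cross-type
 * references and sizes once every type is known.
 */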
5237  
5238  static bool btf_resolve_valid(struct btf_verifier_env *env,
5239  			      const struct btf_type *t,
5240  			      u32 type_id)
5241  {
5242  	struct btf *btf = env->btf;
5243  
5244  	if (!env_type_is_resolved(env, type_id))
5245  		return false;
5246  
5247  	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
5248  		return !btf_resolved_type_id(btf, type_id) &&
5249  		       !btf_resolved_type_size(btf, type_id);
5250  
5251  	if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
5252  		return btf_resolved_type_id(btf, type_id) &&
5253  		       !btf_resolved_type_size(btf, type_id);
5254  
5255  	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
5256  	    btf_type_is_var(t)) {
5257  		t = btf_type_id_resolve(btf, &type_id);
5258  		return t &&
5259  		       !btf_type_is_modifier(t) &&
5260  		       !btf_type_is_var(t) &&
5261  		       !btf_type_is_datasec(t);
5262  	}
5263  
5264  	if (btf_type_is_array(t)) {
5265  		const struct btf_array *array = btf_type_array(t);
5266  		const struct btf_type *elem_type;
5267  		u32 elem_type_id = array->type;
5268  		u32 elem_size;
5269  
5270  		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
5271  		return elem_type && !btf_type_is_modifier(elem_type) &&
5272  			(array->nelems * elem_size ==
5273  			 btf_resolved_type_size(btf, type_id));
5274  	}
5275  
5276  	return false;
5277  }
5278  
5279  static int btf_resolve(struct btf_verifier_env *env,
5280  		       const struct btf_type *t, u32 type_id)
5281  {
5282  	u32 save_log_type_id = env->log_type_id;
5283  	const struct resolve_vertex *v;
5284  	int err = 0;
5285  
5286  	env->resolve_mode = RESOLVE_TBD;
5287  	env_stack_push(env, t, type_id);
5288  	while (!err && (v = env_stack_peak(env))) {
5289  		env->log_type_id = v->type_id;
5290  		err = btf_type_ops(v->t)->resolve(env, v);
5291  	}
5292  
5293  	env->log_type_id = type_id;
5294  	if (err == -E2BIG) {
5295  		btf_verifier_log_type(env, t,
5296  				      "Exceeded max resolving depth:%u",
5297  				      MAX_RESOLVE_DEPTH);
5298  	} else if (err == -EEXIST) {
5299  		btf_verifier_log_type(env, t, "Loop detected");
5300  	}
5301  
5302  	/* Final sanity check */
5303  	if (!err && !btf_resolve_valid(env, t, type_id)) {
5304  		btf_verifier_log_type(env, t, "Invalid resolve state");
5305  		err = -EINVAL;
5306  	}
5307  
5308  	env->log_type_id = save_log_type_id;
5309  	return err;
5310  }
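/* Resolution is an iterative depth-first walk over an explicit stack
 * (env_stack_push()/env_stack_peak()) instead of recursion, keeping
 * kernel stack usage bounded. env_stack_push() reports -E2BIG once
 * MAX_RESOLVE_DEPTH is exceeded and -EEXIST when a type already on the
 * resolve path is pushed again, i.e. a reference cycle.
 */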
5311  
5312  static int btf_check_all_types(struct btf_verifier_env *env)
5313  {
5314  	struct btf *btf = env->btf;
5315  	const struct btf_type *t;
5316  	u32 type_id, i;
5317  	int err;
5318  
5319  	err = env_resolve_init(env);
5320  	if (err)
5321  		return err;
5322  
5323  	env->phase++;
5324  	for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5325  		type_id = btf->start_id + i;
5326  		t = btf_type_by_id(btf, type_id);
5327  
5328  		env->log_type_id = type_id;
5329  		if (btf_type_needs_resolve(t) &&
5330  		    !env_type_is_resolved(env, type_id)) {
5331  			err = btf_resolve(env, t, type_id);
5332  			if (err)
5333  				return err;
5334  		}
5335  
5336  		if (btf_type_is_func_proto(t)) {
5337  			err = btf_func_proto_check(env, t);
5338  			if (err)
5339  				return err;
5340  		}
5341  	}
5342  
5343  	return 0;
5344  }
5345  
5346  static int btf_parse_type_sec(struct btf_verifier_env *env)
5347  {
5348  	const struct btf_header *hdr = &env->btf->hdr;
5349  	int err;
5350  
5351  	/* Type section must be aligned to 4 bytes */
5352  	if (hdr->type_off & (sizeof(u32) - 1)) {
5353  		btf_verifier_log(env, "Unaligned type_off");
5354  		return -EINVAL;
5355  	}
5356  
5357  	if (!env->btf->base_btf && !hdr->type_len) {
5358  		btf_verifier_log(env, "No type found");
5359  		return -EINVAL;
5360  	}
5361  
5362  	err = btf_check_all_metas(env);
5363  	if (err)
5364  		return err;
5365  
5366  	return btf_check_all_types(env);
5367  }
5368  
5369  static int btf_parse_str_sec(struct btf_verifier_env *env)
5370  {
5371  	const struct btf_header *hdr;
5372  	struct btf *btf = env->btf;
5373  	const char *start, *end;
5374  
5375  	hdr = &btf->hdr;
5376  	start = btf->nohdr_data + hdr->str_off;
5377  	end = start + hdr->str_len;
5378  
5379  	if (end != btf->data + btf->data_size) {
5380  		btf_verifier_log(env, "String section is not at the end");
5381  		return -EINVAL;
5382  	}
5383  
5384  	btf->strings = start;
5385  
5386  	if (btf->base_btf && !hdr->str_len)
5387  		return 0;
5388  	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5389  		btf_verifier_log(env, "Invalid string section");
5390  		return -EINVAL;
5391  	}
5392  	if (!btf->base_btf && start[0]) {
5393  		btf_verifier_log(env, "Invalid string section");
5394  		return -EINVAL;
5395  	}
5396  
5397  	return 0;
5398  }
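/* Layout example (illustrative): for non-split BTF the string section is
 * a NUL-separated blob that begins with an empty string, e.g.
 *
 *   "\0int\0my_var\0"
 *
 * The checks above enforce exactly that: the blob is the last thing in
 * the file, its first byte is '\0' (base BTF only) and its last byte is
 * '\0'.
 */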
5399  
5400  static const size_t btf_sec_info_offset[] = {
5401  	offsetof(struct btf_header, type_off),
5402  	offsetof(struct btf_header, str_off),
5403  };
5404  
5405  static int btf_sec_info_cmp(const void *a, const void *b)
5406  {
5407  	const struct btf_sec_info *x = a;
5408  	const struct btf_sec_info *y = b;
5409  
5410  	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5411  }
5412  
5413  static int btf_check_sec_info(struct btf_verifier_env *env,
5414  			      u32 btf_data_size)
5415  {
5416  	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
5417  	u32 total, expected_total, i;
5418  	const struct btf_header *hdr;
5419  	const struct btf *btf;
5420  
5421  	btf = env->btf;
5422  	hdr = &btf->hdr;
5423  
5424  	/* Populate the secs from hdr */
5425  	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
5426  		secs[i] = *(struct btf_sec_info *)((void *)hdr +
5427  						   btf_sec_info_offset[i]);
5428  
5429  	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
5430  	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5431  
5432  	/* Check for gaps and overlap among sections */
5433  	total = 0;
5434  	expected_total = btf_data_size - hdr->hdr_len;
5435  	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
5436  		if (expected_total < secs[i].off) {
5437  			btf_verifier_log(env, "Invalid section offset");
5438  			return -EINVAL;
5439  		}
5440  		if (total < secs[i].off) {
5441  			/* gap */
5442  			btf_verifier_log(env, "Unsupported section found");
5443  			return -EINVAL;
5444  		}
5445  		if (total > secs[i].off) {
5446  			btf_verifier_log(env, "Section overlap found");
5447  			return -EINVAL;
5448  		}
5449  		if (expected_total - total < secs[i].len) {
5450  			btf_verifier_log(env,
5451  					 "Total section length too long");
5452  			return -EINVAL;
5453  		}
5454  		total += secs[i].len;
5455  	}
5456  
5457  	/* There is data other than hdr and known sections */
5458  	if (expected_total != total) {
5459  		btf_verifier_log(env, "Unsupported section found");
5460  		return -EINVAL;
5461  	}
5462  
5463  	return 0;
5464  }
5465  
5466  static int btf_parse_hdr(struct btf_verifier_env *env)
5467  {
5468  	u32 hdr_len, hdr_copy, btf_data_size;
5469  	const struct btf_header *hdr;
5470  	struct btf *btf;
5471  
5472  	btf = env->btf;
5473  	btf_data_size = btf->data_size;
5474  
5475  	if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
5476  		btf_verifier_log(env, "hdr_len not found");
5477  		return -EINVAL;
5478  	}
5479  
5480  	hdr = btf->data;
5481  	hdr_len = hdr->hdr_len;
5482  	if (btf_data_size < hdr_len) {
5483  		btf_verifier_log(env, "btf_header not found");
5484  		return -EINVAL;
5485  	}
5486  
5487  	/* Ensure the unsupported header fields are zero */
5488  	if (hdr_len > sizeof(btf->hdr)) {
5489  		u8 *expected_zero = btf->data + sizeof(btf->hdr);
5490  		u8 *end = btf->data + hdr_len;
5491  
5492  		for (; expected_zero < end; expected_zero++) {
5493  			if (*expected_zero) {
5494  				btf_verifier_log(env, "Unsupported btf_header");
5495  				return -E2BIG;
5496  			}
5497  		}
5498  	}
5499  
5500  	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5501  	memcpy(&btf->hdr, btf->data, hdr_copy);
5502  
5503  	hdr = &btf->hdr;
5504  
5505  	btf_verifier_log_hdr(env, btf_data_size);
5506  
5507  	if (hdr->magic != BTF_MAGIC) {
5508  		btf_verifier_log(env, "Invalid magic");
5509  		return -EINVAL;
5510  	}
5511  
5512  	if (hdr->version != BTF_VERSION) {
5513  		btf_verifier_log(env, "Unsupported version");
5514  		return -ENOTSUPP;
5515  	}
5516  
5517  	if (hdr->flags) {
5518  		btf_verifier_log(env, "Unsupported flags");
5519  		return -ENOTSUPP;
5520  	}
5521  
5522  	if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5523  		btf_verifier_log(env, "No data");
5524  		return -EINVAL;
5525  	}
5526  
5527  	return btf_check_sec_info(env, btf_data_size);
5528  }
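/* Header handling is forward compatible: a header longer than the
 * struct btf_header this kernel knows is accepted as long as all the
 * extra bytes are zero (otherwise -E2BIG). Only the known prefix is
 * copied into btf->hdr, and btf_check_sec_info() then insists the type
 * and string sections tile the rest of the file exactly, with no gaps
 * and no overlap.
 */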
5529  
5530  static const char *alloc_obj_fields[] = {
5531  	"bpf_spin_lock",
5532  	"bpf_list_head",
5533  	"bpf_list_node",
5534  	"bpf_rb_root",
5535  	"bpf_rb_node",
5536  	"bpf_refcount",
5537  };
5538  
5539  static struct btf_struct_metas *
5540  btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
5541  {
5542  	struct btf_struct_metas *tab = NULL;
5543  	struct btf_id_set *aof;
5544  	int i, n, id, ret;
5545  
5546  	BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
5547  	BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
5548  
5549  	aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
5550  	if (!aof)
5551  		return ERR_PTR(-ENOMEM);
5552  	aof->cnt = 0;
5553  
5554  	for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
5555  		/* Try to find whether this special type exists in user BTF, and
5556  		 * if so remember its ID so we can easily find it among members
5557  		 * of structs that we iterate over in the next loop.
5558  		 */
5559  		struct btf_id_set *new_aof;
5560  
5561  		id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5562  		if (id < 0)
5563  			continue;
5564  
5565  		new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
5566  				   GFP_KERNEL | __GFP_NOWARN);
5567  		if (!new_aof) {
5568  			ret = -ENOMEM;
5569  			goto free_aof;
5570  		}
5571  		aof = new_aof;
5572  		aof->ids[aof->cnt++] = id;
5573  	}
5574  
5575  	n = btf_nr_types(btf);
5576  	for (i = 1; i < n; i++) {
5577  		/* Check whether there are kptrs in user BTF and remember their IDs */
5578  		struct btf_id_set *new_aof;
5579  		struct btf_field_info tmp;
5580  		const struct btf_type *t;
5581  
5582  		t = btf_type_by_id(btf, i);
5583  		if (!t) {
5584  			ret = -EINVAL;
5585  			goto free_aof;
5586  		}
5587  
5588  		ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR);
5589  		if (ret != BTF_FIELD_FOUND)
5590  			continue;
5591  
5592  		new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
5593  				   GFP_KERNEL | __GFP_NOWARN);
5594  		if (!new_aof) {
5595  			ret = -ENOMEM;
5596  			goto free_aof;
5597  		}
5598  		aof = new_aof;
5599  		aof->ids[aof->cnt++] = i;
5600  	}
5601  
5602  	if (!aof->cnt) {
5603  		kfree(aof);
5604  		return NULL;
5605  	}
5606  	sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5607  
5608  	for (i = 1; i < n; i++) {
5609  		struct btf_struct_metas *new_tab;
5610  		const struct btf_member *member;
5611  		struct btf_struct_meta *type;
5612  		struct btf_record *record;
5613  		const struct btf_type *t;
5614  		int j, tab_cnt;
5615  
5616  		t = btf_type_by_id(btf, i);
5617  		if (!__btf_type_is_struct(t))
5618  			continue;
5619  
5620  		cond_resched();
5621  
5622  		for_each_member(j, t, member) {
5623  			if (btf_id_set_contains(aof, member->type))
5624  				goto parse;
5625  		}
5626  		continue;
5627  	parse:
5628  		tab_cnt = tab ? tab->cnt : 0;
5629  		new_tab = krealloc(tab, offsetof(struct btf_struct_metas, types[tab_cnt + 1]),
5630  				   GFP_KERNEL | __GFP_NOWARN);
5631  		if (!new_tab) {
5632  			ret = -ENOMEM;
5633  			goto free;
5634  		}
5635  		if (!tab)
5636  			new_tab->cnt = 0;
5637  		tab = new_tab;
5638  
5639  		type = &tab->types[tab->cnt];
5640  		type->btf_id = i;
5641  		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
5642  						  BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
5643  						  BPF_KPTR, t->size);
5644  		/* The record must not be NULL here; treat a NULL record as an error */
5645  		if (IS_ERR_OR_NULL(record)) {
5646  			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5647  			goto free;
5648  		}
5649  		type->record = record;
5650  		tab->cnt++;
5651  	}
5652  	kfree(aof);
5653  	return tab;
5654  free:
5655  	btf_struct_metas_free(tab);
5656  free_aof:
5657  	kfree(aof);
5658  	return ERR_PTR(ret);
5659  }
5660  
5661  struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
5662  {
5663  	struct btf_struct_metas *tab;
5664  
5665  	BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0);
5666  	tab = btf->struct_meta_tab;
5667  	if (!tab)
5668  		return NULL;
5669  	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5670  }
5671  
5672  static int btf_check_type_tags(struct btf_verifier_env *env,
5673  			       struct btf *btf, int start_id)
5674  {
5675  	int i, n, good_id = start_id - 1;
5676  	bool in_tags;
5677  
5678  	n = btf_nr_types(btf);
5679  	for (i = start_id; i < n; i++) {
5680  		const struct btf_type *t;
5681  		int chain_limit = 32;
5682  		u32 cur_id = i;
5683  
5684  		t = btf_type_by_id(btf, i);
5685  		if (!t)
5686  			return -EINVAL;
5687  		if (!btf_type_is_modifier(t))
5688  			continue;
5689  
5690  		cond_resched();
5691  
5692  		in_tags = btf_type_is_type_tag(t);
5693  		while (btf_type_is_modifier(t)) {
5694  			if (!chain_limit--) {
5695  				btf_verifier_log(env, "Max chain length or cycle detected");
5696  				return -ELOOP;
5697  			}
5698  			if (btf_type_is_type_tag(t)) {
5699  				if (!in_tags) {
5700  					btf_verifier_log(env, "Type tags don't precede modifiers");
5701  					return -EINVAL;
5702  				}
5703  			} else if (in_tags) {
5704  				in_tags = false;
5705  			}
5706  			if (cur_id <= good_id)
5707  				break;
5708  			/* Move to next type */
5709  			cur_id = t->type;
5710  			t = btf_type_by_id(btf, cur_id);
5711  			if (!t)
5712  				return -EINVAL;
5713  		}
5714  		good_id = i;
5715  	}
5716  	return 0;
5717  }
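/* Ordering rule (illustrative): within one modifier chain all
 * BTF_KIND_TYPE_TAGs must come first, so
 *
 *   TYPE_TAG -> TYPE_TAG -> CONST -> <base type>   is accepted, while
 *   CONST -> TYPE_TAG -> <base type>               is rejected
 *
 * with the "Type tags don't precede modifiers" error above.
 */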
5718  
5719  static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
5720  {
5721  	u32 log_true_size;
5722  	int err;
5723  
5724  	err = bpf_vlog_finalize(log, &log_true_size);
5725  
5726  	if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
5727  	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
5728  				  &log_true_size, sizeof(log_true_size)))
5729  		err = -EFAULT;
5730  
5731  	return err;
5732  }
5733  
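/* Overview (descriptive): btf_parse() below is the entry point for
 * user-supplied BTF (the BPF_BTF_LOAD command). It copies the blob in and
 * then runs, in order, btf_parse_hdr(), btf_parse_str_sec(),
 * btf_parse_type_sec(), btf_check_type_tags() and
 * btf_parse_struct_metas(), before fixing up special fields and
 * finalizing the verifier log.
 */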
5734  static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
5735  {
5736  	bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5737  	char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
5738  	struct btf_struct_metas *struct_meta_tab;
5739  	struct btf_verifier_env *env = NULL;
5740  	struct btf *btf = NULL;
5741  	u8 *data;
5742  	int err, ret;
5743  
5744  	if (attr->btf_size > BTF_MAX_SIZE)
5745  		return ERR_PTR(-E2BIG);
5746  
5747  	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5748  	if (!env)
5749  		return ERR_PTR(-ENOMEM);
5750  
5751  	/* user could have requested verbose verifier output
5752  	 * and supplied a buffer to store the verification trace
5753  	 */
5754  	err = bpf_vlog_init(&env->log, attr->btf_log_level,
5755  			    log_ubuf, attr->btf_log_size);
5756  	if (err)
5757  		goto errout_free;
5758  
5759  	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5760  	if (!btf) {
5761  		err = -ENOMEM;
5762  		goto errout;
5763  	}
5764  	env->btf = btf;
5765  
5766  	data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5767  	if (!data) {
5768  		err = -ENOMEM;
5769  		goto errout;
5770  	}
5771  
5772  	btf->data = data;
5773  	btf->data_size = attr->btf_size;
5774  
5775  	if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5776  		err = -EFAULT;
5777  		goto errout;
5778  	}
5779  
5780  	err = btf_parse_hdr(env);
5781  	if (err)
5782  		goto errout;
5783  
5784  	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5785  
5786  	err = btf_parse_str_sec(env);
5787  	if (err)
5788  		goto errout;
5789  
5790  	err = btf_parse_type_sec(env);
5791  	if (err)
5792  		goto errout;
5793  
5794  	err = btf_check_type_tags(env, btf, 1);
5795  	if (err)
5796  		goto errout;
5797  
5798  	struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
5799  	if (IS_ERR(struct_meta_tab)) {
5800  		err = PTR_ERR(struct_meta_tab);
5801  		goto errout;
5802  	}
5803  	btf->struct_meta_tab = struct_meta_tab;
5804  
5805  	if (struct_meta_tab) {
5806  		int i;
5807  
5808  		for (i = 0; i < struct_meta_tab->cnt; i++) {
5809  			err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5810  			if (err < 0)
5811  				goto errout_meta;
5812  		}
5813  	}
5814  
5815  	err = finalize_log(&env->log, uattr, uattr_size);
5816  	if (err)
5817  		goto errout_free;
5818  
5819  	btf_verifier_env_free(env);
5820  	refcount_set(&btf->refcnt, 1);
5821  	return btf;
5822  
5823  errout_meta:
5824  	btf_free_struct_meta_tab(btf);
5825  errout:
5826  	/* overwrite err with -ENOSPC or -EFAULT */
5827  	ret = finalize_log(&env->log, uattr, uattr_size);
5828  	if (ret)
5829  		err = ret;
5830  errout_free:
5831  	btf_verifier_env_free(env);
5832  	if (btf)
5833  		btf_free(btf);
5834  	return ERR_PTR(err);
5835  }
5836  
5837  extern char __start_BTF[];
5838  extern char __stop_BTF[];
5839  extern struct btf *btf_vmlinux;
5840  
5841  #define BPF_MAP_TYPE(_id, _ops)
5842  #define BPF_LINK_TYPE(_id, _name)
5843  static union {
5844  	struct bpf_ctx_convert {
5845  #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5846  	prog_ctx_type _id##_prog; \
5847  	kern_ctx_type _id##_kern;
5848  #include <linux/bpf_types.h>
5849  #undef BPF_PROG_TYPE
5850  	} *__t;
5851  	/* 't' is written once under lock. Read many times. */
5852  	const struct btf_type *t;
5853  } bpf_ctx_convert;
5854  enum {
5855  #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5856  	__ctx_convert##_id,
5857  #include <linux/bpf_types.h>
5858  #undef BPF_PROG_TYPE
5859  	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
5860  };
5861  static u8 bpf_ctx_convert_map[] = {
5862  #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5863  	[_id] = __ctx_convert##_id,
5864  #include <linux/bpf_types.h>
5865  #undef BPF_PROG_TYPE
5866  	0, /* avoid empty array */
5867  };
5868  #undef BPF_MAP_TYPE
5869  #undef BPF_LINK_TYPE
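/* Descriptive note: including <linux/bpf_types.h> three times above is a
 * preprocessor trick. The union member __t instantiates one
 * (prog ctx, kernel ctx) member pair per program type inside
 * struct bpf_ctx_convert, and bpf_ctx_convert_map[] translates an
 * enum bpf_prog_type into that pair's index; hence the "* 2" and
 * "* 2 + 1" member arithmetic in the lookup helpers below.
 */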
5870  
5871  static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
5872  {
5873  	const struct btf_type *conv_struct;
5874  	const struct btf_member *ctx_type;
5875  
5876  	conv_struct = bpf_ctx_convert.t;
5877  	if (!conv_struct)
5878  		return NULL;
5879  	/* prog_type is a valid bpf program type. No need for a bounds check. */
5880  	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5881  	/* ctx_type is a pointer to prog_ctx_type in vmlinux.
5882  	 * Like 'struct __sk_buff'
5883  	 */
5884  	return btf_type_by_id(btf_vmlinux, ctx_type->type);
5885  }
5886  
5887  static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
5888  {
5889  	const struct btf_type *conv_struct;
5890  	const struct btf_member *ctx_type;
5891  
5892  	conv_struct = bpf_ctx_convert.t;
5893  	if (!conv_struct)
5894  		return -EFAULT;
5895  	/* prog_type is a valid bpf program type. No need for a bounds check. */
5896  	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
5897  	/* ctx_type is a pointer to kern_ctx_type in vmlinux.
5898  	 * Like 'struct sk_buff'
5899  	 */
5900  	return ctx_type->type;
5901  }
5902  
5903  bool btf_is_projection_of(const char *pname, const char *tname)
5904  {
5905  	if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
5906  		return true;
5907  	if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
5908  		return true;
5909  	return false;
5910  }
5911  
5912  bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
5913  			  const struct btf_type *t, enum bpf_prog_type prog_type,
5914  			  int arg)
5915  {
5916  	const struct btf_type *ctx_type;
5917  	const char *tname, *ctx_tname;
5918  
5919  	t = btf_type_by_id(btf, t->type);
5920  
5921  	/* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to
5922  	 * check before we skip all the typedefs below.
5923  	 */
5924  	if (prog_type == BPF_PROG_TYPE_KPROBE) {
5925  		while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
5926  			t = btf_type_by_id(btf, t->type);
5927  
5928  		if (btf_type_is_typedef(t)) {
5929  			tname = btf_name_by_offset(btf, t->name_off);
5930  			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
5931  				return true;
5932  		}
5933  	}
5934  
5935  	while (btf_type_is_modifier(t))
5936  		t = btf_type_by_id(btf, t->type);
5937  	if (!btf_type_is_struct(t)) {
5938  		/* Only pointer to struct is supported for now.
5939  		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
5940  		 * is not supported yet.
5941  		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
5942  		 */
5943  		return false;
5944  	}
5945  	tname = btf_name_by_offset(btf, t->name_off);
5946  	if (!tname) {
5947  		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5948  		return false;
5949  	}
5950  
5951  	ctx_type = find_canonical_prog_ctx_type(prog_type);
5952  	if (!ctx_type) {
5953  		bpf_log(log, "btf_vmlinux is malformed\n");
5954  		/* should not happen */
5955  		return false;
5956  	}
5957  again:
5958  	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
5959  	if (!ctx_tname) {
5960  		/* should not happen */
5961  		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5962  		return false;
5963  	}
5964  	/* program types without named context types work only with arg:ctx tag */
5965  	if (ctx_tname[0] == '\0')
5966  		return false;
5967  	/* only compare that prog's ctx type name is the same as
5968  	 * kernel expects. No need to compare field by field.
5969  	 * It's ok for bpf prog to do:
5970  	 * struct __sk_buff {};
5971  	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
5972  	 * { // no fields of skb are ever used }
5973  	 */
5974  	if (btf_is_projection_of(ctx_tname, tname))
5975  		return true;
5976  	if (strcmp(ctx_tname, tname)) {
5977  		/* bpf_user_pt_regs_t is a typedef, so resolve it to
5978  		 * underlying struct and check name again
5979  		 */
5980  		if (!btf_type_is_modifier(ctx_type))
5981  			return false;
5982  		while (btf_type_is_modifier(ctx_type))
5983  			ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
5984  		goto again;
5985  	}
5986  	return true;
5987  }
5988  
5989  /* forward declarations for arch-specific underlying types of
5990   * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
5991   * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still
5992   * works correctly with __builtin_types_compatible_p() on respective
5993   * architectures
5994   */
5995  struct user_regs_struct;
5996  struct user_pt_regs;
5997  
5998  static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
5999  				      const struct btf_type *t, int arg,
6000  				      enum bpf_prog_type prog_type,
6001  				      enum bpf_attach_type attach_type)
6002  {
6003  	const struct btf_type *ctx_type;
6004  	const char *tname, *ctx_tname;
6005  
6006  	if (!btf_is_ptr(t)) {
6007  		bpf_log(log, "arg#%d type isn't a pointer\n", arg);
6008  		return -EINVAL;
6009  	}
6010  	t = btf_type_by_id(btf, t->type);
6011  
6012  	/* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */
6013  	if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) {
6014  		while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
6015  			t = btf_type_by_id(btf, t->type);
6016  
6017  		if (btf_type_is_typedef(t)) {
6018  			tname = btf_name_by_offset(btf, t->name_off);
6019  			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
6020  				return 0;
6021  		}
6022  	}
6023  
6024  	/* all other program types don't use typedefs for context type */
6025  	while (btf_type_is_modifier(t))
6026  		t = btf_type_by_id(btf, t->type);
6027  
6028  	/* `void *ctx __arg_ctx` is always valid */
6029  	if (btf_type_is_void(t))
6030  		return 0;
6031  
6032  	tname = btf_name_by_offset(btf, t->name_off);
6033  	if (str_is_empty(tname)) {
6034  		bpf_log(log, "arg#%d type doesn't have a name\n", arg);
6035  		return -EINVAL;
6036  	}
6037  
6038  	/* special cases */
6039  	switch (prog_type) {
6040  	case BPF_PROG_TYPE_KPROBE:
6041  		if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6042  			return 0;
6043  		break;
6044  	case BPF_PROG_TYPE_PERF_EVENT:
6045  		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6046  		    __btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6047  			return 0;
6048  		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6049  		    __btf_type_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6050  			return 0;
6051  		if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6052  		    __btf_type_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6053  			return 0;
6054  		break;
6055  	case BPF_PROG_TYPE_RAW_TRACEPOINT:
6056  	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6057  		/* allow u64* as ctx */
6058  		if (btf_is_int(t) && t->size == 8)
6059  			return 0;
6060  		break;
6061  	case BPF_PROG_TYPE_TRACING:
6062  		switch (attach_type) {
6063  		case BPF_TRACE_RAW_TP:
6064  			/* tp_btf program is TRACING, so it needs a special case here */
6065  			if (__btf_type_is_struct(t) &&
6066  			    strcmp(tname, "bpf_raw_tracepoint_args") == 0)
6067  				return 0;
6068  			/* allow u64* as ctx */
6069  			if (btf_is_int(t) && t->size == 8)
6070  				return 0;
6071  			break;
6072  		case BPF_TRACE_ITER:
6073  			/* allow struct bpf_iter__xxx types only */
6074  			if (__btf_type_is_struct(t) &&
6075  			    strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6076  				return 0;
6077  			break;
6078  		case BPF_TRACE_FENTRY:
6079  		case BPF_TRACE_FEXIT:
6080  		case BPF_MODIFY_RETURN:
6081  			/* allow u64* as ctx */
6082  			if (btf_is_int(t) && t->size == 8)
6083  				return 0;
6084  			break;
6085  		default:
6086  			break;
6087  		}
6088  		break;
6089  	case BPF_PROG_TYPE_LSM:
6090  	case BPF_PROG_TYPE_STRUCT_OPS:
6091  		/* allow u64* as ctx */
6092  		if (btf_is_int(t) && t->size == 8)
6093  			return 0;
6094  		break;
6095  	case BPF_PROG_TYPE_TRACEPOINT:
6096  	case BPF_PROG_TYPE_SYSCALL:
6097  	case BPF_PROG_TYPE_EXT:
6098  		return 0; /* anything goes */
6099  	default:
6100  		break;
6101  	}
6102  
6103  	ctx_type = find_canonical_prog_ctx_type(prog_type);
6104  	if (!ctx_type) {
6105  		/* should not happen */
6106  		bpf_log(log, "btf_vmlinux is malformed\n");
6107  		return -EINVAL;
6108  	}
6109  
6110  	/* resolve typedefs and check that underlying structs are matching as well */
6111  	while (btf_type_is_modifier(ctx_type))
6112  		ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6113  
6114  	/* if program type doesn't have distinctly named struct type for
6115  	 * context, then __arg_ctx argument can only be `void *`, which we
6116  	 * already checked above
6117  	 */
6118  	if (!__btf_type_is_struct(ctx_type)) {
6119  		bpf_log(log, "arg#%d should be void pointer\n", arg);
6120  		return -EINVAL;
6121  	}
6122  
6123  	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6124  	if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
6125  		bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
6126  		return -EINVAL;
6127  	}
6128  
6129  	return 0;
6130  }
6131  
6132  static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
6133  				     struct btf *btf,
6134  				     const struct btf_type *t,
6135  				     enum bpf_prog_type prog_type,
6136  				     int arg)
6137  {
6138  	if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg))
6139  		return -ENOENT;
6140  	return find_kern_ctx_type_id(prog_type);
6141  }
6142  
6143  int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
6144  {
6145  	const struct btf_member *kctx_member;
6146  	const struct btf_type *conv_struct;
6147  	const struct btf_type *kctx_type;
6148  	u32 kctx_type_id;
6149  
6150  	conv_struct = bpf_ctx_convert.t;
6151  	/* get member for kernel ctx type */
6152  	kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
6153  	kctx_type_id = kctx_member->type;
6154  	kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id);
6155  	if (!btf_type_is_struct(kctx_type)) {
6156  		bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
6157  		return -EINVAL;
6158  	}
6159  
6160  	return kctx_type_id;
6161  }
6162  
6163  BTF_ID_LIST(bpf_ctx_convert_btf_id)
6164  BTF_ID(struct, bpf_ctx_convert)
6165  
6166  static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
6167  				  void *data, unsigned int data_size)
6168  {
6169  	struct btf *btf = NULL;
6170  	int err;
6171  
6172  	if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF))
6173  		return ERR_PTR(-ENOENT);
6174  
6175  	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6176  	if (!btf) {
6177  		err = -ENOMEM;
6178  		goto errout;
6179  	}
6180  	env->btf = btf;
6181  
6182  	btf->data = data;
6183  	btf->data_size = data_size;
6184  	btf->kernel_btf = true;
6185  	snprintf(btf->name, sizeof(btf->name), "%s", name);
6186  
6187  	err = btf_parse_hdr(env);
6188  	if (err)
6189  		goto errout;
6190  
6191  	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6192  
6193  	err = btf_parse_str_sec(env);
6194  	if (err)
6195  		goto errout;
6196  
6197  	err = btf_check_all_metas(env);
6198  	if (err)
6199  		goto errout;
6200  
6201  	err = btf_check_type_tags(env, btf, 1);
6202  	if (err)
6203  		goto errout;
6204  
6205  	refcount_set(&btf->refcnt, 1);
6206  
6207  	return btf;
6208  
6209  errout:
6210  	if (btf) {
6211  		kvfree(btf->types);
6212  		kfree(btf);
6213  	}
6214  	return ERR_PTR(err);
6215  }
6216  
6217  struct btf *btf_parse_vmlinux(void)
6218  {
6219  	struct btf_verifier_env *env = NULL;
6220  	struct bpf_verifier_log *log;
6221  	struct btf *btf;
6222  	int err;
6223  
6224  	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6225  	if (!env)
6226  		return ERR_PTR(-ENOMEM);
6227  
6228  	log = &env->log;
6229  	log->level = BPF_LOG_KERNEL;
6230  	btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
6231  	if (IS_ERR(btf))
6232  		goto err_out;
6233  
6234  	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
6235  	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
6236  	err = btf_alloc_id(btf);
6237  	if (err) {
6238  		btf_free(btf);
6239  		btf = ERR_PTR(err);
6240  	}
6241  err_out:
6242  	btf_verifier_env_free(env);
6243  	return btf;
6244  }
6245  
6246  /* If .BTF_ids section was created with distilled base BTF, both base and
6247   * split BTF ids will need to be mapped to actual base/split ids for
6248   * BTF now that it has been relocated.
6249   */
6250  static __u32 btf_relocate_id(const struct btf *btf, __u32 id)
6251  {
6252  	if (!btf->base_btf || !btf->base_id_map)
6253  		return id;
6254  	return btf->base_id_map[id];
6255  }
6256  
6257  #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6258  
6259  static struct btf *btf_parse_module(const char *module_name, const void *data,
6260  				    unsigned int data_size, void *base_data,
6261  				    unsigned int base_data_size)
6262  {
6263  	struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL;
6264  	struct btf_verifier_env *env = NULL;
6265  	struct bpf_verifier_log *log;
6266  	int err = 0;
6267  
6268  	vmlinux_btf = bpf_get_btf_vmlinux();
6269  	if (IS_ERR(vmlinux_btf))
6270  		return vmlinux_btf;
6271  	if (!vmlinux_btf)
6272  		return ERR_PTR(-EINVAL);
6273  
6274  	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6275  	if (!env)
6276  		return ERR_PTR(-ENOMEM);
6277  
6278  	log = &env->log;
6279  	log->level = BPF_LOG_KERNEL;
6280  
6281  	if (base_data) {
6282  		base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size);
6283  		if (IS_ERR(base_btf)) {
6284  			err = PTR_ERR(base_btf);
6285  			goto errout;
6286  		}
6287  	} else {
6288  		base_btf = vmlinux_btf;
6289  	}
6290  
6291  	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6292  	if (!btf) {
6293  		err = -ENOMEM;
6294  		goto errout;
6295  	}
6296  	env->btf = btf;
6297  
6298  	btf->base_btf = base_btf;
6299  	btf->start_id = base_btf->nr_types;
6300  	btf->start_str_off = base_btf->hdr.str_len;
6301  	btf->kernel_btf = true;
6302  	snprintf(btf->name, sizeof(btf->name), "%s", module_name);
6303  
6304  	btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
6305  	if (!btf->data) {
6306  		err = -ENOMEM;
6307  		goto errout;
6308  	}
6309  	btf->data_size = data_size;
6310  
6311  	err = btf_parse_hdr(env);
6312  	if (err)
6313  		goto errout;
6314  
6315  	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6316  
6317  	err = btf_parse_str_sec(env);
6318  	if (err)
6319  		goto errout;
6320  
6321  	err = btf_check_all_metas(env);
6322  	if (err)
6323  		goto errout;
6324  
6325  	err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
6326  	if (err)
6327  		goto errout;
6328  
6329  	if (base_btf != vmlinux_btf) {
6330  		err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
6331  		if (err)
6332  			goto errout;
6333  		btf_free(base_btf);
6334  		base_btf = vmlinux_btf;
6335  	}
6336  
6337  	btf_verifier_env_free(env);
6338  	refcount_set(&btf->refcnt, 1);
6339  	return btf;
6340  
6341  errout:
6342  	btf_verifier_env_free(env);
6343  	if (!IS_ERR(base_btf) && base_btf != vmlinux_btf)
6344  		btf_free(base_btf);
6345  	if (btf) {
6346  		kvfree(btf->data);
6347  		kvfree(btf->types);
6348  		kfree(btf);
6349  	}
6350  	return ERR_PTR(err);
6351  }
6352  
6353  #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
6354  
6355  struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
6356  {
6357  	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6358  
6359  	if (tgt_prog)
6360  		return tgt_prog->aux->btf;
6361  	else
6362  		return prog->aux->attach_btf;
6363  }
6364  
6365  static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
6366  {
6367  	/* skip modifiers */
6368  	t = btf_type_skip_modifiers(btf, t->type, NULL);
6369  
6370  	return btf_type_is_int(t);
6371  }
6372  
6373  static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
6374  			   int off)
6375  {
6376  	const struct btf_param *args;
6377  	const struct btf_type *t;
6378  	u32 offset = 0, nr_args;
6379  	int i;
6380  
6381  	if (!func_proto)
6382  		return off / 8;
6383  
6384  	nr_args = btf_type_vlen(func_proto);
6385  	args = (const struct btf_param *)(func_proto + 1);
6386  	for (i = 0; i < nr_args; i++) {
6387  		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6388  		offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6389  		if (off < offset)
6390  			return i;
6391  	}
6392  
6393  	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6394  	offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6395  	if (off < offset)
6396  		return nr_args;
6397  
6398  	return nr_args + 1;
6399  }
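/* Worked example (illustrative): for a proto like
 * "int f(struct sk_buff *skb, u64 len)" each argument occupies one 8-byte
 * ctx slot (pointers take 8 bytes, scalars are rounded up to 8), so
 * off 0..7 maps to arg 0, off 8..15 to arg 1, off 16..23 to the return
 * value slot (index nr_args), and anything beyond that to nr_args + 1.
 */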
6400  
6401  static bool prog_args_trusted(const struct bpf_prog *prog)
6402  {
6403  	enum bpf_attach_type atype = prog->expected_attach_type;
6404  
6405  	switch (prog->type) {
6406  	case BPF_PROG_TYPE_TRACING:
6407  		return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER;
6408  	case BPF_PROG_TYPE_LSM:
6409  		return bpf_lsm_is_trusted(prog);
6410  	case BPF_PROG_TYPE_STRUCT_OPS:
6411  		return true;
6412  	default:
6413  		return false;
6414  	}
6415  }
6416  
6417  int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
6418  		       u32 arg_no)
6419  {
6420  	const struct btf_param *args;
6421  	const struct btf_type *t;
6422  	int off = 0, i;
6423  	u32 sz;
6424  
6425  	args = btf_params(func_proto);
6426  	for (i = 0; i < arg_no; i++) {
6427  		t = btf_type_by_id(btf, args[i].type);
6428  		t = btf_resolve_size(btf, t, &sz);
6429  		if (IS_ERR(t))
6430  			return PTR_ERR(t);
6431  		off += roundup(sz, 8);
6432  	}
6433  
6434  	return off;
6435  }
6436  
6437  struct bpf_raw_tp_null_args {
6438  	const char *func;
6439  	u64 mask;
6440  };
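/* Each argument of the named tracepoint gets a nibble in 'mask' (arg N
 * uses bits 4*N..4*N+3): nibble bit 0x1 marks the arg as possibly NULL
 * and bit 0x2 as possibly an ERR_PTR. E.g. { "tcp_send_reset", 0x11 }
 * flags args 0 and 1 as nullable. (Descriptive note inferred from the
 * mask consumer in btf_ctx_access() further down in this file.)
 */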
6441  
6442  static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
6443  	/* sched */
6444  	{ "sched_pi_setprio", 0x10 },
6445  	/* ... from sched_numa_pair_template event class */
6446  	{ "sched_stick_numa", 0x100 },
6447  	{ "sched_swap_numa", 0x100 },
6448  	/* afs */
6449  	{ "afs_make_fs_call", 0x10 },
6450  	{ "afs_make_fs_calli", 0x10 },
6451  	{ "afs_make_fs_call1", 0x10 },
6452  	{ "afs_make_fs_call2", 0x10 },
6453  	{ "afs_protocol_error", 0x1 },
6454  	{ "afs_flock_ev", 0x10 },
6455  	/* cachefiles */
6456  	{ "cachefiles_lookup", 0x1 | 0x200 },
6457  	{ "cachefiles_unlink", 0x1 },
6458  	{ "cachefiles_rename", 0x1 },
6459  	{ "cachefiles_prep_read", 0x1 },
6460  	{ "cachefiles_mark_active", 0x1 },
6461  	{ "cachefiles_mark_failed", 0x1 },
6462  	{ "cachefiles_mark_inactive", 0x1 },
6463  	{ "cachefiles_vfs_error", 0x1 },
6464  	{ "cachefiles_io_error", 0x1 },
6465  	{ "cachefiles_ondemand_open", 0x1 },
6466  	{ "cachefiles_ondemand_copen", 0x1 },
6467  	{ "cachefiles_ondemand_close", 0x1 },
6468  	{ "cachefiles_ondemand_read", 0x1 },
6469  	{ "cachefiles_ondemand_cread", 0x1 },
6470  	{ "cachefiles_ondemand_fd_write", 0x1 },
6471  	{ "cachefiles_ondemand_fd_release", 0x1 },
6472  	/* ext4, from ext4__mballoc event class */
6473  	{ "ext4_mballoc_discard", 0x10 },
6474  	{ "ext4_mballoc_free", 0x10 },
6475  	/* fib */
6476  	{ "fib_table_lookup", 0x100 },
6477  	/* filelock */
6478  	/* ... from filelock_lock event class */
6479  	{ "posix_lock_inode", 0x10 },
6480  	{ "fcntl_setlk", 0x10 },
6481  	{ "locks_remove_posix", 0x10 },
6482  	{ "flock_lock_inode", 0x10 },
6483  	/* ... from filelock_lease event class */
6484  	{ "break_lease_noblock", 0x10 },
6485  	{ "break_lease_block", 0x10 },
6486  	{ "break_lease_unblock", 0x10 },
6487  	{ "generic_delete_lease", 0x10 },
6488  	{ "time_out_leases", 0x10 },
6489  	/* host1x */
6490  	{ "host1x_cdma_push_gather", 0x10000 },
6491  	/* huge_memory */
6492  	{ "mm_khugepaged_scan_pmd", 0x10 },
6493  	{ "mm_collapse_huge_page_isolate", 0x1 },
6494  	{ "mm_khugepaged_scan_file", 0x10 },
6495  	{ "mm_khugepaged_collapse_file", 0x10 },
6496  	/* kmem */
6497  	{ "mm_page_alloc", 0x1 },
6498  	{ "mm_page_pcpu_drain", 0x1 },
6499  	/* ... from mm_page event class */
6500  	{ "mm_page_alloc_zone_locked", 0x1 },
6501  	/* netfs */
6502  	{ "netfs_failure", 0x10 },
6503  	/* power */
6504  	{ "device_pm_callback_start", 0x10 },
6505  	/* qdisc */
6506  	{ "qdisc_dequeue", 0x1000 },
6507  	/* rxrpc */
6508  	{ "rxrpc_recvdata", 0x1 },
6509  	{ "rxrpc_resend", 0x10 },
6510  	/* skb */
6511  	{ "kfree_skb", 0x1000 },
6512  	/* sunrpc */
6513  	{ "xs_stream_read_data", 0x1 },
6514  	/* ... from xprt_cong_event event class */
6515  	{ "xprt_reserve_cong", 0x10 },
6516  	{ "xprt_release_cong", 0x10 },
6517  	{ "xprt_get_cong", 0x10 },
6518  	{ "xprt_put_cong", 0x10 },
6519  	/* tcp */
6520  	{ "tcp_send_reset", 0x11 },
6521  	/* tegra_apb_dma */
6522  	{ "tegra_dma_tx_status", 0x100 },
6523  	/* timer_migration */
6524  	{ "tmigr_update_events", 0x1 },
6525  	/* writeback, from writeback_folio_template event class */
6526  	{ "writeback_dirty_folio", 0x10 },
6527  	{ "folio_wait_writeback", 0x10 },
6528  	/* rdma */
6529  	{ "mr_integ_alloc", 0x2000 },
6530  	/* bpf_testmod */
6531  	{ "bpf_testmod_test_read", 0x0 },
6532  };
6533  
6534  bool btf_ctx_access(int off, int size, enum bpf_access_type type,
6535  		    const struct bpf_prog *prog,
6536  		    struct bpf_insn_access_aux *info)
6537  {
6538  	const struct btf_type *t = prog->aux->attach_func_proto;
6539  	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6540  	struct btf *btf = bpf_prog_get_target_btf(prog);
6541  	const char *tname = prog->aux->attach_func_name;
6542  	struct bpf_verifier_log *log = info->log;
6543  	const struct btf_param *args;
6544  	bool ptr_err_raw_tp = false;
6545  	const char *tag_value;
6546  	u32 nr_args, arg;
6547  	int i, ret;
6548  
6549  	if (off % 8) {
6550  		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
6551  			tname, off);
6552  		return false;
6553  	}
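	/* Tracing-style programs see the context as an array of u64
	 * argument slots, so the accessed argument index is derived from
	 * the byte offset (off 0 -> arg 0, off 8 -> arg 1, ...).
	 */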
6554  	arg = get_ctx_arg_idx(btf, t, off);
6555  	args = (const struct btf_param *)(t + 1);
6556  	/* If t is NULL, fall back to a default BPF prog with
6557  	 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
6558  	 */
6559  	nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
6560  	if (prog->aux->attach_btf_trace) {
6561  		/* skip the first 'void *__data' argument in the btf_trace_##name typedef */
6562  		args++;
6563  		nr_args--;
6564  	}
6565  
6566  	if (arg > nr_args) {
6567  		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6568  			tname, arg + 1);
6569  		return false;
6570  	}
6571  
6572  	if (arg == nr_args) {
6573  		switch (prog->expected_attach_type) {
6574  		case BPF_LSM_MAC:
6575  			/* mark we are accessing the return value */
6576  			info->is_retval = true;
6577  			fallthrough;
6578  		case BPF_LSM_CGROUP:
6579  		case BPF_TRACE_FEXIT:
6580  			/* When LSM programs are attached to void LSM hooks
6581  			 * they use FEXIT trampolines and when attached to
6582  			 * int LSM hooks, they use MODIFY_RETURN trampolines.
6583  			 *
6584  			 * While the LSM programs are BPF_MODIFY_RETURN-like
6585  			 * the check:
6586  			 *
6587  			 *	if (ret_type != 'int')
6588  			 *		return -EINVAL;
6589  			 *
6590  			 * is _not_ done here. This is still safe as LSM hooks
6591  			 * have only void and int return types.
6592  			 */
6593  			if (!t)
6594  				return true;
6595  			t = btf_type_by_id(btf, t->type);
6596  			break;
6597  		case BPF_MODIFY_RETURN:
6598  			/* For now the BPF_MODIFY_RETURN can only be attached to
6599  			 * functions that return an int.
6600  			 */
6601  			if (!t)
6602  				return false;
6603  
6604  			t = btf_type_skip_modifiers(btf, t->type, NULL);
6605  			if (!btf_type_is_small_int(t)) {
6606  				bpf_log(log,
6607  					"ret type %s not allowed for fmod_ret\n",
6608  					btf_type_str(t));
6609  				return false;
6610  			}
6611  			break;
6612  		default:
6613  			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6614  				tname, arg + 1);
6615  			return false;
6616  		}
6617  	} else {
6618  		if (!t)
6619  			/* Default prog with MAX_BPF_FUNC_REG_ARGS args */
6620  			return true;
6621  		t = btf_type_by_id(btf, args[arg].type);
6622  	}
6623  
6624  	/* skip modifiers */
6625  	while (btf_type_is_modifier(t))
6626  		t = btf_type_by_id(btf, t->type);
6627  	if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
6628  		/* accessing a scalar */
6629  		return true;
6630  	if (!btf_type_is_ptr(t)) {
6631  		bpf_log(log,
6632  			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
6633  			tname, arg,
6634  			__btf_name_by_offset(btf, t->name_off),
6635  			btf_type_str(t));
6636  		return false;
6637  	}
6638  
6639  	if (size != sizeof(u64)) {
6640  		bpf_log(log, "func '%s' size %d must be 8\n",
6641  			tname, size);
6642  		return false;
6643  	}
6644  
6645  	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
6646  	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6647  		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6648  		u32 type, flag;
6649  
6650  		type = base_type(ctx_arg_info->reg_type);
6651  		flag = type_flag(ctx_arg_info->reg_type);
6652  		if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6653  		    (flag & PTR_MAYBE_NULL)) {
6654  			info->reg_type = ctx_arg_info->reg_type;
6655  			return true;
6656  		}
6657  	}
6658  
6659  	if (t->type == 0)
6660  		/* This is a pointer to void.
6661  		 * It is the same as scalar from the verifier safety pov.
6662  		 * No further pointer walking is allowed.
6663  		 */
6664  		return true;
6665  
6666  	if (is_int_ptr(btf, t))
6667  		return true;
6668  
6669  	/* this is a pointer to another type */
6670  	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6671  		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6672  
6673  		if (ctx_arg_info->offset == off) {
6674  			if (!ctx_arg_info->btf_id) {
6675  				bpf_log(log, "invalid btf_id for context argument offset %u\n", off);
6676  				return false;
6677  			}
6678  
6679  			info->reg_type = ctx_arg_info->reg_type;
6680  			info->btf = ctx_arg_info->btf ? : btf_vmlinux;
6681  			info->btf_id = ctx_arg_info->btf_id;
6682  			return true;
6683  		}
6684  	}
6685  
6686  	info->reg_type = PTR_TO_BTF_ID;
6687  	if (prog_args_trusted(prog))
6688  		info->reg_type |= PTR_TRUSTED;
6689  
6690  	if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
6691  		info->reg_type |= PTR_MAYBE_NULL;
6692  
6693  	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6694  		struct btf *btf = prog->aux->attach_btf;
6695  		const struct btf_type *t;
6696  		const char *tname;
6697  
6698  		/* BTF lookups should not fail; return false if one does */
6699  		t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6700  		if (!t)
6701  			return false;
6702  		tname = btf_name_by_offset(btf, t->name_off);
6703  		if (!tname)
6704  			return false;
6705  		/* Checked by bpf_check_attach_target */
6706  		tname += sizeof("btf_trace_") - 1;
6707  		for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) {
6708  			/* Is this a func with potential NULL args? */
6709  			if (strcmp(tname, raw_tp_null_args[i].func))
6710  				continue;
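			/* May the current arg be NULL? */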
6711  			if (raw_tp_null_args[i].mask & (0x1 << (arg * 4)))
6712  				info->reg_type |= PTR_MAYBE_NULL;
6713  			/* Is the current arg IS_ERR? */
6714  			if (raw_tp_null_args[i].mask & (0x2 << (arg * 4)))
6715  				ptr_err_raw_tp = true;
6716  			break;
6717  		}
6718  		/* If we don't know the NULL-ness specification and the
6719  		 * tracepoint is coming from a loadable module, be conservative
6720  		 * and mark the argument as PTR_MAYBE_NULL.
6721  		 */
6722  		if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf))
6723  			info->reg_type |= PTR_MAYBE_NULL;
6724  	}
6725  
6726  	if (tgt_prog) {
6727  		enum bpf_prog_type tgt_type;
6728  
6729  		if (tgt_prog->type == BPF_PROG_TYPE_EXT)
6730  			tgt_type = tgt_prog->aux->saved_dst_prog_type;
6731  		else
6732  			tgt_type = tgt_prog->type;
6733  
6734  		ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
6735  		if (ret > 0) {
6736  			info->btf = btf_vmlinux;
6737  			info->btf_id = ret;
6738  			return true;
6739  		} else {
6740  			return false;
6741  		}
6742  	}
6743  
6744  	info->btf = btf;
6745  	info->btf_id = t->type;
6746  	t = btf_type_by_id(btf, t->type);
6747  
6748  	if (btf_type_is_type_tag(t)) {
6749  		tag_value = __btf_name_by_offset(btf, t->name_off);
6750  		if (strcmp(tag_value, "user") == 0)
6751  			info->reg_type |= MEM_USER;
6752  		if (strcmp(tag_value, "percpu") == 0)
6753  			info->reg_type |= MEM_PERCPU;
6754  	}
6755  
6756  	/* skip modifiers */
6757  	while (btf_type_is_modifier(t)) {
6758  		info->btf_id = t->type;
6759  		t = btf_type_by_id(btf, t->type);
6760  	}
6761  	if (!btf_type_is_struct(t)) {
6762  		bpf_log(log,
6763  			"func '%s' arg%d type %s is not a struct\n",
6764  			tname, arg, btf_type_str(t));
6765  		return false;
6766  	}
6767  	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
6768  		tname, arg, info->btf_id, btf_type_str(t),
6769  		__btf_name_by_offset(btf, t->name_off));
6770  
6771  	/* Perform all checks on the validity of the type for this argument,
6772  	 * but if we know it can be IS_ERR at runtime, scrub the pointer type
6773  	 * and mark it as a scalar.
6774  	 */
6775  	if (ptr_err_raw_tp) {
6776  		bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg);
6777  		info->reg_type = SCALAR_VALUE;
6778  	}
6779  	return true;
6780  }
6781  EXPORT_SYMBOL_GPL(btf_ctx_access);
6782  
6783  enum bpf_struct_walk_result {
6784  	/* < 0 error */
6785  	WALK_SCALAR = 0,
6786  	WALK_PTR,
6787  	WALK_STRUCT,
6788  };
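/* Illustrative sketch (not from the source): when walking, e.g.,
 * struct sk_buff, a load can land on
 *
 *	skb->len	-> WALK_SCALAR (scalar member)
 *	skb->dev	-> WALK_PTR (pointer member; *next_btf_id is set
 *			   to the pointee's type, here struct net_device)
 *	skb->users	-> WALK_STRUCT (nested struct, here refcount_t,
 *			   which has to be walked again)
 */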
6789  
6790  static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
6791  			   const struct btf_type *t, int off, int size,
6792  			   u32 *next_btf_id, enum bpf_type_flag *flag,
6793  			   const char **field_name)
6794  {
6795  	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
6796  	const struct btf_type *mtype, *elem_type = NULL;
6797  	const struct btf_member *member;
6798  	const char *tname, *mname, *tag_value;
6799  	u32 vlen, elem_id, mid;
6800  
6801  again:
6802  	if (btf_type_is_modifier(t))
6803  		t = btf_type_skip_modifiers(btf, t->type, NULL);
6804  	tname = __btf_name_by_offset(btf, t->name_off);
6805  	if (!btf_type_is_struct(t)) {
6806  		bpf_log(log, "Type '%s' is not a struct\n", tname);
6807  		return -EINVAL;
6808  	}
6809  
6810  	vlen = btf_type_vlen(t);
6811  	if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
6812  		/*
6813  		 * walking unions yields untrusted pointers
6814  		 * with the exception of __bpf_md_ptr and other
6815  		 * unions with a single member
6816  		 */
6817  		*flag |= PTR_UNTRUSTED;
6818  
6819  	if (off + size > t->size) {
6820  		/* If the last element is a variable size array, we may
6821  		 * need to relax the rule.
6822  		 */
6823  		struct btf_array *array_elem;
6824  
6825  		if (vlen == 0)
6826  			goto error;
6827  
6828  		member = btf_type_member(t) + vlen - 1;
6829  		mtype = btf_type_skip_modifiers(btf, member->type,
6830  						NULL);
6831  		if (!btf_type_is_array(mtype))
6832  			goto error;
6833  
6834  		array_elem = (struct btf_array *)(mtype + 1);
6835  		if (array_elem->nelems != 0)
6836  			goto error;
6837  
6838  		moff = __btf_member_bit_offset(t, member) / 8;
6839  		if (off < moff)
6840  			goto error;
6841  
6842  		/* allow structure and integer */
6843  		t = btf_type_skip_modifiers(btf, array_elem->type,
6844  					    NULL);
6845  
6846  		if (btf_type_is_int(t))
6847  			return WALK_SCALAR;
6848  
6849  		if (!btf_type_is_struct(t))
6850  			goto error;
6851  
6852  		off = (off - moff) % t->size;
6853  		goto again;
6854  
6855  error:
6856  		bpf_log(log, "access beyond struct %s at off %u size %u\n",
6857  			tname, off, size);
6858  		return -EACCES;
6859  	}
6860  
6861  	for_each_member(i, t, member) {
6862  		/* offset of the field in bytes */
6863  		moff = __btf_member_bit_offset(t, member) / 8;
6864  		if (off + size <= moff)
6865  			/* won't find anything, field is already too far */
6866  			break;
6867  
6868  		if (__btf_member_bitfield_size(t, member)) {
6869  			u32 end_bit = __btf_member_bit_offset(t, member) +
6870  				__btf_member_bitfield_size(t, member);
6871  
6872  			/* off <= moff instead of off == moff because clang
6873  			 * does not generate a BTF member for anonymous
6874  			 * bitfield like the ":16" here:
6875  			 * struct {
6876  			 *	int :16;
6877  			 *	int x:8;
6878  			 * };
6879  			 */
6880  			if (off <= moff &&
6881  			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
6882  				return WALK_SCALAR;
6883  
6884  			/* off may be accessing a following member,
6885  			 *
6886  			 * or
6887  			 *
6888  			 * doing a partial access at either end of this
6889  			 * bitfield.  Continue in this case as well, to
6890  			 * treat it as not accessing this bitfield
6891  			 * and eventually error out as "field not
6892  			 * found", to keep it simple.
6893  			 * It could be relaxed if a legitimate partial
6894  			 * access case comes up later.
6895  			 */
6896  			continue;
6897  		}
6898  
6899  		/* In case "off" is pointing at a hole in the struct */
6900  		if (off < moff)
6901  			break;
6902  
6903  		/* type of the field */
6904  		mid = member->type;
6905  		mtype = btf_type_by_id(btf, member->type);
6906  		mname = __btf_name_by_offset(btf, member->name_off);
6907  
6908  		mtype = __btf_resolve_size(btf, mtype, &msize,
6909  					   &elem_type, &elem_id, &total_nelems,
6910  					   &mid);
6911  		if (IS_ERR(mtype)) {
6912  			bpf_log(log, "field %s doesn't have size\n", mname);
6913  			return -EFAULT;
6914  		}
6915  
6916  		mtrue_end = moff + msize;
6917  		if (off >= mtrue_end)
6918  			/* no overlap with member, keep iterating */
6919  			continue;
6920  
6921  		if (btf_type_is_array(mtype)) {
6922  			u32 elem_idx;
6923  
6924  			/* __btf_resolve_size() above helps to
6925  			 * linearize a multi-dimensional array.
6926  			 *
6927  			 * The logic here is treating an array
6928  			 * in a struct as the following way:
6929  			 *
6930  			 * struct outer {
6931  			 *	struct inner array[2][2];
6932  			 * };
6933  			 *
6934  			 * looks like:
6935  			 *
6936  			 * struct outer {
6937  			 *	struct inner array_elem0;
6938  			 *	struct inner array_elem1;
6939  			 *	struct inner array_elem2;
6940  			 *	struct inner array_elem3;
6941  			 * };
6942  			 *
6943  			 * When accessing outer->array[1][0], it moves
6944  			 * moff to "array_elem2", sets mtype to
6945  			 * "struct inner", and msize also becomes
6946  			 * sizeof(struct inner).  Then most of the
6947  			 * remaining logic will fall through without
6948  			 * caring whether the current member is an
6949  			 * array or not.
6950  			 *
6951  			 * Unlike mtype/msize/moff, mtrue_end does not
6952  			 * change.  The naming difference ("_true")
6953  			 * indicates that it does not always correspond
6954  			 * to the current mtype/msize/moff.
6955  			 * It is the true end of the current
6956  			 * member (i.e. the array in this case).  That
6957  			 * will allow an int array to be accessed like
6958  			 * a scratch space,
6959  			 * i.e. allow access beyond the size of
6960  			 *      the array's element as long as it is
6961  			 *      within the mtrue_end boundary.
6962  			 */
6963  
6964  			/* skip empty array */
6965  			if (moff == mtrue_end)
6966  				continue;
6967  
6968  			msize /= total_nelems;
6969  			elem_idx = (off - moff) / msize;
6970  			moff += elem_idx * msize;
6971  			mtype = elem_type;
6972  			mid = elem_id;
6973  		}
6974  
6975  		/* the 'off' we're looking for is either equal to start
6976  		 * of this field or inside of this struct
6977  		 */
6978  		if (btf_type_is_struct(mtype)) {
6979  			/* our field must be inside that union or struct */
6980  			t = mtype;
6981  
6982  			/* return if the offset matches the member offset */
6983  			if (off == moff) {
6984  				*next_btf_id = mid;
6985  				return WALK_STRUCT;
6986  			}
6987  
6988  			/* adjust offset we're looking for */
6989  			off -= moff;
6990  			goto again;
6991  		}
6992  
6993  		if (btf_type_is_ptr(mtype)) {
6994  			const struct btf_type *stype, *t;
6995  			enum bpf_type_flag tmp_flag = 0;
6996  			u32 id;
6997  
6998  			if (msize != size || off != moff) {
6999  				bpf_log(log,
7000  					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
7001  					mname, moff, tname, off, size);
7002  				return -EACCES;
7003  			}
7004  
7005  			/* check type tag */
7006  			t = btf_type_by_id(btf, mtype->type);
7007  			if (btf_type_is_type_tag(t)) {
7008  				tag_value = __btf_name_by_offset(btf, t->name_off);
7009  				/* check __user tag */
7010  				if (strcmp(tag_value, "user") == 0)
7011  					tmp_flag = MEM_USER;
7012  				/* check __percpu tag */
7013  				if (strcmp(tag_value, "percpu") == 0)
7014  					tmp_flag = MEM_PERCPU;
7015  				/* check __rcu tag */
7016  				if (strcmp(tag_value, "rcu") == 0)
7017  					tmp_flag = MEM_RCU;
7018  			}
7019  
7020  			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
7021  			if (btf_type_is_struct(stype)) {
7022  				*next_btf_id = id;
7023  				*flag |= tmp_flag;
7024  				if (field_name)
7025  					*field_name = mname;
7026  				return WALK_PTR;
7027  			}
7028  		}
7029  
7030  		/* Allow more flexible access within an int as long as
7031  		 * it is within mtrue_end.
7032  		 * Since mtrue_end could be the end of an array,
7033  		 * that also allows using an array of int as a scratch
7034  		 * space. e.g. skb->cb[].
7035  		 */
7036  		if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
7037  			bpf_log(log,
7038  				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
7039  				mname, mtrue_end, tname, off, size);
7040  			return -EACCES;
7041  		}
7042  
7043  		return WALK_SCALAR;
7044  	}
7045  	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
7046  	return -EINVAL;
7047  }
7048  
7049  int btf_struct_access(struct bpf_verifier_log *log,
7050  		      const struct bpf_reg_state *reg,
7051  		      int off, int size, enum bpf_access_type atype __maybe_unused,
7052  		      u32 *next_btf_id, enum bpf_type_flag *flag,
7053  		      const char **field_name)
7054  {
7055  	const struct btf *btf = reg->btf;
7056  	enum bpf_type_flag tmp_flag = 0;
7057  	const struct btf_type *t;
7058  	u32 id = reg->btf_id;
7059  	int err;
7060  
7061  	while (type_is_alloc(reg->type)) {
7062  		struct btf_struct_meta *meta;
7063  		struct btf_record *rec;
7064  		int i;
7065  
7066  		meta = btf_find_struct_meta(btf, id);
7067  		if (!meta)
7068  			break;
7069  		rec = meta->record;
7070  		for (i = 0; i < rec->cnt; i++) {
7071  			struct btf_field *field = &rec->fields[i];
7072  			u32 offset = field->offset;
7073  			if (off < offset + field->size && offset < off + size) {
7074  				bpf_log(log,
7075  					"direct access to %s is disallowed\n",
7076  					btf_field_type_name(field->type));
7077  				return -EACCES;
7078  			}
7079  		}
7080  		break;
7081  	}
7082  
7083  	t = btf_type_by_id(btf, id);
7084  	do {
7085  		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
7086  
7087  		switch (err) {
7088  		case WALK_PTR:
7089  			/* For local types, the destination register cannot
7090  			 * become a pointer again.
7091  			 */
7092  			if (type_is_alloc(reg->type))
7093  				return SCALAR_VALUE;
7094  			/* If we found the pointer or scalar on t+off,
7095  			 * we're done.
7096  			 */
7097  			*next_btf_id = id;
7098  			*flag = tmp_flag;
7099  			return PTR_TO_BTF_ID;
7100  		case WALK_SCALAR:
7101  			return SCALAR_VALUE;
7102  		case WALK_STRUCT:
7103  			/* We found a nested struct, so continue the search
7104  			 * by diving into it. At this point the offset is
7105  			 * aligned with the new type, so set it to 0.
7106  			 */
7107  			t = btf_type_by_id(btf, id);
7108  			off = 0;
7109  			break;
7110  		default:
7111  			/* It's either an error or an unknown return
7112  			 * value; scream and leave.
7113  			 */
7114  			if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
7115  				return -EINVAL;
7116  			return err;
7117  		}
7118  	} while (t);
7119  
7120  	return -EINVAL;
7121  }
7122  
7123  /* Check that two BTF types, each specified as a BTF object + id, are exactly
7124   * the same. A trivial ID check is not enough due to module BTFs, because we
7125   * can end up with two different module BTFs whose IDs point to the common
7126   * type in vmlinux BTF.
7127   */
7128  bool btf_types_are_same(const struct btf *btf1, u32 id1,
7129  			const struct btf *btf2, u32 id2)
7130  {
7131  	if (id1 != id2)
7132  		return false;
7133  	if (btf1 == btf2)
7134  		return true;
7135  	return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
7136  }
7137  
7138  bool btf_struct_ids_match(struct bpf_verifier_log *log,
7139  			  const struct btf *btf, u32 id, int off,
7140  			  const struct btf *need_btf, u32 need_type_id,
7141  			  bool strict)
7142  {
7143  	const struct btf_type *type;
7144  	enum bpf_type_flag flag = 0;
7145  	int err;
7146  
7147  	/* Are we already done? */
7148  	if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
7149  		return true;
7150  	/* In case of a strict type match, we do not walk the struct; the top
7151  	 * level type match must succeed. When strict is true, off should
7152  	 * already have been 0.
7153  	 */
7154  	if (strict)
7155  		return false;
7156  again:
7157  	type = btf_type_by_id(btf, id);
7158  	if (!type)
7159  		return false;
7160  	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
7161  	if (err != WALK_STRUCT)
7162  		return false;
7163  
7164  	/* We found a nested struct object. If it matches
7165  	 * the requested ID, we're done. Otherwise let's
7166  	 * continue the search with offset 0 in the new
7167  	 * type.
7168  	 */
7169  	if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
7170  		off = 0;
7171  		goto again;
7172  	}
7173  
7174  	return true;
7175  }
7176  
7177  static int __get_type_size(struct btf *btf, u32 btf_id,
7178  			   const struct btf_type **ret_type)
7179  {
7180  	const struct btf_type *t;
7181  
7182  	*ret_type = btf_type_by_id(btf, 0);
7183  	if (!btf_id)
7184  		/* void */
7185  		return 0;
7186  	t = btf_type_by_id(btf, btf_id);
7187  	while (t && btf_type_is_modifier(t))
7188  		t = btf_type_by_id(btf, t->type);
7189  	if (!t)
7190  		return -EINVAL;
7191  	*ret_type = t;
7192  	if (btf_type_is_ptr(t))
7193  		/* kernel size of a pointer, not BPF's size of a pointer */
7194  		return sizeof(void *);
7195  	if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
7196  		return t->size;
7197  	return -EINVAL;
7198  }
7199  
7200  static u8 __get_type_fmodel_flags(const struct btf_type *t)
7201  {
7202  	u8 flags = 0;
7203  
7204  	if (__btf_type_is_struct(t))
7205  		flags |= BTF_FMODEL_STRUCT_ARG;
7206  	if (btf_type_is_signed_int(t))
7207  		flags |= BTF_FMODEL_SIGNED_ARG;
7208  
7209  	return flags;
7210  }
7211  
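/* Distill the function's BTF proto into a struct btf_func_model
 * (argument count, sizes and flags) for trampoline generation.
 * As an illustrative example, on a 64-bit kernel a function like
 *
 *	int kern_func(struct sk_buff *skb, u32 len);
 *
 * would yield nr_args = 2, arg_size = { 8, 4 } and ret_size = 4.
 */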
7212  int btf_distill_func_proto(struct bpf_verifier_log *log,
7213  			   struct btf *btf,
7214  			   const struct btf_type *func,
7215  			   const char *tname,
7216  			   struct btf_func_model *m)
7217  {
7218  	const struct btf_param *args;
7219  	const struct btf_type *t;
7220  	u32 i, nargs;
7221  	int ret;
7222  
7223  	if (!func) {
7224  		/* BTF function prototype doesn't match the verifier types.
7225  		 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
7226  		 */
7227  		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7228  			m->arg_size[i] = 8;
7229  			m->arg_flags[i] = 0;
7230  		}
7231  		m->ret_size = 8;
7232  		m->ret_flags = 0;
7233  		m->nr_args = MAX_BPF_FUNC_REG_ARGS;
7234  		return 0;
7235  	}
7236  	args = (const struct btf_param *)(func + 1);
7237  	nargs = btf_type_vlen(func);
7238  	if (nargs > MAX_BPF_FUNC_ARGS) {
7239  		bpf_log(log,
7240  			"The function %s has %d arguments. Too many.\n",
7241  			tname, nargs);
7242  		return -EINVAL;
7243  	}
7244  	ret = __get_type_size(btf, func->type, &t);
7245  	if (ret < 0 || __btf_type_is_struct(t)) {
7246  		bpf_log(log,
7247  			"The function %s return type %s is unsupported.\n",
7248  			tname, btf_type_str(t));
7249  		return -EINVAL;
7250  	}
7251  	m->ret_size = ret;
7252  	m->ret_flags = __get_type_fmodel_flags(t);
7253  
7254  	for (i = 0; i < nargs; i++) {
7255  		if (i == nargs - 1 && args[i].type == 0) {
7256  			bpf_log(log,
7257  				"The function %s with variable args is unsupported.\n",
7258  				tname);
7259  			return -EINVAL;
7260  		}
7261  		ret = __get_type_size(btf, args[i].type, &t);
7262  
7263  		/* No support for struct argument sizes greater than 16 bytes */
7264  		if (ret < 0 || ret > 16) {
7265  			bpf_log(log,
7266  				"The function %s arg%d type %s is unsupported.\n",
7267  				tname, i, btf_type_str(t));
7268  			return -EINVAL;
7269  		}
7270  		if (ret == 0) {
7271  			bpf_log(log,
7272  				"The function %s has malformed void argument.\n",
7273  				tname);
7274  			return -EINVAL;
7275  		}
7276  		m->arg_size[i] = ret;
7277  		m->arg_flags[i] = __get_type_fmodel_flags(t);
7278  	}
7279  	m->nr_args = nargs;
7280  	return 0;
7281  }
7282  
7283  /* Compare BTFs of two functions assuming only scalars and pointers to context.
7284   * t1 points to BTF_KIND_FUNC in btf1
7285   * t2 points to BTF_KIND_FUNC in btf2
7286   * Returns:
7287   * EINVAL - function prototype mismatch
7288   * EFAULT - verifier bug
7289   * 0 - 99% match. The last 1% is validated by the verifier.
7290   */
7291  static int btf_check_func_type_match(struct bpf_verifier_log *log,
7292  				     struct btf *btf1, const struct btf_type *t1,
7293  				     struct btf *btf2, const struct btf_type *t2)
7294  {
7295  	const struct btf_param *args1, *args2;
7296  	const char *fn1, *fn2, *s1, *s2;
7297  	u32 nargs1, nargs2, i;
7298  
7299  	fn1 = btf_name_by_offset(btf1, t1->name_off);
7300  	fn2 = btf_name_by_offset(btf2, t2->name_off);
7301  
7302  	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
7303  		bpf_log(log, "%s() is not a global function\n", fn1);
7304  		return -EINVAL;
7305  	}
7306  	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
7307  		bpf_log(log, "%s() is not a global function\n", fn2);
7308  		return -EINVAL;
7309  	}
7310  
7311  	t1 = btf_type_by_id(btf1, t1->type);
7312  	if (!t1 || !btf_type_is_func_proto(t1))
7313  		return -EFAULT;
7314  	t2 = btf_type_by_id(btf2, t2->type);
7315  	if (!t2 || !btf_type_is_func_proto(t2))
7316  		return -EFAULT;
7317  
7318  	args1 = (const struct btf_param *)(t1 + 1);
7319  	nargs1 = btf_type_vlen(t1);
7320  	args2 = (const struct btf_param *)(t2 + 1);
7321  	nargs2 = btf_type_vlen(t2);
7322  
7323  	if (nargs1 != nargs2) {
7324  		bpf_log(log, "%s() has %d args while %s() has %d args\n",
7325  			fn1, nargs1, fn2, nargs2);
7326  		return -EINVAL;
7327  	}
7328  
7329  	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7330  	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7331  	if (t1->info != t2->info) {
7332  		bpf_log(log,
7333  			"Return type %s of %s() doesn't match type %s of %s()\n",
7334  			btf_type_str(t1), fn1,
7335  			btf_type_str(t2), fn2);
7336  		return -EINVAL;
7337  	}
7338  
7339  	for (i = 0; i < nargs1; i++) {
7340  		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
7341  		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
7342  
7343  		if (t1->info != t2->info) {
7344  			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
7345  				i, fn1, btf_type_str(t1),
7346  				fn2, btf_type_str(t2));
7347  			return -EINVAL;
7348  		}
7349  		if (btf_type_has_size(t1) && t1->size != t2->size) {
7350  			bpf_log(log,
7351  				"arg%d in %s() has size %d while %s() has %d\n",
7352  				i, fn1, t1->size,
7353  				fn2, t2->size);
7354  			return -EINVAL;
7355  		}
7356  
7357  		/* global functions are validated with scalars and pointers
7358  		 * to context only. And only global functions can be replaced.
7359  		 * Hence type check only those types.
7360  		 */
7361  		if (btf_type_is_int(t1) || btf_is_any_enum(t1))
7362  			continue;
7363  		if (!btf_type_is_ptr(t1)) {
7364  			bpf_log(log,
7365  				"arg%d in %s() has unrecognized type\n",
7366  				i, fn1);
7367  			return -EINVAL;
7368  		}
7369  		t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7370  		t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7371  		if (!btf_type_is_struct(t1)) {
7372  			bpf_log(log,
7373  				"arg%d in %s() is not a pointer to context\n",
7374  				i, fn1);
7375  			return -EINVAL;
7376  		}
7377  		if (!btf_type_is_struct(t2)) {
7378  			bpf_log(log,
7379  				"arg%d in %s() is not a pointer to context\n",
7380  				i, fn2);
7381  			return -EINVAL;
7382  		}
7383  		/* This is an optional check to make program writing easier.
7384  		 * Compare names of structs and report an error to the user.
7385  		 * btf_prepare_func_args() already checked that t2 struct
7386  		 * is a context type. btf_prepare_func_args() will check
7387  		 * later that t1 struct is a context type as well.
7388  		 */
7389  		s1 = btf_name_by_offset(btf1, t1->name_off);
7390  		s2 = btf_name_by_offset(btf2, t2->name_off);
7391  		if (strcmp(s1, s2)) {
7392  			bpf_log(log,
7393  				"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
7394  				i, fn1, s1, fn2, s2);
7395  			return -EINVAL;
7396  		}
7397  	}
7398  	return 0;
7399  }
7400  
7401  /* Compare BTF of the given program with BTF of the target program */
7402  int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
7403  			 struct btf *btf2, const struct btf_type *t2)
7404  {
7405  	struct btf *btf1 = prog->aux->btf;
7406  	const struct btf_type *t1;
7407  	u32 btf_id = 0;
7408  
7409  	if (!prog->aux->func_info) {
7410  		bpf_log(log, "Program extension requires BTF\n");
7411  		return -EINVAL;
7412  	}
7413  
7414  	btf_id = prog->aux->func_info[0].type_id;
7415  	if (!btf_id)
7416  		return -EFAULT;
7417  
7418  	t1 = btf_type_by_id(btf1, btf_id);
7419  	if (!t1 || !btf_type_is_func(t1))
7420  		return -EFAULT;
7421  
7422  	return btf_check_func_type_match(log, btf1, t1, btf2, t2);
7423  }
7424  
7425  static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
7426  {
7427  	const char *name;
7428  
7429  	t = btf_type_by_id(btf, t->type); /* skip PTR */
7430  
7431  	while (btf_type_is_modifier(t))
7432  		t = btf_type_by_id(btf, t->type);
7433  
7434  	/* allow either struct or struct forward declaration */
7435  	if (btf_type_is_struct(t) ||
7436  	    (btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) {
7437  		name = btf_str_by_offset(btf, t->name_off);
7438  		return name && strcmp(name, "bpf_dynptr") == 0;
7439  	}
7440  
7441  	return false;
7442  }
7443  
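/* Cache of candidate (btf, id) pairs for a given type name + kind,
 * filled by bpf_core_find_cands() below when resolving CO-RE
 * relocations against vmlinux and module BTFs.
 */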
7444  struct bpf_cand_cache {
7445  	const char *name;
7446  	u32 name_len;
7447  	u16 kind;
7448  	u16 cnt;
7449  	struct {
7450  		const struct btf *btf;
7451  		u32 id;
7452  	} cands[];
7453  };
7454  
7455  static DEFINE_MUTEX(cand_cache_mutex);
7456  
7457  static struct bpf_cand_cache *
7458  bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);
7459  
7460  static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
7461  				 const struct btf *btf, const struct btf_type *t)
7462  {
7463  	struct bpf_cand_cache *cc;
7464  	struct bpf_core_ctx ctx = {
7465  		.btf = btf,
7466  		.log = log,
7467  	};
7468  	u32 kern_type_id, type_id;
7469  	int err = 0;
7470  
7471  	/* skip PTR and modifiers */
7472  	type_id = t->type;
7473  	t = btf_type_by_id(btf, t->type);
7474  	while (btf_type_is_modifier(t)) {
7475  		type_id = t->type;
7476  		t = btf_type_by_id(btf, t->type);
7477  	}
7478  
7479  	mutex_lock(&cand_cache_mutex);
7480  	cc = bpf_core_find_cands(&ctx, type_id);
7481  	if (IS_ERR(cc)) {
7482  		err = PTR_ERR(cc);
7483  		bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
7484  			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7485  			err);
7486  		goto cand_cache_unlock;
7487  	}
7488  	if (cc->cnt != 1) {
7489  		bpf_log(log, "arg#%d reference type('%s %s') %s\n",
7490  			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7491  			cc->cnt == 0 ? "has no matches" : "is ambiguous");
7492  		err = cc->cnt == 0 ? -ENOENT : -ESRCH;
7493  		goto cand_cache_unlock;
7494  	}
7495  	if (btf_is_module(cc->cands[0].btf)) {
7496  		bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
7497  			arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7498  		err = -EOPNOTSUPP;
7499  		goto cand_cache_unlock;
7500  	}
7501  	kern_type_id = cc->cands[0].id;
7502  
7503  cand_cache_unlock:
7504  	mutex_unlock(&cand_cache_mutex);
7505  	if (err)
7506  		return err;
7507  
7508  	return kern_type_id;
7509  }
7510  
7511  enum btf_arg_tag {
7512  	ARG_TAG_CTX	 = BIT_ULL(0),
7513  	ARG_TAG_NONNULL  = BIT_ULL(1),
7514  	ARG_TAG_TRUSTED  = BIT_ULL(2),
7515  	ARG_TAG_NULLABLE = BIT_ULL(3),
7516  	ARG_TAG_ARENA	 = BIT_ULL(4),
7517  };
7518  
7519  /* Process BTF of a function to produce high-level expectation of function
7520   * arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
7521   * is cached in subprog info for reuse.
7522   * Returns:
7523   * EFAULT - there is a verifier bug. Abort verification.
7524   * EINVAL - cannot convert BTF.
7525   * 0 - Successfully processed BTF and constructed argument expectations.
7526   */
7527  int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
7528  {
7529  	bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
7530  	struct bpf_subprog_info *sub = subprog_info(env, subprog);
7531  	struct bpf_verifier_log *log = &env->log;
7532  	struct bpf_prog *prog = env->prog;
7533  	enum bpf_prog_type prog_type = prog->type;
7534  	struct btf *btf = prog->aux->btf;
7535  	const struct btf_param *args;
7536  	const struct btf_type *t, *ref_t, *fn_t;
7537  	u32 i, nargs, btf_id;
7538  	const char *tname;
7539  
7540  	if (sub->args_cached)
7541  		return 0;
7542  
7543  	if (!prog->aux->func_info) {
7544  		bpf_log(log, "Verifier bug\n");
7545  		return -EFAULT;
7546  	}
7547  
7548  	btf_id = prog->aux->func_info[subprog].type_id;
7549  	if (!btf_id) {
7550  		if (!is_global) /* not fatal for static funcs */
7551  			return -EINVAL;
7552  		bpf_log(log, "Global functions need valid BTF\n");
7553  		return -EFAULT;
7554  	}
7555  
7556  	fn_t = btf_type_by_id(btf, btf_id);
7557  	if (!fn_t || !btf_type_is_func(fn_t)) {
7558  		/* These checks were already done by the verifier while loading
7559  		 * struct bpf_func_info
7560  		 */
7561  		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
7562  			subprog);
7563  		return -EFAULT;
7564  	}
7565  	tname = btf_name_by_offset(btf, fn_t->name_off);
7566  
7567  	if (prog->aux->func_info_aux[subprog].unreliable) {
7568  		bpf_log(log, "Verifier bug in function %s()\n", tname);
7569  		return -EFAULT;
7570  	}
7571  	if (prog_type == BPF_PROG_TYPE_EXT)
7572  		prog_type = prog->aux->dst_prog->type;
7573  
7574  	t = btf_type_by_id(btf, fn_t->type);
7575  	if (!t || !btf_type_is_func_proto(t)) {
7576  		bpf_log(log, "Invalid type of function %s()\n", tname);
7577  		return -EFAULT;
7578  	}
7579  	args = (const struct btf_param *)(t + 1);
7580  	nargs = btf_type_vlen(t);
7581  	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
7582  		if (!is_global)
7583  			return -EINVAL;
7584  		bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
7585  			tname, nargs, MAX_BPF_FUNC_REG_ARGS);
7586  		return -EINVAL;
7587  	}
7588  	/* check that the function returns int; the exception cb requires this too */
7589  	t = btf_type_by_id(btf, t->type);
7590  	while (btf_type_is_modifier(t))
7591  		t = btf_type_by_id(btf, t->type);
7592  	if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
7593  		if (!is_global)
7594  			return -EINVAL;
7595  		bpf_log(log,
7596  			"Global function %s() doesn't return scalar. Only those are supported.\n",
7597  			tname);
7598  		return -EINVAL;
7599  	}
7600  	/* Convert BTF function arguments into verifier types.
7601  	 * Only PTR_TO_CTX and SCALAR are supported atm.
7602  	 */
7603  	for (i = 0; i < nargs; i++) {
7604  		u32 tags = 0;
7605  		int id = 0;
7606  
7607  		/* 'arg:<tag>' decl_tag takes precedence over derivation of
7608  		 * register type from BTF type itself
7609  		 */
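		/* Illustrative note: BPF programs typically emit these via
		 * libbpf's __arg_ctx/__arg_nonnull/... macros, which expand
		 * to __attribute__((btf_decl_tag("arg:ctx"))) and friends;
		 * the "arg:" prefix is what the +4 below skips.
		 */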
7610  		while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
7611  			const struct btf_type *tag_t = btf_type_by_id(btf, id);
7612  			const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7613  
7614  			/* disallow arg tags in static subprogs */
7615  			if (!is_global) {
7616  				bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
7617  				return -EOPNOTSUPP;
7618  			}
7619  
7620  			if (strcmp(tag, "ctx") == 0) {
7621  				tags |= ARG_TAG_CTX;
7622  			} else if (strcmp(tag, "trusted") == 0) {
7623  				tags |= ARG_TAG_TRUSTED;
7624  			} else if (strcmp(tag, "nonnull") == 0) {
7625  				tags |= ARG_TAG_NONNULL;
7626  			} else if (strcmp(tag, "nullable") == 0) {
7627  				tags |= ARG_TAG_NULLABLE;
7628  			} else if (strcmp(tag, "arena") == 0) {
7629  				tags |= ARG_TAG_ARENA;
7630  			} else {
7631  				bpf_log(log, "arg#%d has unsupported set of tags\n", i);
7632  				return -EOPNOTSUPP;
7633  			}
7634  		}
7635  		if (id != -ENOENT) {
7636  			bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
7637  			return id;
7638  		}
7639  
7640  		t = btf_type_by_id(btf, args[i].type);
7641  		while (btf_type_is_modifier(t))
7642  			t = btf_type_by_id(btf, t->type);
7643  		if (!btf_type_is_ptr(t))
7644  			goto skip_pointer;
7645  
7646  		if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
7647  			if (tags & ~ARG_TAG_CTX) {
7648  				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7649  				return -EINVAL;
7650  			}
7651  			if ((tags & ARG_TAG_CTX) &&
7652  			    btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
7653  						       prog->expected_attach_type))
7654  				return -EINVAL;
7655  			sub->args[i].arg_type = ARG_PTR_TO_CTX;
7656  			continue;
7657  		}
7658  		if (btf_is_dynptr_ptr(btf, t)) {
7659  			if (tags) {
7660  				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7661  				return -EINVAL;
7662  			}
7663  			sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
7664  			continue;
7665  		}
7666  		if (tags & ARG_TAG_TRUSTED) {
7667  			int kern_type_id;
7668  
7669  			if (tags & ARG_TAG_NONNULL) {
7670  				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7671  				return -EINVAL;
7672  			}
7673  
7674  			kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
7675  			if (kern_type_id < 0)
7676  				return kern_type_id;
7677  
7678  			sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
7679  			if (tags & ARG_TAG_NULLABLE)
7680  				sub->args[i].arg_type |= PTR_MAYBE_NULL;
7681  			sub->args[i].btf_id = kern_type_id;
7682  			continue;
7683  		}
7684  		if (tags & ARG_TAG_ARENA) {
7685  			if (tags & ~ARG_TAG_ARENA) {
7686  				bpf_log(log, "arg#%d arena cannot be combined with any other tags\n", i);
7687  				return -EINVAL;
7688  			}
7689  			sub->args[i].arg_type = ARG_PTR_TO_ARENA;
7690  			continue;
7691  		}
7692  		if (is_global) { /* generic user data pointer */
7693  			u32 mem_size;
7694  
7695  			if (tags & ARG_TAG_NULLABLE) {
7696  				bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7697  				return -EINVAL;
7698  			}
7699  
7700  			t = btf_type_skip_modifiers(btf, t->type, NULL);
7701  			ref_t = btf_resolve_size(btf, t, &mem_size);
7702  			if (IS_ERR(ref_t)) {
7703  				bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
7704  					i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7705  					PTR_ERR(ref_t));
7706  				return -EINVAL;
7707  			}
7708  
7709  			sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
7710  			if (tags & ARG_TAG_NONNULL)
7711  				sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
7712  			sub->args[i].mem_size = mem_size;
7713  			continue;
7714  		}
7715  
7716  skip_pointer:
7717  		if (tags) {
7718  			bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
7719  			return -EINVAL;
7720  		}
7721  		if (btf_type_is_int(t) || btf_is_any_enum(t)) {
7722  			sub->args[i].arg_type = ARG_ANYTHING;
7723  			continue;
7724  		}
7725  		if (!is_global)
7726  			return -EINVAL;
7727  		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
7728  			i, btf_type_str(t), tname);
7729  		return -EINVAL;
7730  	}
7731  
7732  	sub->arg_cnt = nargs;
7733  	sub->args_cached = true;
7734  
7735  	return 0;
7736  }
7737  
7738  static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
7739  			  struct btf_show *show)
7740  {
7741  	const struct btf_type *t = btf_type_by_id(btf, type_id);
7742  
7743  	show->btf = btf;
7744  	memset(&show->state, 0, sizeof(show->state));
7745  	memset(&show->obj, 0, sizeof(show->obj));
7746  
7747  	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
7748  }
7749  
7750  __printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
7751  					va_list args)
7752  {
7753  	seq_vprintf((struct seq_file *)show->target, fmt, args);
7754  }
7755  
7756  int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
7757  			    void *obj, struct seq_file *m, u64 flags)
7758  {
7759  	struct btf_show sseq;
7760  
7761  	sseq.target = m;
7762  	sseq.showfn = btf_seq_show;
7763  	sseq.flags = flags;
7764  
7765  	btf_type_show(btf, type_id, obj, &sseq);
7766  
7767  	return sseq.state.status;
7768  }
7769  
7770  void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
7771  		       struct seq_file *m)
7772  {
7773  	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
7774  				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
7775  				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
7776  }
7777  
7778  struct btf_show_snprintf {
7779  	struct btf_show show;
7780  	int len_left;		/* space left in string */
7781  	int len;		/* length we would have written */
7782  };
7783  
7784  __printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
7785  					     va_list args)
7786  {
7787  	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
7788  	int len;
7789  
7790  	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
7791  
7792  	if (len < 0) {
7793  		ssnprintf->len_left = 0;
7794  		ssnprintf->len = len;
7795  	} else if (len >= ssnprintf->len_left) {
7796  		/* no space, drive on to get length we would have written */
7797  		ssnprintf->len_left = 0;
7798  		ssnprintf->len += len;
7799  	} else {
7800  		ssnprintf->len_left -= len;
7801  		ssnprintf->len += len;
7802  		show->target += len;
7803  	}
7804  }
7805  
7806  int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
7807  			   char *buf, int len, u64 flags)
7808  {
7809  	struct btf_show_snprintf ssnprintf;
7810  
7811  	ssnprintf.show.target = buf;
7812  	ssnprintf.show.flags = flags;
7813  	ssnprintf.show.showfn = btf_snprintf_show;
7814  	ssnprintf.len_left = len;
7815  	ssnprintf.len = 0;
7816  
7817  	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
7818  
7819  	/* If we encountered an error, return it. */
7820  	if (ssnprintf.show.state.status)
7821  		return ssnprintf.show.state.status;
7822  
7823  	/* Otherwise return the length we would have written */
7824  	return ssnprintf.len;
7825  }
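/* Usage sketch (assuming a valid btf/type_id pair): like snprintf(),
 * a non-negative return is the length that would have been written,
 * so truncation can be detected by comparing it to the buffer size:
 *
 *	ret = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf), 0);
 *	if (ret >= sizeof(buf))
 *		... output was truncated ...
 *
 * A negative return is an error from the show machinery.
 */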
7826  
7827  #ifdef CONFIG_PROC_FS
7828  static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
7829  {
7830  	const struct btf *btf = filp->private_data;
7831  
7832  	seq_printf(m, "btf_id:\t%u\n", btf->id);
7833  }
7834  #endif
7835  
7836  static int btf_release(struct inode *inode, struct file *filp)
7837  {
7838  	btf_put(filp->private_data);
7839  	return 0;
7840  }
7841  
7842  const struct file_operations btf_fops = {
7843  #ifdef CONFIG_PROC_FS
7844  	.show_fdinfo	= bpf_btf_show_fdinfo,
7845  #endif
7846  	.release	= btf_release,
7847  };
7848  
7849  static int __btf_new_fd(struct btf *btf)
7850  {
7851  	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
7852  }
7853  
7854  int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
7855  {
7856  	struct btf *btf;
7857  	int ret;
7858  
7859  	btf = btf_parse(attr, uattr, uattr_size);
7860  	if (IS_ERR(btf))
7861  		return PTR_ERR(btf);
7862  
7863  	ret = btf_alloc_id(btf);
7864  	if (ret) {
7865  		btf_free(btf);
7866  		return ret;
7867  	}
7868  
7869  	/*
7870  	 * The BTF ID is published to userspace.
7871  	 * From now on, all BTF frees must go through
7872  	 * call_rcu() (i.e. free by calling btf_put()).
7873  	 */
7874  
7875  	ret = __btf_new_fd(btf);
7876  	if (ret < 0)
7877  		btf_put(btf);
7878  
7879  	return ret;
7880  }
7881  
7882  struct btf *btf_get_by_fd(int fd)
7883  {
7884  	struct btf *btf;
7885  	CLASS(fd, f)(fd);
7886  
7887  	btf = __btf_get_by_fd(f);
7888  	if (!IS_ERR(btf))
7889  		refcount_inc(&btf->refcnt);
7890  
7891  	return btf;
7892  }
7893  
7894  int btf_get_info_by_fd(const struct btf *btf,
7895  		       const union bpf_attr *attr,
7896  		       union bpf_attr __user *uattr)
7897  {
7898  	struct bpf_btf_info __user *uinfo;
7899  	struct bpf_btf_info info;
7900  	u32 info_copy, btf_copy;
7901  	void __user *ubtf;
7902  	char __user *uname;
7903  	u32 uinfo_len, uname_len, name_len;
7904  	int ret = 0;
7905  
7906  	uinfo = u64_to_user_ptr(attr->info.info);
7907  	uinfo_len = attr->info.info_len;
7908  
7909  	info_copy = min_t(u32, uinfo_len, sizeof(info));
7910  	memset(&info, 0, sizeof(info));
7911  	if (copy_from_user(&info, uinfo, info_copy))
7912  		return -EFAULT;
7913  
7914  	info.id = btf->id;
7915  	ubtf = u64_to_user_ptr(info.btf);
7916  	btf_copy = min_t(u32, btf->data_size, info.btf_size);
7917  	if (copy_to_user(ubtf, btf->data, btf_copy))
7918  		return -EFAULT;
7919  	info.btf_size = btf->data_size;
7920  
7921  	info.kernel_btf = btf->kernel_btf;
7922  
7923  	uname = u64_to_user_ptr(info.name);
7924  	uname_len = info.name_len;
7925  	if (!uname ^ !uname_len)
7926  		return -EINVAL;
7927  
7928  	name_len = strlen(btf->name);
7929  	info.name_len = name_len;
7930  
7931  	if (uname) {
7932  		if (uname_len >= name_len + 1) {
7933  			if (copy_to_user(uname, btf->name, name_len + 1))
7934  				return -EFAULT;
7935  		} else {
7936  			char zero = '\0';
7937  
7938  			if (copy_to_user(uname, btf->name, uname_len - 1))
7939  				return -EFAULT;
7940  			if (put_user(zero, uname + uname_len - 1))
7941  				return -EFAULT;
7942  			/* let user-space know the buffer was too short */
7943  			ret = -ENOSPC;
7944  		}
7945  	}
7946  
7947  	if (copy_to_user(uinfo, &info, info_copy) ||
7948  	    put_user(info_copy, &uattr->info.info_len))
7949  		return -EFAULT;
7950  
7951  	return ret;
7952  }
7953  
7954  int btf_get_fd_by_id(u32 id)
7955  {
7956  	struct btf *btf;
7957  	int fd;
7958  
7959  	rcu_read_lock();
7960  	btf = idr_find(&btf_idr, id);
7961  	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
7962  		btf = ERR_PTR(-ENOENT);
7963  	rcu_read_unlock();
7964  
7965  	if (IS_ERR(btf))
7966  		return PTR_ERR(btf);
7967  
7968  	fd = __btf_new_fd(btf);
7969  	if (fd < 0)
7970  		btf_put(btf);
7971  
7972  	return fd;
7973  }
7974  
7975  u32 btf_obj_id(const struct btf *btf)
7976  {
7977  	return btf->id;
7978  }
7979  
7980  bool btf_is_kernel(const struct btf *btf)
7981  {
7982  	return btf->kernel_btf;
7983  }
7984  
7985  bool btf_is_module(const struct btf *btf)
7986  {
7987  	return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
7988  }
7989  
7990  enum {
7991  	BTF_MODULE_F_LIVE = (1 << 0),
7992  };
7993  
7994  #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
7995  struct btf_module {
7996  	struct list_head list;
7997  	struct module *module;
7998  	struct btf *btf;
7999  	struct bin_attribute *sysfs_attr;
8000  	int flags;
8001  };
8002  
8003  static LIST_HEAD(btf_modules);
8004  static DEFINE_MUTEX(btf_module_mutex);
8005  
8006  static void purge_cand_cache(struct btf *btf);
8007  
8008  static int btf_module_notify(struct notifier_block *nb, unsigned long op,
8009  			     void *module)
8010  {
8011  	struct btf_module *btf_mod, *tmp;
8012  	struct module *mod = module;
8013  	struct btf *btf;
8014  	int err = 0;
8015  
8016  	if (mod->btf_data_size == 0 ||
8017  	    (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
8018  	     op != MODULE_STATE_GOING))
8019  		goto out;
8020  
8021  	switch (op) {
8022  	case MODULE_STATE_COMING:
8023  		btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
8024  		if (!btf_mod) {
8025  			err = -ENOMEM;
8026  			goto out;
8027  		}
8028  		btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
8029  				       mod->btf_base_data, mod->btf_base_data_size);
8030  		if (IS_ERR(btf)) {
8031  			kfree(btf_mod);
8032  			if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
8033  				pr_warn("failed to validate module [%s] BTF: %ld\n",
8034  					mod->name, PTR_ERR(btf));
8035  				err = PTR_ERR(btf);
8036  			} else {
8037  				pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
8038  			}
8039  			goto out;
8040  		}
8041  		err = btf_alloc_id(btf);
8042  		if (err) {
8043  			btf_free(btf);
8044  			kfree(btf_mod);
8045  			goto out;
8046  		}
8047  
8048  		purge_cand_cache(NULL);
8049  		mutex_lock(&btf_module_mutex);
8050  		btf_mod->module = module;
8051  		btf_mod->btf = btf;
8052  		list_add(&btf_mod->list, &btf_modules);
8053  		mutex_unlock(&btf_module_mutex);
8054  
8055  		if (IS_ENABLED(CONFIG_SYSFS)) {
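		/* Expose the module's raw BTF image read-only under
		 * /sys/kernel/btf/<module name>, alongside
		 * /sys/kernel/btf/vmlinux.
		 */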
8056  			struct bin_attribute *attr;
8057  
8058  			attr = kzalloc(sizeof(*attr), GFP_KERNEL);
8059  			if (!attr)
8060  				goto out;
8061  
8062  			sysfs_bin_attr_init(attr);
8063  			attr->attr.name = btf->name;
8064  			attr->attr.mode = 0444;
8065  			attr->size = btf->data_size;
8066  			attr->private = btf->data;
8067  			attr->read_new = sysfs_bin_attr_simple_read;
8068  
8069  			err = sysfs_create_bin_file(btf_kobj, attr);
8070  			if (err) {
8071  				pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
8072  					mod->name, err);
8073  				kfree(attr);
8074  				err = 0;
8075  				goto out;
8076  			}
8077  
8078  			btf_mod->sysfs_attr = attr;
8079  		}
8080  
8081  		break;
8082  	case MODULE_STATE_LIVE:
8083  		mutex_lock(&btf_module_mutex);
8084  		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8085  			if (btf_mod->module != module)
8086  				continue;
8087  
8088  			btf_mod->flags |= BTF_MODULE_F_LIVE;
8089  			break;
8090  		}
8091  		mutex_unlock(&btf_module_mutex);
8092  		break;
8093  	case MODULE_STATE_GOING:
8094  		mutex_lock(&btf_module_mutex);
8095  		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8096  			if (btf_mod->module != module)
8097  				continue;
8098  
8099  			list_del(&btf_mod->list);
8100  			if (btf_mod->sysfs_attr)
8101  				sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
8102  			purge_cand_cache(btf_mod->btf);
8103  			btf_put(btf_mod->btf);
8104  			kfree(btf_mod->sysfs_attr);
8105  			kfree(btf_mod);
8106  			break;
8107  		}
8108  		mutex_unlock(&btf_module_mutex);
8109  		break;
8110  	}
8111  out:
8112  	return notifier_from_errno(err);
8113  }
8114  
8115  static struct notifier_block btf_module_nb = {
8116  	.notifier_call = btf_module_notify,
8117  };
8118  
8119  static int __init btf_module_init(void)
8120  {
8121  	register_module_notifier(&btf_module_nb);
8122  	return 0;
8123  }
8124  
8125  fs_initcall(btf_module_init);
8126  #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
8127  
8128  struct module *btf_try_get_module(const struct btf *btf)
8129  {
8130  	struct module *res = NULL;
8131  #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8132  	struct btf_module *btf_mod, *tmp;
8133  
8134  	mutex_lock(&btf_module_mutex);
8135  	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8136  		if (btf_mod->btf != btf)
8137  			continue;
8138  
8139  		/* We must only consider modules whose __init routine has
8140  		 * finished, hence we must check for the BTF_MODULE_F_LIVE
8141  		 * flag, which is set from the notifier callback for
8142  		 * MODULE_STATE_LIVE.
8143  		 */
8144  		if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
8145  			res = btf_mod->module;
8146  
8147  		break;
8148  	}
8149  	mutex_unlock(&btf_module_mutex);
8150  #endif
8151  
8152  	return res;
8153  }
8154  
8155  /* Returns the struct btf corresponding to the given struct module.
8156   * This function can return NULL or an ERR_PTR.
8157   */
8158  static struct btf *btf_get_module_btf(const struct module *module)
8159  {
8160  #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8161  	struct btf_module *btf_mod, *tmp;
8162  #endif
8163  	struct btf *btf = NULL;
8164  
8165  	if (!module) {
8166  		btf = bpf_get_btf_vmlinux();
8167  		if (!IS_ERR_OR_NULL(btf))
8168  			btf_get(btf);
8169  		return btf;
8170  	}
8171  
8172  #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8173  	mutex_lock(&btf_module_mutex);
8174  	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8175  		if (btf_mod->module != module)
8176  			continue;
8177  
8178  		btf_get(btf_mod->btf);
8179  		btf = btf_mod->btf;
8180  		break;
8181  	}
8182  	mutex_unlock(&btf_module_mutex);
8183  #endif
8184  
8185  	return btf;
8186  }
8187  
8188  static int check_btf_kconfigs(const struct module *module, const char *feature)
8189  {
8190  	if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
8191  		pr_err("missing vmlinux BTF, cannot register %s\n", feature);
8192  		return -ENOENT;
8193  	}
8194  	if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
8195  		pr_warn("missing module BTF, cannot register %s\n", feature);
8196  	return 0;
8197  }
8198  
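/* Returns the BTF type ID of the named type in the lower 32 bits; for a
 * type found in a module's BTF, a new FD referencing that module's BTF
 * object is packed into the upper 32 bits, e.g.:
 *
 *	btf_id = (u32)ret;
 *	btf_obj_fd = ret >> 32;	(0 when the type lives in vmlinux BTF)
 */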
8199  BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
8200  {
8201  	struct btf *btf = NULL;
8202  	int btf_obj_fd = 0;
8203  	long ret;
8204  
8205  	if (flags)
8206  		return -EINVAL;
8207  
8208  	if (name_sz <= 1 || name[name_sz - 1])
8209  		return -EINVAL;
8210  
8211  	ret = bpf_find_btf_id(name, kind, &btf);
8212  	if (ret > 0 && btf_is_module(btf)) {
8213  		btf_obj_fd = __btf_new_fd(btf);
8214  		if (btf_obj_fd < 0) {
8215  			btf_put(btf);
8216  			return btf_obj_fd;
8217  		}
8218  		return ret | (((u64)btf_obj_fd) << 32);
8219  	}
8220  	if (ret > 0)
8221  		btf_put(btf);
8222  	return ret;
8223  }
8224  
8225  const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
8226  	.func		= bpf_btf_find_by_name_kind,
8227  	.gpl_only	= false,
8228  	.ret_type	= RET_INTEGER,
8229  	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
8230  	.arg2_type	= ARG_CONST_SIZE,
8231  	.arg3_type	= ARG_ANYTHING,
8232  	.arg4_type	= ARG_ANYTHING,
8233  };
8234  
8235  BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
8236  #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
8237  BTF_TRACING_TYPE_xxx
8238  #undef BTF_TRACING_TYPE
8239  
8240  /* Validate well-formedness of iter argument type.
8241   * On success, return positive BTF ID of iter state's STRUCT type.
8242   * On error, a negative error code is returned.
8243   */
8244  int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
8245  {
8246  	const struct btf_param *arg;
8247  	const struct btf_type *t;
8248  	const char *name;
8249  	int btf_id;
8250  
8251  	if (btf_type_vlen(func) <= arg_idx)
8252  		return -EINVAL;
8253  
8254  	arg = &btf_params(func)[arg_idx];
8255  	t = btf_type_skip_modifiers(btf, arg->type, NULL);
8256  	if (!t || !btf_type_is_ptr(t))
8257  		return -EINVAL;
8258  	t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8259  	if (!t || !__btf_type_is_struct(t))
8260  		return -EINVAL;
8261  
8262  	name = btf_name_by_offset(btf, t->name_off);
8263  	if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
8264  		return -EINVAL;
8265  
8266  	return btf_id;
8267  }
8268  
8269  static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
8270  				 const struct btf_type *func, u32 func_flags)
8271  {
8272  	u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
8273  	const char *sfx, *iter_name;
8274  	const struct btf_type *t;
8275  	char exp_name[128];
8276  	u32 nr_args;
8277  	int btf_id;
8278  
8279  	/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
8280  	if (!flags || (flags & (flags - 1)))
8281  		return -EINVAL;
8282  
8283  	/* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
8284  	nr_args = btf_type_vlen(func);
8285  	if (nr_args < 1)
8286  		return -EINVAL;
8287  
8288  	btf_id = btf_check_iter_arg(btf, func, 0);
8289  	if (btf_id < 0)
8290  		return btf_id;
8291  
8292  	/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
8293  	 * fit nicely in stack slots
8294  	 */
8295  	t = btf_type_by_id(btf, btf_id);
8296  	if (t->size == 0 || (t->size % 8))
8297  		return -EINVAL;
8298  
8299  	/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
8300  	 * naming pattern
8301  	 */
8302  	iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8303  	if (flags & KF_ITER_NEW)
8304  		sfx = "new";
8305  	else if (flags & KF_ITER_NEXT)
8306  		sfx = "next";
8307  	else /* (flags & KF_ITER_DESTROY) */
8308  		sfx = "destroy";
8309  
8310  	snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
8311  	if (strcmp(func_name, exp_name))
8312  		return -EINVAL;
8313  
8314  	/* only iter constructor should have extra arguments */
8315  	if (!(flags & KF_ITER_NEW) && nr_args != 1)
8316  		return -EINVAL;
8317  
8318  	if (flags & KF_ITER_NEXT) {
8319  		/* bpf_iter_<type>_next() should return pointer */
8320  		t = btf_type_skip_modifiers(btf, func->type, NULL);
8321  		if (!t || !btf_type_is_ptr(t))
8322  			return -EINVAL;
8323  	}
8324  
8325  	if (flags & KF_ITER_DESTROY) {
8326  		/* bpf_iter_<type>_destroy() should return void */
8327  		t = btf_type_by_id(btf, func->type);
8328  		if (!t || !btf_type_is_void(t))
8329  			return -EINVAL;
8330  	}
8331  
8332  	return 0;
8333  }
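
/* For illustration, the in-tree numbers iterator satisfies the contract
 * enforced above (sketch; see kernel/bpf/bpf_iter.c and the UAPI
 * definition of struct bpf_iter_num for the real code):
 *
 *	struct bpf_iter_num {
 *		__u64 __opaque[1];	// sizeof is a multiple of 8
 *	} __attribute__((aligned(8)));
 *
 *	__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it,
 *					 int start, int end);		// KF_ITER_NEW
 *	__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num *it);	// KF_ITER_NEXT
 *	__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it);	// KF_ITER_DESTROY
 */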
8334  
8335  static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
8336  {
8337  	const struct btf_type *func;
8338  	const char *func_name;
8339  	int err;
8340  
8341  	/* any kfunc should be FUNC -> FUNC_PROTO */
8342  	func = btf_type_by_id(btf, func_id);
8343  	if (!func || !btf_type_is_func(func))
8344  		return -EINVAL;
8345  
8346  	/* sanity check kfunc name */
8347  	func_name = btf_name_by_offset(btf, func->name_off);
8348  	if (!func_name || !func_name[0])
8349  		return -EINVAL;
8350  
8351  	func = btf_type_by_id(btf, func->type);
8352  	if (!func || !btf_type_is_func_proto(func))
8353  		return -EINVAL;
8354  
8355  	if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
8356  		err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
8357  		if (err)
8358  			return err;
8359  	}
8360  
8361  	return 0;
8362  }
8363  
8364  /* Kernel Function (kfunc) BTF ID set registration API */
8365  
8366  static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
8367  				  const struct btf_kfunc_id_set *kset)
8368  {
8369  	struct btf_kfunc_hook_filter *hook_filter;
8370  	struct btf_id_set8 *add_set = kset->set;
8371  	bool vmlinux_set = !btf_is_module(btf);
8372  	bool add_filter = !!kset->filter;
8373  	struct btf_kfunc_set_tab *tab;
8374  	struct btf_id_set8 *set;
8375  	u32 set_cnt, i;
8376  	int ret;
8377  
8378  	if (hook >= BTF_KFUNC_HOOK_MAX) {
8379  		ret = -EINVAL;
8380  		goto end;
8381  	}
8382  
8383  	if (!add_set->cnt)
8384  		return 0;
8385  
8386  	tab = btf->kfunc_set_tab;
8387  
8388  	if (tab && add_filter) {
8389  		u32 i;
8390  
8391  		hook_filter = &tab->hook_filters[hook];
8392  		for (i = 0; i < hook_filter->nr_filters; i++) {
8393  			if (hook_filter->filters[i] == kset->filter) {
8394  				add_filter = false;
8395  				break;
8396  			}
8397  		}
8398  
8399  		if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
8400  			ret = -E2BIG;
8401  			goto end;
8402  		}
8403  	}
8404  
8405  	if (!tab) {
8406  		tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
8407  		if (!tab)
8408  			return -ENOMEM;
8409  		btf->kfunc_set_tab = tab;
8410  	}
8411  
8412  	set = tab->sets[hook];
8413  	/* Warn when register_btf_kfunc_id_set is called twice for the same hook
8414  	 * for module sets.
8415  	 */
8416  	if (WARN_ON_ONCE(set && !vmlinux_set)) {
8417  		ret = -EINVAL;
8418  		goto end;
8419  	}
8420  
8421  	/* In case of vmlinux sets, there may be more than one set being
8422  	 * registered per hook. To create a unified set, we allocate a new set
8423  	 * and concatenate all individual sets being registered. While each set
8424  	 * is individually sorted, they may become unsorted when concatenated,
8425   * hence the final set must be re-sorted to make binary searching it
8426   * via the btf_id_set8_contains() function work.
8427  	 *
8428  	 * For module sets, we need to allocate as we may need to relocate
8429  	 * BTF ids.
8430  	 */
8431  	set_cnt = set ? set->cnt : 0;
8432  
8433  	if (set_cnt > U32_MAX - add_set->cnt) {
8434  		ret = -EOVERFLOW;
8435  		goto end;
8436  	}
8437  
8438  	if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
8439  		ret = -E2BIG;
8440  		goto end;
8441  	}
8442  
8443  	/* Grow set */
8444  	set = krealloc(tab->sets[hook],
8445  		       offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]),
8446  		       GFP_KERNEL | __GFP_NOWARN);
8447  	if (!set) {
8448  		ret = -ENOMEM;
8449  		goto end;
8450  	}
8451  
8452  	/* For a newly allocated set, initialize set->cnt to 0 */
8453  	if (!tab->sets[hook])
8454  		set->cnt = 0;
8455  	tab->sets[hook] = set;
8456  
8457  	/* Concatenate the two sets */
8458  	memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
8459  	/* Now that the set is copied, update with relocated BTF ids */
8460  	for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
8461  		set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8462  
8463  	set->cnt += add_set->cnt;
8464  
8465  	sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8466  
8467  	if (add_filter) {
8468  		hook_filter = &tab->hook_filters[hook];
8469  		hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
8470  	}
8471  	return 0;
8472  end:
8473  	btf_free_kfunc_set_tab(btf);
8474  	return ret;
8475  }
8476  
8477  static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
8478  					enum btf_kfunc_hook hook,
8479  					u32 kfunc_btf_id,
8480  					const struct bpf_prog *prog)
8481  {
8482  	struct btf_kfunc_hook_filter *hook_filter;
8483  	struct btf_id_set8 *set;
8484  	u32 *id, i;
8485  
8486  	if (hook >= BTF_KFUNC_HOOK_MAX)
8487  		return NULL;
8488  	if (!btf->kfunc_set_tab)
8489  		return NULL;
8490  	hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
8491  	for (i = 0; i < hook_filter->nr_filters; i++) {
8492  		if (hook_filter->filters[i](prog, kfunc_btf_id))
8493  			return NULL;
8494  	}
8495  	set = btf->kfunc_set_tab->sets[hook];
8496  	if (!set)
8497  		return NULL;
8498  	id = btf_id_set8_contains(set, kfunc_btf_id);
8499  	if (!id)
8500  		return NULL;
8501  	/* The flags for the BTF ID are located next to it */
8502  	return id + 1;
8503  }
8504  
8505  static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
8506  {
8507  	switch (prog_type) {
8508  	case BPF_PROG_TYPE_UNSPEC:
8509  		return BTF_KFUNC_HOOK_COMMON;
8510  	case BPF_PROG_TYPE_XDP:
8511  		return BTF_KFUNC_HOOK_XDP;
8512  	case BPF_PROG_TYPE_SCHED_CLS:
8513  		return BTF_KFUNC_HOOK_TC;
8514  	case BPF_PROG_TYPE_STRUCT_OPS:
8515  		return BTF_KFUNC_HOOK_STRUCT_OPS;
8516  	case BPF_PROG_TYPE_TRACING:
8517  	case BPF_PROG_TYPE_TRACEPOINT:
8518  	case BPF_PROG_TYPE_PERF_EVENT:
8519  	case BPF_PROG_TYPE_LSM:
8520  		return BTF_KFUNC_HOOK_TRACING;
8521  	case BPF_PROG_TYPE_SYSCALL:
8522  		return BTF_KFUNC_HOOK_SYSCALL;
8523  	case BPF_PROG_TYPE_CGROUP_SKB:
8524  	case BPF_PROG_TYPE_CGROUP_SOCK:
8525  	case BPF_PROG_TYPE_CGROUP_DEVICE:
8526  	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
8527  	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
8528  	case BPF_PROG_TYPE_CGROUP_SYSCTL:
8529  	case BPF_PROG_TYPE_SOCK_OPS:
8530  		return BTF_KFUNC_HOOK_CGROUP;
8531  	case BPF_PROG_TYPE_SCHED_ACT:
8532  		return BTF_KFUNC_HOOK_SCHED_ACT;
8533  	case BPF_PROG_TYPE_SK_SKB:
8534  		return BTF_KFUNC_HOOK_SK_SKB;
8535  	case BPF_PROG_TYPE_SOCKET_FILTER:
8536  		return BTF_KFUNC_HOOK_SOCKET_FILTER;
8537  	case BPF_PROG_TYPE_LWT_OUT:
8538  	case BPF_PROG_TYPE_LWT_IN:
8539  	case BPF_PROG_TYPE_LWT_XMIT:
8540  	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
8541  		return BTF_KFUNC_HOOK_LWT;
8542  	case BPF_PROG_TYPE_NETFILTER:
8543  		return BTF_KFUNC_HOOK_NETFILTER;
8544  	case BPF_PROG_TYPE_KPROBE:
8545  		return BTF_KFUNC_HOOK_KPROBE;
8546  	default:
8547  		return BTF_KFUNC_HOOK_MAX;
8548  	}
8549  }
8550  
8551  /* Caution:
8552   * Reference to the module (obtained using btf_try_get_module) corresponding to
8553   * the struct btf *MUST* be held when calling this function from verifier
8554   * context. This is usually true as we stash references in prog's kfunc_btf_tab;
8555   * keeping the reference for the duration of the call provides the necessary
8556   * protection for looking up a well-formed btf->kfunc_set_tab.
8557   */
8558  u32 *btf_kfunc_id_set_contains(const struct btf *btf,
8559  			       u32 kfunc_btf_id,
8560  			       const struct bpf_prog *prog)
8561  {
8562  	enum bpf_prog_type prog_type = resolve_prog_type(prog);
8563  	enum btf_kfunc_hook hook;
8564  	u32 *kfunc_flags;
8565  
8566  	kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
8567  	if (kfunc_flags)
8568  		return kfunc_flags;
8569  
8570  	hook = bpf_prog_type_to_kfunc_hook(prog_type);
8571  	return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog);
8572  }
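
/* Typical use by the verifier (sketch): the returned pointer, when
 * non-NULL, yields the kfunc's flags:
 *
 *	u32 *kfunc_flags = btf_kfunc_id_set_contains(btf, func_id, prog);
 *
 *	if (!kfunc_flags)
 *		return -EACCES;		// kfunc not allowed for this prog
 *	if (*kfunc_flags & KF_ACQUIRE)
 *		...			// kfunc returns a referenced object
 */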
8573  
8574  u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
8575  				const struct bpf_prog *prog)
8576  {
8577  	return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
8578  }
8579  
8580  static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
8581  				       const struct btf_kfunc_id_set *kset)
8582  {
8583  	struct btf *btf;
8584  	int ret, i;
8585  
8586  	btf = btf_get_module_btf(kset->owner);
8587  	if (!btf)
8588  		return check_btf_kconfigs(kset->owner, "kfunc");
8589  	if (IS_ERR(btf))
8590  		return PTR_ERR(btf);
8591  
8592  	for (i = 0; i < kset->set->cnt; i++) {
8593  		ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8594  					     kset->set->pairs[i].flags);
8595  		if (ret)
8596  			goto err_out;
8597  	}
8598  
8599  	ret = btf_populate_kfunc_set(btf, hook, kset);
8600  
8601  err_out:
8602  	btf_put(btf);
8603  	return ret;
8604  }
8605  
8606  /* This function must be invoked only from initcalls/module init functions */
8607  int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
8608  			      const struct btf_kfunc_id_set *kset)
8609  {
8610  	enum btf_kfunc_hook hook;
8611  
8612  	/* All kfuncs need to be tagged as such in BTF.
8613  	 * WARN() for initcall registrations that do not check errors.
8614  	 */
8615  	if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
8616  		WARN_ON(!kset->owner);
8617  		return -EINVAL;
8618  	}
8619  
8620  	hook = bpf_prog_type_to_kfunc_hook(prog_type);
8621  	return __register_btf_kfunc_id_set(hook, kset);
8622  }
8623  EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
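
/* Typical registration from an initcall or module init (illustrative
 * sketch; bpf_kfunc_example() is a made-up kfunc name):
 *
 *	BTF_KFUNCS_START(example_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_kfunc_example, KF_TRUSTED_ARGS)
 *	BTF_KFUNCS_END(example_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set example_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &example_kfunc_ids,
 *	};
 *
 *	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
 *					&example_kfunc_set);
 *
 * The BTF_KFUNCS_START/END macros set BTF_SET8_KFUNCS, which the check
 * above insists on.
 */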
8624  
8625  /* This function must be invoked only from initcalls/module init functions */
8626  int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
8627  {
8628  	return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
8629  }
8630  EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set);
8631  
8632  s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
8633  {
8634  	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
8635  	struct btf_id_dtor_kfunc *dtor;
8636  
8637  	if (!tab)
8638  		return -ENOENT;
8639  	/* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
8640  	 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
8641  	 */
8642  	BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
8643  	dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8644  	if (!dtor)
8645  		return -ENOENT;
8646  	return dtor->kfunc_btf_id;
8647  }
8648  
8649  static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
8650  {
8651  	const struct btf_type *dtor_func, *dtor_func_proto, *t;
8652  	const struct btf_param *args;
8653  	s32 dtor_btf_id;
8654  	u32 nr_args, i;
8655  
8656  	for (i = 0; i < cnt; i++) {
8657  		dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id);
8658  
8659  		dtor_func = btf_type_by_id(btf, dtor_btf_id);
8660  		if (!dtor_func || !btf_type_is_func(dtor_func))
8661  			return -EINVAL;
8662  
8663  		dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
8664  		if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
8665  			return -EINVAL;
8666  
8667  		/* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
8668  		t = btf_type_by_id(btf, dtor_func_proto->type);
8669  		if (!t || !btf_type_is_void(t))
8670  			return -EINVAL;
8671  
8672  		nr_args = btf_type_vlen(dtor_func_proto);
8673  		if (nr_args != 1)
8674  			return -EINVAL;
8675  		args = btf_params(dtor_func_proto);
8676  		t = btf_type_by_id(btf, args[0].type);
8677  		/* Allow any pointer type, as width on targets Linux supports
8678  		 * will be the same for all pointer types (i.e. sizeof(void *))
8679  		 */
8680  		if (!t || !btf_type_is_ptr(t))
8681  			return -EINVAL;
8682  	}
8683  	return 0;
8684  }
8685  
8686  /* This function must be invoked only from initcalls/module init functions */
8687  int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
8688  				struct module *owner)
8689  {
8690  	struct btf_id_dtor_kfunc_tab *tab;
8691  	struct btf *btf;
8692  	u32 tab_cnt, i;
8693  	int ret;
8694  
8695  	btf = btf_get_module_btf(owner);
8696  	if (!btf)
8697  		return check_btf_kconfigs(owner, "dtor kfuncs");
8698  	if (IS_ERR(btf))
8699  		return PTR_ERR(btf);
8700  
8701  	if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8702  		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8703  		ret = -E2BIG;
8704  		goto end;
8705  	}
8706  
8707  	/* Ensure that the prototype of dtor kfuncs being registered is sane */
8708  	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
8709  	if (ret < 0)
8710  		goto end;
8711  
8712  	tab = btf->dtor_kfunc_tab;
8713  	/* Only one call allowed for modules */
8714  	if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
8715  		ret = -EINVAL;
8716  		goto end;
8717  	}
8718  
8719  	tab_cnt = tab ? tab->cnt : 0;
8720  	if (tab_cnt > U32_MAX - add_cnt) {
8721  		ret = -EOVERFLOW;
8722  		goto end;
8723  	}
8724  	if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8725  		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8726  		ret = -E2BIG;
8727  		goto end;
8728  	}
8729  
8730  	tab = krealloc(btf->dtor_kfunc_tab,
8731  		       offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
8732  		       GFP_KERNEL | __GFP_NOWARN);
8733  	if (!tab) {
8734  		ret = -ENOMEM;
8735  		goto end;
8736  	}
8737  
8738  	if (!btf->dtor_kfunc_tab)
8739  		tab->cnt = 0;
8740  	btf->dtor_kfunc_tab = tab;
8741  
8742  	memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
8743  
8744  	/* remap BTF ids based on BTF relocation (if any) */
8745  	for (i = tab_cnt; i < tab_cnt + add_cnt; i++) {
8746  		tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
8747  		tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
8748  	}
8749  
8750  	tab->cnt += add_cnt;
8751  
8752  	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
8753  
8754  end:
8755  	if (ret)
8756  		btf_free_dtor_kfunc_tab(btf);
8757  	btf_put(btf);
8758  	return ret;
8759  }
8760  EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
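
/* Illustrative sketch of dtor kfunc registration (names are made up;
 * nf_conntrack is an in-tree user). The dtor must have the
 * 'void func(type *)' prototype enforced by btf_check_dtor_kfuncs():
 *
 *	BTF_ID_LIST(example_dtor_ids)
 *	BTF_ID(struct, example_obj)
 *	BTF_ID(func, bpf_example_obj_release)
 *
 *	static const struct btf_id_dtor_kfunc example_dtors[] = {
 *		{
 *			.btf_id	      = example_dtor_ids[0],
 *			.kfunc_btf_id = example_dtor_ids[1],
 *		},
 *	};
 *
 *	err = register_btf_id_dtor_kfuncs(example_dtors,
 *					  ARRAY_SIZE(example_dtors),
 *					  THIS_MODULE);
 */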
8761  
8762  #define MAX_TYPES_ARE_COMPAT_DEPTH 2
8763  
8764  /* Check local and target types for compatibility. This check is used for
8765   * type-based CO-RE relocations and follows slightly different rules than
8766   * field-based relocations. This function assumes that root types were already
8767   * checked for name match. Beyond that initial root-level name check, names
8768   * are completely ignored. Compatibility rules are as follows:
8769   *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
8770   *     kind should match for local and target types (i.e., STRUCT is not
8771   *     compatible with UNION);
8772   *   - for ENUMs/ENUM64s, the size is ignored;
8773   *   - for INT, size and signedness are ignored;
8774   *   - for ARRAY, dimensionality is ignored, element types are checked for
8775   *     compatibility recursively;
8776   *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
8777   *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
8778   *   - FUNC_PROTOs are compatible if they have compatible signature: same
8779   *     number of input args and compatible return and argument types.
8780   * These rules are not set in stone and probably will be adjusted as we get
8781   * more experience with using BPF CO-RE relocations.
8782   */
8783  int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
8784  			      const struct btf *targ_btf, __u32 targ_id)
8785  {
8786  	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
8787  					   MAX_TYPES_ARE_COMPAT_DEPTH);
8788  }
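
/* Example of the rules above: a local 'int' is compatible with a target
 * 'long' (both are INTs; size and signedness are ignored), and 'int[4]'
 * is compatible with 'int[8]' (ARRAY dimensionality is ignored), but a
 * local STRUCT is never compatible with a target UNION, even when their
 * members are identical.
 */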
8789  
8790  #define MAX_TYPES_MATCH_DEPTH 2
8791  
8792  int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
8793  			 const struct btf *targ_btf, u32 targ_id)
8794  {
8795  	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
8796  				      MAX_TYPES_MATCH_DEPTH);
8797  }
8798  
8799  static bool bpf_core_is_flavor_sep(const char *s)
8800  {
8801  	/* check X___Y name pattern, where X and Y are not underscores */
8802  	return s[0] != '_' &&				      /* X */
8803  	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
8804  	       s[4] != '_';				      /* Y */
8805  }
8806  
8807  size_t bpf_core_essential_name_len(const char *name)
8808  {
8809  	size_t n = strlen(name);
8810  	int i;
8811  
8812  	for (i = n - 5; i >= 0; i--) {
8813  		if (bpf_core_is_flavor_sep(name + i))
8814  			return i + 1;
8815  	}
8816  	return n;
8817  }
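
/* Example: for a flavored type name such as "task_struct___flavor" the
 * essential name length covers only "task_struct"; the "___" separator
 * and everything after it are ignored during candidate matching. A
 * plain name like "task_struct" is used in full.
 */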
8818  
8819  static void bpf_free_cands(struct bpf_cand_cache *cands)
8820  {
8821  	if (!cands->cnt)
8822  		/* empty candidate array was allocated on stack */
8823  		return;
8824  	kfree(cands);
8825  }
8826  
8827  static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
8828  {
8829  	kfree(cands->name);
8830  	kfree(cands);
8831  }
8832  
8833  #define VMLINUX_CAND_CACHE_SIZE 31
8834  static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
8835  
8836  #define MODULE_CAND_CACHE_SIZE 31
8837  static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
8838  
8839  static void __print_cand_cache(struct bpf_verifier_log *log,
8840  			       struct bpf_cand_cache **cache,
8841  			       int cache_size)
8842  {
8843  	struct bpf_cand_cache *cc;
8844  	int i, j;
8845  
8846  	for (i = 0; i < cache_size; i++) {
8847  		cc = cache[i];
8848  		if (!cc)
8849  			continue;
8850  		bpf_log(log, "[%d]%s(", i, cc->name);
8851  		for (j = 0; j < cc->cnt; j++) {
8852  			bpf_log(log, "%d", cc->cands[j].id);
8853  			if (j < cc->cnt - 1)
8854  				bpf_log(log, " ");
8855  		}
8856  		bpf_log(log, "), ");
8857  	}
8858  }
8859  
8860  static void print_cand_cache(struct bpf_verifier_log *log)
8861  {
8862  	mutex_lock(&cand_cache_mutex);
8863  	bpf_log(log, "vmlinux_cand_cache:");
8864  	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8865  	bpf_log(log, "\nmodule_cand_cache:");
8866  	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8867  	bpf_log(log, "\n");
8868  	mutex_unlock(&cand_cache_mutex);
8869  }
8870  
8871  static u32 hash_cands(struct bpf_cand_cache *cands)
8872  {
8873  	return jhash(cands->name, cands->name_len, 0);
8874  }
8875  
8876  static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
8877  					       struct bpf_cand_cache **cache,
8878  					       int cache_size)
8879  {
8880  	struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
8881  
8882  	if (cc && cc->name_len == cands->name_len &&
8883  	    !strncmp(cc->name, cands->name, cands->name_len))
8884  		return cc;
8885  	return NULL;
8886  }
8887  
8888  static size_t sizeof_cands(int cnt)
8889  {
8890  	return offsetof(struct bpf_cand_cache, cands[cnt]);
8891  }
8892  
8893  static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
8894  						  struct bpf_cand_cache **cache,
8895  						  int cache_size)
8896  {
8897  	struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
8898  
8899  	if (*cc) {
8900  		bpf_free_cands_from_cache(*cc);
8901  		*cc = NULL;
8902  	}
8903  	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
8904  	if (!new_cands) {
8905  		bpf_free_cands(cands);
8906  		return ERR_PTR(-ENOMEM);
8907  	}
8908  	/* strdup the name, since it will stay in the cache.
8909  	 * cands->name points to strings in the prog's BTF and the prog can be unloaded.
8910  	 */
8911  	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
8912  	bpf_free_cands(cands);
8913  	if (!new_cands->name) {
8914  		kfree(new_cands);
8915  		return ERR_PTR(-ENOMEM);
8916  	}
8917  	*cc = new_cands;
8918  	return new_cands;
8919  }
8920  
8921  #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8922  static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
8923  			       int cache_size)
8924  {
8925  	struct bpf_cand_cache *cc;
8926  	int i, j;
8927  
8928  	for (i = 0; i < cache_size; i++) {
8929  		cc = cache[i];
8930  		if (!cc)
8931  			continue;
8932  		if (!btf) {
8933  			/* when a new module is loaded, purge all of module_cand_cache,
8934  			 * since the new module might have candidates whose names
8935  			 * match cached cands.
8936  			 */
8937  			bpf_free_cands_from_cache(cc);
8938  			cache[i] = NULL;
8939  			continue;
8940  		}
8941  		/* when a module is unloaded, purge cache entries
8942  		 * that match the module's btf
8943  		 */
8944  		for (j = 0; j < cc->cnt; j++)
8945  			if (cc->cands[j].btf == btf) {
8946  				bpf_free_cands_from_cache(cc);
8947  				cache[i] = NULL;
8948  				break;
8949  			}
8950  	}
8951  
8952  }
8953  
8954  static void purge_cand_cache(struct btf *btf)
8955  {
8956  	mutex_lock(&cand_cache_mutex);
8957  	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8958  	mutex_unlock(&cand_cache_mutex);
8959  }
8960  #endif
8961  
8962  static struct bpf_cand_cache *
8963  bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
8964  		   int targ_start_id)
8965  {
8966  	struct bpf_cand_cache *new_cands;
8967  	const struct btf_type *t;
8968  	const char *targ_name;
8969  	size_t targ_essent_len;
8970  	int n, i;
8971  
8972  	n = btf_nr_types(targ_btf);
8973  	for (i = targ_start_id; i < n; i++) {
8974  		t = btf_type_by_id(targ_btf, i);
8975  		if (btf_kind(t) != cands->kind)
8976  			continue;
8977  
8978  		targ_name = btf_name_by_offset(targ_btf, t->name_off);
8979  		if (!targ_name)
8980  			continue;
8981  
8982  		/* the resched point is before strncmp to make sure that a search
8983  		 * for a non-existing name will have a chance to schedule().
8984  		 */
8985  		cond_resched();
8986  
8987  		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
8988  			continue;
8989  
8990  		targ_essent_len = bpf_core_essential_name_len(targ_name);
8991  		if (targ_essent_len != cands->name_len)
8992  			continue;
8993  
8994  		/* most of the time there is only one candidate for a given kind+name pair */
8995  		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
8996  		if (!new_cands) {
8997  			bpf_free_cands(cands);
8998  			return ERR_PTR(-ENOMEM);
8999  		}
9000  
9001  		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
9002  		bpf_free_cands(cands);
9003  		cands = new_cands;
9004  		cands->cands[cands->cnt].btf = targ_btf;
9005  		cands->cands[cands->cnt].id = i;
9006  		cands->cnt++;
9007  	}
9008  	return cands;
9009  }
9010  
9011  static struct bpf_cand_cache *
9012  bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
9013  {
9014  	struct bpf_cand_cache *cands, *cc, local_cand = {};
9015  	const struct btf *local_btf = ctx->btf;
9016  	const struct btf_type *local_type;
9017  	const struct btf *main_btf;
9018  	size_t local_essent_len;
9019  	struct btf *mod_btf;
9020  	const char *name;
9021  	int id;
9022  
9023  	main_btf = bpf_get_btf_vmlinux();
9024  	if (IS_ERR(main_btf))
9025  		return ERR_CAST(main_btf);
9026  	if (!main_btf)
9027  		return ERR_PTR(-EINVAL);
9028  
9029  	local_type = btf_type_by_id(local_btf, local_type_id);
9030  	if (!local_type)
9031  		return ERR_PTR(-EINVAL);
9032  
9033  	name = btf_name_by_offset(local_btf, local_type->name_off);
9034  	if (str_is_empty(name))
9035  		return ERR_PTR(-EINVAL);
9036  	local_essent_len = bpf_core_essential_name_len(name);
9037  
9038  	cands = &local_cand;
9039  	cands->name = name;
9040  	cands->kind = btf_kind(local_type);
9041  	cands->name_len = local_essent_len;
9042  
9043  	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9044  	/* cands is a pointer to stack here */
9045  	if (cc) {
9046  		if (cc->cnt)
9047  			return cc;
9048  		goto check_modules;
9049  	}
9050  
9051  	/* Attempt to find target candidates in vmlinux BTF first */
9052  	cands = bpf_core_add_cands(cands, main_btf, 1);
9053  	if (IS_ERR(cands))
9054  		return ERR_CAST(cands);
9055  
9056  	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
9057  
9058  	/* populate cache even when cands->cnt == 0 */
9059  	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9060  	if (IS_ERR(cc))
9061  		return ERR_CAST(cc);
9062  
9063  	/* if vmlinux BTF has any candidate, don't go for module BTFs */
9064  	if (cc->cnt)
9065  		return cc;
9066  
9067  check_modules:
9068  	/* cands is a pointer to stack here and cands->cnt == 0 */
9069  	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9070  	if (cc)
9071  		/* if cache has it return it even if cc->cnt == 0 */
9072  		return cc;
9073  
9074  	/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
9075  	spin_lock_bh(&btf_idr_lock);
9076  	idr_for_each_entry(&btf_idr, mod_btf, id) {
9077  		if (!btf_is_module(mod_btf))
9078  			continue;
9079  		/* a linear search could be slow, hence unlock/lock
9080  		 * the IDR to avoid holding it for too long
9081  		 */
9082  		btf_get(mod_btf);
9083  		spin_unlock_bh(&btf_idr_lock);
9084  		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
9085  		btf_put(mod_btf);
9086  		if (IS_ERR(cands))
9087  			return ERR_CAST(cands);
9088  		spin_lock_bh(&btf_idr_lock);
9089  	}
9090  	spin_unlock_bh(&btf_idr_lock);
9091  	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
9092  	 * or a pointer to the stack if cands->cnt == 0.
9093  	 * Copy it into the cache even when cands->cnt == 0 and
9094  	 * return the result.
9095  	 */
9096  	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9097  }
9098  
9099  int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
9100  		   int relo_idx, void *insn)
9101  {
9102  	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
9103  	struct bpf_core_cand_list cands = {};
9104  	struct bpf_core_relo_res targ_res;
9105  	struct bpf_core_spec *specs;
9106  	const struct btf_type *type;
9107  	int err;
9108  
9109  	/* ~4k of temp memory is necessary to convert an LLVM spec like "0:1:0:5"
9110  	 * into arrays of btf_ids of struct fields and array indices.
9111  	 */
9112  	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
9113  	if (!specs)
9114  		return -ENOMEM;
9115  
9116  	type = btf_type_by_id(ctx->btf, relo->type_id);
9117  	if (!type) {
9118  		bpf_log(ctx->log, "relo #%u: bad type id %u\n",
9119  			relo_idx, relo->type_id);
9120  		kfree(specs);
9121  		return -EINVAL;
9122  	}
9123  
9124  	if (need_cands) {
9125  		struct bpf_cand_cache *cc;
9126  		int i;
9127  
9128  		mutex_lock(&cand_cache_mutex);
9129  		cc = bpf_core_find_cands(ctx, relo->type_id);
9130  		if (IS_ERR(cc)) {
9131  			bpf_log(ctx->log, "target candidate search failed for %d\n",
9132  				relo->type_id);
9133  			err = PTR_ERR(cc);
9134  			goto out;
9135  		}
9136  		if (cc->cnt) {
9137  			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
9138  			if (!cands.cands) {
9139  				err = -ENOMEM;
9140  				goto out;
9141  			}
9142  		}
9143  		for (i = 0; i < cc->cnt; i++) {
9144  			bpf_log(ctx->log,
9145  				"CO-RE relocating %s %s: found target candidate [%d]\n",
9146  				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
9147  			cands.cands[i].btf = cc->cands[i].btf;
9148  			cands.cands[i].id = cc->cands[i].id;
9149  		}
9150  		cands.len = cc->cnt;
9151  		/* cand_cache_mutex needs to span the cache lookup and
9152  		 * copy of btf pointer into bpf_core_cand_list,
9153  		 * since a module can be unloaded while bpf_core_calc_relo_insn
9154  		 * is working with the module's btf.
9155  		 */
9156  	}
9157  
9158  	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9159  				      &targ_res);
9160  	if (err)
9161  		goto out;
9162  
9163  	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
9164  				  &targ_res);
9165  
9166  out:
9167  	kfree(specs);
9168  	if (need_cands) {
9169  		kfree(cands.cands);
9170  		mutex_unlock(&cand_cache_mutex);
9171  		if (ctx->log->level & BPF_LOG_LEVEL2)
9172  			print_cand_cache(ctx->log);
9173  	}
9174  	return err;
9175  }
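
/* For reference, the access spec string mentioned above encodes a chain
 * of field/array accesses. A classic example, mirroring the libbpf
 * relo_core documentation:
 *
 *	struct sample {
 *		int a;
 *		struct {
 *			int b[10];
 *		};
 *	};
 *
 *	struct sample *s;
 *	&s->a      -> "0:0"      (deref s, field #0)
 *	&s->b[5]   -> "0:1:0:5"  (deref s, anon struct at field #1,
 *				  its field #0, array index 5)
 */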
9176  
9177  bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
9178  				const struct bpf_reg_state *reg,
9179  				const char *field_name, u32 btf_id, const char *suffix)
9180  {
9181  	struct btf *btf = reg->btf;
9182  	const struct btf_type *walk_type, *safe_type;
9183  	const char *tname;
9184  	char safe_tname[64];
9185  	long ret, safe_id;
9186  	const struct btf_member *member;
9187  	u32 i;
9188  
9189  	walk_type = btf_type_by_id(btf, reg->btf_id);
9190  	if (!walk_type)
9191  		return false;
9192  
9193  	tname = btf_name_by_offset(btf, walk_type->name_off);
9194  
9195  	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
9196  	if (ret >= sizeof(safe_tname))
9197  		return false;
9198  
9199  	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
9200  	if (safe_id < 0)
9201  		return false;
9202  
9203  	safe_type = btf_type_by_id(btf, safe_id);
9204  	if (!safe_type)
9205  		return false;
9206  
9207  	for_each_member(i, safe_type, member) {
9208  		const char *m_name = __btf_name_by_offset(btf, member->name_off);
9209  		const struct btf_type *mtype = btf_type_by_id(btf, member->type);
9210  		u32 id;
9211  
9212  		if (!btf_type_is_ptr(mtype))
9213  			continue;
9214  
9215  		btf_type_skip_modifiers(btf, mtype->type, &id);
9216  		/* If we match on both type and name, the field is considered trusted. */
9217  		if (btf_id == id && !strcmp(field_name, m_name))
9218  			return true;
9219  	}
9220  
9221  	return false;
9222  }
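
/* Sketch of the convention this implements: to mark task->cpus_ptr as
 * trusted, the verifier (kernel/bpf/verifier.c) declares a parallel
 * "safe" type whose name is the walked type plus the given suffix,
 * here "__safe_trusted":
 *
 *	struct task_struct__safe_trusted {
 *		const cpumask_t *cpus_ptr;
 *	};
 *
 * A field is trusted only when both its type and name match a member of
 * the safe type.
 */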
9223  
9224  bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
9225  			       const struct btf *reg_btf, u32 reg_id,
9226  			       const struct btf *arg_btf, u32 arg_id)
9227  {
9228  	const char *reg_name, *arg_name, *search_needle;
9229  	const struct btf_type *reg_type, *arg_type;
9230  	int reg_len, arg_len, cmp_len;
9231  	size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);
9232  
9233  	reg_type = btf_type_by_id(reg_btf, reg_id);
9234  	if (!reg_type)
9235  		return false;
9236  
9237  	arg_type = btf_type_by_id(arg_btf, arg_id);
9238  	if (!arg_type)
9239  		return false;
9240  
9241  	reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
9242  	arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
9243  
9244  	reg_len = strlen(reg_name);
9245  	arg_len = strlen(arg_name);
9246  
9247  	/* Exactly one of the two type names may be suffixed with ___init, so
9248  	 * if the strings are the same size, they can't possibly be no-cast
9249  	 * aliases of one another. If you have two of the same type names, e.g.
9250  	 * they're both nf_conn___init, it would be improper to return true
9251  	 * because they are _not_ no-cast aliases, they are the same type.
9252  	 */
9253  	if (reg_len == arg_len)
9254  		return false;
9255  
9256  	/* Either of the two names must be the other name, suffixed with ___init. */
9257  	if ((reg_len != arg_len + pattern_len) &&
9258  	    (arg_len != reg_len + pattern_len))
9259  		return false;
9260  
9261  	if (reg_len < arg_len) {
9262  		search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
9263  		cmp_len = reg_len;
9264  	} else {
9265  		search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
9266  		cmp_len = arg_len;
9267  	}
9268  
9269  	if (!search_needle)
9270  		return false;
9271  
9272  	/* ___init suffix must come at the end of the name */
9273  	if (*(search_needle + pattern_len) != '\0')
9274  		return false;
9275  
9276  	return !strncmp(reg_name, arg_name, cmp_len);
9277  }
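
/* Example: "nf_conn" and "nf_conn___init" are no-cast aliases of one
 * another (one name is the other plus the "___init" suffix), whereas
 * two occurrences of "nf_conn___init" name the same type, not aliases,
 * and yield false above.
 */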
9278  
9279  #ifdef CONFIG_BPF_JIT
9280  static int
9281  btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
9282  		   struct bpf_verifier_log *log)
9283  {
9284  	struct btf_struct_ops_tab *tab, *new_tab;
9285  	int i, err;
9286  
9287  	tab = btf->struct_ops_tab;
9288  	if (!tab) {
9289  		tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]),
9290  			      GFP_KERNEL);
9291  		if (!tab)
9292  			return -ENOMEM;
9293  		tab->capacity = 4;
9294  		btf->struct_ops_tab = tab;
9295  	}
9296  
9297  	for (i = 0; i < tab->cnt; i++)
9298  		if (tab->ops[i].st_ops == st_ops)
9299  			return -EEXIST;
9300  
9301  	if (tab->cnt == tab->capacity) {
9302  		new_tab = krealloc(tab,
9303  				   offsetof(struct btf_struct_ops_tab,
9304  					    ops[tab->capacity * 2]),
9305  				   GFP_KERNEL);
9306  		if (!new_tab)
9307  			return -ENOMEM;
9308  		tab = new_tab;
9309  		tab->capacity *= 2;
9310  		btf->struct_ops_tab = tab;
9311  	}
9312  
9313  	tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops;
9314  
9315  	err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
9316  	if (err)
9317  		return err;
9318  
9319  	btf->struct_ops_tab->cnt++;
9320  
9321  	return 0;
9322  }
9323  
9324  const struct bpf_struct_ops_desc *
9325  bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
9326  {
9327  	const struct bpf_struct_ops_desc *st_ops_list;
9328  	unsigned int i;
9329  	u32 cnt;
9330  
9331  	if (!value_id)
9332  		return NULL;
9333  	if (!btf->struct_ops_tab)
9334  		return NULL;
9335  
9336  	cnt = btf->struct_ops_tab->cnt;
9337  	st_ops_list = btf->struct_ops_tab->ops;
9338  	for (i = 0; i < cnt; i++) {
9339  		if (st_ops_list[i].value_id == value_id)
9340  			return &st_ops_list[i];
9341  	}
9342  
9343  	return NULL;
9344  }
9345  
9346  const struct bpf_struct_ops_desc *
9347  bpf_struct_ops_find(struct btf *btf, u32 type_id)
9348  {
9349  	const struct bpf_struct_ops_desc *st_ops_list;
9350  	unsigned int i;
9351  	u32 cnt;
9352  
9353  	if (!type_id)
9354  		return NULL;
9355  	if (!btf->struct_ops_tab)
9356  		return NULL;
9357  
9358  	cnt = btf->struct_ops_tab->cnt;
9359  	st_ops_list = btf->struct_ops_tab->ops;
9360  	for (i = 0; i < cnt; i++) {
9361  		if (st_ops_list[i].type_id == type_id)
9362  			return &st_ops_list[i];
9363  	}
9364  
9365  	return NULL;
9366  }
9367  
9368  int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
9369  {
9370  	struct bpf_verifier_log *log;
9371  	struct btf *btf;
9372  	int err = 0;
9373  
9374  	btf = btf_get_module_btf(st_ops->owner);
9375  	if (!btf)
9376  		return check_btf_kconfigs(st_ops->owner, "struct_ops");
9377  	if (IS_ERR(btf))
9378  		return PTR_ERR(btf);
9379  
9380  	log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
9381  	if (!log) {
9382  		err = -ENOMEM;
9383  		goto errout;
9384  	}
9385  
9386  	log->level = BPF_LOG_KERNEL;
9387  
9388  	err = btf_add_struct_ops(btf, st_ops, log);
9389  
9390  errout:
9391  	kfree(log);
9392  	btf_put(btf);
9393  
9394  	return err;
9395  }
9396  EXPORT_SYMBOL_GPL(__register_bpf_struct_ops);
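
/* Illustrative sketch of a struct_ops registration (field set is
 * abridged and names are made up; see net/ipv4/bpf_tcp_ca.c for an
 * in-tree user; modules typically go through the
 * register_bpf_struct_ops() wrapper macro):
 *
 *	static struct bpf_struct_ops bpf_example_ops = {
 *		.verifier_ops = &bpf_example_verifier_ops,
 *		.init	      = bpf_example_init,
 *		.reg	      = bpf_example_reg,
 *		.unreg	      = bpf_example_unreg,
 *		.name	      = "example_ops",
 *		.owner	      = THIS_MODULE,
 *	};
 *
 *	err = __register_bpf_struct_ops(&bpf_example_ops);
 */
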
9397  #endif
9398  
9399  bool btf_param_match_suffix(const struct btf *btf,
9400  			    const struct btf_param *arg,
9401  			    const char *suffix)
9402  {
9403  	int suffix_len = strlen(suffix), len;
9404  	const char *param_name;
9405  
9406  	/* In the future, this can be ported to use BTF tagging */
9407  	param_name = btf_name_by_offset(btf, arg->name_off);
9408  	if (str_is_empty(param_name))
9409  		return false;
9410  	len = strlen(param_name);
9411  	if (len <= suffix_len)
9412  		return false;
9413  	param_name += len - suffix_len;
9414  	return !strncmp(param_name, suffix, suffix_len);
9415  }
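
/* Example: given a kfunc prototype such as (hypothetical)
 *
 *	void bpf_example_copy(void *dst, const void *src, u32 src__sz);
 *
 * btf_param_match_suffix(btf, &args[2], "__sz") returns true, which is
 * how the verifier pairs a memory argument with its size argument.
 */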
9416