xref: /linux/kernel/bpf/verifier.c (revision 1677293ed891664796af51b64feba12a99def4a8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 #include <linux/poison.h>
27 #include <linux/module.h>
28 #include <linux/cpumask.h>
29 #include <linux/bpf_mem_alloc.h>
30 #include <net/xdp.h>
31 
32 #include "disasm.h"
33 
34 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
35 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
36 	[_id] = & _name ## _verifier_ops,
37 #define BPF_MAP_TYPE(_id, _ops)
38 #define BPF_LINK_TYPE(_id, _name)
39 #include <linux/bpf_types.h>
40 #undef BPF_PROG_TYPE
41 #undef BPF_MAP_TYPE
42 #undef BPF_LINK_TYPE
43 };
44 
45 struct bpf_mem_alloc bpf_global_percpu_ma;
46 static bool bpf_global_percpu_ma_set;
47 
48 /* bpf_check() is a static code analyzer that walks eBPF program
49  * instruction by instruction and updates register/stack state.
50  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
51  *
52  * The first pass is depth-first-search to check that the program is a DAG.
53  * It rejects the following programs:
54  * - larger than BPF_MAXINSNS insns
55  * - if loop is present (detected via back-edge)
56  * - unreachable insns exist (shouldn't be a forest. program = one function)
57  * - out of bounds or malformed jumps
58  * The second pass is all possible path descent from the 1st insn.
59  * Since it's analyzing all paths through the program, the length of the
60  * analysis is limited to 64k insn, which may be hit even if the total number
61  * of insns is less than 4K, when there are too many branches that change stack/regs.
62  * Number of 'branches to be analyzed' is limited to 1k
63  *
64  * On entry to each instruction, each register has a type, and the instruction
65  * changes the types of the registers depending on instruction semantics.
66  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
67  * copied to R1.
68  *
69  * All registers are 64-bit.
70  * R0 - return register
71  * R1-R5 argument passing registers
72  * R6-R9 callee saved registers
73  * R10 - frame pointer read-only
74  *
75  * At the start of BPF program the register R1 contains a pointer to bpf_context
76  * and has type PTR_TO_CTX.
77  *
78  * Verifier tracks arithmetic operations on pointers in case:
79  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
80  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
81  * 1st insn copies R10 (which has FRAME_PTR) type into R1
82  * and 2nd arithmetic instruction is pattern matched to recognize
83  * that it wants to construct a pointer to some element within stack.
84  * So after 2nd insn, the register R1 has type PTR_TO_STACK
85  * (and -20 constant is saved for further stack bounds checking).
86  * Meaning that this reg is a pointer to stack plus known immediate constant.
87  *
88  * Most of the time the registers have SCALAR_VALUE type, which
89  * means the register has some value, but it's not a valid pointer.
90  * (like pointer plus pointer becomes SCALAR_VALUE type)
91  *
92  * When verifier sees load or store instructions the type of base register
93  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
94  * four pointer types recognized by the check_mem_access() function.
95  *
96  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
97  * and the range of [ptr, ptr + map's value_size) is accessible.
98  *
99  * registers used to pass values to function calls are checked against
100  * function argument constraints.
101  *
102  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
103  * It means that the register type passed to this function must be
104  * PTR_TO_STACK and it will be used inside the function as
105  * 'pointer to map element key'
106  *
107  * For example the argument constraints for bpf_map_lookup_elem():
108  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
109  *   .arg1_type = ARG_CONST_MAP_PTR,
110  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
111  *
112  * ret_type says that this function returns 'pointer to map elem value or null',
113  * the function expects 1st argument to be a const pointer to 'struct bpf_map' and
114  * 2nd argument should be a pointer to stack, which will be used inside
115  * the helper function as a pointer to map element key.
116  *
117  * On the kernel side the helper function looks like:
118  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
119  * {
120  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
121  *    void *key = (void *) (unsigned long) r2;
122  *    void *value;
123  *
124  *    here kernel can access 'key' and 'map' pointers safely, knowing that
125  *    [key, key + map->key_size) bytes are valid and were initialized on
126  *    the stack of eBPF program.
127  * }
128  *
129  * Corresponding eBPF program may look like:
130  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
131  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
132  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
133  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
134  * here verifier looks at prototype of map_lookup_elem() and sees:
135  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
136  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
137  *
138  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
139  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
140  * and were initialized prior to this call.
141  * If it's ok, then verifier allows this BPF_CALL insn and looks at
142  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
143  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
144  * returns either pointer to map value or NULL.
145  *
146  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
147  * insn, the register holding that pointer in the true branch changes state to
148  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
149  * branch. See check_cond_jmp_op().
150  *
151  * After the call R0 is set to return type of the function and registers R1-R5
152  * are set to NOT_INIT to indicate that they are no longer readable.
153  *
154  * The following reference types represent a potential reference to a kernel
155  * resource which, after first being allocated, must be checked and freed by
156  * the BPF program:
157  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
158  *
159  * When the verifier sees a helper call return a reference type, it allocates a
160  * pointer id for the reference and stores it in the current function state.
161  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
162  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
163  * passes through a NULL-check conditional. For the branch wherein the state is
164  * changed to CONST_IMM, the verifier releases the reference.
165  *
166  * For each helper function that allocates a reference, such as
167  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
168  * bpf_sk_release(). When a reference type passes into the release function,
169  * the verifier also releases the reference. If any unchecked or unreleased
170  * reference remains at the end of the program, the verifier rejects it.
171  */
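
/* For illustration, a minimal sketch of the acquire/release pattern described
 * above (argument setup for the lookup helper is omitted and helper
 * availability depends on program type, so treat this as an example rather
 * than a template):
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *                                           // R0 is PTR_TO_SOCKET_OR_NULL,
 *                                           // a new reference id is acquired
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), // NULL branch: the verifier
 *                                           // releases the reference itself
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),   // non-NULL: R0 is PTR_TO_SOCKET
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *
 * Dropping the bpf_sk_release() call on the non-NULL path would leave an
 * unreleased reference at program exit and the program would be rejected.
 */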
172 
173 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
174 struct bpf_verifier_stack_elem {
175 	/* verifier state is 'st'
176 	 * before processing instruction 'insn_idx'
177 	 * and after processing instruction 'prev_insn_idx'
178 	 */
179 	struct bpf_verifier_state st;
180 	int insn_idx;
181 	int prev_insn_idx;
182 	struct bpf_verifier_stack_elem *next;
183 	/* length of verifier log at the time this state was pushed on stack */
184 	u32 log_pos;
185 };
186 
187 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
188 #define BPF_COMPLEXITY_LIMIT_STATES	64
189 
190 #define BPF_MAP_KEY_POISON	(1ULL << 63)
191 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
192 
193 #define BPF_MAP_PTR_UNPRIV	1UL
194 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
195 					  POISON_POINTER_DELTA))
196 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
197 
198 #define BPF_GLOBAL_PERCPU_MA_MAX_SIZE  512
199 
200 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
201 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
202 static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
203 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
204 static int ref_set_non_owning(struct bpf_verifier_env *env,
205 			      struct bpf_reg_state *reg);
206 static void specialize_kfunc(struct bpf_verifier_env *env,
207 			     u32 func_id, u16 offset, unsigned long *addr);
208 static bool is_trusted_reg(const struct bpf_reg_state *reg);
209 
210 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
211 {
212 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
213 }
214 
215 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
216 {
217 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
218 }
219 
220 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
221 			      const struct bpf_map *map, bool unpriv)
222 {
223 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
224 	unpriv |= bpf_map_ptr_unpriv(aux);
225 	aux->map_ptr_state = (unsigned long)map |
226 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
227 }
228 
229 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
230 {
231 	return aux->map_key_state & BPF_MAP_KEY_POISON;
232 }
233 
234 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
235 {
236 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
237 }
238 
239 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
240 {
241 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
242 }
243 
244 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
245 {
246 	bool poisoned = bpf_map_key_poisoned(aux);
247 
248 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
249 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
250 }
251 
252 static bool bpf_helper_call(const struct bpf_insn *insn)
253 {
254 	return insn->code == (BPF_JMP | BPF_CALL) &&
255 	       insn->src_reg == 0;
256 }
257 
258 static bool bpf_pseudo_call(const struct bpf_insn *insn)
259 {
260 	return insn->code == (BPF_JMP | BPF_CALL) &&
261 	       insn->src_reg == BPF_PSEUDO_CALL;
262 }
263 
264 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
265 {
266 	return insn->code == (BPF_JMP | BPF_CALL) &&
267 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
268 }
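
/* For illustration, the three BPF_JMP|BPF_CALL flavours distinguished by the
 * predicates above (insn_delta below is a hypothetical jump distance):
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem)
 *                               // helper call: src_reg == 0
 *    BPF_CALL_REL(insn_delta)   // bpf-to-bpf call: src_reg == BPF_PSEUDO_CALL
 *                               // kfunc call: src_reg == BPF_PSEUDO_KFUNC_CALL,
 *                               // with imm set to the BTF id of the target
 *                               // kernel function
 */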
269 
270 struct bpf_call_arg_meta {
271 	struct bpf_map *map_ptr;
272 	bool raw_mode;
273 	bool pkt_access;
274 	u8 release_regno;
275 	int regno;
276 	int access_size;
277 	int mem_size;
278 	u64 msize_max_value;
279 	int ref_obj_id;
280 	int dynptr_id;
281 	int map_uid;
282 	int func_id;
283 	struct btf *btf;
284 	u32 btf_id;
285 	struct btf *ret_btf;
286 	u32 ret_btf_id;
287 	u32 subprogno;
288 	struct btf_field *kptr_field;
289 };
290 
291 struct bpf_kfunc_call_arg_meta {
292 	/* In parameters */
293 	struct btf *btf;
294 	u32 func_id;
295 	u32 kfunc_flags;
296 	const struct btf_type *func_proto;
297 	const char *func_name;
298 	/* Out parameters */
299 	u32 ref_obj_id;
300 	u8 release_regno;
301 	bool r0_rdonly;
302 	u32 ret_btf_id;
303 	u64 r0_size;
304 	u32 subprogno;
305 	struct {
306 		u64 value;
307 		bool found;
308 	} arg_constant;
309 
310 	/* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
311 	 * generally to pass info about user-defined local kptr types to later
312 	 * verification logic
313 	 *   bpf_obj_drop/bpf_percpu_obj_drop
314 	 *     Record the local kptr type to be drop'd
315 	 *   bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
316 	 *     Record the local kptr type to be refcount_incr'd and use
317 	 *     arg_owning_ref to determine whether refcount_acquire should be
318 	 *     fallible
319 	 */
320 	struct btf *arg_btf;
321 	u32 arg_btf_id;
322 	bool arg_owning_ref;
323 
324 	struct {
325 		struct btf_field *field;
326 	} arg_list_head;
327 	struct {
328 		struct btf_field *field;
329 	} arg_rbtree_root;
330 	struct {
331 		enum bpf_dynptr_type type;
332 		u32 id;
333 		u32 ref_obj_id;
334 	} initialized_dynptr;
335 	struct {
336 		u8 spi;
337 		u8 frameno;
338 	} iter;
339 	u64 mem_size;
340 };
341 
342 struct btf *btf_vmlinux;
343 
344 static const char *btf_type_name(const struct btf *btf, u32 id)
345 {
346 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
347 }
348 
349 static DEFINE_MUTEX(bpf_verifier_lock);
350 static DEFINE_MUTEX(bpf_percpu_ma_lock);
351 
352 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
353 {
354 	struct bpf_verifier_env *env = private_data;
355 	va_list args;
356 
357 	if (!bpf_verifier_log_needed(&env->log))
358 		return;
359 
360 	va_start(args, fmt);
361 	bpf_verifier_vlog(&env->log, fmt, args);
362 	va_end(args);
363 }
364 
365 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
366 				   struct bpf_reg_state *reg,
367 				   struct bpf_retval_range range, const char *ctx,
368 				   const char *reg_name)
369 {
370 	bool unknown = true;
371 
372 	verbose(env, "%s the register %s has", ctx, reg_name);
373 	if (reg->smin_value > S64_MIN) {
374 		verbose(env, " smin=%lld", reg->smin_value);
375 		unknown = false;
376 	}
377 	if (reg->smax_value < S64_MAX) {
378 		verbose(env, " smax=%lld", reg->smax_value);
379 		unknown = false;
380 	}
381 	if (unknown)
382 		verbose(env, " unknown scalar value");
383 	verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval);
384 }
385 
386 static bool type_may_be_null(u32 type)
387 {
388 	return type & PTR_MAYBE_NULL;
389 }
390 
391 static bool reg_not_null(const struct bpf_reg_state *reg)
392 {
393 	enum bpf_reg_type type;
394 
395 	type = reg->type;
396 	if (type_may_be_null(type))
397 		return false;
398 
399 	type = base_type(type);
400 	return type == PTR_TO_SOCKET ||
401 		type == PTR_TO_TCP_SOCK ||
402 		type == PTR_TO_MAP_VALUE ||
403 		type == PTR_TO_MAP_KEY ||
404 		type == PTR_TO_SOCK_COMMON ||
405 		(type == PTR_TO_BTF_ID && is_trusted_reg(reg)) ||
406 		type == PTR_TO_MEM;
407 }
408 
409 static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
410 {
411 	struct btf_record *rec = NULL;
412 	struct btf_struct_meta *meta;
413 
414 	if (reg->type == PTR_TO_MAP_VALUE) {
415 		rec = reg->map_ptr->record;
416 	} else if (type_is_ptr_alloc_obj(reg->type)) {
417 		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
418 		if (meta)
419 			rec = meta->record;
420 	}
421 	return rec;
422 }
423 
424 static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
425 {
426 	struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
427 
428 	return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
429 }
430 
431 static const char *subprog_name(const struct bpf_verifier_env *env, int subprog)
432 {
433 	struct bpf_func_info *info;
434 
435 	if (!env->prog->aux->func_info)
436 		return "";
437 
438 	info = &env->prog->aux->func_info[subprog];
439 	return btf_type_name(env->prog->aux->btf, info->type_id);
440 }
441 
442 static void mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog)
443 {
444 	struct bpf_subprog_info *info = subprog_info(env, subprog);
445 
446 	info->is_cb = true;
447 	info->is_async_cb = true;
448 	info->is_exception_cb = true;
449 }
450 
451 static bool subprog_is_exc_cb(struct bpf_verifier_env *env, int subprog)
452 {
453 	return subprog_info(env, subprog)->is_exception_cb;
454 }
455 
456 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
457 {
458 	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
459 }
460 
461 static bool type_is_rdonly_mem(u32 type)
462 {
463 	return type & MEM_RDONLY;
464 }
465 
466 static bool is_acquire_function(enum bpf_func_id func_id,
467 				const struct bpf_map *map)
468 {
469 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
470 
471 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
472 	    func_id == BPF_FUNC_sk_lookup_udp ||
473 	    func_id == BPF_FUNC_skc_lookup_tcp ||
474 	    func_id == BPF_FUNC_ringbuf_reserve ||
475 	    func_id == BPF_FUNC_kptr_xchg)
476 		return true;
477 
478 	if (func_id == BPF_FUNC_map_lookup_elem &&
479 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
480 	     map_type == BPF_MAP_TYPE_SOCKHASH))
481 		return true;
482 
483 	return false;
484 }
485 
486 static bool is_ptr_cast_function(enum bpf_func_id func_id)
487 {
488 	return func_id == BPF_FUNC_tcp_sock ||
489 		func_id == BPF_FUNC_sk_fullsock ||
490 		func_id == BPF_FUNC_skc_to_tcp_sock ||
491 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
492 		func_id == BPF_FUNC_skc_to_udp6_sock ||
493 		func_id == BPF_FUNC_skc_to_mptcp_sock ||
494 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
495 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
496 }
497 
498 static bool is_dynptr_ref_function(enum bpf_func_id func_id)
499 {
500 	return func_id == BPF_FUNC_dynptr_data;
501 }
502 
503 static bool is_sync_callback_calling_kfunc(u32 btf_id);
504 static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
505 
506 static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
507 {
508 	return func_id == BPF_FUNC_for_each_map_elem ||
509 	       func_id == BPF_FUNC_find_vma ||
510 	       func_id == BPF_FUNC_loop ||
511 	       func_id == BPF_FUNC_user_ringbuf_drain;
512 }
513 
514 static bool is_async_callback_calling_function(enum bpf_func_id func_id)
515 {
516 	return func_id == BPF_FUNC_timer_set_callback;
517 }
518 
519 static bool is_callback_calling_function(enum bpf_func_id func_id)
520 {
521 	return is_sync_callback_calling_function(func_id) ||
522 	       is_async_callback_calling_function(func_id);
523 }
524 
525 static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
526 {
527 	return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) ||
528 	       (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
529 }
530 
531 static bool is_async_callback_calling_insn(struct bpf_insn *insn)
532 {
533 	return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
534 }
535 
536 static bool is_storage_get_function(enum bpf_func_id func_id)
537 {
538 	return func_id == BPF_FUNC_sk_storage_get ||
539 	       func_id == BPF_FUNC_inode_storage_get ||
540 	       func_id == BPF_FUNC_task_storage_get ||
541 	       func_id == BPF_FUNC_cgrp_storage_get;
542 }
543 
544 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
545 					const struct bpf_map *map)
546 {
547 	int ref_obj_uses = 0;
548 
549 	if (is_ptr_cast_function(func_id))
550 		ref_obj_uses++;
551 	if (is_acquire_function(func_id, map))
552 		ref_obj_uses++;
553 	if (is_dynptr_ref_function(func_id))
554 		ref_obj_uses++;
555 
556 	return ref_obj_uses > 1;
557 }
558 
559 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
560 {
561 	return BPF_CLASS(insn->code) == BPF_STX &&
562 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
563 	       insn->imm == BPF_CMPXCHG;
564 }
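
/* For illustration, an encoding that the predicate above matches (a sketch
 * using the kernel's insn macros):
 *
 *    BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, 0)
 *    // insn->code == BPF_STX | BPF_DW | BPF_ATOMIC, insn->imm == BPF_CMPXCHG
 */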
565 
566 static int __get_spi(s32 off)
567 {
568 	return (-off - 1) / BPF_REG_SIZE;
569 }
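
/* For illustration, with BPF_REG_SIZE == 8: off == -8 maps to spi 0,
 * off == -16 maps to spi 1, and so on. A two-slot object such as a dynptr
 * whose lowest byte is at off == -16 thus occupies spi 1 (first slot) and
 * spi 0 (second slot), matching the "spi grows downwards" note in
 * is_spi_bounds_valid() below.
 */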
570 
571 static struct bpf_func_state *func(struct bpf_verifier_env *env,
572 				   const struct bpf_reg_state *reg)
573 {
574 	struct bpf_verifier_state *cur = env->cur_state;
575 
576 	return cur->frame[reg->frameno];
577 }
578 
579 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
580 {
581 	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
582 
583 	/* We need to check that slots between [spi - nr_slots + 1, spi] are
584 	 * within [0, allocated_stack).
585 	 *
586 	 * Please note that the spi grows downwards. For example, a dynptr
587 	 * takes the size of two stack slots; the first slot will be at
588 	 * spi and the second slot will be at spi - 1.
589 	 */
590 	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
591 }
592 
593 static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
594 			          const char *obj_kind, int nr_slots)
595 {
596 	int off, spi;
597 
598 	if (!tnum_is_const(reg->var_off)) {
599 		verbose(env, "%s has to be at a constant offset\n", obj_kind);
600 		return -EINVAL;
601 	}
602 
603 	off = reg->off + reg->var_off.value;
604 	if (off % BPF_REG_SIZE) {
605 		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
606 		return -EINVAL;
607 	}
608 
609 	spi = __get_spi(off);
610 	if (spi + 1 < nr_slots) {
611 		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
612 		return -EINVAL;
613 	}
614 
615 	if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots))
616 		return -ERANGE;
617 	return spi;
618 }
619 
620 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
621 {
622 	return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS);
623 }
624 
625 static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
626 {
627 	return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
628 }
629 
630 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
631 {
632 	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
633 	case DYNPTR_TYPE_LOCAL:
634 		return BPF_DYNPTR_TYPE_LOCAL;
635 	case DYNPTR_TYPE_RINGBUF:
636 		return BPF_DYNPTR_TYPE_RINGBUF;
637 	case DYNPTR_TYPE_SKB:
638 		return BPF_DYNPTR_TYPE_SKB;
639 	case DYNPTR_TYPE_XDP:
640 		return BPF_DYNPTR_TYPE_XDP;
641 	default:
642 		return BPF_DYNPTR_TYPE_INVALID;
643 	}
644 }
645 
646 static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
647 {
648 	switch (type) {
649 	case BPF_DYNPTR_TYPE_LOCAL:
650 		return DYNPTR_TYPE_LOCAL;
651 	case BPF_DYNPTR_TYPE_RINGBUF:
652 		return DYNPTR_TYPE_RINGBUF;
653 	case BPF_DYNPTR_TYPE_SKB:
654 		return DYNPTR_TYPE_SKB;
655 	case BPF_DYNPTR_TYPE_XDP:
656 		return DYNPTR_TYPE_XDP;
657 	default:
658 		return 0;
659 	}
660 }
661 
662 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
663 {
664 	return type == BPF_DYNPTR_TYPE_RINGBUF;
665 }
666 
667 static void __mark_dynptr_reg(struct bpf_reg_state *reg,
668 			      enum bpf_dynptr_type type,
669 			      bool first_slot, int dynptr_id);
670 
671 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
672 				struct bpf_reg_state *reg);
673 
674 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
675 				   struct bpf_reg_state *sreg1,
676 				   struct bpf_reg_state *sreg2,
677 				   enum bpf_dynptr_type type)
678 {
679 	int id = ++env->id_gen;
680 
681 	__mark_dynptr_reg(sreg1, type, true, id);
682 	__mark_dynptr_reg(sreg2, type, false, id);
683 }
684 
685 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
686 			       struct bpf_reg_state *reg,
687 			       enum bpf_dynptr_type type)
688 {
689 	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
690 }
691 
692 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
693 				        struct bpf_func_state *state, int spi);
694 
695 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
696 				   enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
697 {
698 	struct bpf_func_state *state = func(env, reg);
699 	enum bpf_dynptr_type type;
700 	int spi, i, err;
701 
702 	spi = dynptr_get_spi(env, reg);
703 	if (spi < 0)
704 		return spi;
705 
706 	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
707 	 * hence we need to call destroy_if_dynptr_stack_slot on both slots, so
708 	 * that for the following example:
709 	 *	[d1][d1][d2][d2]
710 	 * spi    3   2   1   0
711 	 * marking spi = 2 leads to destruction of both d1 and d2. In case the
712 	 * two slots do belong to the same dynptr, the second call won't see
713 	 * slot_type as STACK_DYNPTR and will simply skip destruction.
714 	 */
715 	err = destroy_if_dynptr_stack_slot(env, state, spi);
716 	if (err)
717 		return err;
718 	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
719 	if (err)
720 		return err;
721 
722 	for (i = 0; i < BPF_REG_SIZE; i++) {
723 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
724 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
725 	}
726 
727 	type = arg_to_dynptr_type(arg_type);
728 	if (type == BPF_DYNPTR_TYPE_INVALID)
729 		return -EINVAL;
730 
731 	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
732 			       &state->stack[spi - 1].spilled_ptr, type);
733 
734 	if (dynptr_type_refcounted(type)) {
735 		/* The id is used to track proper releasing */
736 		int id;
737 
738 		if (clone_ref_obj_id)
739 			id = clone_ref_obj_id;
740 		else
741 			id = acquire_reference_state(env, insn_idx);
742 
743 		if (id < 0)
744 			return id;
745 
746 		state->stack[spi].spilled_ptr.ref_obj_id = id;
747 		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
748 	}
749 
750 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
751 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
752 
753 	return 0;
754 }
755 
756 static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
757 {
758 	int i;
759 
760 	for (i = 0; i < BPF_REG_SIZE; i++) {
761 		state->stack[spi].slot_type[i] = STACK_INVALID;
762 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
763 	}
764 
765 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
766 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
767 
768 	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
769 	 *
770 	 * While we don't allow reading STACK_INVALID, it is still possible to
771 	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
772 	 * helpers or insns can do partial read of that part without failing,
773 	 * but check_stack_range_initialized, check_stack_read_var_off, and
774 	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
775 	 * the slot conservatively. Hence we need to prevent those liveness
776 	 * marking walks.
777 	 *
778 	 * This was not a problem before because STACK_INVALID is only set by
779 	 * default (where the default reg state has its reg->parent as NULL), or
780 	 * in clean_live_states after REG_LIVE_DONE (at which point
781 	 * mark_reg_read won't walk reg->parent chain), but not randomly during
782 	 * verifier state exploration (like we did above). Hence, for our case
783 	 * parentage chain will still be live (i.e. reg->parent may be
784 	 * non-NULL), while earlier reg->parent was NULL, so we need
785 	 * REG_LIVE_WRITTEN to screen off read marker propagation, whether it is
786 	 * done later by reads or by mark_dynptr_read, so that registers in
787 	 * parent verifier states are not marked unnecessarily.
788 	 */
789 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
790 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
791 }
792 
793 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
794 {
795 	struct bpf_func_state *state = func(env, reg);
796 	int spi, ref_obj_id, i;
797 
798 	spi = dynptr_get_spi(env, reg);
799 	if (spi < 0)
800 		return spi;
801 
802 	if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
803 		invalidate_dynptr(env, state, spi);
804 		return 0;
805 	}
806 
807 	ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id;
808 
809 	/* If the dynptr has a ref_obj_id, then we need to invalidate
810 	 * two things:
811 	 *
812 	 * 1) Any dynptrs with a matching ref_obj_id (clones)
813 	 * 2) Any slices derived from this dynptr.
814 	 */
815 
816 	/* Invalidate any slices associated with this dynptr */
817 	WARN_ON_ONCE(release_reference(env, ref_obj_id));
818 
819 	/* Invalidate any dynptr clones */
820 	for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) {
821 		if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id)
822 			continue;
823 
824 		/* it should always be the case that if the ref obj id
825 		 * matches then the stack slot also belongs to a
826 		 * dynptr
827 		 */
828 		if (state->stack[i].slot_type[0] != STACK_DYNPTR) {
829 			verbose(env, "verifier internal error: misconfigured ref_obj_id\n");
830 			return -EFAULT;
831 		}
832 		if (state->stack[i].spilled_ptr.dynptr.first_slot)
833 			invalidate_dynptr(env, state, i);
834 	}
835 
836 	return 0;
837 }
838 
839 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
840 			       struct bpf_reg_state *reg);
841 
842 static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
843 {
844 	if (!env->allow_ptr_leaks)
845 		__mark_reg_not_init(env, reg);
846 	else
847 		__mark_reg_unknown(env, reg);
848 }
849 
850 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
851 				        struct bpf_func_state *state, int spi)
852 {
853 	struct bpf_func_state *fstate;
854 	struct bpf_reg_state *dreg;
855 	int i, dynptr_id;
856 
857 	/* We always ensure that STACK_DYNPTR is never set partially,
858 	 * hence just checking for slot_type[0] is enough. This is
859 	 * different for STACK_SPILL, where it may be only set for
860 	 * 1 byte, so code has to use is_spilled_reg.
861 	 */
862 	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
863 		return 0;
864 
865 	/* Reposition spi to first slot */
866 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
867 		spi = spi + 1;
868 
869 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
870 		verbose(env, "cannot overwrite referenced dynptr\n");
871 		return -EINVAL;
872 	}
873 
874 	mark_stack_slot_scratched(env, spi);
875 	mark_stack_slot_scratched(env, spi - 1);
876 
877 	/* Writing partially to one dynptr stack slot destroys both. */
878 	for (i = 0; i < BPF_REG_SIZE; i++) {
879 		state->stack[spi].slot_type[i] = STACK_INVALID;
880 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
881 	}
882 
883 	dynptr_id = state->stack[spi].spilled_ptr.id;
884 	/* Invalidate any slices associated with this dynptr */
885 	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
886 		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
887 		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
888 			continue;
889 		if (dreg->dynptr_id == dynptr_id)
890 			mark_reg_invalid(env, dreg);
891 	}));
892 
893 	/* Do not release reference state; we are destroying the dynptr on the
894 	 * stack, not using some helper to release it. Just reset the register.
895 	 */
896 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
897 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
898 
899 	/* Same reason as unmark_stack_slots_dynptr above */
900 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
901 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
902 
903 	return 0;
904 }
905 
906 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
907 {
908 	int spi;
909 
910 	if (reg->type == CONST_PTR_TO_DYNPTR)
911 		return false;
912 
913 	spi = dynptr_get_spi(env, reg);
914 
915 	/* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an
916 	 * error because this just means the stack state hasn't been updated yet.
917 	 * We will do check_mem_access to check and update stack bounds later.
918 	 */
919 	if (spi < 0 && spi != -ERANGE)
920 		return false;
921 
922 	/* We don't need to check if the stack slots are marked by previous
923 	 * dynptr initializations because we allow overwriting existing unreferenced
924 	 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls
925 	 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are
926 	 * touching are completely destructed before we reinitialize them for a new
927 	 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early
928 	 * instead of delaying it until the end where the user will get "Unreleased
929 	 * instead of delaying it until the end, where the user would get an "Unreleased
930 	 */
931 	return true;
932 }
933 
934 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
935 {
936 	struct bpf_func_state *state = func(env, reg);
937 	int i, spi;
938 
939 	/* This already represents first slot of initialized bpf_dynptr.
940 	 *
941 	 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
942 	 * check_func_arg_reg_off's logic, so we don't need to check its
943 	 * offset and alignment.
944 	 */
945 	if (reg->type == CONST_PTR_TO_DYNPTR)
946 		return true;
947 
948 	spi = dynptr_get_spi(env, reg);
949 	if (spi < 0)
950 		return false;
951 	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
952 		return false;
953 
954 	for (i = 0; i < BPF_REG_SIZE; i++) {
955 		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
956 		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
957 			return false;
958 	}
959 
960 	return true;
961 }
962 
963 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
964 				    enum bpf_arg_type arg_type)
965 {
966 	struct bpf_func_state *state = func(env, reg);
967 	enum bpf_dynptr_type dynptr_type;
968 	int spi;
969 
970 	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
971 	if (arg_type == ARG_PTR_TO_DYNPTR)
972 		return true;
973 
974 	dynptr_type = arg_to_dynptr_type(arg_type);
975 	if (reg->type == CONST_PTR_TO_DYNPTR) {
976 		return reg->dynptr.type == dynptr_type;
977 	} else {
978 		spi = dynptr_get_spi(env, reg);
979 		if (spi < 0)
980 			return false;
981 		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
982 	}
983 }
984 
985 static void __mark_reg_known_zero(struct bpf_reg_state *reg);
986 
987 static bool in_rcu_cs(struct bpf_verifier_env *env);
988 
989 static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta);
990 
991 static int mark_stack_slots_iter(struct bpf_verifier_env *env,
992 				 struct bpf_kfunc_call_arg_meta *meta,
993 				 struct bpf_reg_state *reg, int insn_idx,
994 				 struct btf *btf, u32 btf_id, int nr_slots)
995 {
996 	struct bpf_func_state *state = func(env, reg);
997 	int spi, i, j, id;
998 
999 	spi = iter_get_spi(env, reg, nr_slots);
1000 	if (spi < 0)
1001 		return spi;
1002 
1003 	id = acquire_reference_state(env, insn_idx);
1004 	if (id < 0)
1005 		return id;
1006 
1007 	for (i = 0; i < nr_slots; i++) {
1008 		struct bpf_stack_state *slot = &state->stack[spi - i];
1009 		struct bpf_reg_state *st = &slot->spilled_ptr;
1010 
1011 		__mark_reg_known_zero(st);
1012 		st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
1013 		if (is_kfunc_rcu_protected(meta)) {
1014 			if (in_rcu_cs(env))
1015 				st->type |= MEM_RCU;
1016 			else
1017 				st->type |= PTR_UNTRUSTED;
1018 		}
1019 		st->live |= REG_LIVE_WRITTEN;
1020 		st->ref_obj_id = i == 0 ? id : 0;
1021 		st->iter.btf = btf;
1022 		st->iter.btf_id = btf_id;
1023 		st->iter.state = BPF_ITER_STATE_ACTIVE;
1024 		st->iter.depth = 0;
1025 
1026 		for (j = 0; j < BPF_REG_SIZE; j++)
1027 			slot->slot_type[j] = STACK_ITER;
1028 
1029 		mark_stack_slot_scratched(env, spi - i);
1030 	}
1031 
1032 	return 0;
1033 }
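
/* For illustration: a 3-slot iterator whose lowest byte is at stack offset
 * -24 gets spi == 2, so slots spi == 2, 1 and 0 are all marked STACK_ITER,
 * and only the first slot (i == 0 above) carries the acquired ref_obj_id.
 */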
1034 
1035 static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
1036 				   struct bpf_reg_state *reg, int nr_slots)
1037 {
1038 	struct bpf_func_state *state = func(env, reg);
1039 	int spi, i, j;
1040 
1041 	spi = iter_get_spi(env, reg, nr_slots);
1042 	if (spi < 0)
1043 		return spi;
1044 
1045 	for (i = 0; i < nr_slots; i++) {
1046 		struct bpf_stack_state *slot = &state->stack[spi - i];
1047 		struct bpf_reg_state *st = &slot->spilled_ptr;
1048 
1049 		if (i == 0)
1050 			WARN_ON_ONCE(release_reference(env, st->ref_obj_id));
1051 
1052 		__mark_reg_not_init(env, st);
1053 
1054 		/* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
1055 		st->live |= REG_LIVE_WRITTEN;
1056 
1057 		for (j = 0; j < BPF_REG_SIZE; j++)
1058 			slot->slot_type[j] = STACK_INVALID;
1059 
1060 		mark_stack_slot_scratched(env, spi - i);
1061 	}
1062 
1063 	return 0;
1064 }
1065 
1066 static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
1067 				     struct bpf_reg_state *reg, int nr_slots)
1068 {
1069 	struct bpf_func_state *state = func(env, reg);
1070 	int spi, i, j;
1071 
1072 	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
1073 	 * will do check_mem_access to check and update stack bounds later, so
1074 	 * return true for that case.
1075 	 */
1076 	spi = iter_get_spi(env, reg, nr_slots);
1077 	if (spi == -ERANGE)
1078 		return true;
1079 	if (spi < 0)
1080 		return false;
1081 
1082 	for (i = 0; i < nr_slots; i++) {
1083 		struct bpf_stack_state *slot = &state->stack[spi - i];
1084 
1085 		for (j = 0; j < BPF_REG_SIZE; j++)
1086 			if (slot->slot_type[j] == STACK_ITER)
1087 				return false;
1088 	}
1089 
1090 	return true;
1091 }
1092 
1093 static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1094 				   struct btf *btf, u32 btf_id, int nr_slots)
1095 {
1096 	struct bpf_func_state *state = func(env, reg);
1097 	int spi, i, j;
1098 
1099 	spi = iter_get_spi(env, reg, nr_slots);
1100 	if (spi < 0)
1101 		return -EINVAL;
1102 
1103 	for (i = 0; i < nr_slots; i++) {
1104 		struct bpf_stack_state *slot = &state->stack[spi - i];
1105 		struct bpf_reg_state *st = &slot->spilled_ptr;
1106 
1107 		if (st->type & PTR_UNTRUSTED)
1108 			return -EPROTO;
1109 		/* only main (first) slot has ref_obj_id set */
1110 		if (i == 0 && !st->ref_obj_id)
1111 			return -EINVAL;
1112 		if (i != 0 && st->ref_obj_id)
1113 			return -EINVAL;
1114 		if (st->iter.btf != btf || st->iter.btf_id != btf_id)
1115 			return -EINVAL;
1116 
1117 		for (j = 0; j < BPF_REG_SIZE; j++)
1118 			if (slot->slot_type[j] != STACK_ITER)
1119 				return -EINVAL;
1120 	}
1121 
1122 	return 0;
1123 }
1124 
1125 /* Check if given stack slot is "special":
1126  *   - spilled register state (STACK_SPILL);
1127  *   - dynptr state (STACK_DYNPTR);
1128  *   - iter state (STACK_ITER).
1129  */
1130 static bool is_stack_slot_special(const struct bpf_stack_state *stack)
1131 {
1132 	enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1];
1133 
1134 	switch (type) {
1135 	case STACK_SPILL:
1136 	case STACK_DYNPTR:
1137 	case STACK_ITER:
1138 		return true;
1139 	case STACK_INVALID:
1140 	case STACK_MISC:
1141 	case STACK_ZERO:
1142 		return false;
1143 	default:
1144 		WARN_ONCE(1, "unknown stack slot type %d\n", type);
1145 		return true;
1146 	}
1147 }
1148 
1149 /* The reg state of a pointer or a bounded scalar was saved when
1150  * it was spilled to the stack.
1151  */
1152 static bool is_spilled_reg(const struct bpf_stack_state *stack)
1153 {
1154 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
1155 }
1156 
1157 static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
1158 {
1159 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL &&
1160 	       stack->spilled_ptr.type == SCALAR_VALUE;
1161 }
1162 
1163 static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
1164 {
1165 	return stack->slot_type[0] == STACK_SPILL &&
1166 	       stack->spilled_ptr.type == SCALAR_VALUE;
1167 }
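
/* For illustration: a narrow (e.g. 4-byte) scalar spill marks only the
 * trailing slot_type bytes as STACK_SPILL, so is_spilled_scalar_reg(),
 * which tests slot_type[BPF_REG_SIZE - 1], sees it, while
 * is_spilled_scalar_reg64(), which tests slot_type[0], matches only full
 * 8-byte spills.
 */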
1168 
1169 /* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
1170  * case they are equivalent, or it's STACK_ZERO, in which case we preserve
1171  * more precise STACK_ZERO.
1172  * Note, in unprivileged mode leaving STACK_INVALID is wrong, so we take
1173  * env->allow_ptr_leaks into account and force STACK_MISC, if necessary.
1174  */
1175 static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
1176 {
1177 	if (*stype == STACK_ZERO)
1178 		return;
1179 	if (env->allow_ptr_leaks && *stype == STACK_INVALID)
1180 		return;
1181 	*stype = STACK_MISC;
1182 }
1183 
1184 static void scrub_spilled_slot(u8 *stype)
1185 {
1186 	if (*stype != STACK_INVALID)
1187 		*stype = STACK_MISC;
1188 }
1189 
1190 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
1191  * small to hold src. This is different from krealloc since we don't want to preserve
1192  * the contents of dst.
1193  *
1194  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
1195  * not be allocated.
1196  */
1197 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
1198 {
1199 	size_t alloc_bytes;
1200 	void *orig = dst;
1201 	size_t bytes;
1202 
1203 	if (ZERO_OR_NULL_PTR(src))
1204 		goto out;
1205 
1206 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1207 		return NULL;
1208 
1209 	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
1210 	dst = krealloc(orig, alloc_bytes, flags);
1211 	if (!dst) {
1212 		kfree(orig);
1213 		return NULL;
1214 	}
1215 
1216 	memcpy(dst, src, bytes);
1217 out:
1218 	return dst ? dst : ZERO_SIZE_PTR;
1219 }
1220 
1221 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
1222  * small to hold new_n items. new items are zeroed out if the array grows.
1223  *
1224  * Contrary to krealloc_array, does not free arr if new_n is zero.
1225  */
1226 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1227 {
1228 	size_t alloc_size;
1229 	void *new_arr;
1230 
1231 	if (!new_n || old_n == new_n)
1232 		goto out;
1233 
1234 	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
1235 	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
1236 	if (!new_arr) {
1237 		kfree(arr);
1238 		return NULL;
1239 	}
1240 	arr = new_arr;
1241 
1242 	if (new_n > old_n)
1243 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
1244 
1245 out:
1246 	return arr ? arr : ZERO_SIZE_PTR;
1247 }
1248 
1249 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1250 {
1251 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1252 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
1253 	if (!dst->refs)
1254 		return -ENOMEM;
1255 
1256 	dst->acquired_refs = src->acquired_refs;
1257 	return 0;
1258 }
1259 
1260 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1261 {
1262 	size_t n = src->allocated_stack / BPF_REG_SIZE;
1263 
1264 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1265 				GFP_KERNEL);
1266 	if (!dst->stack)
1267 		return -ENOMEM;
1268 
1269 	dst->allocated_stack = src->allocated_stack;
1270 	return 0;
1271 }
1272 
1273 static int resize_reference_state(struct bpf_func_state *state, size_t n)
1274 {
1275 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
1276 				    sizeof(struct bpf_reference_state));
1277 	if (!state->refs)
1278 		return -ENOMEM;
1279 
1280 	state->acquired_refs = n;
1281 	return 0;
1282 }
1283 
1284 /* Possibly update state->allocated_stack to be at least size bytes. Also
1285  * possibly update the function's high-water mark in its bpf_subprog_info.
1286  */
1287 static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
1288 {
1289 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n;
1290 
1291 	/* The stack size is always a multiple of BPF_REG_SIZE. */
1292 	size = round_up(size, BPF_REG_SIZE);
1293 	n = size / BPF_REG_SIZE;
1294 
1295 	if (old_n >= n)
1296 		return 0;
1297 
1298 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1299 	if (!state->stack)
1300 		return -ENOMEM;
1301 
1302 	state->allocated_stack = size;
1303 
1304 	/* update known max for given subprogram */
1305 	if (env->subprog_info[state->subprogno].stack_depth < size)
1306 		env->subprog_info[state->subprogno].stack_depth = size;
1307 
1308 	return 0;
1309 }
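
/* For illustration: a write touching 20 bytes of stack leads to, e.g.,
 * grow_stack_state(env, state, 20), which rounds up to 24 bytes (three
 * 8-byte slots) and bumps the subprog's recorded stack_depth to 24 if the
 * previous high-water mark was smaller.
 */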
1310 
1311 /* Acquire a pointer id from the env and update the state->refs to include
1312  * this new pointer reference.
1313  * On success, returns a valid pointer id to associate with the register.
1314  * On failure, returns a negative errno.
1315  */
1316 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1317 {
1318 	struct bpf_func_state *state = cur_func(env);
1319 	int new_ofs = state->acquired_refs;
1320 	int id, err;
1321 
1322 	err = resize_reference_state(state, state->acquired_refs + 1);
1323 	if (err)
1324 		return err;
1325 	id = ++env->id_gen;
1326 	state->refs[new_ofs].id = id;
1327 	state->refs[new_ofs].insn_idx = insn_idx;
1328 	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
1329 
1330 	return id;
1331 }
1332 
1333 /* release function corresponding to acquire_reference_state(). Idempotent. */
1334 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
1335 {
1336 	int i, last_idx;
1337 
1338 	last_idx = state->acquired_refs - 1;
1339 	for (i = 0; i < state->acquired_refs; i++) {
1340 		if (state->refs[i].id == ptr_id) {
1341 			/* Cannot release caller references in callbacks */
1342 			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1343 				return -EINVAL;
1344 			if (last_idx && i != last_idx)
1345 				memcpy(&state->refs[i], &state->refs[last_idx],
1346 				       sizeof(*state->refs));
1347 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1348 			state->acquired_refs--;
1349 			return 0;
1350 		}
1351 	}
1352 	return -EINVAL;
1353 }
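
/* For illustration: releasing id 2 from refs [{id=1}, {id=2}, {id=3}] copies
 * the last entry over the released slot, yielding [{id=1}, {id=3}] with
 * acquired_refs == 2; the order of acquired references is not preserved.
 */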
1354 
1355 static void free_func_state(struct bpf_func_state *state)
1356 {
1357 	if (!state)
1358 		return;
1359 	kfree(state->refs);
1360 	kfree(state->stack);
1361 	kfree(state);
1362 }
1363 
1364 static void clear_jmp_history(struct bpf_verifier_state *state)
1365 {
1366 	kfree(state->jmp_history);
1367 	state->jmp_history = NULL;
1368 	state->jmp_history_cnt = 0;
1369 }
1370 
1371 static void free_verifier_state(struct bpf_verifier_state *state,
1372 				bool free_self)
1373 {
1374 	int i;
1375 
1376 	for (i = 0; i <= state->curframe; i++) {
1377 		free_func_state(state->frame[i]);
1378 		state->frame[i] = NULL;
1379 	}
1380 	clear_jmp_history(state);
1381 	if (free_self)
1382 		kfree(state);
1383 }
1384 
1385 /* copy verifier state from src to dst growing dst stack space
1386  * when necessary to accommodate larger src stack
1387  */
1388 static int copy_func_state(struct bpf_func_state *dst,
1389 			   const struct bpf_func_state *src)
1390 {
1391 	int err;
1392 
1393 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1394 	err = copy_reference_state(dst, src);
1395 	if (err)
1396 		return err;
1397 	return copy_stack_state(dst, src);
1398 }
1399 
1400 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1401 			       const struct bpf_verifier_state *src)
1402 {
1403 	struct bpf_func_state *dst;
1404 	int i, err;
1405 
1406 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1407 					  src->jmp_history_cnt, sizeof(*dst_state->jmp_history),
1408 					  GFP_USER);
1409 	if (!dst_state->jmp_history)
1410 		return -ENOMEM;
1411 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1412 
1413 	/* if dst has more stack frames than src, free them; this is also
1414 	 * necessary in case of exceptional exits using bpf_throw.
1415 	 */
1416 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1417 		free_func_state(dst_state->frame[i]);
1418 		dst_state->frame[i] = NULL;
1419 	}
1420 	dst_state->speculative = src->speculative;
1421 	dst_state->active_rcu_lock = src->active_rcu_lock;
1422 	dst_state->curframe = src->curframe;
1423 	dst_state->active_lock.ptr = src->active_lock.ptr;
1424 	dst_state->active_lock.id = src->active_lock.id;
1425 	dst_state->branches = src->branches;
1426 	dst_state->parent = src->parent;
1427 	dst_state->first_insn_idx = src->first_insn_idx;
1428 	dst_state->last_insn_idx = src->last_insn_idx;
1429 	dst_state->dfs_depth = src->dfs_depth;
1430 	dst_state->callback_unroll_depth = src->callback_unroll_depth;
1431 	dst_state->used_as_loop_entry = src->used_as_loop_entry;
1432 	for (i = 0; i <= src->curframe; i++) {
1433 		dst = dst_state->frame[i];
1434 		if (!dst) {
1435 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1436 			if (!dst)
1437 				return -ENOMEM;
1438 			dst_state->frame[i] = dst;
1439 		}
1440 		err = copy_func_state(dst, src->frame[i]);
1441 		if (err)
1442 			return err;
1443 	}
1444 	return 0;
1445 }
1446 
1447 static u32 state_htab_size(struct bpf_verifier_env *env)
1448 {
1449 	return env->prog->len;
1450 }
1451 
1452 static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx)
1453 {
1454 	struct bpf_verifier_state *cur = env->cur_state;
1455 	struct bpf_func_state *state = cur->frame[cur->curframe];
1456 
1457 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
1458 }
1459 
1460 static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b)
1461 {
1462 	int fr;
1463 
1464 	if (a->curframe != b->curframe)
1465 		return false;
1466 
1467 	for (fr = a->curframe; fr >= 0; fr--)
1468 		if (a->frame[fr]->callsite != b->frame[fr]->callsite)
1469 			return false;
1470 
1471 	return true;
1472 }
1473 
1474 /* Open coded iterators allow back-edges in the state graph in order to
1475  * check unbounded loops that iterators can create.
1476  *
1477  * In is_state_visited() it is necessary to know if explored states are
1478  * part of some loops in order to decide whether non-exact states
1479  * comparison could be used:
1480  * - non-exact states comparison establishes sub-state relation and uses
1481  *   read and precision marks to do so, these marks are propagated from
1482  *   children states and thus are not guaranteed to be final in a loop;
1483  * - exact states comparison just checks if current and explored states
1484  *   are identical (and thus form a back-edge).
1485  *
1486  * Paper "A New Algorithm for Identifying Loops in Decompilation"
1487  * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient
1488  * algorithm for loop structure detection and gives an overview of
1489  * relevant terminology. It also has helpful illustrations.
1490  *
1491  * [1] https://api.semanticscholar.org/CorpusID:15784067
1492  *
1493  * We use a similar algorithm but, because loop nesting structure is
1494  * irrelevant for the verifier, ours is significantly simpler and resembles
1495  * the strongly connected components algorithm from Sedgewick's textbook.
1496  *
1497  * Define the topmost loop entry as the first node of the loop traversed in a
1498  * depth first search starting from the initial state. The goal of the loop
1499  * tracking algorithm is to associate topmost loop entries with states
1500  * derived from these entries.
1501  *
1502  * For each step in the DFS states traversal, the algorithm needs to identify
1503  * the following situations:
1504  *
1505  *          initial                     initial                   initial
1506  *            |                           |                         |
1507  *            V                           V                         V
1508  *           ...                         ...           .---------> hdr
1509  *            |                           |            |            |
1510  *            V                           V            |            V
1511  *           cur                     .-> succ          |    .------...
1512  *            |                      |    |            |    |       |
1513  *            V                      |    V            |    V       V
1514  *           succ                    '-- cur           |   ...     ...
1515  *                                                     |    |       |
1516  *                                                     |    V       V
1517  *                                                     |   succ <- cur
1518  *                                                     |    |
1519  *                                                     |    V
1520  *                                                     |   ...
1521  *                                                     |    |
1522  *                                                     '----'
1523  *
1524  *  (A) successor state of cur   (B) successor state of cur or its entry
1525  *      not yet traversed            are in current DFS path, thus cur and succ
1526  *                                   are members of the same outermost loop
1527  *
1528  *                      initial                  initial
1529  *                        |                        |
1530  *                        V                        V
1531  *                       ...                      ...
1532  *                        |                        |
1533  *                        V                        V
1534  *                .------...               .------...
1535  *                |       |                |       |
1536  *                V       V                V       V
1537  *           .-> hdr     ...              ...     ...
1538  *           |    |       |                |       |
1539  *           |    V       V                V       V
1540  *           |   succ <- cur              succ <- cur
1541  *           |    |                        |
1542  *           |    V                        V
1543  *           |   ...                      ...
1544  *           |    |                        |
1545  *           '----'                       exit
1546  *
1547  * (C) successor state of cur is a part of some loop but this loop
1548  *     does not include cur or successor state is not in a loop at all.
1549  *
1550  * Algorithm could be described as the following python code:
1551  *
1552  *     traversed = set()   # Set of traversed nodes
1553  *     entries = {}        # Mapping from node to loop entry
1554  *     depths = {}         # Depth level assigned to graph node
1555  *     path = set()        # Current DFS path
1556  *
1557  *     # Find outermost loop entry known for n
1558  *     def get_loop_entry(n):
1559  *         h = entries.get(n, None)
1560  *         while h in entries and entries[h] != h:
1561  *             h = entries[h]
1562  *         return h
1563  *
1564  *     # Update n's loop entry if h's outermost entry comes
1565  *     # before n's outermost entry in current DFS path.
1566  *     def update_loop_entry(n, h):
1567  *         n1 = get_loop_entry(n) or n
1568  *         h1 = get_loop_entry(h) or h
1569  *         if h1 in path and depths[h1] <= depths[n1]:
1570  *             entries[n] = h1
1571  *
1572  *     def dfs(n, depth):
1573  *         traversed.add(n)
1574  *         path.add(n)
1575  *         depths[n] = depth
1576  *         for succ in G.successors(n):
1577  *             if succ not in traversed:
1578  *                 # Case A: explore succ and update cur's loop entry
1579  *                 #         only if succ's entry is in current DFS path.
1580  *                 dfs(succ, depth + 1)
1581  *                 h = get_loop_entry(succ)
1582  *                 update_loop_entry(n, h)
1583  *             else:
1584  *                 # Case B or C depending on `h1 in path` check in update_loop_entry().
1585  *                 update_loop_entry(n, succ)
1586  *         path.remove(n)
1587  *
1588  * To adapt this algorithm for use with verifier:
1589  * - use st->branches == 0 as a signal that DFS of succ has finished
1590  *   and cur's loop entry has to be updated (case A), handle this in
1591  *   update_branch_counts();
1592  * - use st->branches > 0 as a signal that st is in the current DFS path;
1593  * - handle cases B and C in is_state_visited();
1594  * - update topmost loop entry for intermediate states in get_loop_entry().
1595  */
1596 static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st)
1597 {
1598 	struct bpf_verifier_state *topmost = st->loop_entry, *old;
1599 
1600 	while (topmost && topmost->loop_entry && topmost != topmost->loop_entry)
1601 		topmost = topmost->loop_entry;
1602 	/* Update loop entries for intermediate states to avoid this
1603 	 * traversal in future get_loop_entry() calls.
1604 	 */
1605 	while (st && st->loop_entry != topmost) {
1606 		old = st->loop_entry;
1607 		st->loop_entry = topmost;
1608 		st = old;
1609 	}
1610 	return topmost;
1611 }
1612 
1613 static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr)
1614 {
1615 	struct bpf_verifier_state *cur1, *hdr1;
1616 
1617 	cur1 = get_loop_entry(cur) ?: cur;
1618 	hdr1 = get_loop_entry(hdr) ?: hdr;
1619 	/* The hdr1->branches check decides between cases B and C in
1620 	 * the comment for get_loop_entry(). If hdr1->branches == 0 then
1621 	 * hdr's topmost loop entry is not in the current DFS path,
1622 	 * hence 'cur' and 'hdr' are not in the same loop and there is
1623 	 * no need to update cur->loop_entry.
1624 	 */
1625 	if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) {
1626 		cur->loop_entry = hdr;
1627 		hdr->used_as_loop_entry = true;
1628 	}
1629 }
1630 
1631 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1632 {
1633 	while (st) {
1634 		u32 br = --st->branches;
1635 
1636 		/* br == 0 signals that DFS exploration for 'st' is finished,
1637 		 * thus it is necessary to update parent's loop entry if it
1638 		 * turned out that st is a part of some loop.
1639 		 * This is a part of 'case A' in the get_loop_entry() comment.
1640 		 */
1641 		if (br == 0 && st->parent && st->loop_entry)
1642 			update_loop_entry(st->parent, st->loop_entry);
1643 
1644 		/* WARN_ON(br > 1) technically makes sense here,
1645 		 * but see comment in push_stack(), hence:
1646 		 */
1647 		WARN_ONCE((int)br < 0,
1648 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1649 			  br);
1650 		if (br)
1651 			break;
1652 		st = st->parent;
1653 	}
1654 }
1655 
1656 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1657 		     int *insn_idx, bool pop_log)
1658 {
1659 	struct bpf_verifier_state *cur = env->cur_state;
1660 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1661 	int err;
1662 
1663 	if (env->head == NULL)
1664 		return -ENOENT;
1665 
1666 	if (cur) {
1667 		err = copy_verifier_state(cur, &head->st);
1668 		if (err)
1669 			return err;
1670 	}
1671 	if (pop_log)
1672 		bpf_vlog_reset(&env->log, head->log_pos);
1673 	if (insn_idx)
1674 		*insn_idx = head->insn_idx;
1675 	if (prev_insn_idx)
1676 		*prev_insn_idx = head->prev_insn_idx;
1677 	elem = head->next;
1678 	free_verifier_state(&head->st, false);
1679 	kfree(head);
1680 	env->head = elem;
1681 	env->stack_size--;
1682 	return 0;
1683 }
1684 
1685 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1686 					     int insn_idx, int prev_insn_idx,
1687 					     bool speculative)
1688 {
1689 	struct bpf_verifier_state *cur = env->cur_state;
1690 	struct bpf_verifier_stack_elem *elem;
1691 	int err;
1692 
1693 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1694 	if (!elem)
1695 		goto err;
1696 
1697 	elem->insn_idx = insn_idx;
1698 	elem->prev_insn_idx = prev_insn_idx;
1699 	elem->next = env->head;
1700 	elem->log_pos = env->log.end_pos;
1701 	env->head = elem;
1702 	env->stack_size++;
1703 	err = copy_verifier_state(&elem->st, cur);
1704 	if (err)
1705 		goto err;
1706 	elem->st.speculative |= speculative;
1707 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1708 		verbose(env, "The sequence of %d jumps is too complex.\n",
1709 			env->stack_size);
1710 		goto err;
1711 	}
1712 	if (elem->st.parent) {
1713 		++elem->st.parent->branches;
1714 		/* WARN_ON(branches > 2) technically makes sense here,
1715 		 * but
1716 		 * 1. speculative states will bump 'branches' for non-branch
1717 		 * instructions
1718 		 * 2. is_state_visited() heuristics may decide not to create
1719 		 * a new state for a sequence of branches and all such current
1720 		 * and cloned states will be pointing to a single parent state
1721 		 * which might have large 'branches' count.
1722 		 */
1723 	}
1724 	return &elem->st;
1725 err:
1726 	free_verifier_state(env->cur_state, true);
1727 	env->cur_state = NULL;
1728 	/* pop all elements and return */
1729 	while (!pop_stack(env, NULL, NULL, false));
1730 	return NULL;
1731 }
1732 
1733 #define CALLER_SAVED_REGS 6
1734 static const int caller_saved[CALLER_SAVED_REGS] = {
1735 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1736 };
1737 
1738 /* This helper doesn't clear reg->id */
1739 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1740 {
1741 	reg->var_off = tnum_const(imm);
1742 	reg->smin_value = (s64)imm;
1743 	reg->smax_value = (s64)imm;
1744 	reg->umin_value = imm;
1745 	reg->umax_value = imm;
1746 
1747 	reg->s32_min_value = (s32)imm;
1748 	reg->s32_max_value = (s32)imm;
1749 	reg->u32_min_value = (u32)imm;
1750 	reg->u32_max_value = (u32)imm;
1751 }
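
/* Worked example (editorial sketch, not part of the original source):
 * ___mark_reg_known(reg, (u64)-1) sets smin/smax = -1, umin/umax = U64_MAX,
 * s32 bounds = -1 and u32 bounds = U32_MAX, i.e. the same constant viewed
 * consistently from all four numeric domains.
 */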
1752 
1753 /* Mark the unknown part of a register (variable offset or scalar value) as
1754  * known to have the value @imm.
1755  */
1756 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1757 {
1758 	/* Clear off and union(map_ptr, range) */
1759 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1760 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1761 	reg->id = 0;
1762 	reg->ref_obj_id = 0;
1763 	___mark_reg_known(reg, imm);
1764 }
1765 
1766 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1767 {
1768 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1769 	reg->s32_min_value = (s32)imm;
1770 	reg->s32_max_value = (s32)imm;
1771 	reg->u32_min_value = (u32)imm;
1772 	reg->u32_max_value = (u32)imm;
1773 }
1774 
1775 /* Mark the 'variable offset' part of a register as zero.  This should be
1776  * used only on registers holding a pointer type.
1777  */
1778 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1779 {
1780 	__mark_reg_known(reg, 0);
1781 }
1782 
1783 static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1784 {
1785 	__mark_reg_known(reg, 0);
1786 	reg->type = SCALAR_VALUE;
1787 	/* all scalars are assumed imprecise initially (unless unprivileged,
1788 	 * in which case everything is forced to be precise)
1789 	 */
1790 	reg->precise = !env->bpf_capable;
1791 }
1792 
1793 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1794 				struct bpf_reg_state *regs, u32 regno)
1795 {
1796 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1797 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1798 		/* Something bad happened, let's kill all regs */
1799 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1800 			__mark_reg_not_init(env, regs + regno);
1801 		return;
1802 	}
1803 	__mark_reg_known_zero(regs + regno);
1804 }
1805 
1806 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
1807 			      bool first_slot, int dynptr_id)
1808 {
1809 	/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
1810 	 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
1811 	 * set it unconditionally as it is ignored for STACK_DYNPTR anyway.
1812 	 */
1813 	__mark_reg_known_zero(reg);
1814 	reg->type = CONST_PTR_TO_DYNPTR;
1815 	/* Give each dynptr a unique id to uniquely associate slices to it. */
1816 	reg->id = dynptr_id;
1817 	reg->dynptr.type = type;
1818 	reg->dynptr.first_slot = first_slot;
1819 }
1820 
1821 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1822 {
1823 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1824 		const struct bpf_map *map = reg->map_ptr;
1825 
1826 		if (map->inner_map_meta) {
1827 			reg->type = CONST_PTR_TO_MAP;
1828 			reg->map_ptr = map->inner_map_meta;
1829 			/* transfer reg's id which is unique for every map_lookup_elem
1830 			 * as UID of the inner map.
1831 			 */
1832 			if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
1833 				reg->map_uid = reg->id;
1834 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1835 			reg->type = PTR_TO_XDP_SOCK;
1836 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1837 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1838 			reg->type = PTR_TO_SOCKET;
1839 		} else {
1840 			reg->type = PTR_TO_MAP_VALUE;
1841 		}
1842 		return;
1843 	}
1844 
1845 	reg->type &= ~PTR_MAYBE_NULL;
1846 }
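
/* Example (editorial sketch): a successful NULL check on the result of a
 * lookup in a BPF_MAP_TYPE_ARRAY_OF_MAPS takes the inner_map_meta branch
 * above, so the PTR_TO_MAP_VALUE | PTR_MAYBE_NULL register is re-typed as
 * CONST_PTR_TO_MAP and the inner map can be used for further lookups.
 */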
1847 
1848 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
1849 				struct btf_field_graph_root *ds_head)
1850 {
1851 	__mark_reg_known_zero(&regs[regno]);
1852 	regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
1853 	regs[regno].btf = ds_head->btf;
1854 	regs[regno].btf_id = ds_head->value_btf_id;
1855 	regs[regno].off = ds_head->node_offset;
1856 }
1857 
1858 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1859 {
1860 	return type_is_pkt_pointer(reg->type);
1861 }
1862 
1863 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1864 {
1865 	return reg_is_pkt_pointer(reg) ||
1866 	       reg->type == PTR_TO_PACKET_END;
1867 }
1868 
1869 static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
1870 {
1871 	return base_type(reg->type) == PTR_TO_MEM &&
1872 		(reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP);
1873 }
1874 
1875 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1876 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1877 				    enum bpf_reg_type which)
1878 {
1879 	/* The register can already have a range from prior markings.
1880 	 * This is fine as long as it hasn't been advanced from its
1881 	 * origin.
1882 	 */
1883 	return reg->type == which &&
1884 	       reg->id == 0 &&
1885 	       reg->off == 0 &&
1886 	       tnum_equals_const(reg->var_off, 0);
1887 }
1888 
1889 /* Reset the min/max bounds of a register */
1890 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1891 {
1892 	reg->smin_value = S64_MIN;
1893 	reg->smax_value = S64_MAX;
1894 	reg->umin_value = 0;
1895 	reg->umax_value = U64_MAX;
1896 
1897 	reg->s32_min_value = S32_MIN;
1898 	reg->s32_max_value = S32_MAX;
1899 	reg->u32_min_value = 0;
1900 	reg->u32_max_value = U32_MAX;
1901 }
1902 
1903 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1904 {
1905 	reg->smin_value = S64_MIN;
1906 	reg->smax_value = S64_MAX;
1907 	reg->umin_value = 0;
1908 	reg->umax_value = U64_MAX;
1909 }
1910 
1911 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1912 {
1913 	reg->s32_min_value = S32_MIN;
1914 	reg->s32_max_value = S32_MAX;
1915 	reg->u32_min_value = 0;
1916 	reg->u32_max_value = U32_MAX;
1917 }
1918 
1919 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1920 {
1921 	struct tnum var32_off = tnum_subreg(reg->var_off);
1922 
1923 	/* min signed is max(sign bit) | min(other bits) */
1924 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1925 			var32_off.value | (var32_off.mask & S32_MIN));
1926 	/* max signed is min(sign bit) | max(other bits) */
1927 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1928 			var32_off.value | (var32_off.mask & S32_MAX));
1929 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1930 	reg->u32_max_value = min(reg->u32_max_value,
1931 				 (u32)(var32_off.value | var32_off.mask));
1932 }
1933 
1934 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1935 {
1936 	/* min signed is max(sign bit) | min(other bits) */
1937 	reg->smin_value = max_t(s64, reg->smin_value,
1938 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1939 	/* max signed is min(sign bit) | max(other bits) */
1940 	reg->smax_value = min_t(s64, reg->smax_value,
1941 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1942 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1943 	reg->umax_value = min(reg->umax_value,
1944 			      reg->var_off.value | reg->var_off.mask);
1945 }
1946 
1947 static void __update_reg_bounds(struct bpf_reg_state *reg)
1948 {
1949 	__update_reg32_bounds(reg);
1950 	__update_reg64_bounds(reg);
1951 }
1952 
1953 /* Uses signed min/max values to inform unsigned, and vice-versa */
1954 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1955 {
1956 	/* If upper 32 bits of u64/s64 range don't change, we can use lower 32
1957 	 * bits to improve our u32/s32 boundaries.
1958 	 *
1959 	 * E.g., the case where we have upper 32 bits as zero ([10, 20] in
1960 	 * u64) is pretty trivial, it's obvious that in u32 we'll also have
1961 	 * [10, 20] range. But this property holds for any 64-bit range as
1962 	 * long as upper 32 bits in that entire range of values stay the same.
1963 	 *
1964 	 * E.g., u64 range [0x10000000A, 0x10000000F] ([4294967306, 4294967311]
1965 	 * in decimal) has the same upper 32 bits throughout all the values in
1966 	 * that range. As such, lower 32 bits form a valid [0xA, 0xF] ([10, 15])
1967 	 * range.
1968 	 *
1969 	 * Note also, that [0xA, 0xF] is a valid range both in u32 and in s32,
1970 	 * following the rules outlined below about u64/s64 correspondence
1971 	 * (which equally applies to u32 vs s32 correspondence). In general it
1972 	 * depends on actual hexadecimal values of 32-bit range. They can form
1973 	 * only valid u32, or only valid s32 ranges in some cases.
1974 	 *
1975 	 * So we use all these insights to derive bounds for subregisters here.
1976 	 */
1977 	if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) {
1978 		/* u64 to u32 casting preserves validity of low 32 bits as
1979 		 * a range, if upper 32 bits are the same
1980 		 */
1981 		reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value);
1982 		reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value);
1983 
1984 		if ((s32)reg->umin_value <= (s32)reg->umax_value) {
1985 			reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
1986 			reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
1987 		}
1988 	}
1989 	if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) {
1990 		/* low 32 bits should form a proper u32 range */
1991 		if ((u32)reg->smin_value <= (u32)reg->smax_value) {
1992 			reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value);
1993 			reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value);
1994 		}
1995 		/* low 32 bits should form a proper s32 range */
1996 		if ((s32)reg->smin_value <= (s32)reg->smax_value) {
1997 			reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
1998 			reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
1999 		}
2000 	}
2001 	/* Special case where upper bits form a small sequence of two
2002 	 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
2003 	 * 0x00000000 is also valid), while lower bits form a proper s32 range
2004 	 * going from negative numbers to positive numbers. E.g., let's say we
2005 	 * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]).
2006 	 * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff,
2007 	 * 0x0000000000000000, 0x0000000000000001}). Ignoring upper 32 bits,
2008 	 * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]).
2009 	 * Note that it doesn't have to be 0xffffffff going to 0x00000000 in
2010 	 * upper 32 bits. As a random example, s64 range
2011 	 * [0xfffffff0fffffff0; 0xfffffff100000010] forms a valid s32 range
2012 	 * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister.
2013 	 */
2014 	if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) &&
2015 	    (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) {
2016 		reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
2017 		reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
2018 	}
2019 	if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) &&
2020 	    (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) {
2021 		reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
2022 		reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
2023 	}
2024 	/* if u32 range forms a valid s32 range (due to matching sign bit),
2025 	 * try to learn from that
2026 	 */
2027 	if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) {
2028 		reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value);
2029 		reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value);
2030 	}
2031 	/* If we cannot cross the sign boundary, then signed and unsigned bounds
2032 	 * are the same, so combine.  This works even in the negative case, e.g.
2033 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2034 	 */
2035 	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
2036 		reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
2037 		reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
2038 	}
2039 }
2040 
2041 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
2042 {
2043 	/* If u64 range forms a valid s64 range (due to matching sign bit),
2044 	 * try to learn from that. Let's do a bit of ASCII art to see when
2045 	 * this is happening. Let's take u64 range first:
2046 	 *
2047 	 * 0             0x7fffffffffffffff 0x8000000000000000        U64_MAX
2048 	 * |-------------------------------|--------------------------------|
2049 	 *
2050 	 * Valid u64 range is formed when umin and umax are anywhere in the
2051 	 * range [0, U64_MAX], and umin <= umax. u64 case is simple and
2052 	 * straightforward. Let's see how s64 range maps onto the same range
2053 	 * of values, annotated below the line for comparison:
2054 	 *
2055 	 * 0             0x7fffffffffffffff 0x8000000000000000        U64_MAX
2056 	 * |-------------------------------|--------------------------------|
2057 	 * 0                        S64_MAX S64_MIN                        -1
2058 	 *
2059 	 * So s64 values basically start in the middle and they are logically
2060 	 * contiguous to the right of it, wrapping around from -1 to 0, and
2061 	 * then finishing as S64_MAX (0x7fffffffffffffff) right before
2062 	 * S64_MIN. We can try drawing the continuity of u64 vs s64 values
2063 	 * more visually as mapped to sign-agnostic range of hex values.
2064 	 *
2065 	 *  u64 start                                               u64 end
2066 	 *  _______________________________________________________________
2067 	 * /                                                               \
2068 	 * 0             0x7fffffffffffffff 0x8000000000000000        U64_MAX
2069 	 * |-------------------------------|--------------------------------|
2070 	 * 0                        S64_MAX S64_MIN                        -1
2071 	 *                                / \
2072 	 * >------------------------------   ------------------------------->
2073 	 * s64 continues...        s64 end   s64 start          s64 "midpoint"
2074 	 *
2075 	 * What this means is that, in general, we can't always derive
2076 	 * something new about u64 from any random s64 range, and vice versa.
2077 	 *
2078 	 * But we can do that in two particular cases. One is when entire
2079 	 * u64/s64 range is *entirely* contained within left half of the above
2080 	 * diagram or when it is *entirely* contained in the right half. I.e.:
2081 	 *
2082 	 * |-------------------------------|--------------------------------|
2083 	 *     ^                   ^            ^                 ^
2084 	 *     A                   B            C                 D
2085 	 *
2086 	 * [A, B] and [C, D] are contained entirely in their respective halves
2087 	 * and form valid contiguous ranges as both u64 and s64 values. [A, B]
2088 	 * will be non-negative both as u64 and s64 (and in fact it will be
2089 	 * identical ranges no matter the signedness). [C, D] treated as s64
2090 	 * will be a range of negative values, while in u64 it will be
2091 	 * non-negative range of values larger than 0x8000000000000000.
2092 	 *
2093 	 * Now, any other range here can't be represented in both u64 and s64
2094 	 * simultaneously. E.g., [A, C], [A, D], [B, C], [B, D] are valid
2095 	 * contiguous u64 ranges, but they are discontinuous in s64. [B, C]
2096 	 * in s64 would be properly represented as [S64_MIN, C] and [B, S64_MAX],
2097 	 * for example. Similarly, valid s64 range [D, A] (going from negative
2098 	 * to positive values), would be two separate [D, U64_MAX] and [0, A]
2099 	 * ranges as u64. Currently reg_state can't represent two segments per
2100 	 * numeric domain, so in such situations we can only derive the maximal
2101 	 * possible range ([0, U64_MAX] for u64, and [S64_MIN, S64_MAX] for s64).
2102 	 *
2103 	 * So we use these facts to derive umin/umax from smin/smax and vice
2104 	 * versa only if they stay within the same "half". This is equivalent
2105 	 * to checking the sign bit: the lower half has the sign bit as zero,
2106 	 * the upper half has the sign bit as 1. Below in the code we simplify
2107 	 * this by just casting umin/umax as smin/smax and checking if they form a valid
2108 	 * range, and vice versa. Those are equivalent checks.
2109 	 */
2110 	if ((s64)reg->umin_value <= (s64)reg->umax_value) {
2111 		reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value);
2112 		reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value);
2113 	}
2114 	/* If we cannot cross the sign boundary, then signed and unsigned bounds
2115 	 * are the same, so combine.  This works even in the negative case, e.g.
2116 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2117 	 */
2118 	if ((u64)reg->smin_value <= (u64)reg->smax_value) {
2119 		reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
2120 		reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
2121 	}
2122 }
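
/* Concrete example (editorial sketch): u64 bounds [5, 10] stay in the lower
 * half (sign bit clear), so (s64)5 <= (s64)10 holds and [5, 10] is folded
 * into smin/smax as well. For u64 bounds [1, U64_MAX] the check fails,
 * since (s64)U64_MAX == -1 < 1, and no s64 knowledge is derived.
 */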
2123 
2124 static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
2125 {
2126 	/* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit
2127 	 * values on both sides of the 64-bit range in the hope of getting a tighter range.
2128 	 * E.g., if r1 is [0x1'00000000, 0x3'80000000], and we learn from
2129 	 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff].
2130 	 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound
2131 	 * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of
2132 	 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a
2133 	 * better overall bounds for r1 as [0x1'000000001; 0x3'7fffffff].
2134 	 * We just need to make sure that derived bounds we are intersecting
2135 	 * with are well-formed ranges in the respective s64 or u64 domain, just
2136 	 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments.
2137 	 */
2138 	__u64 new_umin, new_umax;
2139 	__s64 new_smin, new_smax;
2140 
2141 	/* u32 -> u64 tightening, it's always well-formed */
2142 	new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value;
2143 	new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value;
2144 	reg->umin_value = max_t(u64, reg->umin_value, new_umin);
2145 	reg->umax_value = min_t(u64, reg->umax_value, new_umax);
2146 	/* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */
2147 	new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value;
2148 	new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value;
2149 	reg->smin_value = max_t(s64, reg->smin_value, new_smin);
2150 	reg->smax_value = min_t(s64, reg->smax_value, new_smax);
2151 
2152 	/* if s32 can be treated as valid u32 range, we can use it as well */
2153 	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
2154 		/* s32 -> u64 tightening */
2155 		new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
2156 		new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
2157 		reg->umin_value = max_t(u64, reg->umin_value, new_umin);
2158 		reg->umax_value = min_t(u64, reg->umax_value, new_umax);
2159 		/* s32 -> s64 tightening */
2160 		new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
2161 		new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
2162 		reg->smin_value = max_t(s64, reg->smin_value, new_smin);
2163 		reg->smax_value = min_t(s64, reg->smax_value, new_smax);
2164 	}
2165 }
2166 
2167 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
2168 {
2169 	__reg32_deduce_bounds(reg);
2170 	__reg64_deduce_bounds(reg);
2171 	__reg_deduce_mixed_bounds(reg);
2172 }
2173 
2174 /* Attempts to improve var_off based on unsigned min/max information */
2175 static void __reg_bound_offset(struct bpf_reg_state *reg)
2176 {
2177 	struct tnum var64_off = tnum_intersect(reg->var_off,
2178 					       tnum_range(reg->umin_value,
2179 							  reg->umax_value));
2180 	struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
2181 					       tnum_range(reg->u32_min_value,
2182 							  reg->u32_max_value));
2183 
2184 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
2185 }
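
/* Worked example for __reg_bound_offset() (editorial sketch): with
 * var_off = (0; 0xff), i.e. the low byte unknown, and u64 bounds
 * [0x10, 0x1f], tnum_range(0x10, 0x1f) is (0x10; 0xf), so the intersection
 * fixes bit 4 to 1 and clears bits 5..7, tightening var_off to (0x10; 0xf).
 */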
2186 
2187 static void reg_bounds_sync(struct bpf_reg_state *reg)
2188 {
2189 	/* We might have learned new bounds from the var_off. */
2190 	__update_reg_bounds(reg);
2191 	/* We might have learned something about the sign bit. */
2192 	__reg_deduce_bounds(reg);
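	/* A second deduction pass lets bounds derived in one numeric domain
	 * (u64/s64/u32/s32) feed further deductions in the others.
	 */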
2193 	__reg_deduce_bounds(reg);
2194 	/* We might have learned some bits from the bounds. */
2195 	__reg_bound_offset(reg);
2196 	/* Intersecting with the old var_off might have improved our bounds
2197 	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2198 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2199 	 */
2200 	__update_reg_bounds(reg);
2201 }
2202 
2203 static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
2204 				   struct bpf_reg_state *reg, const char *ctx)
2205 {
2206 	const char *msg;
2207 
2208 	if (reg->umin_value > reg->umax_value ||
2209 	    reg->smin_value > reg->smax_value ||
2210 	    reg->u32_min_value > reg->u32_max_value ||
2211 	    reg->s32_min_value > reg->s32_max_value) {
2212 		msg = "range bounds violation";
2213 		goto out;
2214 	}
2215 
2216 	if (tnum_is_const(reg->var_off)) {
2217 		u64 uval = reg->var_off.value;
2218 		s64 sval = (s64)uval;
2219 
2220 		if (reg->umin_value != uval || reg->umax_value != uval ||
2221 		    reg->smin_value != sval || reg->smax_value != sval) {
2222 			msg = "const tnum out of sync with range bounds";
2223 			goto out;
2224 		}
2225 	}
2226 
2227 	if (tnum_subreg_is_const(reg->var_off)) {
2228 		u32 uval32 = tnum_subreg(reg->var_off).value;
2229 		s32 sval32 = (s32)uval32;
2230 
2231 		if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 ||
2232 		    reg->s32_min_value != sval32 || reg->s32_max_value != sval32) {
2233 			msg = "const subreg tnum out of sync with range bounds";
2234 			goto out;
2235 		}
2236 	}
2237 
2238 	return 0;
2239 out:
2240 	verbose(env, "REG INVARIANTS VIOLATION (%s): %s u64=[%#llx, %#llx] "
2241 		"s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)\n",
2242 		ctx, msg, reg->umin_value, reg->umax_value,
2243 		reg->smin_value, reg->smax_value,
2244 		reg->u32_min_value, reg->u32_max_value,
2245 		reg->s32_min_value, reg->s32_max_value,
2246 		reg->var_off.value, reg->var_off.mask);
2247 	if (env->test_reg_invariants)
2248 		return -EFAULT;
2249 	__mark_reg_unbounded(reg);
2250 	return 0;
2251 }
2252 
2253 static bool __reg32_bound_s64(s32 a)
2254 {
2255 	return a >= 0 && a <= S32_MAX;
2256 }
2257 
2258 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
2259 {
2260 	reg->umin_value = reg->u32_min_value;
2261 	reg->umax_value = reg->u32_max_value;
2262 
2263 	/* Attempt to pull 32-bit signed bounds into 64-bit bounds, but they
2264 	 * must be non-negative; otherwise set worst-case bounds and refine
2265 	 * them later from the tnum.
2266 	 */
2267 	if (__reg32_bound_s64(reg->s32_min_value) &&
2268 	    __reg32_bound_s64(reg->s32_max_value)) {
2269 		reg->smin_value = reg->s32_min_value;
2270 		reg->smax_value = reg->s32_max_value;
2271 	} else {
2272 		reg->smin_value = 0;
2273 		reg->smax_value = U32_MAX;
2274 	}
2275 }
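
/* Example (editorial sketch): s32 bounds [-5, 10] cannot be pulled into s64
 * since the minimum is negative, so smin/smax fall back to [0, U32_MAX],
 * while the u64 bounds still come directly from the u32 bounds.
 */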
2276 
2277 /* Mark a register as having a completely unknown (scalar) value. */
2278 static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
2279 {
2280 	/*
2281 	 * Clear type, off, and union(map_ptr, range) and
2282 	 * padding between 'type' and union
2283 	 */
2284 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
2285 	reg->type = SCALAR_VALUE;
2286 	reg->id = 0;
2287 	reg->ref_obj_id = 0;
2288 	reg->var_off = tnum_unknown;
2289 	reg->frameno = 0;
2290 	reg->precise = false;
2291 	__mark_reg_unbounded(reg);
2292 }
2293 
2294 /* Mark a register as having a completely unknown (scalar) value,
2295  * initializing .precise as true when the task is not bpf_capable.
2296  */
2297 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
2298 			       struct bpf_reg_state *reg)
2299 {
2300 	__mark_reg_unknown_imprecise(reg);
2301 	reg->precise = !env->bpf_capable;
2302 }
2303 
2304 static void mark_reg_unknown(struct bpf_verifier_env *env,
2305 			     struct bpf_reg_state *regs, u32 regno)
2306 {
2307 	if (WARN_ON(regno >= MAX_BPF_REG)) {
2308 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
2309 		/* Something bad happened, let's kill all regs except FP */
2310 		for (regno = 0; regno < BPF_REG_FP; regno++)
2311 			__mark_reg_not_init(env, regs + regno);
2312 		return;
2313 	}
2314 	__mark_reg_unknown(env, regs + regno);
2315 }
2316 
2317 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
2318 				struct bpf_reg_state *reg)
2319 {
2320 	__mark_reg_unknown(env, reg);
2321 	reg->type = NOT_INIT;
2322 }
2323 
2324 static void mark_reg_not_init(struct bpf_verifier_env *env,
2325 			      struct bpf_reg_state *regs, u32 regno)
2326 {
2327 	if (WARN_ON(regno >= MAX_BPF_REG)) {
2328 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
2329 		/* Something bad happened, let's kill all regs except FP */
2330 		for (regno = 0; regno < BPF_REG_FP; regno++)
2331 			__mark_reg_not_init(env, regs + regno);
2332 		return;
2333 	}
2334 	__mark_reg_not_init(env, regs + regno);
2335 }
2336 
2337 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
2338 			    struct bpf_reg_state *regs, u32 regno,
2339 			    enum bpf_reg_type reg_type,
2340 			    struct btf *btf, u32 btf_id,
2341 			    enum bpf_type_flag flag)
2342 {
2343 	if (reg_type == SCALAR_VALUE) {
2344 		mark_reg_unknown(env, regs, regno);
2345 		return;
2346 	}
2347 	mark_reg_known_zero(env, regs, regno);
2348 	regs[regno].type = PTR_TO_BTF_ID | flag;
2349 	regs[regno].btf = btf;
2350 	regs[regno].btf_id = btf_id;
2351 }
2352 
2353 #define DEF_NOT_SUBREG	(0)
2354 static void init_reg_state(struct bpf_verifier_env *env,
2355 			   struct bpf_func_state *state)
2356 {
2357 	struct bpf_reg_state *regs = state->regs;
2358 	int i;
2359 
2360 	for (i = 0; i < MAX_BPF_REG; i++) {
2361 		mark_reg_not_init(env, regs, i);
2362 		regs[i].live = REG_LIVE_NONE;
2363 		regs[i].parent = NULL;
2364 		regs[i].subreg_def = DEF_NOT_SUBREG;
2365 	}
2366 
2367 	/* frame pointer */
2368 	regs[BPF_REG_FP].type = PTR_TO_STACK;
2369 	mark_reg_known_zero(env, regs, BPF_REG_FP);
2370 	regs[BPF_REG_FP].frameno = state->frameno;
2371 }
2372 
2373 static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
2374 {
2375 	return (struct bpf_retval_range){ minval, maxval };
2376 }
2377 
2378 #define BPF_MAIN_FUNC (-1)
2379 static void init_func_state(struct bpf_verifier_env *env,
2380 			    struct bpf_func_state *state,
2381 			    int callsite, int frameno, int subprogno)
2382 {
2383 	state->callsite = callsite;
2384 	state->frameno = frameno;
2385 	state->subprogno = subprogno;
2386 	state->callback_ret_range = retval_range(0, 0);
2387 	init_reg_state(env, state);
2388 	mark_verifier_state_scratched(env);
2389 }
2390 
2391 /* Similar to push_stack(), but for async callbacks */
2392 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
2393 						int insn_idx, int prev_insn_idx,
2394 						int subprog)
2395 {
2396 	struct bpf_verifier_stack_elem *elem;
2397 	struct bpf_func_state *frame;
2398 
2399 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
2400 	if (!elem)
2401 		goto err;
2402 
2403 	elem->insn_idx = insn_idx;
2404 	elem->prev_insn_idx = prev_insn_idx;
2405 	elem->next = env->head;
2406 	elem->log_pos = env->log.end_pos;
2407 	env->head = elem;
2408 	env->stack_size++;
2409 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
2410 		verbose(env,
2411 			"The sequence of %d jumps is too complex for async cb.\n",
2412 			env->stack_size);
2413 		goto err;
2414 	}
2415 	/* Unlike push_stack(), do not copy_verifier_state().
2416 	 * The caller's state doesn't matter: this is an async callback,
2417 	 * and it starts with a fresh stack.
2418 	 * Initialize it similarly to do_check_common().
2419 	 */
2420 	elem->st.branches = 1;
2421 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
2422 	if (!frame)
2423 		goto err;
2424 	init_func_state(env, frame,
2425 			BPF_MAIN_FUNC /* callsite */,
2426 			0 /* frameno within this callchain */,
2427 			subprog /* subprog number within this prog */);
2428 	elem->st.frame[0] = frame;
2429 	return &elem->st;
2430 err:
2431 	free_verifier_state(env->cur_state, true);
2432 	env->cur_state = NULL;
2433 	/* pop all elements and return */
2434 	while (!pop_stack(env, NULL, NULL, false));
2435 	return NULL;
2436 }
2437 
2438 
2439 enum reg_arg_type {
2440 	SRC_OP,		/* register is used as source operand */
2441 	DST_OP,		/* register is used as destination operand */
2442 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
2443 };
2444 
2445 static int cmp_subprogs(const void *a, const void *b)
2446 {
2447 	return ((struct bpf_subprog_info *)a)->start -
2448 	       ((struct bpf_subprog_info *)b)->start;
2449 }
2450 
2451 static int find_subprog(struct bpf_verifier_env *env, int off)
2452 {
2453 	struct bpf_subprog_info *p;
2454 
2455 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
2456 		    sizeof(env->subprog_info[0]), cmp_subprogs);
2457 	if (!p)
2458 		return -ENOENT;
2459 	return p - env->subprog_info;
2460 
2461 }
2462 
2463 static int add_subprog(struct bpf_verifier_env *env, int off)
2464 {
2465 	int insn_cnt = env->prog->len;
2466 	int ret;
2467 
2468 	if (off >= insn_cnt || off < 0) {
2469 		verbose(env, "call to invalid destination\n");
2470 		return -EINVAL;
2471 	}
2472 	ret = find_subprog(env, off);
2473 	if (ret >= 0)
2474 		return ret;
2475 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
2476 		verbose(env, "too many subprograms\n");
2477 		return -E2BIG;
2478 	}
2479 	/* determine subprog starts. The end is one before the next starts */
2480 	env->subprog_info[env->subprog_cnt++].start = off;
2481 	sort(env->subprog_info, env->subprog_cnt,
2482 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
2483 	return env->subprog_cnt - 1;
2484 }
2485 
2486 static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env)
2487 {
2488 	struct bpf_prog_aux *aux = env->prog->aux;
2489 	struct btf *btf = aux->btf;
2490 	const struct btf_type *t;
2491 	u32 main_btf_id, id;
2492 	const char *name;
2493 	int ret, i;
2494 
2495 	/* Non-zero func_info_cnt implies valid btf */
2496 	if (!aux->func_info_cnt)
2497 		return 0;
2498 	main_btf_id = aux->func_info[0].type_id;
2499 
2500 	t = btf_type_by_id(btf, main_btf_id);
2501 	if (!t) {
2502 		verbose(env, "invalid btf id for main subprog in func_info\n");
2503 		return -EINVAL;
2504 	}
2505 
2506 	name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:");
2507 	if (IS_ERR(name)) {
2508 		ret = PTR_ERR(name);
2509 		/* If there is no tag present, there is no exception callback */
2510 		if (ret == -ENOENT)
2511 			ret = 0;
2512 		else if (ret == -EEXIST)
2513 			verbose(env, "multiple exception callback tags for main subprog\n");
2514 		return ret;
2515 	}
2516 
2517 	ret = btf_find_by_name_kind(btf, name, BTF_KIND_FUNC);
2518 	if (ret < 0) {
2519 		verbose(env, "exception callback '%s' could not be found in BTF\n", name);
2520 		return ret;
2521 	}
2522 	id = ret;
2523 	t = btf_type_by_id(btf, id);
2524 	if (btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
2525 		verbose(env, "exception callback '%s' must have global linkage\n", name);
2526 		return -EINVAL;
2527 	}
2528 	ret = 0;
2529 	for (i = 0; i < aux->func_info_cnt; i++) {
2530 		if (aux->func_info[i].type_id != id)
2531 			continue;
2532 		ret = aux->func_info[i].insn_off;
2533 		/* Further func_info and subprog checks will also happen
2534 		 * later, so assume this is the right insn_off for now.
2535 		 */
2536 		if (!ret) {
2537 			verbose(env, "invalid exception callback insn_off in func_info: 0\n");
2538 			ret = -EINVAL;
2539 		}
2540 	}
2541 	if (!ret) {
2542 		verbose(env, "exception callback type id not found in func_info\n");
2543 		ret = -EINVAL;
2544 	}
2545 	return ret;
2546 }
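
/* Example (editorial sketch, mirroring the BPF selftests convention): the
 * decl tag is attached in BPF C source roughly as
 *   __attribute__((btf_decl_tag("exception_callback:my_cb")))
 * on the main subprog, where my_cb must be a global function in the same
 * BTF; the lookup above resolves "my_cb" back to its insn offset.
 */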
2547 
2548 #define MAX_KFUNC_DESCS 256
2549 #define MAX_KFUNC_BTFS	256
2550 
2551 struct bpf_kfunc_desc {
2552 	struct btf_func_model func_model;
2553 	u32 func_id;
2554 	s32 imm;
2555 	u16 offset;
2556 	unsigned long addr;
2557 };
2558 
2559 struct bpf_kfunc_btf {
2560 	struct btf *btf;
2561 	struct module *module;
2562 	u16 offset;
2563 };
2564 
2565 struct bpf_kfunc_desc_tab {
2566 	/* Sorted by func_id (BTF ID) and offset (fd_array offset) during
2567 	 * verification. JITs do lookups by bpf_insn, where func_id may not be
2568 	 * available, therefore at the end of verification do_misc_fixups()
2569 	 * sorts this by imm and offset.
2570 	 */
2571 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
2572 	u32 nr_descs;
2573 };
2574 
2575 struct bpf_kfunc_btf_tab {
2576 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
2577 	u32 nr_descs;
2578 };
2579 
2580 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
2581 {
2582 	const struct bpf_kfunc_desc *d0 = a;
2583 	const struct bpf_kfunc_desc *d1 = b;
2584 
2585 	/* func_id is not greater than BTF_MAX_TYPE, so the subtraction cannot overflow */
2586 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
2587 }
2588 
2589 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
2590 {
2591 	const struct bpf_kfunc_btf *d0 = a;
2592 	const struct bpf_kfunc_btf *d1 = b;
2593 
2594 	return d0->offset - d1->offset;
2595 }
2596 
2597 static const struct bpf_kfunc_desc *
2598 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
2599 {
2600 	struct bpf_kfunc_desc desc = {
2601 		.func_id = func_id,
2602 		.offset = offset,
2603 	};
2604 	struct bpf_kfunc_desc_tab *tab;
2605 
2606 	tab = prog->aux->kfunc_tab;
2607 	return bsearch(&desc, tab->descs, tab->nr_descs,
2608 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
2609 }
2610 
2611 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2612 		       u16 btf_fd_idx, u8 **func_addr)
2613 {
2614 	const struct bpf_kfunc_desc *desc;
2615 
2616 	desc = find_kfunc_desc(prog, func_id, btf_fd_idx);
2617 	if (!desc)
2618 		return -EFAULT;
2619 
2620 	*func_addr = (u8 *)desc->addr;
2621 	return 0;
2622 }
2623 
2624 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
2625 					 s16 offset)
2626 {
2627 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
2628 	struct bpf_kfunc_btf_tab *tab;
2629 	struct bpf_kfunc_btf *b;
2630 	struct module *mod;
2631 	struct btf *btf;
2632 	int btf_fd;
2633 
2634 	tab = env->prog->aux->kfunc_btf_tab;
2635 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
2636 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
2637 	if (!b) {
2638 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
2639 			verbose(env, "too many different module BTFs\n");
2640 			return ERR_PTR(-E2BIG);
2641 		}
2642 
2643 		if (bpfptr_is_null(env->fd_array)) {
2644 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
2645 			return ERR_PTR(-EPROTO);
2646 		}
2647 
2648 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
2649 					    offset * sizeof(btf_fd),
2650 					    sizeof(btf_fd)))
2651 			return ERR_PTR(-EFAULT);
2652 
2653 		btf = btf_get_by_fd(btf_fd);
2654 		if (IS_ERR(btf)) {
2655 			verbose(env, "invalid module BTF fd specified\n");
2656 			return btf;
2657 		}
2658 
2659 		if (!btf_is_module(btf)) {
2660 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
2661 			btf_put(btf);
2662 			return ERR_PTR(-EINVAL);
2663 		}
2664 
2665 		mod = btf_try_get_module(btf);
2666 		if (!mod) {
2667 			btf_put(btf);
2668 			return ERR_PTR(-ENXIO);
2669 		}
2670 
2671 		b = &tab->descs[tab->nr_descs++];
2672 		b->btf = btf;
2673 		b->module = mod;
2674 		b->offset = offset;
2675 
2676 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2677 		     kfunc_btf_cmp_by_off, NULL);
2678 	}
2679 	return b->btf;
2680 }
2681 
2682 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
2683 {
2684 	if (!tab)
2685 		return;
2686 
2687 	while (tab->nr_descs--) {
2688 		module_put(tab->descs[tab->nr_descs].module);
2689 		btf_put(tab->descs[tab->nr_descs].btf);
2690 	}
2691 	kfree(tab);
2692 }
2693 
2694 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2695 {
2696 	if (offset) {
2697 		if (offset < 0) {
2698 			/* In the future, this could be allowed in order to increase
2699 			 * the limit of the fd index into fd_array, interpreted as a u16.
2700 			 */
2701 			verbose(env, "negative offset disallowed for kernel module function call\n");
2702 			return ERR_PTR(-EINVAL);
2703 		}
2704 
2705 		return __find_kfunc_desc_btf(env, offset);
2706 	}
2707 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
2708 }
2709 
2710 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
2711 {
2712 	const struct btf_type *func, *func_proto;
2713 	struct bpf_kfunc_btf_tab *btf_tab;
2714 	struct bpf_kfunc_desc_tab *tab;
2715 	struct bpf_prog_aux *prog_aux;
2716 	struct bpf_kfunc_desc *desc;
2717 	const char *func_name;
2718 	struct btf *desc_btf;
2719 	unsigned long call_imm;
2720 	unsigned long addr;
2721 	int err;
2722 
2723 	prog_aux = env->prog->aux;
2724 	tab = prog_aux->kfunc_tab;
2725 	btf_tab = prog_aux->kfunc_btf_tab;
2726 	if (!tab) {
2727 		if (!btf_vmlinux) {
2728 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2729 			return -ENOTSUPP;
2730 		}
2731 
2732 		if (!env->prog->jit_requested) {
2733 			verbose(env, "JIT is required for calling kernel function\n");
2734 			return -ENOTSUPP;
2735 		}
2736 
2737 		if (!bpf_jit_supports_kfunc_call()) {
2738 			verbose(env, "JIT does not support calling kernel function\n");
2739 			return -ENOTSUPP;
2740 		}
2741 
2742 		if (!env->prog->gpl_compatible) {
2743 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2744 			return -EINVAL;
2745 		}
2746 
2747 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2748 		if (!tab)
2749 			return -ENOMEM;
2750 		prog_aux->kfunc_tab = tab;
2751 	}
2752 
2753 	/* func_id == 0 is always invalid, but instead of returning an error, be
2754 	 * conservative and wait until the code elimination pass before returning
2755 	 * error, so that invalid calls that get pruned out can be in BPF programs
2756 	 * loaded from userspace.  It is also required that offset be untouched
2757 	 * for such calls.
2758 	 */
2759 	if (!func_id && !offset)
2760 		return 0;
2761 
2762 	if (!btf_tab && offset) {
2763 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2764 		if (!btf_tab)
2765 			return -ENOMEM;
2766 		prog_aux->kfunc_btf_tab = btf_tab;
2767 	}
2768 
2769 	desc_btf = find_kfunc_desc_btf(env, offset);
2770 	if (IS_ERR(desc_btf)) {
2771 		verbose(env, "failed to find BTF for kernel function\n");
2772 		return PTR_ERR(desc_btf);
2773 	}
2774 
2775 	if (find_kfunc_desc(env->prog, func_id, offset))
2776 		return 0;
2777 
2778 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
2779 		verbose(env, "too many different kernel function calls\n");
2780 		return -E2BIG;
2781 	}
2782 
2783 	func = btf_type_by_id(desc_btf, func_id);
2784 	if (!func || !btf_type_is_func(func)) {
2785 		verbose(env, "kernel btf_id %u is not a function\n",
2786 			func_id);
2787 		return -EINVAL;
2788 	}
2789 	func_proto = btf_type_by_id(desc_btf, func->type);
2790 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2791 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2792 			func_id);
2793 		return -EINVAL;
2794 	}
2795 
2796 	func_name = btf_name_by_offset(desc_btf, func->name_off);
2797 	addr = kallsyms_lookup_name(func_name);
2798 	if (!addr) {
2799 		verbose(env, "cannot find address for kernel function %s\n",
2800 			func_name);
2801 		return -EINVAL;
2802 	}
2803 	specialize_kfunc(env, func_id, offset, &addr);
2804 
2805 	if (bpf_jit_supports_far_kfunc_call()) {
2806 		call_imm = func_id;
2807 	} else {
2808 		call_imm = BPF_CALL_IMM(addr);
2809 		/* Check whether the relative offset overflows desc->imm */
2810 		if ((unsigned long)(s32)call_imm != call_imm) {
2811 			verbose(env, "address of kernel function %s is out of range\n",
2812 				func_name);
2813 			return -EINVAL;
2814 		}
2815 	}
2816 
2817 	if (bpf_dev_bound_kfunc_id(func_id)) {
2818 		err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
2819 		if (err)
2820 			return err;
2821 	}
2822 
2823 	desc = &tab->descs[tab->nr_descs++];
2824 	desc->func_id = func_id;
2825 	desc->imm = call_imm;
2826 	desc->offset = offset;
2827 	desc->addr = addr;
2828 	err = btf_distill_func_proto(&env->log, desc_btf,
2829 				     func_proto, func_name,
2830 				     &desc->func_model);
2831 	if (!err)
2832 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2833 		     kfunc_desc_cmp_by_id_off, NULL);
2834 	return err;
2835 }
2836 
2837 static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
2838 {
2839 	const struct bpf_kfunc_desc *d0 = a;
2840 	const struct bpf_kfunc_desc *d1 = b;
2841 
2842 	if (d0->imm != d1->imm)
2843 		return d0->imm < d1->imm ? -1 : 1;
2844 	if (d0->offset != d1->offset)
2845 		return d0->offset < d1->offset ? -1 : 1;
2846 	return 0;
2847 }
2848 
2849 static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog)
2850 {
2851 	struct bpf_kfunc_desc_tab *tab;
2852 
2853 	tab = prog->aux->kfunc_tab;
2854 	if (!tab)
2855 		return;
2856 
2857 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2858 	     kfunc_desc_cmp_by_imm_off, NULL);
2859 }
2860 
2861 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2862 {
2863 	return !!prog->aux->kfunc_tab;
2864 }
2865 
2866 const struct btf_func_model *
2867 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2868 			 const struct bpf_insn *insn)
2869 {
2870 	const struct bpf_kfunc_desc desc = {
2871 		.imm = insn->imm,
2872 		.offset = insn->off,
2873 	};
2874 	const struct bpf_kfunc_desc *res;
2875 	struct bpf_kfunc_desc_tab *tab;
2876 
2877 	tab = prog->aux->kfunc_tab;
2878 	res = bsearch(&desc, tab->descs, tab->nr_descs,
2879 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off);
2880 
2881 	return res ? &res->func_model : NULL;
2882 }
2883 
2884 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2885 {
2886 	struct bpf_subprog_info *subprog = env->subprog_info;
2887 	int i, ret, insn_cnt = env->prog->len, ex_cb_insn;
2888 	struct bpf_insn *insn = env->prog->insnsi;
2889 
2890 	/* Add entry function. */
2891 	ret = add_subprog(env, 0);
2892 	if (ret)
2893 		return ret;
2894 
2895 	for (i = 0; i < insn_cnt; i++, insn++) {
2896 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2897 		    !bpf_pseudo_kfunc_call(insn))
2898 			continue;
2899 
2900 		if (!env->bpf_capable) {
2901 			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
2902 			return -EPERM;
2903 		}
2904 
2905 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2906 			ret = add_subprog(env, i + insn->imm + 1);
2907 		else
2908 			ret = add_kfunc_call(env, insn->imm, insn->off);
2909 
2910 		if (ret < 0)
2911 			return ret;
2912 	}
2913 
2914 	ret = bpf_find_exception_callback_insn_off(env);
2915 	if (ret < 0)
2916 		return ret;
2917 	ex_cb_insn = ret;
2918 
2919 	/* If ex_cb_insn > 0, this means that the main program has a subprog
2920 	 * marked using BTF decl tag to serve as the exception callback.
2921 	 */
2922 	if (ex_cb_insn) {
2923 		ret = add_subprog(env, ex_cb_insn);
2924 		if (ret < 0)
2925 			return ret;
2926 		for (i = 1; i < env->subprog_cnt; i++) {
2927 			if (env->subprog_info[i].start != ex_cb_insn)
2928 				continue;
2929 			env->exception_callback_subprog = i;
2930 			mark_subprog_exc_cb(env, i);
2931 			break;
2932 		}
2933 	}
2934 
2935 	/* Add a fake 'exit' subprog to simplify the subprog iteration
2936 	 * logic. 'subprog_cnt' should not be increased.
2937 	 */
2938 	subprog[env->subprog_cnt].start = insn_cnt;
2939 
2940 	if (env->log.level & BPF_LOG_LEVEL2)
2941 		for (i = 0; i < env->subprog_cnt; i++)
2942 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2943 
2944 	return 0;
2945 }
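
/* Example layout (editorial sketch): a 20-insn prog with one subprog
 * starting at insn 12 ends up with subprog_info[].start == {0, 12, 20},
 * where the last entry is the fake 'exit' subprog added above; subprog i
 * thus spans [start[i], start[i + 1]).
 */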
2946 
2947 static int check_subprogs(struct bpf_verifier_env *env)
2948 {
2949 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2950 	struct bpf_subprog_info *subprog = env->subprog_info;
2951 	struct bpf_insn *insn = env->prog->insnsi;
2952 	int insn_cnt = env->prog->len;
2953 
2954 	/* now check that all jumps are within the same subprog */
2955 	subprog_start = subprog[cur_subprog].start;
2956 	subprog_end = subprog[cur_subprog + 1].start;
2957 	for (i = 0; i < insn_cnt; i++) {
2958 		u8 code = insn[i].code;
2959 
2960 		if (code == (BPF_JMP | BPF_CALL) &&
2961 		    insn[i].src_reg == 0 &&
2962 		    insn[i].imm == BPF_FUNC_tail_call)
2963 			subprog[cur_subprog].has_tail_call = true;
2964 		if (BPF_CLASS(code) == BPF_LD &&
2965 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2966 			subprog[cur_subprog].has_ld_abs = true;
2967 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2968 			goto next;
2969 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2970 			goto next;
2971 		if (code == (BPF_JMP32 | BPF_JA))
2972 			off = i + insn[i].imm + 1;
2973 		else
2974 			off = i + insn[i].off + 1;
2975 		if (off < subprog_start || off >= subprog_end) {
2976 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2977 			return -EINVAL;
2978 		}
2979 next:
2980 		if (i == subprog_end - 1) {
2981 			/* To avoid fall-through from one subprog into another,
2982 			 * the last insn of a subprog should be either an exit,
2983 			 * an unconditional jump back, or a bpf_throw call.
2984 			 */
2985 			if (code != (BPF_JMP | BPF_EXIT) &&
2986 			    code != (BPF_JMP32 | BPF_JA) &&
2987 			    code != (BPF_JMP | BPF_JA)) {
2988 				verbose(env, "last insn is not an exit or jmp\n");
2989 				return -EINVAL;
2990 			}
2991 			subprog_start = subprog_end;
2992 			cur_subprog++;
2993 			if (cur_subprog < env->subprog_cnt)
2994 				subprog_end = subprog[cur_subprog + 1].start;
2995 		}
2996 	}
2997 	return 0;
2998 }
2999 
3000 /* Parentage chain of this register (or stack slot) should take care of all
3001  * issues like callee-saved registers, stack slot allocation time, etc.
3002  */
3003 static int mark_reg_read(struct bpf_verifier_env *env,
3004 			 const struct bpf_reg_state *state,
3005 			 struct bpf_reg_state *parent, u8 flag)
3006 {
3007 	bool writes = parent == state->parent; /* Observe write marks */
3008 	int cnt = 0;
3009 
3010 	while (parent) {
3011 		/* if read wasn't screened by an earlier write ... */
3012 		if (writes && state->live & REG_LIVE_WRITTEN)
3013 			break;
3014 		if (parent->live & REG_LIVE_DONE) {
3015 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
3016 				reg_type_str(env, parent->type),
3017 				parent->var_off.value, parent->off);
3018 			return -EFAULT;
3019 		}
3020 		/* The first condition is more likely to be true than the
3021 		 * second, so check it first.
3022 		 */
3023 		if ((parent->live & REG_LIVE_READ) == flag ||
3024 		    parent->live & REG_LIVE_READ64)
3025 			/* The parentage chain never changes and
3026 			 * this parent was already marked as LIVE_READ.
3027 			 * There is no need to keep walking the chain again and
3028 			 * keep re-marking all parents as LIVE_READ.
3029 			 * This case happens when the same register is read
3030 			 * multiple times without writes into it in-between.
3031 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
3032 			 * then no need to set the weak REG_LIVE_READ32.
3033 			 */
3034 			break;
3035 		/* ... then we depend on parent's value */
3036 		parent->live |= flag;
3037 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
3038 		if (flag == REG_LIVE_READ64)
3039 			parent->live &= ~REG_LIVE_READ32;
3040 		state = parent;
3041 		parent = state->parent;
3042 		writes = true;
3043 		cnt++;
3044 	}
3045 
3046 	if (env->longest_mark_read_walk < cnt)
3047 		env->longest_mark_read_walk = cnt;
3048 	return 0;
3049 }
3050 
3051 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
3052 {
3053 	struct bpf_func_state *state = func(env, reg);
3054 	int spi, ret;
3055 
3056 	/* For CONST_PTR_TO_DYNPTR, it must have already been done by
3057 	 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
3058 	 * check_kfunc_call.
3059 	 */
3060 	if (reg->type == CONST_PTR_TO_DYNPTR)
3061 		return 0;
3062 	spi = dynptr_get_spi(env, reg);
3063 	if (spi < 0)
3064 		return spi;
3065 	/* Caller ensures dynptr is valid and initialized, which means spi is in
3066 	 * bounds and spi is the first dynptr slot. Simply mark stack slot as
3067 	 * read.
3068 	 */
3069 	ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
3070 			    state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
3071 	if (ret)
3072 		return ret;
3073 	return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
3074 			     state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
3075 }
3076 
3077 static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
3078 			  int spi, int nr_slots)
3079 {
3080 	struct bpf_func_state *state = func(env, reg);
3081 	int err, i;
3082 
3083 	for (i = 0; i < nr_slots; i++) {
3084 		struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr;
3085 
3086 		err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64);
3087 		if (err)
3088 			return err;
3089 
3090 		mark_stack_slot_scratched(env, spi - i);
3091 	}
3092 
3093 	return 0;
3094 }
3095 
3096 /* This function is supposed to be used by the following 32-bit optimization
3097  * code only. It returns TRUE if the source or destination register operates
3098  * on 64 bits, and FALSE otherwise.
3099  */
3100 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
3101 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
3102 {
3103 	u8 code, class, op;
3104 
3105 	code = insn->code;
3106 	class = BPF_CLASS(code);
3107 	op = BPF_OP(code);
3108 	if (class == BPF_JMP) {
3109 		/* BPF_EXIT for "main" will reach here. Return TRUE
3110 		 * conservatively.
3111 		 */
3112 		if (op == BPF_EXIT)
3113 			return true;
3114 		if (op == BPF_CALL) {
3115 			/* BPF-to-BPF calls reach here because caller-saved
3116 			 * clobbers are marked with DST_OP_NO_MARK; we don't
3117 			 * care about the width of the register def because
3118 			 * those regs are marked as NOT_INIT anyway.
3119 			 */
3120 			if (insn->src_reg == BPF_PSEUDO_CALL)
3121 				return false;
3122 			/* Helper call will reach here because of arg type
3123 			 * check, conservatively return TRUE.
3124 			 */
3125 			if (t == SRC_OP)
3126 				return true;
3127 
3128 			return false;
3129 		}
3130 	}
3131 
3132 	if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32))
3133 		return false;
3134 
3135 	if (class == BPF_ALU64 || class == BPF_JMP ||
3136 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
3137 		return true;
3138 
3139 	if (class == BPF_ALU || class == BPF_JMP32)
3140 		return false;
3141 
3142 	if (class == BPF_LDX) {
3143 		if (t != SRC_OP)
3144 			return BPF_SIZE(code) == BPF_DW || BPF_MODE(code) == BPF_MEMSX;
3145 		/* LDX source must be ptr. */
3146 		return true;
3147 	}
3148 
3149 	if (class == BPF_STX) {
3150 		/* BPF_STX (including atomic variants) has multiple source
3151 		 * operands, one of which is a ptr. Check whether the caller is
3152 		 * asking about it.
3153 		 */
3154 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
3155 			return true;
3156 		return BPF_SIZE(code) == BPF_DW;
3157 	}
3158 
3159 	if (class == BPF_LD) {
3160 		u8 mode = BPF_MODE(code);
3161 
3162 		/* LD_IMM64 */
3163 		if (mode == BPF_IMM)
3164 			return true;
3165 
3166 		/* Both LD_IND and LD_ABS return 32-bit data. */
3167 		if (t != SRC_OP)
3168 			return false;
3169 
3170 		/* Implicit ctx ptr. */
3171 		if (regno == BPF_REG_6)
3172 			return true;
3173 
3174 		/* Explicit source could be any width. */
3175 		return true;
3176 	}
3177 
3178 	if (class == BPF_ST)
3179 		/* The only source register for BPF_ST is a ptr. */
3180 		return true;
3181 
3182 	/* Conservatively return true by default. */
3183 	return true;
3184 }
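
/* Illustrative examples for is_reg64() above (editor's sketch, not part of
 * the original source):
 *
 *   r1 = r2                  // BPF_ALU64|BPF_MOV|BPF_X -> true (64-bit)
 *   w1 = w2                  // BPF_ALU|BPF_MOV|BPF_X   -> false (32-bit)
 *   r1 = *(u32 *)(r10 - 8)   // BPF_LDX|BPF_W, DST_OP   -> false (32-bit def)
 *   r1 = *(u64 *)(r10 - 8)   // BPF_LDX|BPF_DW, DST_OP  -> true
 *
 * A 32-bit def (FALSE for DST_OP) makes the insn a candidate for the
 * zero-extension tracking done via subreg_def below.
 */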
3185 
3186 /* Return the regno defined by the insn, or -1. */
3187 static int insn_def_regno(const struct bpf_insn *insn)
3188 {
3189 	switch (BPF_CLASS(insn->code)) {
3190 	case BPF_JMP:
3191 	case BPF_JMP32:
3192 	case BPF_ST:
3193 		return -1;
3194 	case BPF_STX:
3195 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
3196 		    (insn->imm & BPF_FETCH)) {
3197 			if (insn->imm == BPF_CMPXCHG)
3198 				return BPF_REG_0;
3199 			else
3200 				return insn->src_reg;
3201 		} else {
3202 			return -1;
3203 		}
3204 	default:
3205 		return insn->dst_reg;
3206 	}
3207 }
3208 
3209 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
3210 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
3211 {
3212 	int dst_reg = insn_def_regno(insn);
3213 
3214 	if (dst_reg == -1)
3215 		return false;
3216 
3217 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
3218 }
3219 
3220 static void mark_insn_zext(struct bpf_verifier_env *env,
3221 			   struct bpf_reg_state *reg)
3222 {
3223 	s32 def_idx = reg->subreg_def;
3224 
3225 	if (def_idx == DEF_NOT_SUBREG)
3226 		return;
3227 
3228 	env->insn_aux_data[def_idx - 1].zext_dst = true;
3229 	/* The dst will be zero extended, so won't be sub-register anymore. */
3230 	reg->subreg_def = DEF_NOT_SUBREG;
3231 }
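
/* Example (illustrative, editor's sketch): suppose insn 5 is "w3 = w4", a
 * 32-bit def, so __check_reg_arg() sets r3->subreg_def = 6 (insn_idx + 1).
 * If insn 7 later reads r3 as a full 64-bit value ("r5 = r3"),
 * mark_insn_zext() flags insn_aux_data[5].zext_dst so that a zero
 * extension can be inserted after insn 5 on targets lacking hardware
 * sub-register zero extension.
 */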
3232 
3233 static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno,
3234 			   enum reg_arg_type t)
3235 {
3236 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
3237 	struct bpf_reg_state *reg;
3238 	bool rw64;
3239 
3240 	if (regno >= MAX_BPF_REG) {
3241 		verbose(env, "R%d is invalid\n", regno);
3242 		return -EINVAL;
3243 	}
3244 
3245 	mark_reg_scratched(env, regno);
3246 
3247 	reg = &regs[regno];
3248 	rw64 = is_reg64(env, insn, regno, reg, t);
3249 	if (t == SRC_OP) {
3250 		/* check whether register used as source operand can be read */
3251 		if (reg->type == NOT_INIT) {
3252 			verbose(env, "R%d !read_ok\n", regno);
3253 			return -EACCES;
3254 		}
3255 		/* We don't need to worry about FP liveness because it's read-only */
3256 		if (regno == BPF_REG_FP)
3257 			return 0;
3258 
3259 		if (rw64)
3260 			mark_insn_zext(env, reg);
3261 
3262 		return mark_reg_read(env, reg, reg->parent,
3263 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
3264 	} else {
3265 		/* check whether register used as dest operand can be written to */
3266 		if (regno == BPF_REG_FP) {
3267 			verbose(env, "frame pointer is read only\n");
3268 			return -EACCES;
3269 		}
3270 		reg->live |= REG_LIVE_WRITTEN;
3271 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
3272 		if (t == DST_OP)
3273 			mark_reg_unknown(env, regs, regno);
3274 	}
3275 	return 0;
3276 }
3277 
3278 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
3279 			 enum reg_arg_type t)
3280 {
3281 	struct bpf_verifier_state *vstate = env->cur_state;
3282 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3283 
3284 	return __check_reg_arg(env, state->regs, regno, t);
3285 }
3286 
3287 static int insn_stack_access_flags(int frameno, int spi)
3288 {
3289 	return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
3290 }
3291 
3292 static int insn_stack_access_spi(int insn_flags)
3293 {
3294 	return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
3295 }
3296 
3297 static int insn_stack_access_frameno(int insn_flags)
3298 {
3299 	return insn_flags & INSN_F_FRAMENO_MASK;
3300 }
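
/* Packing example (illustrative): insn_stack_access_flags(1, 3) combines
 * frame number 1 and spi 3 into a single int with INSN_F_STACK_ACCESS set;
 * insn_stack_access_spi() and insn_stack_access_frameno() recover 3 and 1
 * from that value during backtracking.
 */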
3301 
3302 static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
3303 {
3304 	env->insn_aux_data[idx].jmp_point = true;
3305 }
3306 
3307 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
3308 {
3309 	return env->insn_aux_data[insn_idx].jmp_point;
3310 }
3311 
3312 /* for any branch, call, exit record the history of jmps in the given state */
3313 static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
3314 			    int insn_flags)
3315 {
3316 	u32 cnt = cur->jmp_history_cnt;
3317 	struct bpf_jmp_history_entry *p;
3318 	size_t alloc_size;
3319 
3320 	/* combine instruction flags if we already recorded this instruction */
3321 	if (env->cur_hist_ent) {
3322 		/* atomic instructions push insn_flags twice, for READ and
3323 		 * WRITE sides, but they should agree on stack slot
3324 		 */
3325 		WARN_ONCE((env->cur_hist_ent->flags & insn_flags) &&
3326 			  (env->cur_hist_ent->flags & insn_flags) != insn_flags,
3327 			  "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n",
3328 			  env->insn_idx, env->cur_hist_ent->flags, insn_flags);
3329 		env->cur_hist_ent->flags |= insn_flags;
3330 		return 0;
3331 	}
3332 
3333 	cnt++;
3334 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
3335 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
3336 	if (!p)
3337 		return -ENOMEM;
3338 	cur->jmp_history = p;
3339 
3340 	p = &cur->jmp_history[cnt - 1];
3341 	p->idx = env->insn_idx;
3342 	p->prev_idx = env->prev_insn_idx;
3343 	p->flags = insn_flags;
3344 	cur->jmp_history_cnt = cnt;
3345 	env->cur_hist_ent = p;
3346 
3347 	return 0;
3348 }
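
/* Example (illustrative, editor's sketch): if insn 5 is "if r1 > 7 goto +3"
 * and the branch to insn 9 is taken, processing insn 9 records roughly the
 * entry {idx = 9, prev_idx = 5}, which lets get_prev_insn_idx() below step
 * from insn 9 back to insn 5 during backtracking.
 */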
3349 
3350 static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
3351 						        u32 hist_end, int insn_idx)
3352 {
3353 	if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx)
3354 		return &st->jmp_history[hist_end - 1];
3355 	return NULL;
3356 }
3357 
3358 /* Backtrack one insn at a time. If idx is not at the top of recorded
3359  * history then previous instruction came from straight line execution.
3360  * Return -ENOENT if we exhausted all instructions within given state.
3361  *
3362  * It's legal to have a bit of looping with the same starting and ending
3363  * insn index within the same state, e.g.: 3->4->5->3, so just because current
3364  * instruction index is the same as state's first_idx doesn't mean we are
3365  * done. If there is still some jump history left, we should keep going. We
3366  * need to take into account that we might have a jump history between given
3367  * state's parent and itself, due to checkpointing. In this case, we'll have
3368  * history entry recording a jump from the last instruction of the parent
3369  * state to the first instruction of the given state.
3370  */
3371 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
3372 			     u32 *history)
3373 {
3374 	u32 cnt = *history;
3375 
3376 	if (i == st->first_insn_idx) {
3377 		if (cnt == 0)
3378 			return -ENOENT;
3379 		if (cnt == 1 && st->jmp_history[0].idx == i)
3380 			return -ENOENT;
3381 	}
3382 
3383 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
3384 		i = st->jmp_history[cnt - 1].prev_idx;
3385 		(*history)--;
3386 	} else {
3387 		i--;
3388 	}
3389 	return i;
3390 }
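
/* Walk-through (illustrative): for a state with first_insn_idx == 4 and
 * jmp_history == [{idx = 9, prev_idx = 5}], backtracking from i == 9 goes
 * 9 -> 5 (history entry consumed), then 5 -> 4 (straight line, i--), and
 * the next call returns -ENOENT since i == first_insn_idx and the history
 * is exhausted.
 */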
3391 
3392 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
3393 {
3394 	const struct btf_type *func;
3395 	struct btf *desc_btf;
3396 
3397 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
3398 		return NULL;
3399 
3400 	desc_btf = find_kfunc_desc_btf(data, insn->off);
3401 	if (IS_ERR(desc_btf))
3402 		return "<error>";
3403 
3404 	func = btf_type_by_id(desc_btf, insn->imm);
3405 	return btf_name_by_offset(desc_btf, func->name_off);
3406 }
3407 
3408 static inline void bt_init(struct backtrack_state *bt, u32 frame)
3409 {
3410 	bt->frame = frame;
3411 }
3412 
3413 static inline void bt_reset(struct backtrack_state *bt)
3414 {
3415 	struct bpf_verifier_env *env = bt->env;
3416 
3417 	memset(bt, 0, sizeof(*bt));
3418 	bt->env = env;
3419 }
3420 
3421 static inline u32 bt_empty(struct backtrack_state *bt)
3422 {
3423 	u64 mask = 0;
3424 	int i;
3425 
3426 	for (i = 0; i <= bt->frame; i++)
3427 		mask |= bt->reg_masks[i] | bt->stack_masks[i];
3428 
3429 	return mask == 0;
3430 }
3431 
3432 static inline int bt_subprog_enter(struct backtrack_state *bt)
3433 {
3434 	if (bt->frame == MAX_CALL_FRAMES - 1) {
3435 		verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame);
3436 		WARN_ONCE(1, "verifier backtracking bug");
3437 		return -EFAULT;
3438 	}
3439 	bt->frame++;
3440 	return 0;
3441 }
3442 
3443 static inline int bt_subprog_exit(struct backtrack_state *bt)
3444 {
3445 	if (bt->frame == 0) {
3446 		verbose(bt->env, "BUG subprog exit from frame 0\n");
3447 		WARN_ONCE(1, "verifier backtracking bug");
3448 		return -EFAULT;
3449 	}
3450 	bt->frame--;
3451 	return 0;
3452 }
3453 
3454 static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
3455 {
3456 	bt->reg_masks[frame] |= 1 << reg;
3457 }
3458 
3459 static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
3460 {
3461 	bt->reg_masks[frame] &= ~(1 << reg);
3462 }
3463 
3464 static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
3465 {
3466 	bt_set_frame_reg(bt, bt->frame, reg);
3467 }
3468 
3469 static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
3470 {
3471 	bt_clear_frame_reg(bt, bt->frame, reg);
3472 }
3473 
3474 static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
3475 {
3476 	bt->stack_masks[frame] |= 1ull << slot;
3477 }
3478 
3479 static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
3480 {
3481 	bt->stack_masks[frame] &= ~(1ull << slot);
3482 }
3483 
3484 static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
3485 {
3486 	return bt->reg_masks[frame];
3487 }
3488 
3489 static inline u32 bt_reg_mask(struct backtrack_state *bt)
3490 {
3491 	return bt->reg_masks[bt->frame];
3492 }
3493 
3494 static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
3495 {
3496 	return bt->stack_masks[frame];
3497 }
3498 
3499 static inline u64 bt_stack_mask(struct backtrack_state *bt)
3500 {
3501 	return bt->stack_masks[bt->frame];
3502 }
3503 
3504 static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
3505 {
3506 	return bt->reg_masks[bt->frame] & (1 << reg);
3507 }
3508 
3509 static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
3510 {
3511 	return bt->stack_masks[frame] & (1ull << slot);
3512 }
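
/* Usage sketch (illustrative): to request precision for r5 and stack slot 2
 * of the current frame during backtracking, one would do:
 *
 *	bt_set_reg(bt, BPF_REG_5);
 *	bt_set_frame_slot(bt, bt->frame, 2);
 *
 * backtrack_insn() below clears these bits once the defining instructions
 * are found.
 */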
3513 
3514 /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
3515 static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask)
3516 {
3517 	DECLARE_BITMAP(mask, 64);
3518 	bool first = true;
3519 	int i, n;
3520 
3521 	buf[0] = '\0';
3522 
3523 	bitmap_from_u64(mask, reg_mask);
3524 	for_each_set_bit(i, mask, 32) {
3525 		n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i);
3526 		first = false;
3527 		buf += n;
3528 		buf_sz -= n;
3529 		if (buf_sz < 0)
3530 			break;
3531 	}
3532 }
3533 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
3534 static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
3535 {
3536 	DECLARE_BITMAP(mask, 64);
3537 	bool first = true;
3538 	int i, n;
3539 
3540 	buf[0] = '\0';
3541 
3542 	bitmap_from_u64(mask, stack_mask);
3543 	for_each_set_bit(i, mask, 64) {
3544 		n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8);
3545 		first = false;
3546 		buf += n;
3547 		buf_sz -= n;
3548 		if (buf_sz < 0)
3549 			break;
3550 	}
3551 }
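
/* Mapping note (illustrative): bit i of the stack mask stands for the
 * 8-byte slot at fp - (i + 1) * 8, so bits 0, 2 and 4 of mask 0x15 print
 * as "-8,-24,-40" above.
 */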
3552 
3553 static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
3554 
3555 /* For given verifier state backtrack_insn() is called from the last insn to
3556  * the first insn. Its purpose is to compute a bitmask of registers and
3557  * stack slots that need precision in the parent verifier state.
3558  *
3559  * @idx is an index of the instruction we are currently processing;
3560  * @subseq_idx is an index of the subsequent instruction that:
3561  *   - *would be* executed next, if jump history is viewed in forward order;
3562  *   - *was* processed previously during backtracking.
3563  */
3564 static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
3565 			  struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
3566 {
3567 	const struct bpf_insn_cbs cbs = {
3568 		.cb_call	= disasm_kfunc_name,
3569 		.cb_print	= verbose,
3570 		.private_data	= env,
3571 	};
3572 	struct bpf_insn *insn = env->prog->insnsi + idx;
3573 	u8 class = BPF_CLASS(insn->code);
3574 	u8 opcode = BPF_OP(insn->code);
3575 	u8 mode = BPF_MODE(insn->code);
3576 	u32 dreg = insn->dst_reg;
3577 	u32 sreg = insn->src_reg;
3578 	u32 spi, i, fr;
3579 
3580 	if (insn->code == 0)
3581 		return 0;
3582 	if (env->log.level & BPF_LOG_LEVEL2) {
3583 		fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt));
3584 		verbose(env, "mark_precise: frame%d: regs=%s ",
3585 			bt->frame, env->tmp_str_buf);
3586 		fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
3587 		verbose(env, "stack=%s before ", env->tmp_str_buf);
3588 		verbose(env, "%d: ", idx);
3589 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
3590 	}
3591 
3592 	if (class == BPF_ALU || class == BPF_ALU64) {
3593 		if (!bt_is_reg_set(bt, dreg))
3594 			return 0;
3595 		if (opcode == BPF_END || opcode == BPF_NEG) {
3596 			/* sreg is reserved and unused
3597 			 * dreg still needs precision before this insn
3598 			 */
3599 			return 0;
3600 		} else if (opcode == BPF_MOV) {
3601 			if (BPF_SRC(insn->code) == BPF_X) {
3602 				/* dreg = sreg or dreg = (s8, s16, s32)sreg
3603 				 * dreg needs precision after this insn
3604 				 * sreg needs precision before this insn
3605 				 */
3606 				bt_clear_reg(bt, dreg);
3607 				bt_set_reg(bt, sreg);
3608 			} else {
3609 				/* dreg = K
3610 				 * dreg needs precision after this insn.
3611 				 * Corresponding register is already marked
3612 				 * as precise=true in this verifier state.
3613 				 * No further markings in parent are necessary
3614 				 */
3615 				bt_clear_reg(bt, dreg);
3616 			}
3617 		} else {
3618 			if (BPF_SRC(insn->code) == BPF_X) {
3619 				/* dreg += sreg
3620 				 * both dreg and sreg need precision
3621 				 * before this insn
3622 				 */
3623 				bt_set_reg(bt, sreg);
3624 			} /* else dreg += K
3625 			   * dreg still needs precision before this insn
3626 			   */
3627 		}
3628 	} else if (class == BPF_LDX) {
3629 		if (!bt_is_reg_set(bt, dreg))
3630 			return 0;
3631 		bt_clear_reg(bt, dreg);
3632 
3633 		/* scalars can only be spilled into stack w/o losing precision.
3634 		 * Load from any other memory can be zero extended.
3635 		 * The desire to keep that precision is already indicated
3636 		 * by 'precise' mark in corresponding register of this state.
3637 		 * No further tracking necessary.
3638 		 */
3639 		if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
3640 			return 0;
3641 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
3642 		 * that [fp - off] slot contains scalar that needs to be
3643 		 * tracked with precision
3644 		 */
3645 		spi = insn_stack_access_spi(hist->flags);
3646 		fr = insn_stack_access_frameno(hist->flags);
3647 		bt_set_frame_slot(bt, fr, spi);
3648 	} else if (class == BPF_STX || class == BPF_ST) {
3649 		if (bt_is_reg_set(bt, dreg))
3650 			/* stx & st shouldn't be using _scalar_ dst_reg
3651 			 * to access memory. It means backtracking
3652 			 * encountered a case of pointer subtraction.
3653 			 */
3654 			return -ENOTSUPP;
3655 		/* scalars can only be spilled into stack */
3656 		if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
3657 			return 0;
3658 		spi = insn_stack_access_spi(hist->flags);
3659 		fr = insn_stack_access_frameno(hist->flags);
3660 		if (!bt_is_frame_slot_set(bt, fr, spi))
3661 			return 0;
3662 		bt_clear_frame_slot(bt, fr, spi);
3663 		if (class == BPF_STX)
3664 			bt_set_reg(bt, sreg);
3665 	} else if (class == BPF_JMP || class == BPF_JMP32) {
3666 		if (bpf_pseudo_call(insn)) {
3667 			int subprog_insn_idx, subprog;
3668 
3669 			subprog_insn_idx = idx + insn->imm + 1;
3670 			subprog = find_subprog(env, subprog_insn_idx);
3671 			if (subprog < 0)
3672 				return -EFAULT;
3673 
3674 			if (subprog_is_global(env, subprog)) {
3675 				/* check that jump history doesn't have any
3676 				 * extra instructions from subprog; the next
3677 				 * instruction after call to global subprog
3678 				 * should be literally next instruction in
3679 				 * caller program
3680 				 */
3681 				WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug");
3682 				/* r1-r5 are invalidated after subprog call,
3683 				 * so for global func call it shouldn't be set
3684 				 * anymore
3685 				 */
3686 				if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3687 					verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3688 					WARN_ONCE(1, "verifier backtracking bug");
3689 					return -EFAULT;
3690 				}
3691 				/* global subprog always sets R0 */
3692 				bt_clear_reg(bt, BPF_REG_0);
3693 				return 0;
3694 			} else {
3695 				/* static subprog call instruction, which
3696 				 * means that we are exiting current subprog,
3697 				 * so only r1-r5 could be still requested as
3698 				 * precise, r0 and r6-r10 or any stack slot in
3699 				 * the current frame should be zero by now
3700 				 */
3701 				if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
3702 					verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3703 					WARN_ONCE(1, "verifier backtracking bug");
3704 					return -EFAULT;
3705 				}
3706 				/* we are now tracking register spills correctly,
3707 				 * so any instance of leftover slots is a bug
3708 				 */
3709 				if (bt_stack_mask(bt) != 0) {
3710 					verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
3711 					WARN_ONCE(1, "verifier backtracking bug (subprog leftover stack slots)");
3712 					return -EFAULT;
3713 				}
3714 				/* propagate r1-r5 to the caller */
3715 				for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
3716 					if (bt_is_reg_set(bt, i)) {
3717 						bt_clear_reg(bt, i);
3718 						bt_set_frame_reg(bt, bt->frame - 1, i);
3719 					}
3720 				}
3721 				if (bt_subprog_exit(bt))
3722 					return -EFAULT;
3723 				return 0;
3724 			}
3725 		} else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) {
3726 			/* exit from callback subprog to callback-calling helper or
3727 			 * kfunc call. Use idx/subseq_idx check to discern it from
3728 			 * straight line code backtracking.
3729 			 * Unlike the subprog call handling above, we shouldn't
3730 			 * propagate precision of r1-r5 (if any requested), as they are
3731 			 * not actually arguments passed directly to callback subprogs
3732 			 */
3733 			if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
3734 				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3735 				WARN_ONCE(1, "verifier backtracking bug");
3736 				return -EFAULT;
3737 			}
3738 			if (bt_stack_mask(bt) != 0) {
3739 				verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
3740 				WARN_ONCE(1, "verifier backtracking bug (callback leftover stack slots)");
3741 				return -EFAULT;
3742 			}
3743 			/* clear r1-r5 in callback subprog's mask */
3744 			for (i = BPF_REG_1; i <= BPF_REG_5; i++)
3745 				bt_clear_reg(bt, i);
3746 			if (bt_subprog_exit(bt))
3747 				return -EFAULT;
3748 			return 0;
3749 		} else if (opcode == BPF_CALL) {
3750 			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
3751 			 * catch this error later. Make backtracking conservative
3752 			 * with ENOTSUPP.
3753 			 */
3754 			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
3755 				return -ENOTSUPP;
3756 			/* regular helper call sets R0 */
3757 			bt_clear_reg(bt, BPF_REG_0);
3758 			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3759 				/* if backtracking was looking for registers R1-R5
3760 				 * they should have been found already.
3761 				 */
3762 				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3763 				WARN_ONCE(1, "verifier backtracking bug");
3764 				return -EFAULT;
3765 			}
3766 		} else if (opcode == BPF_EXIT) {
3767 			bool r0_precise;
3768 
3769 			/* Backtracking to a nested function call, 'idx' is a part of
3770 			 * the inner frame, 'subseq_idx' is a part of the outer frame.
3771 			 * In case of a regular function call, instructions giving
3772 			 * precision to registers R1-R5 should have been found already.
3773 			 * In case of a callback, it is ok to have R1-R5 marked for
3774 			 * backtracking, as these registers are set by the function
3775 			 * invoking callback.
3776 			 */
3777 			if (subseq_idx >= 0 && calls_callback(env, subseq_idx))
3778 				for (i = BPF_REG_1; i <= BPF_REG_5; i++)
3779 					bt_clear_reg(bt, i);
3780 			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3781 				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3782 				WARN_ONCE(1, "verifier backtracking bug");
3783 				return -EFAULT;
3784 			}
3785 
3786 			/* BPF_EXIT in subprog or callback always returns
3787 			 * right after the call instruction, so by checking
3788 			 * whether the instruction at subseq_idx-1 is subprog
3789 			 * call or not we can distinguish actual exit from
3790 			 * *subprog* from exit from *callback*. In the former
3791 			 * case, we need to propagate r0 precision, if
3792 			 * necessary. In the latter case we never do that.
3793 			 */
3794 			r0_precise = subseq_idx - 1 >= 0 &&
3795 				     bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
3796 				     bt_is_reg_set(bt, BPF_REG_0);
3797 
3798 			bt_clear_reg(bt, BPF_REG_0);
3799 			if (bt_subprog_enter(bt))
3800 				return -EFAULT;
3801 
3802 			if (r0_precise)
3803 				bt_set_reg(bt, BPF_REG_0);
3804 			/* r6-r9 and stack slots will stay set in caller frame
3805 			 * bitmasks until we return back from callee(s)
3806 			 */
3807 			return 0;
3808 		} else if (BPF_SRC(insn->code) == BPF_X) {
3809 			if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
3810 				return 0;
3811 			/* dreg <cond> sreg
3812 			 * Both dreg and sreg need precision before
3813 			 * this insn. If only sreg was marked precise
3814 			 * before it would be equally necessary to
3815 			 * propagate it to dreg.
3816 			 */
3817 			bt_set_reg(bt, dreg);
3818 			bt_set_reg(bt, sreg);
3819 			 /* else dreg <cond> K
3820 			  * Only dreg still needs precision before
3821 			  * this insn, so for the K-based conditional
3822 			  * there is nothing new to be marked.
3823 			  */
3824 		}
3825 	} else if (class == BPF_LD) {
3826 		if (!bt_is_reg_set(bt, dreg))
3827 			return 0;
3828 		bt_clear_reg(bt, dreg);
3829 		/* It's ld_imm64 or ld_abs or ld_ind.
3830 		 * For ld_imm64 no further tracking of precision
3831 		 * into parent is necessary
3832 		 */
3833 		if (mode == BPF_IND || mode == BPF_ABS)
3834 			/* to be analyzed */
3835 			return -ENOTSUPP;
3836 	}
3837 	return 0;
3838 }
3839 
3840 /* the scalar precision tracking algorithm:
3841  * . at the start all registers have precise=false.
3842  * . scalar ranges are tracked as normal through alu and jmp insns.
3843  * . once precise value of the scalar register is used in:
3844  *   .  ptr + scalar alu
3845  *   . if (scalar cond K|scalar)
3846  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
3847  *   backtrack through the verifier states and mark all registers and
3848  *   stack slots with spilled constants that these scalar registers
3849  *   should be precise.
3850  * . during state pruning two registers (or spilled stack slots)
3851  *   are equivalent if both are not precise.
3852  *
3853  * Note the verifier cannot simply walk register parentage chain,
3854  * since many different registers and stack slots could have been
3855  * used to compute single precise scalar.
3856  *
3857  * The approach of starting with precise=true for all registers and then
3858  * backtrack to mark a register as not precise when the verifier detects
3859  * that program doesn't care about specific value (e.g., when helper
3860  * takes register as ARG_ANYTHING parameter) is not safe.
3861  *
3862  * It's ok to walk single parentage chain of the verifier states.
3863  * It's possible that this backtracking will go all the way till 1st insn.
3864  * All other branches will be explored for needing precision later.
3865  *
3866  * The backtracking needs to deal with cases like:
3867  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
3868  * r9 -= r8
3869  * r5 = r9
3870  * if r5 > 0x79f goto pc+7
3871  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
3872  * r5 += 1
3873  * ...
3874  * call bpf_perf_event_output#25
3875  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
3876  *
3877  * and this case:
3878  * r6 = 1
3879  * call foo // uses callee's r6 inside to compute r0
3880  * r0 += r6
3881  * if r0 == 0 goto
3882  *
3883  * to track above reg_mask/stack_mask needs to be independent for each frame.
3884  *
3885  * Also if parent's curframe > frame where backtracking started,
3886  * the verifier needs to mark registers in both frames, otherwise callees
3887  * may incorrectly prune callers. This is similar to
3888  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
3889  *
3890  * For now backtracking falls back into conservative marking.
3891  */
3892 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
3893 				     struct bpf_verifier_state *st)
3894 {
3895 	struct bpf_func_state *func;
3896 	struct bpf_reg_state *reg;
3897 	int i, j;
3898 
3899 	if (env->log.level & BPF_LOG_LEVEL2) {
3900 		verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n",
3901 			st->curframe);
3902 	}
3903 
3904 	/* big hammer: mark all scalars precise in this path.
3905 	 * pop_stack may still get !precise scalars.
3906 	 * We also skip current state and go straight to first parent state,
3907 	 * because precision markings in current non-checkpointed state are
3908 	 * not needed. See why in the comment in __mark_chain_precision below.
3909 	 */
3910 	for (st = st->parent; st; st = st->parent) {
3911 		for (i = 0; i <= st->curframe; i++) {
3912 			func = st->frame[i];
3913 			for (j = 0; j < BPF_REG_FP; j++) {
3914 				reg = &func->regs[j];
3915 				if (reg->type != SCALAR_VALUE || reg->precise)
3916 					continue;
3917 				reg->precise = true;
3918 				if (env->log.level & BPF_LOG_LEVEL2) {
3919 					verbose(env, "force_precise: frame%d: forcing r%d to be precise\n",
3920 						i, j);
3921 				}
3922 			}
3923 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3924 				if (!is_spilled_reg(&func->stack[j]))
3925 					continue;
3926 				reg = &func->stack[j].spilled_ptr;
3927 				if (reg->type != SCALAR_VALUE || reg->precise)
3928 					continue;
3929 				reg->precise = true;
3930 				if (env->log.level & BPF_LOG_LEVEL2) {
3931 					verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n",
3932 						i, -(j + 1) * 8);
3933 				}
3934 			}
3935 		}
3936 	}
3937 }
3938 
3939 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3940 {
3941 	struct bpf_func_state *func;
3942 	struct bpf_reg_state *reg;
3943 	int i, j;
3944 
3945 	for (i = 0; i <= st->curframe; i++) {
3946 		func = st->frame[i];
3947 		for (j = 0; j < BPF_REG_FP; j++) {
3948 			reg = &func->regs[j];
3949 			if (reg->type != SCALAR_VALUE)
3950 				continue;
3951 			reg->precise = false;
3952 		}
3953 		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3954 			if (!is_spilled_reg(&func->stack[j]))
3955 				continue;
3956 			reg = &func->stack[j].spilled_ptr;
3957 			if (reg->type != SCALAR_VALUE)
3958 				continue;
3959 			reg->precise = false;
3960 		}
3961 	}
3962 }
3963 
3964 static bool idset_contains(struct bpf_idset *s, u32 id)
3965 {
3966 	u32 i;
3967 
3968 	for (i = 0; i < s->count; ++i)
3969 		if (s->ids[i] == id)
3970 			return true;
3971 
3972 	return false;
3973 }
3974 
3975 static int idset_push(struct bpf_idset *s, u32 id)
3976 {
3977 	if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids)))
3978 		return -EFAULT;
3979 	s->ids[s->count++] = id;
3980 	return 0;
3981 }
3982 
3983 static void idset_reset(struct bpf_idset *s)
3984 {
3985 	s->count = 0;
3986 }
3987 
3988 /* Collect a set of IDs for all registers currently marked as precise in env->bt.
3989  * Mark all registers with these IDs as precise.
3990  */
3991 static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3992 {
3993 	struct bpf_idset *precise_ids = &env->idset_scratch;
3994 	struct backtrack_state *bt = &env->bt;
3995 	struct bpf_func_state *func;
3996 	struct bpf_reg_state *reg;
3997 	DECLARE_BITMAP(mask, 64);
3998 	int i, fr;
3999 
4000 	idset_reset(precise_ids);
4001 
4002 	for (fr = bt->frame; fr >= 0; fr--) {
4003 		func = st->frame[fr];
4004 
4005 		bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
4006 		for_each_set_bit(i, mask, 32) {
4007 			reg = &func->regs[i];
4008 			if (!reg->id || reg->type != SCALAR_VALUE)
4009 				continue;
4010 			if (idset_push(precise_ids, reg->id))
4011 				return -EFAULT;
4012 		}
4013 
4014 		bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
4015 		for_each_set_bit(i, mask, 64) {
4016 			if (i >= func->allocated_stack / BPF_REG_SIZE)
4017 				break;
4018 			if (!is_spilled_scalar_reg(&func->stack[i]))
4019 				continue;
4020 			reg = &func->stack[i].spilled_ptr;
4021 			if (!reg->id)
4022 				continue;
4023 			if (idset_push(precise_ids, reg->id))
4024 				return -EFAULT;
4025 		}
4026 	}
4027 
4028 	for (fr = 0; fr <= st->curframe; ++fr) {
4029 		func = st->frame[fr];
4030 
4031 		for (i = BPF_REG_0; i < BPF_REG_10; ++i) {
4032 			reg = &func->regs[i];
4033 			if (!reg->id)
4034 				continue;
4035 			if (!idset_contains(precise_ids, reg->id))
4036 				continue;
4037 			bt_set_frame_reg(bt, fr, i);
4038 		}
4039 		for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) {
4040 			if (!is_spilled_scalar_reg(&func->stack[i]))
4041 				continue;
4042 			reg = &func->stack[i].spilled_ptr;
4043 			if (!reg->id)
4044 				continue;
4045 			if (!idset_contains(precise_ids, reg->id))
4046 				continue;
4047 			bt_set_frame_slot(bt, fr, i);
4048 		}
4049 	}
4050 
4051 	return 0;
4052 }
4053 
4054 /*
4055  * __mark_chain_precision() backtracks BPF program instruction sequence and
4056  * chain of verifier states making sure that register *regno* (if regno >= 0)
4057  * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
4058  * SCALARS, as well as any other registers and slots that contribute to
4059  * a tracked state of given registers/stack slots, depending on specific BPF
4060  * assembly instructions (see backtrack_insn() for exact instruction handling
4061  * logic). This backtracking relies on recorded jmp_history and is able to
4062  * traverse entire chain of parent states. This process ends only when all the
4063  * necessary registers/slots and their transitive dependencies are marked as
4064  * precise.
4065  *
4066  * One important and subtle aspect is that precise marks *do not matter* in
4067  * the currently verified state (current state). It is important to understand
4068  * why this is the case.
4069  *
4070  * First, note that current state is the state that is not yet "checkpointed",
4071  * i.e., it is not yet put into env->explored_states, and it has no children
4072  * states yet. It's ephemeral, and can end up either a) being discarded if
4073  * compatible explored state is found at some point or BPF_EXIT instruction is
4074  * reached or b) checkpointed and put into env->explored_states, branching out
4075  * into one or more children states.
4076  *
4077  * In the former case, precise markings in current state are completely
4078  * ignored by state comparison code (see regsafe() for details). Only
4079  * checkpointed ("old") state precise markings are important, and if old
4080  * state's register/slot is precise, regsafe() assumes current state's
4081  * register/slot as precise and checks value ranges exactly and precisely. If
4082  * states turn out to be compatible, current state's necessary precise
4083  * markings and any required parent states' precise markings are enforced
4084  * after the fact with propagate_precision() logic. But it's
4085  * important to realize that in this case, even after marking current state
4086  * registers/slots as precise, we immediately discard current state. So what
4087  * actually matters is any of the precise markings propagated into current
4088  * state's parent states, which are always checkpointed (due to b) case above).
4089  * As such, for scenario a) it doesn't matter if current state has precise
4090  * markings set or not.
4091  *
4092  * Now, for the scenario b), checkpointing and forking into child(ren)
4093  * state(s). Note that before current state gets to checkpointing step, any
4094  * processed instruction always assumes precise SCALAR register/slot
4095  * knowledge: if precise value or range is useful to prune jump branch, BPF
4096  * verifier takes this opportunity enthusiastically. Similarly, when
4097  * register's value is used to calculate offset or memory address, exact
4098  * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
4099  * what we mentioned above about state comparison ignoring precise
4100  * markings, BPF verifier ignores and also assumes precise
4101  * markings *at will* during instruction verification process. But as verifier
4102  * assumes precision, it also propagates any precision dependencies across
4103  * parent states, which are not yet finalized, so can be further restricted
4104  * based on new knowledge gained from restrictions enforced by their children
4105  * states. This is so that once those parent states are finalized, i.e., when
4106  * they have no more active children states, state comparison logic in
4107  * is_state_visited() would enforce strict and precise SCALAR ranges, if
4108  * required for correctness.
4109  *
4110  * To build a bit more intuition, note also that once a state is checkpointed,
4111  * the path we took to get to that state is not important. This is a crucial
4112  * property for state pruning. When state is checkpointed and finalized at
4113  * some instruction index, it can be correctly and safely used to "short
4114  * circuit" any *compatible* state that reaches exactly the same instruction
4115  * index. I.e., if we jumped to that instruction from a completely different
4116  * code path than original finalized state was derived from, it doesn't
4117  * matter, current state can be discarded because from that instruction
4118  * forward having a compatible state will ensure we will safely reach the
4119  * exit. States describe preconditions for further exploration, but completely
4120  * forget the history of how we got here.
4121  *
4122  * This also means that even if we needed precise SCALAR range to get to
4123  * finalized state, but from that point forward *that same* SCALAR register is
4124  * never used in a precise context (i.e., its precise value is not needed for
4125  * correctness), it's correct and safe to mark such register as "imprecise"
4126  * (i.e., precise marking set to false). This is what we rely on when we do
4127  * not set precise marking in current state. If no child state requires
4128  * precision for any given SCALAR register, it's safe to dictate that it can
4129  * be imprecise. If any child state does require this register to be precise,
4130  * we'll mark it precise later retroactively during precise markings
4131  * propagation from child state to parent states.
4132  *
4133  * Skipping precise marking setting in current state is a mild version of
4134  * relying on the above observation. But we can utilize this property even
4135  * more aggressively by proactively forgetting any precise marking in the
4136  * current state (which we inherited from the parent state), right before we
4137  * checkpoint it and branch off into new child state. This is done by
4138  * mark_all_scalars_imprecise() to hopefully get more permissive and generic
4139  * finalized states which help in short circuiting more future states.
4140  */
4141 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
4142 {
4143 	struct backtrack_state *bt = &env->bt;
4144 	struct bpf_verifier_state *st = env->cur_state;
4145 	int first_idx = st->first_insn_idx;
4146 	int last_idx = env->insn_idx;
4147 	int subseq_idx = -1;
4148 	struct bpf_func_state *func;
4149 	struct bpf_reg_state *reg;
4150 	bool skip_first = true;
4151 	int i, fr, err;
4152 
4153 	if (!env->bpf_capable)
4154 		return 0;
4155 
4156 	/* set frame number from which we are starting to backtrack */
4157 	bt_init(bt, env->cur_state->curframe);
4158 
4159 	/* Do sanity checks against current state of register and/or stack
4160 	 * slot, but don't set precise flag in current state, as precision
4161 	 * tracking in the current state is unnecessary.
4162 	 */
4163 	func = st->frame[bt->frame];
4164 	if (regno >= 0) {
4165 		reg = &func->regs[regno];
4166 		if (reg->type != SCALAR_VALUE) {
4167 			WARN_ONCE(1, "backtracking misuse");
4168 			return -EFAULT;
4169 		}
4170 		bt_set_reg(bt, regno);
4171 	}
4172 
4173 	if (bt_empty(bt))
4174 		return 0;
4175 
4176 	for (;;) {
4177 		DECLARE_BITMAP(mask, 64);
4178 		u32 history = st->jmp_history_cnt;
4179 		struct bpf_jmp_history_entry *hist;
4180 
4181 		if (env->log.level & BPF_LOG_LEVEL2) {
4182 			verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d\n",
4183 				bt->frame, last_idx, first_idx, subseq_idx);
4184 		}
4185 
4186 		/* If some register with scalar ID is marked as precise,
4187 		 * make sure that all registers sharing this ID are also precise.
4188 		 * This is needed to estimate effect of find_equal_scalars().
4189 		 * Do this at the last instruction of each state,
4190 		 * bpf_reg_state::id fields are valid for these instructions.
4191 		 *
4192 		 * Allows tracking precision in situations like the one below:
4193 		 *
4194 		 *     r2 = unknown value
4195 		 *     ...
4196 		 *   --- state #0 ---
4197 		 *     ...
4198 		 *     r1 = r2                 // r1 and r2 now share the same ID
4199 		 *     ...
4200 		 *   --- state #1 {r1.id = A, r2.id = A} ---
4201 		 *     ...
4202 		 *     if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1
4203 		 *     ...
4204 		 *   --- state #2 {r1.id = A, r2.id = A} ---
4205 		 *     r3 = r10
4206 		 *     r3 += r1                // need to mark both r1 and r2
4207 		 */
4208 		if (mark_precise_scalar_ids(env, st))
4209 			return -EFAULT;
4210 
4211 		if (last_idx < 0) {
4212 			/* we are at the entry into subprog, which
4213 			 * is expected for global funcs, but only if
4214 			 * requested precise registers are R1-R5
4215 			 * (which are global func's input arguments)
4216 			 */
4217 			if (st->curframe == 0 &&
4218 			    st->frame[0]->subprogno > 0 &&
4219 			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
4220 			    bt_stack_mask(bt) == 0 &&
4221 			    (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) {
4222 				bitmap_from_u64(mask, bt_reg_mask(bt));
4223 				for_each_set_bit(i, mask, 32) {
4224 					reg = &st->frame[0]->regs[i];
4225 					bt_clear_reg(bt, i);
4226 					if (reg->type == SCALAR_VALUE)
4227 						reg->precise = true;
4228 				}
4229 				return 0;
4230 			}
4231 
4232 			verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n",
4233 				st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt));
4234 			WARN_ONCE(1, "verifier backtracking bug");
4235 			return -EFAULT;
4236 		}
4237 
4238 		for (i = last_idx;;) {
4239 			if (skip_first) {
4240 				err = 0;
4241 				skip_first = false;
4242 			} else {
4243 				hist = get_jmp_hist_entry(st, history, i);
4244 				err = backtrack_insn(env, i, subseq_idx, hist, bt);
4245 			}
4246 			if (err == -ENOTSUPP) {
4247 				mark_all_scalars_precise(env, env->cur_state);
4248 				bt_reset(bt);
4249 				return 0;
4250 			} else if (err) {
4251 				return err;
4252 			}
4253 			if (bt_empty(bt))
4254 				/* Found assignment(s) into tracked register in this state.
4255 				 * Since this state is already marked, just return.
4256 				 * Nothing to be tracked further in the parent state.
4257 				 */
4258 				return 0;
4259 			subseq_idx = i;
4260 			i = get_prev_insn_idx(st, i, &history);
4261 			if (i == -ENOENT)
4262 				break;
4263 			if (i >= env->prog->len) {
4264 				/* This can happen if backtracking reached insn 0
4265 				 * and there are still reg_mask or stack_mask
4266 				 * to backtrack.
4267 				 * It means the backtracking missed the spot where
4268 				 * a particular register was initialized with a constant.
4269 				 */
4270 				verbose(env, "BUG backtracking idx %d\n", i);
4271 				WARN_ONCE(1, "verifier backtracking bug");
4272 				return -EFAULT;
4273 			}
4274 		}
4275 		st = st->parent;
4276 		if (!st)
4277 			break;
4278 
4279 		for (fr = bt->frame; fr >= 0; fr--) {
4280 			func = st->frame[fr];
4281 			bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
4282 			for_each_set_bit(i, mask, 32) {
4283 				reg = &func->regs[i];
4284 				if (reg->type != SCALAR_VALUE) {
4285 					bt_clear_frame_reg(bt, fr, i);
4286 					continue;
4287 				}
4288 				if (reg->precise)
4289 					bt_clear_frame_reg(bt, fr, i);
4290 				else
4291 					reg->precise = true;
4292 			}
4293 
4294 			bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
4295 			for_each_set_bit(i, mask, 64) {
4296 				if (i >= func->allocated_stack / BPF_REG_SIZE) {
4297 					verbose(env, "BUG backtracking (stack slot %d, total slots %d)\n",
4298 						i, func->allocated_stack / BPF_REG_SIZE);
4299 					WARN_ONCE(1, "verifier backtracking bug (stack slot out of bounds)");
4300 					return -EFAULT;
4301 				}
4302 
4303 				if (!is_spilled_scalar_reg(&func->stack[i])) {
4304 					bt_clear_frame_slot(bt, fr, i);
4305 					continue;
4306 				}
4307 				reg = &func->stack[i].spilled_ptr;
4308 				if (reg->precise)
4309 					bt_clear_frame_slot(bt, fr, i);
4310 				else
4311 					reg->precise = true;
4312 			}
4313 			if (env->log.level & BPF_LOG_LEVEL2) {
4314 				fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
4315 					     bt_frame_reg_mask(bt, fr));
4316 				verbose(env, "mark_precise: frame%d: parent state regs=%s ",
4317 					fr, env->tmp_str_buf);
4318 				fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
4319 					       bt_frame_stack_mask(bt, fr));
4320 				verbose(env, "stack=%s: ", env->tmp_str_buf);
4321 				print_verifier_state(env, func, true);
4322 			}
4323 		}
4324 
4325 		if (bt_empty(bt))
4326 			return 0;
4327 
4328 		subseq_idx = first_idx;
4329 		last_idx = st->last_insn_idx;
4330 		first_idx = st->first_insn_idx;
4331 	}
4332 
4333 	/* if we still have requested precise regs or slots, we missed
4334 	 * something (e.g., stack access through non-r10 register), so
4335  * fall back to marking all precise
4336 	 */
4337 	if (!bt_empty(bt)) {
4338 		mark_all_scalars_precise(env, env->cur_state);
4339 		bt_reset(bt);
4340 	}
4341 
4342 	return 0;
4343 }
4344 
4345 int mark_chain_precision(struct bpf_verifier_env *env, int regno)
4346 {
4347 	return __mark_chain_precision(env, regno);
4348 }
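
/* Call-site sketch (illustrative): when a helper argument must be a known
 * constant (e.g. an ARG_CONST_SIZE size), the verifier does roughly:
 *
 *	err = mark_chain_precision(env, regno);
 *	if (err)
 *		return err;
 *
 * which backtracks from the current insn and marks every scalar that
 * contributed to that register's value as precise.
 */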
4349 
4350 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to
4351  * desired reg and stack masks across all relevant frames
4352  */
4353 static int mark_chain_precision_batch(struct bpf_verifier_env *env)
4354 {
4355 	return __mark_chain_precision(env, -1);
4356 }
4357 
4358 static bool is_spillable_regtype(enum bpf_reg_type type)
4359 {
4360 	switch (base_type(type)) {
4361 	case PTR_TO_MAP_VALUE:
4362 	case PTR_TO_STACK:
4363 	case PTR_TO_CTX:
4364 	case PTR_TO_PACKET:
4365 	case PTR_TO_PACKET_META:
4366 	case PTR_TO_PACKET_END:
4367 	case PTR_TO_FLOW_KEYS:
4368 	case CONST_PTR_TO_MAP:
4369 	case PTR_TO_SOCKET:
4370 	case PTR_TO_SOCK_COMMON:
4371 	case PTR_TO_TCP_SOCK:
4372 	case PTR_TO_XDP_SOCK:
4373 	case PTR_TO_BTF_ID:
4374 	case PTR_TO_BUF:
4375 	case PTR_TO_MEM:
4376 	case PTR_TO_FUNC:
4377 	case PTR_TO_MAP_KEY:
4378 		return true;
4379 	default:
4380 		return false;
4381 	}
4382 }
4383 
4384 /* Does this register contain a constant zero? */
4385 static bool register_is_null(struct bpf_reg_state *reg)
4386 {
4387 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
4388 }
4389 
4390 /* check if register is a constant scalar value */
4391 static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32)
4392 {
4393 	return reg->type == SCALAR_VALUE &&
4394 	       tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off);
4395 }
4396 
4397 /* assuming is_reg_const() is true, return constant value of a register */
4398 static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
4399 {
4400 	return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value;
4401 }
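
/* Usage sketch (illustrative):
 *
 *	if (is_reg_const(reg, false))
 *		val = reg_const_value(reg, false);  // full 64-bit constant
 *
 * Passing subreg32 == true restricts both the check and the returned value
 * to the lower 32 bits of the register.
 */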
4402 
4403 static bool __is_pointer_value(bool allow_ptr_leaks,
4404 			       const struct bpf_reg_state *reg)
4405 {
4406 	if (allow_ptr_leaks)
4407 		return false;
4408 
4409 	return reg->type != SCALAR_VALUE;
4410 }
4411 
4412 static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
4413 					struct bpf_reg_state *src_reg)
4414 {
4415 	if (src_reg->type == SCALAR_VALUE && !src_reg->id &&
4416 	    !tnum_is_const(src_reg->var_off))
4417 		/* Ensure that src_reg has a valid ID that will be copied to
4418 		 * dst_reg and then will be used by find_equal_scalars() to
4419 		 * propagate min/max range.
4420 		 */
4421 		src_reg->id = ++env->id_gen;
4422 }
4423 
4424 /* Copy src state preserving dst->parent and dst->live fields */
4425 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
4426 {
4427 	struct bpf_reg_state *parent = dst->parent;
4428 	enum bpf_reg_liveness live = dst->live;
4429 
4430 	*dst = *src;
4431 	dst->parent = parent;
4432 	dst->live = live;
4433 }
4434 
4435 static void save_register_state(struct bpf_verifier_env *env,
4436 				struct bpf_func_state *state,
4437 				int spi, struct bpf_reg_state *reg,
4438 				int size)
4439 {
4440 	int i;
4441 
4442 	copy_register_state(&state->stack[spi].spilled_ptr, reg);
4443 	if (size == BPF_REG_SIZE)
4444 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
4445 
4446 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
4447 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
4448 
4449 	/* size < 8 bytes spill */
4450 	for (; i; i--)
4451 		mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]);
4452 }
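
/* Layout example (illustrative): after an aligned 4-byte scalar spill,
 * save_register_state() sets slot_type[4..7] to STACK_SPILL and passes
 * slot_type[0..3] to mark_stack_slot_misc() (typically becoming
 * STACK_MISC). REG_LIVE_WRITTEN is only set for full BPF_REG_SIZE spills,
 * keeping read propagation conservative for partial writes.
 */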
4453 
4454 static bool is_bpf_st_mem(struct bpf_insn *insn)
4455 {
4456 	return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
4457 }
4458 
4459 static int get_reg_width(struct bpf_reg_state *reg)
4460 {
4461 	return fls64(reg->umax_value);
4462 }
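
/* Example (illustrative): a reg with umax_value == 255 needs fls64(255) ==
 * 8 bits and thus fits even a 1-byte spill, while umax_value == 2^32 needs
 * 33 bits and no longer fits a 4-byte slot; see the reg_value_fits check
 * in check_stack_write_fixed_off() below.
 */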
4463 
4464 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
4465  * stack boundary and alignment are checked in check_mem_access()
4466  */
4467 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
4468 				       /* stack frame we're writing to */
4469 				       struct bpf_func_state *state,
4470 				       int off, int size, int value_regno,
4471 				       int insn_idx)
4472 {
4473 	struct bpf_func_state *cur; /* state of the current function */
4474 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
4475 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4476 	struct bpf_reg_state *reg = NULL;
4477 	int insn_flags = insn_stack_access_flags(state->frameno, spi);
4478 
4479 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
4480 	 * so it's aligned access and [off, off + size) are within stack limits
4481 	 */
4482 	if (!env->allow_ptr_leaks &&
4483 	    is_spilled_reg(&state->stack[spi]) &&
4484 	    size != BPF_REG_SIZE) {
4485 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
4486 		return -EACCES;
4487 	}
4488 
4489 	cur = env->cur_state->frame[env->cur_state->curframe];
4490 	if (value_regno >= 0)
4491 		reg = &cur->regs[value_regno];
4492 	if (!env->bypass_spec_v4) {
4493 		bool sanitize = reg && is_spillable_regtype(reg->type);
4494 
4495 		for (i = 0; i < size; i++) {
4496 			u8 type = state->stack[spi].slot_type[i];
4497 
4498 			if (type != STACK_MISC && type != STACK_ZERO) {
4499 				sanitize = true;
4500 				break;
4501 			}
4502 		}
4503 
4504 		if (sanitize)
4505 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
4506 	}
4507 
4508 	err = destroy_if_dynptr_stack_slot(env, state, spi);
4509 	if (err)
4510 		return err;
4511 
4512 	mark_stack_slot_scratched(env, spi);
4513 	if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
4514 		bool reg_value_fits;
4515 
4516 		reg_value_fits = get_reg_width(reg) <= BITS_PER_BYTE * size;
4517 		/* Make sure that reg had an ID to build a relation on spill. */
4518 		if (reg_value_fits)
4519 			assign_scalar_id_before_mov(env, reg);
4520 		save_register_state(env, state, spi, reg, size);
4521 		/* Break the relation on a narrowing spill. */
4522 		if (!reg_value_fits)
4523 			state->stack[spi].spilled_ptr.id = 0;
4524 	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
4525 		   env->bpf_capable) {
4526 		struct bpf_reg_state fake_reg = {};
4527 
4528 		__mark_reg_known(&fake_reg, insn->imm);
4529 		fake_reg.type = SCALAR_VALUE;
4530 		save_register_state(env, state, spi, &fake_reg, size);
4531 	} else if (reg && is_spillable_regtype(reg->type)) {
4532 		/* register containing pointer is being spilled into stack */
4533 		if (size != BPF_REG_SIZE) {
4534 			verbose_linfo(env, insn_idx, "; ");
4535 			verbose(env, "invalid size of register spill\n");
4536 			return -EACCES;
4537 		}
4538 		if (state != cur && reg->type == PTR_TO_STACK) {
4539 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
4540 			return -EINVAL;
4541 		}
4542 		save_register_state(env, state, spi, reg, size);
4543 	} else {
4544 		u8 type = STACK_MISC;
4545 
4546 		/* regular write of data into stack destroys any spilled ptr */
4547 		state->stack[spi].spilled_ptr.type = NOT_INIT;
4548 		/* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */
4549 		if (is_stack_slot_special(&state->stack[spi]))
4550 			for (i = 0; i < BPF_REG_SIZE; i++)
4551 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
4552 
4553 		/* only mark the slot as written if all 8 bytes were written,
4554 		 * otherwise read propagation may incorrectly stop too soon
4555 		 * when stack slots are partially written.
4556 		 * This heuristic means that read propagation will be
4557 		 * conservative, since it will add reg_live_read marks
4558 		 * to stack slots all the way to the first state when a program
4559 		 * writes+reads less than 8 bytes
4560 		 */
4561 		if (size == BPF_REG_SIZE)
4562 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
4563 
4564 		/* when we zero initialize stack slots mark them as such */
4565 		if ((reg && register_is_null(reg)) ||
4566 		    (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
4567 			/* STACK_ZERO case happened because register spill
4568 			 * wasn't properly aligned at the stack slot boundary,
4569 			 * so it's not a register spill anymore; force
4570 			 * originating register to be precise to make
4571 			 * STACK_ZERO correct for subsequent states
4572 			 */
4573 			err = mark_chain_precision(env, value_regno);
4574 			if (err)
4575 				return err;
4576 			type = STACK_ZERO;
4577 		}
4578 
4579 		/* Mark slots affected by this stack write. */
4580 		for (i = 0; i < size; i++)
4581 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type;
4582 		insn_flags = 0; /* not a register spill */
4583 	}
4584 
4585 	if (insn_flags)
4586 		return push_jmp_history(env, env->cur_state, insn_flags);
4587 	return 0;
4588 }
4589 
4590 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
4591  * known to contain a variable offset.
4592  * This function checks whether the write is permitted and conservatively
4593  * tracks the effects of the write, considering that each stack slot in the
4594  * dynamic range is potentially written to.
4595  *
4596  * 'off' includes 'regno->off'.
4597  * 'value_regno' can be -1, meaning that an unknown value is being written to
4598  * the stack.
4599  *
4600  * Spilled pointers in range are not marked as written because we don't know
4601  * what's going to be actually written. This means that read propagation for
4602  * future reads cannot be terminated by this write.
4603  *
4604  * For privileged programs, uninitialized stack slots are considered
4605  * initialized by this write (even though we don't know exactly what offsets
4606  * are going to be written to). The idea is that we don't want the verifier to
4607  * reject future reads that access slots written to through variable offsets.
4608  */
4609 static int check_stack_write_var_off(struct bpf_verifier_env *env,
4610 				     /* func where register points to */
4611 				     struct bpf_func_state *state,
4612 				     int ptr_regno, int off, int size,
4613 				     int value_regno, int insn_idx)
4614 {
4615 	struct bpf_func_state *cur; /* state of the current function */
4616 	int min_off, max_off;
4617 	int i, err;
4618 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
4619 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4620 	bool writing_zero = false;
4621 	/* set if the fact that we're writing a zero is used to let any
4622 	 * stack slots remain STACK_ZERO
4623 	 */
4624 	bool zero_used = false;
4625 
4626 	cur = env->cur_state->frame[env->cur_state->curframe];
4627 	ptr_reg = &cur->regs[ptr_regno];
4628 	min_off = ptr_reg->smin_value + off;
4629 	max_off = ptr_reg->smax_value + off + size;
4630 	if (value_regno >= 0)
4631 		value_reg = &cur->regs[value_regno];
4632 	if ((value_reg && register_is_null(value_reg)) ||
4633 	    (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
4634 		writing_zero = true;
4635 
4636 	for (i = min_off; i < max_off; i++) {
4637 		int spi;
4638 
4639 		spi = __get_spi(i);
4640 		err = destroy_if_dynptr_stack_slot(env, state, spi);
4641 		if (err)
4642 			return err;
4643 	}
4644 
4645 	/* Variable offset writes destroy any spilled pointers in range. */
4646 	for (i = min_off; i < max_off; i++) {
4647 		u8 new_type, *stype;
4648 		int slot, spi;
4649 
4650 		slot = -i - 1;
4651 		spi = slot / BPF_REG_SIZE;
4652 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
4653 		mark_stack_slot_scratched(env, spi);
4654 
4655 		if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
4656 			/* Reject the write if the range we may write to has not
4657 			 * been initialized beforehand. If we didn't reject
4658 			 * here, the ptr status would be erased below (even
4659 			 * though not all slots are actually overwritten),
4660 			 * possibly opening the door to leaks.
4661 			 *
4662 			 * We do however catch STACK_INVALID case below, and
4663 			 * only allow reading possibly uninitialized memory
4664 			 * later for CAP_PERFMON, as the write may not happen to
4665 			 * that slot.
4666 			 */
4667 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
4668 				insn_idx, i);
4669 			return -EINVAL;
4670 		}
4671 
4672 		/* If writing_zero and the spi slot contains a spill of value 0,
4673 		 * maintain the spill type.
4674 		 */
4675 		if (writing_zero && *stype == STACK_SPILL &&
4676 		    is_spilled_scalar_reg(&state->stack[spi])) {
4677 			struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr;
4678 
4679 			if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) {
4680 				zero_used = true;
4681 				continue;
4682 			}
4683 		}
4684 
4685 		/* Erase all other spilled pointers. */
4686 		state->stack[spi].spilled_ptr.type = NOT_INIT;
4687 
4688 		/* Update the slot type. */
4689 		new_type = STACK_MISC;
4690 		if (writing_zero && *stype == STACK_ZERO) {
4691 			new_type = STACK_ZERO;
4692 			zero_used = true;
4693 		}
4694 		/* If the slot is STACK_INVALID, we check whether it's OK to
4695 		 * pretend that it will be initialized by this write. The slot
4696 		 * might not actually be written to, and so if we mark it as
4697 		 * initialized, future reads might leak uninitialized memory.
4698 		 * For privileged programs, we will accept such reads to slots
4699 		 * that may or may not be written because, if we rejected
4700 		 * them, the error would be too confusing.
4701 		 */
4702 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
4703 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
4704 					insn_idx, i);
4705 			return -EINVAL;
4706 		}
4707 		*stype = new_type;
4708 	}
4709 	if (zero_used) {
4710 		/* backtracking doesn't work for STACK_ZERO yet. */
4711 		err = mark_chain_precision(env, value_regno);
4712 		if (err)
4713 			return err;
4714 	}
4715 	return 0;
4716 }
4717 
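/* Illustrative instruction pattern (macros from include/linux/filter.h;
 * the bounds on r1 are assumed to have been established earlier) that
 * ends up in check_stack_write_var_off():
 *
 *   BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),         // r2 = fp
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), // r1 in [-16, -8]
 *   BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),          // var-offset store of 0
 *
 * Since the immediate is 0, writing_zero is set: slots that are already
 * STACK_ZERO may stay STACK_ZERO, while spilled pointers anywhere in
 * [min_off, max_off) are conservatively erased.
 */
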
4718 /* When register 'dst_regno' is assigned some values from stack[min_off,
4719  * max_off), we set the register's type according to the types of the
4720  * respective stack slots. If all the stack values are known to be zeros, then
4721  * so is the destination reg. Otherwise, the register is considered to be
4722  * SCALAR. This function does not deal with register filling; the caller must
4723  * ensure that all spilled registers in the stack range have been marked as
4724  * read.
4725  */
4726 static void mark_reg_stack_read(struct bpf_verifier_env *env,
4727 				/* func where src register points to */
4728 				struct bpf_func_state *ptr_state,
4729 				int min_off, int max_off, int dst_regno)
4730 {
4731 	struct bpf_verifier_state *vstate = env->cur_state;
4732 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4733 	int i, slot, spi;
4734 	u8 *stype;
4735 	int zeros = 0;
4736 
4737 	for (i = min_off; i < max_off; i++) {
4738 		slot = -i - 1;
4739 		spi = slot / BPF_REG_SIZE;
4740 		mark_stack_slot_scratched(env, spi);
4741 		stype = ptr_state->stack[spi].slot_type;
4742 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
4743 			break;
4744 		zeros++;
4745 	}
4746 	if (zeros == max_off - min_off) {
4747 		/* Any access_size read into register is zero extended,
4748 		 * so the whole register == const_zero.
4749 		 */
4750 		__mark_reg_const_zero(env, &state->regs[dst_regno]);
4751 	} else {
4752 		/* have read misc data from the stack */
4753 		mark_reg_unknown(env, state->regs, dst_regno);
4754 	}
4755 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4756 }
4757 
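/* Worked example (slot contents assumed): for a 4-byte read covering
 * fp[-8, -4) where all four slot bytes are STACK_ZERO, zeros equals
 * max_off - min_off, so the destination register becomes a known
 * constant zero (stack loads are zero-extended to 64 bit). If even one
 * byte in the range is STACK_MISC, the destination is conservatively
 * marked as an unknown SCALAR_VALUE instead.
 */
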
4758 /* Read the stack at 'off' and put the results into the register indicated by
4759  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
4760  * spilled reg.
4761  *
4762  * 'dst_regno' can be -1, meaning that the read value is not going to a
4763  * register.
4764  *
4765  * The access is assumed to be within the current stack bounds.
4766  */
4767 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
4768 				      /* func where src register points to */
4769 				      struct bpf_func_state *reg_state,
4770 				      int off, int size, int dst_regno)
4771 {
4772 	struct bpf_verifier_state *vstate = env->cur_state;
4773 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4774 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
4775 	struct bpf_reg_state *reg;
4776 	u8 *stype, type;
4777 	int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
4778 
4779 	stype = reg_state->stack[spi].slot_type;
4780 	reg = &reg_state->stack[spi].spilled_ptr;
4781 
4782 	mark_stack_slot_scratched(env, spi);
4783 
4784 	if (is_spilled_reg(&reg_state->stack[spi])) {
4785 		u8 spill_size = 1;
4786 
4787 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
4788 			spill_size++;
4789 
4790 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
4791 			if (reg->type != SCALAR_VALUE) {
4792 				verbose_linfo(env, env->insn_idx, "; ");
4793 				verbose(env, "invalid size of register fill\n");
4794 				return -EACCES;
4795 			}
4796 
4797 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4798 			if (dst_regno < 0)
4799 				return 0;
4800 
4801 			if (size <= spill_size &&
4802 			    bpf_stack_narrow_access_ok(off, size, spill_size)) {
4803 				/* The earlier check_reg_arg() has decided the
4804 				 * subreg_def for this insn.  Save it first.
4805 				 */
4806 				s32 subreg_def = state->regs[dst_regno].subreg_def;
4807 
4808 				copy_register_state(&state->regs[dst_regno], reg);
4809 				state->regs[dst_regno].subreg_def = subreg_def;
4810 
4811 				/* Break the relation on a narrowing fill.
4812 				 * coerce_reg_to_size will adjust the boundaries.
4813 				 */
4814 				if (get_reg_width(reg) > size * BITS_PER_BYTE)
4815 					state->regs[dst_regno].id = 0;
4816 			} else {
4817 				int spill_cnt = 0, zero_cnt = 0;
4818 
4819 				for (i = 0; i < size; i++) {
4820 					type = stype[(slot - i) % BPF_REG_SIZE];
4821 					if (type == STACK_SPILL) {
4822 						spill_cnt++;
4823 						continue;
4824 					}
4825 					if (type == STACK_MISC)
4826 						continue;
4827 					if (type == STACK_ZERO) {
4828 						zero_cnt++;
4829 						continue;
4830 					}
4831 					if (type == STACK_INVALID && env->allow_uninit_stack)
4832 						continue;
4833 					verbose(env, "invalid read from stack off %d+%d size %d\n",
4834 						off, i, size);
4835 					return -EACCES;
4836 				}
4837 
4838 				if (spill_cnt == size &&
4839 				    tnum_is_const(reg->var_off) && reg->var_off.value == 0) {
4840 					__mark_reg_const_zero(env, &state->regs[dst_regno]);
4841 					/* this IS register fill, so keep insn_flags */
4842 				} else if (zero_cnt == size) {
4843 					/* similarly to mark_reg_stack_read(), preserve zeroes */
4844 					__mark_reg_const_zero(env, &state->regs[dst_regno]);
4845 					insn_flags = 0; /* not restoring original register state */
4846 				} else {
4847 					mark_reg_unknown(env, state->regs, dst_regno);
4848 					insn_flags = 0; /* not restoring original register state */
4849 				}
4850 			}
4851 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4852 		} else if (dst_regno >= 0) {
4853 			/* restore register state from stack */
4854 			copy_register_state(&state->regs[dst_regno], reg);
4855 			/* mark reg as written since spilled pointer state likely
4856 			 * has its liveness marks cleared by is_state_visited()
4857 			 * which resets stack/reg liveness for state transitions
4858 			 */
4859 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4860 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
4861 			/* If dst_regno==-1, the caller is asking us whether
4862 			 * it is acceptable to use this value as a SCALAR_VALUE
4863 			 * (e.g. for XADD).
4864 			 * We must not allow unprivileged callers to do that
4865 			 * with spilled pointers.
4866 			 */
4867 			verbose(env, "leaking pointer from stack off %d\n",
4868 				off);
4869 			return -EACCES;
4870 		}
4871 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4872 	} else {
4873 		for (i = 0; i < size; i++) {
4874 			type = stype[(slot - i) % BPF_REG_SIZE];
4875 			if (type == STACK_MISC)
4876 				continue;
4877 			if (type == STACK_ZERO)
4878 				continue;
4879 			if (type == STACK_INVALID && env->allow_uninit_stack)
4880 				continue;
4881 			verbose(env, "invalid read from stack off %d+%d size %d\n",
4882 				off, i, size);
4883 			return -EACCES;
4884 		}
4885 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4886 		if (dst_regno >= 0)
4887 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
4888 		insn_flags = 0; /* we are not restoring spilled register */
4889 	}
4890 	if (insn_flags)
4891 		return push_jmp_history(env, env->cur_state, insn_flags);
4892 	return 0;
4893 }
4894 
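/* Illustrative fills of a spilled scalar (offsets assumed; macros from
 * include/linux/filter.h):
 *
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), // spill r0 to fp-8
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), // full fill: spilled
 *                                                   // state copied as-is
 *   BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -8),  // narrow fill: only
 *                                                   // valid for scalars
 *
 * A narrow or misaligned fill of a spilled pointer fails above with
 * "invalid size of register fill".
 */
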
4895 enum bpf_access_src {
4896 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
4897 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
4898 };
4899 
4900 static int check_stack_range_initialized(struct bpf_verifier_env *env,
4901 					 int regno, int off, int access_size,
4902 					 bool zero_size_allowed,
4903 					 enum bpf_access_src type,
4904 					 struct bpf_call_arg_meta *meta);
4905 
4906 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
4907 {
4908 	return cur_regs(env) + regno;
4909 }
4910 
4911 /* Read the stack at 'ptr_regno + off' and put the result into the register
4912  * 'dst_regno'.
4913  * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
4914  * but not its variable offset.
4915  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
4916  *
4917  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
4918  * filling registers (i.e. reads of spilled register cannot be detected when
4919  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
4920  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
4921  * offset; for a fixed offset check_stack_read_fixed_off should be used
4922  * instead.
4923  */
4924 static int check_stack_read_var_off(struct bpf_verifier_env *env,
4925 				    int ptr_regno, int off, int size, int dst_regno)
4926 {
4927 	/* The state of the source register. */
4928 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4929 	struct bpf_func_state *ptr_state = func(env, reg);
4930 	int err;
4931 	int min_off, max_off;
4932 
4933 	/* Note that we pass a NULL meta, so raw access will not be permitted.
4934 	 */
4935 	err = check_stack_range_initialized(env, ptr_regno, off, size,
4936 					    false, ACCESS_DIRECT, NULL);
4937 	if (err)
4938 		return err;
4939 
4940 	min_off = reg->smin_value + off;
4941 	max_off = reg->smax_value + off;
4942 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
4943 	return 0;
4944 }
4945 
4946 /* check_stack_read dispatches to check_stack_read_fixed_off or
4947  * check_stack_read_var_off.
4948  *
4949  * The caller must ensure that the offset falls within the allocated stack
4950  * bounds.
4951  *
4952  * 'dst_regno' is a register which will receive the value from the stack. It
4953  * can be -1, meaning that the read value is not going to a register.
4954  */
4955 static int check_stack_read(struct bpf_verifier_env *env,
4956 			    int ptr_regno, int off, int size,
4957 			    int dst_regno)
4958 {
4959 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4960 	struct bpf_func_state *state = func(env, reg);
4961 	int err;
4962 	/* Some accesses are only permitted with a static offset. */
4963 	bool var_off = !tnum_is_const(reg->var_off);
4964 
4965 	/* The offset is required to be static when reads don't go to a
4966 	 * register, in order to not leak pointers (see
4967 	 * check_stack_read_fixed_off).
4968 	 */
4969 	if (dst_regno < 0 && var_off) {
4970 		char tn_buf[48];
4971 
4972 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4973 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
4974 			tn_buf, off, size);
4975 		return -EACCES;
4976 	}
4977 	/* Variable offset is prohibited for unprivileged mode for simplicity
4978 	 * since it requires corresponding support in Spectre masking for stack
4979 	 * ALU. See also retrieve_ptr_limit(). The check in
4980 	 * check_stack_access_for_ptr_arithmetic() called by
4981 	 * adjust_ptr_min_max_vals() prevents users from creating stack pointers
4982 	 * with variable offsets, therefore no check is required here. Further,
4983 	 * just checking it here would be insufficient as speculative stack
4984 	 * writes could still lead to unsafe speculative behaviour.
4985 	 */
4986 	if (!var_off) {
4987 		off += reg->var_off.value;
4988 		err = check_stack_read_fixed_off(env, state, off, size,
4989 						 dst_regno);
4990 	} else {
4991 		/* Variable offset stack reads need more conservative handling
4992 		 * than fixed offset ones. Note that dst_regno >= 0 on this
4993 		 * branch.
4994 		 */
4995 		err = check_stack_read_var_off(env, ptr_regno, off, size,
4996 					       dst_regno);
4997 	}
4998 	return err;
4999 }
5000 
5001 
5002 /* check_stack_write dispatches to check_stack_write_fixed_off or
5003  * check_stack_write_var_off.
5004  *
5005  * 'ptr_regno' is the register used as a pointer into the stack.
5006  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
5007  * 'value_regno' is the register whose value we're writing to the stack. It can
5008  * be -1, meaning that we're not writing from a register.
5009  *
5010  * The caller must ensure that the offset falls within the maximum stack size.
5011  */
5012 static int check_stack_write(struct bpf_verifier_env *env,
5013 			     int ptr_regno, int off, int size,
5014 			     int value_regno, int insn_idx)
5015 {
5016 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
5017 	struct bpf_func_state *state = func(env, reg);
5018 	int err;
5019 
5020 	if (tnum_is_const(reg->var_off)) {
5021 		off += reg->var_off.value;
5022 		err = check_stack_write_fixed_off(env, state, off, size,
5023 						  value_regno, insn_idx);
5024 	} else {
5025 		/* Variable offset stack reads need more conservative handling
5026 		 * than fixed offset ones.
5027 		 */
5028 		err = check_stack_write_var_off(env, state,
5029 						ptr_regno, off, size,
5030 						value_regno, insn_idx);
5031 	}
5032 	return err;
5033 }
5034 
5035 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
5036 				 int off, int size, enum bpf_access_type type)
5037 {
5038 	struct bpf_reg_state *regs = cur_regs(env);
5039 	struct bpf_map *map = regs[regno].map_ptr;
5040 	u32 cap = bpf_map_flags_to_cap(map);
5041 
5042 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
5043 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
5044 			map->value_size, off, size);
5045 		return -EACCES;
5046 	}
5047 
5048 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
5049 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
5050 			map->value_size, off, size);
5051 		return -EACCES;
5052 	}
5053 
5054 	return 0;
5055 }
5056 
5057 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
5058 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
5059 			      int off, int size, u32 mem_size,
5060 			      bool zero_size_allowed)
5061 {
5062 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
5063 	struct bpf_reg_state *reg;
5064 
5065 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
5066 		return 0;
5067 
5068 	reg = &cur_regs(env)[regno];
5069 	switch (reg->type) {
5070 	case PTR_TO_MAP_KEY:
5071 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
5072 			mem_size, off, size);
5073 		break;
5074 	case PTR_TO_MAP_VALUE:
5075 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
5076 			mem_size, off, size);
5077 		break;
5078 	case PTR_TO_PACKET:
5079 	case PTR_TO_PACKET_META:
5080 	case PTR_TO_PACKET_END:
5081 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
5082 			off, size, regno, reg->id, off, mem_size);
5083 		break;
5084 	case PTR_TO_MEM:
5085 	default:
5086 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
5087 			mem_size, off, size);
5088 	}
5089 
5090 	return -EACCES;
5091 }
5092 
5093 /* check read/write into a memory region with possible variable offset */
5094 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
5095 				   int off, int size, u32 mem_size,
5096 				   bool zero_size_allowed)
5097 {
5098 	struct bpf_verifier_state *vstate = env->cur_state;
5099 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
5100 	struct bpf_reg_state *reg = &state->regs[regno];
5101 	int err;
5102 
5103 	/* We may have adjusted the register pointing to memory region, so we
5104 	 * need to try adding each of min_value and max_value to off
5105 	 * to make sure our theoretical access will be safe.
5106 	 *
5107 	 * The minimum value is only important with signed
5108 	 * comparisons where we can't assume the floor of a
5109 	 * value is 0.  If we are using signed variables for our
5110 	 * index'es we need to make sure that whatever we use
5111 	 * will have a set floor within our range.
5112 	 */
5113 	if (reg->smin_value < 0 &&
5114 	    (reg->smin_value == S64_MIN ||
5115 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
5116 	      reg->smin_value + off < 0)) {
5117 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5118 			regno);
5119 		return -EACCES;
5120 	}
5121 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
5122 				 mem_size, zero_size_allowed);
5123 	if (err) {
5124 		verbose(env, "R%d min value is outside of the allowed memory range\n",
5125 			regno);
5126 		return err;
5127 	}
5128 
5129 	/* If we haven't set a max value then we need to bail since we can't be
5130 	 * sure we won't do bad things.
5131 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
5132 	 */
5133 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
5134 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
5135 			regno);
5136 		return -EACCES;
5137 	}
5138 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
5139 				 mem_size, zero_size_allowed);
5140 	if (err) {
5141 		verbose(env, "R%d max value is outside of the allowed memory range\n",
5142 			regno);
5143 		return err;
5144 	}
5145 
5146 	return 0;
5147 }
5148 
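/* Worked example (bounds assumed): with mem_size = 64, off = 8,
 * size = 4 and a register known to lie in [smin, umax] = [0, 32], both
 * probes pass: 8 + 0 + 4 <= 64 and 8 + 32 + 4 <= 64. With umax = 60
 * the max probe fails (8 + 60 + 4 > 64) and the access is rejected,
 * even though some runtime values would be in bounds.
 */
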
5149 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
5150 			       const struct bpf_reg_state *reg, int regno,
5151 			       bool fixed_off_ok)
5152 {
5153 	/* Access to this pointer-typed register or passing it to a helper
5154 	 * is only allowed in its original, unmodified form.
5155 	 */
5156 
5157 	if (reg->off < 0) {
5158 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
5159 			reg_type_str(env, reg->type), regno, reg->off);
5160 		return -EACCES;
5161 	}
5162 
5163 	if (!fixed_off_ok && reg->off) {
5164 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
5165 			reg_type_str(env, reg->type), regno, reg->off);
5166 		return -EACCES;
5167 	}
5168 
5169 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5170 		char tn_buf[48];
5171 
5172 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5173 		verbose(env, "variable %s access var_off=%s disallowed\n",
5174 			reg_type_str(env, reg->type), tn_buf);
5175 		return -EACCES;
5176 	}
5177 
5178 	return 0;
5179 }
5180 
5181 static int check_ptr_off_reg(struct bpf_verifier_env *env,
5182 		             const struct bpf_reg_state *reg, int regno)
5183 {
5184 	return __check_ptr_off_reg(env, reg, regno, false);
5185 }
5186 
5187 static int map_kptr_match_type(struct bpf_verifier_env *env,
5188 			       struct btf_field *kptr_field,
5189 			       struct bpf_reg_state *reg, u32 regno)
5190 {
5191 	const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
5192 	int perm_flags;
5193 	const char *reg_name = "";
5194 
5195 	if (btf_is_kernel(reg->btf)) {
5196 		perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;
5197 
5198 		/* Only unreferenced case accepts untrusted pointers */
5199 		if (kptr_field->type == BPF_KPTR_UNREF)
5200 			perm_flags |= PTR_UNTRUSTED;
5201 	} else {
5202 		perm_flags = PTR_MAYBE_NULL | MEM_ALLOC;
5203 		if (kptr_field->type == BPF_KPTR_PERCPU)
5204 			perm_flags |= MEM_PERCPU;
5205 	}
5206 
5207 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
5208 		goto bad_type;
5209 
5210 	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
5211 	reg_name = btf_type_name(reg->btf, reg->btf_id);
5212 
5213 	/* For ref_ptr case, release function check should ensure we get one
5214 	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
5215 	 * normal store of unreferenced kptr, we must ensure var_off is zero.
5216 	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
5217 	 * reg->off and reg->ref_obj_id are not needed here.
5218 	 */
5219 	if (__check_ptr_off_reg(env, reg, regno, true))
5220 		return -EACCES;
5221 
5222 	/* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and
5223 	 * we also need to take into account the reg->off.
5224 	 *
5225 	 * We want to support cases like:
5226 	 *
5227 	 * struct foo {
5228 	 *         struct bar br;
5229 	 *         struct baz bz;
5230 	 * };
5231 	 *
5232 	 * struct foo *v;
5233 	 * v = func();	      // PTR_TO_BTF_ID
5234 	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
5235 	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
5236 	 *                    // first member type of struct after comparison fails
5237 	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
5238 	 *                    // to match type
5239 	 *
5240 	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
5241 	 * is zero. We must also ensure that btf_struct_ids_match does not walk
5242 	 * the struct to match type against first member of struct, i.e. reject
5243 	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
5244 	 * strict mode to true for type match.
5245 	 */
5246 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
5247 				  kptr_field->kptr.btf, kptr_field->kptr.btf_id,
5248 				  kptr_field->type != BPF_KPTR_UNREF))
5249 		goto bad_type;
5250 	return 0;
5251 bad_type:
5252 	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
5253 		reg_type_str(env, reg->type), reg_name);
5254 	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
5255 	if (kptr_field->type == BPF_KPTR_UNREF)
5256 		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
5257 			targ_name);
5258 	else
5259 		verbose(env, "\n");
5260 	return -EINVAL;
5261 }
5262 
5263 static bool in_sleepable(struct bpf_verifier_env *env)
5264 {
5265 	return env->prog->aux->sleepable;
5266 }
5267 
5268 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
5269  * can dereference RCU protected pointers and result is PTR_TRUSTED.
5270  */
5271 static bool in_rcu_cs(struct bpf_verifier_env *env)
5272 {
5273 	return env->cur_state->active_rcu_lock ||
5274 	       env->cur_state->active_lock.ptr ||
5275 	       !in_sleepable(env);
5276 }
5277 
5278 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
5279 BTF_SET_START(rcu_protected_types)
5280 BTF_ID(struct, prog_test_ref_kfunc)
5281 #ifdef CONFIG_CGROUPS
5282 BTF_ID(struct, cgroup)
5283 #endif
5284 #ifdef CONFIG_BPF_JIT
5285 BTF_ID(struct, bpf_cpumask)
5286 #endif
5287 BTF_ID(struct, task_struct)
5288 BTF_SET_END(rcu_protected_types)
5289 
5290 static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
5291 {
5292 	if (!btf_is_kernel(btf))
5293 		return true;
5294 	return btf_id_set_contains(&rcu_protected_types, btf_id);
5295 }
5296 
5297 static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field)
5298 {
5299 	struct btf_struct_meta *meta;
5300 
5301 	if (btf_is_kernel(kptr_field->kptr.btf))
5302 		return NULL;
5303 
5304 	meta = btf_find_struct_meta(kptr_field->kptr.btf,
5305 				    kptr_field->kptr.btf_id);
5306 
5307 	return meta ? meta->record : NULL;
5308 }
5309 
5310 static bool rcu_safe_kptr(const struct btf_field *field)
5311 {
5312 	const struct btf_field_kptr *kptr = &field->kptr;
5313 
5314 	return field->type == BPF_KPTR_PERCPU ||
5315 	       (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id));
5316 }
5317 
5318 static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field)
5319 {
5320 	struct btf_record *rec;
5321 	u32 ret;
5322 
5323 	ret = PTR_MAYBE_NULL;
5324 	if (rcu_safe_kptr(kptr_field) && in_rcu_cs(env)) {
5325 		ret |= MEM_RCU;
5326 		if (kptr_field->type == BPF_KPTR_PERCPU)
5327 			ret |= MEM_PERCPU;
5328 		else if (!btf_is_kernel(kptr_field->kptr.btf))
5329 			ret |= MEM_ALLOC;
5330 
5331 		rec = kptr_pointee_btf_record(kptr_field);
5332 		if (rec && btf_record_has_field(rec, BPF_GRAPH_NODE))
5333 			ret |= NON_OWN_REF;
5334 	} else {
5335 		ret |= PTR_UNTRUSTED;
5336 	}
5337 
5338 	return ret;
5339 }
5340 
5341 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
5342 				 int value_regno, int insn_idx,
5343 				 struct btf_field *kptr_field)
5344 {
5345 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
5346 	int class = BPF_CLASS(insn->code);
5347 	struct bpf_reg_state *val_reg;
5348 
5349 	/* Things we already checked for in check_map_access and caller:
5350 	 *  - Reject cases where variable offset may touch kptr
5351 	 *  - size of access (must be BPF_DW)
5352 	 *  - tnum_is_const(reg->var_off)
5353 	 *  - kptr_field->offset == off + reg->var_off.value
5354 	 */
5355 	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
5356 	if (BPF_MODE(insn->code) != BPF_MEM) {
5357 		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
5358 		return -EACCES;
5359 	}
5360 
5361 	/* We only allow loading referenced kptr, since it will be marked as
5362 	 * untrusted, similar to unreferenced kptr.
5363 	 */
5364 	if (class != BPF_LDX &&
5365 	    (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) {
5366 		verbose(env, "store to referenced kptr disallowed\n");
5367 		return -EACCES;
5368 	}
5369 
5370 	if (class == BPF_LDX) {
5371 		val_reg = reg_state(env, value_regno);
5372 		/* We can simply mark the value_regno receiving the pointer
5373 		 * value from map as PTR_TO_BTF_ID, with the correct type.
5374 		 */
5375 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
5376 				kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field));
5377 		/* For mark_ptr_or_null_reg */
5378 		val_reg->id = ++env->id_gen;
5379 	} else if (class == BPF_STX) {
5380 		val_reg = reg_state(env, value_regno);
5381 		if (!register_is_null(val_reg) &&
5382 		    map_kptr_match_type(env, kptr_field, val_reg, value_regno))
5383 			return -EACCES;
5384 	} else if (class == BPF_ST) {
5385 		if (insn->imm) {
5386 			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
5387 				kptr_field->offset);
5388 			return -EACCES;
5389 		}
5390 	} else {
5391 		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
5392 		return -EACCES;
5393 	}
5394 	return 0;
5395 }
5396 
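/* Illustrative BPF C that exercises this function (struct, map and
 * variable names are assumed):
 *
 *   struct val { struct prog_test_ref_kfunc __kptr *p; };
 *
 *   struct val *v = bpf_map_lookup_elem(&m, &key);
 *   if (v) {
 *           struct prog_test_ref_kfunc *p = v->p; // BPF_LDX: p is marked
 *                                                 // PTR_TO_BTF_ID with
 *                                                 // MEM_RCU or PTR_UNTRUSTED
 *           v->p = p;  // BPF_STX to a referenced kptr: rejected above
 *   }
 *
 * Referenced kptrs can only be replaced through helpers such as
 * bpf_kptr_xchg().
 */
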
5397 /* check read/write into a map element with possible variable offset */
5398 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
5399 			    int off, int size, bool zero_size_allowed,
5400 			    enum bpf_access_src src)
5401 {
5402 	struct bpf_verifier_state *vstate = env->cur_state;
5403 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
5404 	struct bpf_reg_state *reg = &state->regs[regno];
5405 	struct bpf_map *map = reg->map_ptr;
5406 	struct btf_record *rec;
5407 	int err, i;
5408 
5409 	err = check_mem_region_access(env, regno, off, size, map->value_size,
5410 				      zero_size_allowed);
5411 	if (err)
5412 		return err;
5413 
5414 	if (IS_ERR_OR_NULL(map->record))
5415 		return 0;
5416 	rec = map->record;
5417 	for (i = 0; i < rec->cnt; i++) {
5418 		struct btf_field *field = &rec->fields[i];
5419 		u32 p = field->offset;
5420 
5421 		/* If any part of a field can be touched by load/store, reject
5422 		 * this program. To check that [x1, x2) overlaps with [y1, y2),
5423 		 * it is sufficient to check x1 < y2 && y1 < x2.
5424 		 */
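		/* Worked example (numbers assumed): a kptr field at p = 8
		 * occupies [8, 16). A 4-byte access with smin + off = 4 and
		 * umax + off = 12 spans [4, 16); since 4 < 16 && 8 < 16, the
		 * ranges overlap and the access falls into the switch below.
		 */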
5425 		if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
5426 		    p < reg->umax_value + off + size) {
5427 			switch (field->type) {
5428 			case BPF_KPTR_UNREF:
5429 			case BPF_KPTR_REF:
5430 			case BPF_KPTR_PERCPU:
5431 				if (src != ACCESS_DIRECT) {
5432 					verbose(env, "kptr cannot be accessed indirectly by helper\n");
5433 					return -EACCES;
5434 				}
5435 				if (!tnum_is_const(reg->var_off)) {
5436 					verbose(env, "kptr access cannot have variable offset\n");
5437 					return -EACCES;
5438 				}
5439 				if (p != off + reg->var_off.value) {
5440 					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
5441 						p, off + reg->var_off.value);
5442 					return -EACCES;
5443 				}
5444 				if (size != bpf_size_to_bytes(BPF_DW)) {
5445 					verbose(env, "kptr access size must be BPF_DW\n");
5446 					return -EACCES;
5447 				}
5448 				break;
5449 			default:
5450 				verbose(env, "%s cannot be accessed directly by load/store\n",
5451 					btf_field_type_name(field->type));
5452 				return -EACCES;
5453 			}
5454 		}
5455 	}
5456 	return 0;
5457 }
5458 
5459 #define MAX_PACKET_OFF 0xffff
5460 
5461 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
5462 				       const struct bpf_call_arg_meta *meta,
5463 				       enum bpf_access_type t)
5464 {
5465 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
5466 
5467 	switch (prog_type) {
5468 	/* Program types only with direct read access go here! */
5469 	case BPF_PROG_TYPE_LWT_IN:
5470 	case BPF_PROG_TYPE_LWT_OUT:
5471 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
5472 	case BPF_PROG_TYPE_SK_REUSEPORT:
5473 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
5474 	case BPF_PROG_TYPE_CGROUP_SKB:
5475 		if (t == BPF_WRITE)
5476 			return false;
5477 		fallthrough;
5478 
5479 	/* Program types with direct read + write access go here! */
5480 	case BPF_PROG_TYPE_SCHED_CLS:
5481 	case BPF_PROG_TYPE_SCHED_ACT:
5482 	case BPF_PROG_TYPE_XDP:
5483 	case BPF_PROG_TYPE_LWT_XMIT:
5484 	case BPF_PROG_TYPE_SK_SKB:
5485 	case BPF_PROG_TYPE_SK_MSG:
5486 		if (meta)
5487 			return meta->pkt_access;
5488 
5489 		env->seen_direct_write = true;
5490 		return true;
5491 
5492 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5493 		if (t == BPF_WRITE)
5494 			env->seen_direct_write = true;
5495 
5496 		return true;
5497 
5498 	default:
5499 		return false;
5500 	}
5501 }
5502 
5503 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
5504 			       int size, bool zero_size_allowed)
5505 {
5506 	struct bpf_reg_state *regs = cur_regs(env);
5507 	struct bpf_reg_state *reg = &regs[regno];
5508 	int err;
5509 
5510 	/* We may have added a variable offset to the packet pointer; but any
5511 	 * reg->range we have comes after that.  We are only checking the fixed
5512 	 * offset.
5513 	 */
5514 
5515 	/* We don't allow negative numbers, because we aren't tracking enough
5516 	 * detail to prove they're safe.
5517 	 */
5518 	if (reg->smin_value < 0) {
5519 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5520 			regno);
5521 		return -EACCES;
5522 	}
5523 
5524 	err = reg->range < 0 ? -EINVAL :
5525 	      __check_mem_access(env, regno, off, size, reg->range,
5526 				 zero_size_allowed);
5527 	if (err) {
5528 		verbose(env, "R%d offset is outside of the packet\n", regno);
5529 		return err;
5530 	}
5531 
5532 	/* __check_mem_access has made sure "off + size - 1" is within u16.
5533 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
5534 	 * otherwise find_good_pkt_pointers would have refused to set range info,
5535 	 * and __check_mem_access would have rejected this pkt access.
5536 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
5537 	 */
5538 	env->prog->aux->max_pkt_offset =
5539 		max_t(u32, env->prog->aux->max_pkt_offset,
5540 		      off + reg->umax_value + size - 1);
5541 
5542 	return err;
5543 }
5544 
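/* Illustrative BPF C showing how reg->range gets established before
 * this check runs (ctx field names as in __sk_buff/xdp_md):
 *
 *   void *data = (void *)(long)ctx->data;
 *   void *data_end = (void *)(long)ctx->data_end;
 *
 *   if (data + 14 > data_end)   // find_good_pkt_pointers() sets
 *           return 0;           // range = 14 on the fall-through path
 *   struct ethhdr *eth = data;  // fixed-offset loads in [0, 14) pass
 */
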
5545 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
5546 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
5547 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
5548 			    struct btf **btf, u32 *btf_id)
5549 {
5550 	struct bpf_insn_access_aux info = {
5551 		.reg_type = *reg_type,
5552 		.log = &env->log,
5553 	};
5554 
5555 	if (env->ops->is_valid_access &&
5556 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
5557 		/* A non-zero info.ctx_field_size indicates that this field is a
5558 		 * candidate for later verifier transformation to load the whole
5559 		 * field and then apply a mask when accessed with a narrower
5560 		 * access than actual ctx access size. A zero info.ctx_field_size
5561 		 * will only allow for whole field access and rejects any other
5562 		 * type of narrower access.
5563 		 */
5564 		*reg_type = info.reg_type;
5565 
5566 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
5567 			*btf = info.btf;
5568 			*btf_id = info.btf_id;
5569 		} else {
5570 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
5571 		}
5572 		/* remember the offset of last byte accessed in ctx */
5573 		if (env->prog->aux->max_ctx_offset < off + size)
5574 			env->prog->aux->max_ctx_offset = off + size;
5575 		return 0;
5576 	}
5577 
5578 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
5579 	return -EACCES;
5580 }
5581 
5582 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
5583 				  int size)
5584 {
5585 	if (size < 0 || off < 0 ||
5586 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
5587 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
5588 			off, size);
5589 		return -EACCES;
5590 	}
5591 	return 0;
5592 }
5593 
5594 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
5595 			     u32 regno, int off, int size,
5596 			     enum bpf_access_type t)
5597 {
5598 	struct bpf_reg_state *regs = cur_regs(env);
5599 	struct bpf_reg_state *reg = &regs[regno];
5600 	struct bpf_insn_access_aux info = {};
5601 	bool valid;
5602 
5603 	if (reg->smin_value < 0) {
5604 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5605 			regno);
5606 		return -EACCES;
5607 	}
5608 
5609 	switch (reg->type) {
5610 	case PTR_TO_SOCK_COMMON:
5611 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
5612 		break;
5613 	case PTR_TO_SOCKET:
5614 		valid = bpf_sock_is_valid_access(off, size, t, &info);
5615 		break;
5616 	case PTR_TO_TCP_SOCK:
5617 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
5618 		break;
5619 	case PTR_TO_XDP_SOCK:
5620 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
5621 		break;
5622 	default:
5623 		valid = false;
5624 	}
5625 
5626 
5627 	if (valid) {
5628 		env->insn_aux_data[insn_idx].ctx_field_size =
5629 			info.ctx_field_size;
5630 		return 0;
5631 	}
5632 
5633 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
5634 		regno, reg_type_str(env, reg->type), off, size);
5635 
5636 	return -EACCES;
5637 }
5638 
5639 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
5640 {
5641 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
5642 }
5643 
5644 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
5645 {
5646 	const struct bpf_reg_state *reg = reg_state(env, regno);
5647 
5648 	return reg->type == PTR_TO_CTX;
5649 }
5650 
5651 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
5652 {
5653 	const struct bpf_reg_state *reg = reg_state(env, regno);
5654 
5655 	return type_is_sk_pointer(reg->type);
5656 }
5657 
5658 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
5659 {
5660 	const struct bpf_reg_state *reg = reg_state(env, regno);
5661 
5662 	return type_is_pkt_pointer(reg->type);
5663 }
5664 
5665 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
5666 {
5667 	const struct bpf_reg_state *reg = reg_state(env, regno);
5668 
5669 	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
5670 	return reg->type == PTR_TO_FLOW_KEYS;
5671 }
5672 
5673 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
5674 #ifdef CONFIG_NET
5675 	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
5676 	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5677 	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
5678 #endif
5679 	[CONST_PTR_TO_MAP] = btf_bpf_map_id,
5680 };
5681 
5682 static bool is_trusted_reg(const struct bpf_reg_state *reg)
5683 {
5684 	/* A referenced register is always trusted. */
5685 	if (reg->ref_obj_id)
5686 		return true;
5687 
5688 	/* Types listed in the reg2btf_ids are always trusted */
5689 	if (reg2btf_ids[base_type(reg->type)])
5690 		return true;
5691 
5692 	/* If a register is not referenced, it is trusted if it has the
5693 	 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
5694 	 * other type modifiers may be safe, but we elect to take an opt-in
5695 	 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
5696 	 * not.
5697 	 *
5698 	 * Eventually, we should make PTR_TRUSTED the single source of truth
5699 	 * for whether a register is trusted.
5700 	 */
5701 	return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
5702 	       !bpf_type_has_unsafe_modifiers(reg->type);
5703 }
5704 
5705 static bool is_rcu_reg(const struct bpf_reg_state *reg)
5706 {
5707 	return reg->type & MEM_RCU;
5708 }
5709 
5710 static void clear_trusted_flags(enum bpf_type_flag *flag)
5711 {
5712 	*flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU);
5713 }
5714 
5715 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
5716 				   const struct bpf_reg_state *reg,
5717 				   int off, int size, bool strict)
5718 {
5719 	struct tnum reg_off;
5720 	int ip_align;
5721 
5722 	/* Byte size accesses are always allowed. */
5723 	if (!strict || size == 1)
5724 		return 0;
5725 
5726 	/* For platforms that do not have a Kconfig enabling
5727 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
5728 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
5729 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
5730 	 * to this code only in strict mode where we want to emulate
5731 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
5732 	 * unconditional IP align value of '2'.
5733 	 */
5734 	ip_align = 2;
5735 
5736 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
5737 	if (!tnum_is_aligned(reg_off, size)) {
5738 		char tn_buf[48];
5739 
5740 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5741 		verbose(env,
5742 			"misaligned packet access off %d+%s+%d+%d size %d\n",
5743 			ip_align, tn_buf, reg->off, off, size);
5744 		return -EACCES;
5745 	}
5746 
5747 	return 0;
5748 }
5749 
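/* Worked example (offsets assumed): with reg->off = 0 and a known
 * var_off of 0, a 4-byte load at off = 14 (just past an Ethernet
 * header) gives reg_off = 2 + 0 + 14 = 16, which is 4-byte aligned and
 * accepted. The same load at off = 12 gives 14, which is not 4-byte
 * aligned and is rejected in strict mode.
 */
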
5750 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
5751 				       const struct bpf_reg_state *reg,
5752 				       const char *pointer_desc,
5753 				       int off, int size, bool strict)
5754 {
5755 	struct tnum reg_off;
5756 
5757 	/* Byte size accesses are always allowed. */
5758 	if (!strict || size == 1)
5759 		return 0;
5760 
5761 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
5762 	if (!tnum_is_aligned(reg_off, size)) {
5763 		char tn_buf[48];
5764 
5765 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5766 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
5767 			pointer_desc, tn_buf, reg->off, off, size);
5768 		return -EACCES;
5769 	}
5770 
5771 	return 0;
5772 }
5773 
5774 static int check_ptr_alignment(struct bpf_verifier_env *env,
5775 			       const struct bpf_reg_state *reg, int off,
5776 			       int size, bool strict_alignment_once)
5777 {
5778 	bool strict = env->strict_alignment || strict_alignment_once;
5779 	const char *pointer_desc = "";
5780 
5781 	switch (reg->type) {
5782 	case PTR_TO_PACKET:
5783 	case PTR_TO_PACKET_META:
5784 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
5785 		 * right in front, treat it the very same way.
5786 		 */
5787 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
5788 	case PTR_TO_FLOW_KEYS:
5789 		pointer_desc = "flow keys ";
5790 		break;
5791 	case PTR_TO_MAP_KEY:
5792 		pointer_desc = "key ";
5793 		break;
5794 	case PTR_TO_MAP_VALUE:
5795 		pointer_desc = "value ";
5796 		break;
5797 	case PTR_TO_CTX:
5798 		pointer_desc = "context ";
5799 		break;
5800 	case PTR_TO_STACK:
5801 		pointer_desc = "stack ";
5802 		/* The stack spill tracking logic in check_stack_write_fixed_off()
5803 		 * and check_stack_read_fixed_off() relies on stack accesses being
5804 		 * aligned.
5805 		 */
5806 		strict = true;
5807 		break;
5808 	case PTR_TO_SOCKET:
5809 		pointer_desc = "sock ";
5810 		break;
5811 	case PTR_TO_SOCK_COMMON:
5812 		pointer_desc = "sock_common ";
5813 		break;
5814 	case PTR_TO_TCP_SOCK:
5815 		pointer_desc = "tcp_sock ";
5816 		break;
5817 	case PTR_TO_XDP_SOCK:
5818 		pointer_desc = "xdp_sock ";
5819 		break;
5820 	default:
5821 		break;
5822 	}
5823 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
5824 					   strict);
5825 }
5826 
5827 static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
5828 {
5829 	if (env->prog->jit_requested)
5830 		return round_up(stack_depth, 16);
5831 
5832 	/* round up to 32 bytes, since this is the granularity
5833 	 * of the interpreter stack size
5834 	 */
5835 	return round_up(max_t(u32, stack_depth, 1), 32);
5836 }
5837 
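/* E.g. a subprog with stack_depth = 100 is accounted as 112 bytes when
 * JITed (rounded up to 16) but as 128 bytes when interpreted (rounded
 * up to 32); even stack_depth = 0 costs 32 bytes in the interpreter
 * case.
 */
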
5838 /* starting from main bpf function walk all instructions of the function
5839  * and recursively walk all callees that given function can call.
5840  * Ignore jump and exit insns.
5841  * Since recursion is prevented by check_cfg() this algorithm
5842  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
5843  */
5844 static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
5845 {
5846 	struct bpf_subprog_info *subprog = env->subprog_info;
5847 	struct bpf_insn *insn = env->prog->insnsi;
5848 	int depth = 0, frame = 0, i, subprog_end;
5849 	bool tail_call_reachable = false;
5850 	int ret_insn[MAX_CALL_FRAMES];
5851 	int ret_prog[MAX_CALL_FRAMES];
5852 	int j;
5853 
5854 	i = subprog[idx].start;
5855 process_func:
5856 	/* protect against potential stack overflow that might happen when
5857 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
5858 	 * depth for such case down to 256 so that the worst case scenario
5859 	 * would result in 8k stack size (32 which is tailcall limit * 256 =
5860 	 * 8k).
5861 	 *
5862 	 * To get the idea what might happen, see an example:
5863 	 * func1 -> sub rsp, 128
5864 	 *  subfunc1 -> sub rsp, 256
5865 	 *  tailcall1 -> add rsp, 256
5866 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
5867 	 *   subfunc2 -> sub rsp, 64
5868 	 *   subfunc22 -> sub rsp, 128
5869 	 *   tailcall2 -> add rsp, 128
5870 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
5871 	 *
5872 	 * tailcall will unwind the current stack frame but it will not get rid
5873 	 * of caller's stack as shown on the example above.
5874 	 */
5875 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
5876 		verbose(env,
5877 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
5878 			depth);
5879 		return -EACCES;
5880 	}
5881 	depth += round_up_stack_depth(env, subprog[idx].stack_depth);
5882 	if (depth > MAX_BPF_STACK) {
5883 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
5884 			frame + 1, depth);
5885 		return -EACCES;
5886 	}
5887 continue_func:
5888 	subprog_end = subprog[idx + 1].start;
5889 	for (; i < subprog_end; i++) {
5890 		int next_insn, sidx;
5891 
5892 		if (bpf_pseudo_kfunc_call(insn + i) && !insn[i].off) {
5893 			bool err = false;
5894 
5895 			if (!is_bpf_throw_kfunc(insn + i))
5896 				continue;
5897 			if (subprog[idx].is_cb)
5898 				err = true;
5899 			for (int c = 0; c < frame && !err; c++) {
5900 				if (subprog[ret_prog[c]].is_cb) {
5901 					err = true;
5902 					break;
5903 				}
5904 			}
5905 			if (!err)
5906 				continue;
5907 			verbose(env,
5908 				"bpf_throw kfunc (insn %d) cannot be called from callback subprog %d\n",
5909 				i, idx);
5910 			return -EINVAL;
5911 		}
5912 
5913 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
5914 			continue;
5915 		/* remember insn and function to return to */
5916 		ret_insn[frame] = i + 1;
5917 		ret_prog[frame] = idx;
5918 
5919 		/* find the callee */
5920 		next_insn = i + insn[i].imm + 1;
5921 		sidx = find_subprog(env, next_insn);
5922 		if (sidx < 0) {
5923 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5924 				  next_insn);
5925 			return -EFAULT;
5926 		}
5927 		if (subprog[sidx].is_async_cb) {
5928 			if (subprog[sidx].has_tail_call) {
5929 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
5930 				return -EFAULT;
5931 			}
5932 			/* async callbacks don't increase bpf prog stack size unless called directly */
5933 			if (!bpf_pseudo_call(insn + i))
5934 				continue;
5935 			if (subprog[sidx].is_exception_cb) {
5936 				verbose(env, "insn %d cannot call exception cb directly\n", i);
5937 				return -EINVAL;
5938 			}
5939 		}
5940 		i = next_insn;
5941 		idx = sidx;
5942 
5943 		if (subprog[idx].has_tail_call)
5944 			tail_call_reachable = true;
5945 
5946 		frame++;
5947 		if (frame >= MAX_CALL_FRAMES) {
5948 			verbose(env, "the call stack of %d frames is too deep !\n",
5949 				frame);
5950 			return -E2BIG;
5951 		}
5952 		goto process_func;
5953 	}
5954 	/* if tail call got detected across bpf2bpf calls then mark each of the
5955 	 * currently present subprog frames as tail call reachable subprogs;
5956 	 * this info will be utilized by JIT so that we will be preserving the
5957 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
5958 	 */
5959 	if (tail_call_reachable)
5960 		for (j = 0; j < frame; j++) {
5961 			if (subprog[ret_prog[j]].is_exception_cb) {
5962 				verbose(env, "cannot tail call within exception cb\n");
5963 				return -EINVAL;
5964 			}
5965 			subprog[ret_prog[j]].tail_call_reachable = true;
5966 		}
5967 	if (subprog[0].tail_call_reachable)
5968 		env->prog->aux->tail_call_reachable = true;
5969 
5970 	/* end of for() loop means the last insn of the 'subprog'
5971 	 * was reached. Doesn't matter whether it was JA or EXIT
5972 	 */
5973 	if (frame == 0)
5974 		return 0;
5975 	depth -= round_up_stack_depth(env, subprog[idx].stack_depth);
5976 	frame--;
5977 	i = ret_insn[frame];
5978 	idx = ret_prog[frame];
5979 	goto continue_func;
5980 }
5981 
5982 static int check_max_stack_depth(struct bpf_verifier_env *env)
5983 {
5984 	struct bpf_subprog_info *si = env->subprog_info;
5985 	int ret;
5986 
5987 	for (int i = 0; i < env->subprog_cnt; i++) {
5988 		if (!i || si[i].is_async_cb) {
5989 			ret = check_max_stack_depth_subprog(env, i);
5990 			if (ret < 0)
5991 				return ret;
5992 		}
5993 		continue;
5994 	}
5995 	return 0;
5996 }
5997 
5998 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
5999 static int get_callee_stack_depth(struct bpf_verifier_env *env,
6000 				  const struct bpf_insn *insn, int idx)
6001 {
6002 	int start = idx + insn->imm + 1, subprog;
6003 
6004 	subprog = find_subprog(env, start);
6005 	if (subprog < 0) {
6006 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
6007 			  start);
6008 		return -EFAULT;
6009 	}
6010 	return env->subprog_info[subprog].stack_depth;
6011 }
6012 #endif
6013 
6014 static int __check_buffer_access(struct bpf_verifier_env *env,
6015 				 const char *buf_info,
6016 				 const struct bpf_reg_state *reg,
6017 				 int regno, int off, int size)
6018 {
6019 	if (off < 0) {
6020 		verbose(env,
6021 			"R%d invalid %s buffer access: off=%d, size=%d\n",
6022 			regno, buf_info, off, size);
6023 		return -EACCES;
6024 	}
6025 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
6026 		char tn_buf[48];
6027 
6028 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6029 		verbose(env,
6030 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
6031 			regno, off, tn_buf);
6032 		return -EACCES;
6033 	}
6034 
6035 	return 0;
6036 }
6037 
6038 static int check_tp_buffer_access(struct bpf_verifier_env *env,
6039 				  const struct bpf_reg_state *reg,
6040 				  int regno, int off, int size)
6041 {
6042 	int err;
6043 
6044 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
6045 	if (err)
6046 		return err;
6047 
6048 	if (off + size > env->prog->aux->max_tp_access)
6049 		env->prog->aux->max_tp_access = off + size;
6050 
6051 	return 0;
6052 }
6053 
6054 static int check_buffer_access(struct bpf_verifier_env *env,
6055 			       const struct bpf_reg_state *reg,
6056 			       int regno, int off, int size,
6057 			       bool zero_size_allowed,
6058 			       u32 *max_access)
6059 {
6060 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
6061 	int err;
6062 
6063 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
6064 	if (err)
6065 		return err;
6066 
6067 	if (off + size > *max_access)
6068 		*max_access = off + size;
6069 
6070 	return 0;
6071 }
6072 
6073 /* BPF architecture zero-extends alu32 ops into 64-bit registers */
6074 static void zext_32_to_64(struct bpf_reg_state *reg)
6075 {
6076 	reg->var_off = tnum_subreg(reg->var_off);
6077 	__reg_assign_32_into_64(reg);
6078 }
6079 
6080 /* truncate register to smaller size (in bytes)
6081  * must be called with size < BPF_REG_SIZE
6082  */
6083 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
6084 {
6085 	u64 mask;
6086 
6087 	/* clear high bits in bit representation */
6088 	reg->var_off = tnum_cast(reg->var_off, size);
6089 
6090 	/* fix arithmetic bounds */
6091 	mask = ((u64)1 << (size * 8)) - 1;
6092 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
6093 		reg->umin_value &= mask;
6094 		reg->umax_value &= mask;
6095 	} else {
6096 		reg->umin_value = 0;
6097 		reg->umax_value = mask;
6098 	}
6099 	reg->smin_value = reg->umin_value;
6100 	reg->smax_value = reg->umax_value;
6101 
6102 	/* If size is smaller than a 32-bit register, the 32-bit register
6103 	 * values are also truncated, so we push the 64-bit bounds into the
6104 	 * 32-bit bounds. The bounds above were already truncated to < 32 bits.
6105 	 */
6106 	if (size < 4)
6107 		__mark_reg32_unbounded(reg);
6108 
6109 	reg_bounds_sync(reg);
6110 }
6111 
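/* Worked example (bounds assumed): truncating a register known to lie
 * in [0x1fe, 0x201] to size = 1 keeps only the low byte. Since
 * umin & ~0xff (0x100) != umax & ~0xff (0x200), the low byte wraps
 * across the range and the bounds collapse to the full [0, 0xff].
 */
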
6112 static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
6113 {
6114 	if (size == 1) {
6115 		reg->smin_value = reg->s32_min_value = S8_MIN;
6116 		reg->smax_value = reg->s32_max_value = S8_MAX;
6117 	} else if (size == 2) {
6118 		reg->smin_value = reg->s32_min_value = S16_MIN;
6119 		reg->smax_value = reg->s32_max_value = S16_MAX;
6120 	} else {
6121 		/* size == 4 */
6122 		reg->smin_value = reg->s32_min_value = S32_MIN;
6123 		reg->smax_value = reg->s32_max_value = S32_MAX;
6124 	}
6125 	reg->umin_value = reg->u32_min_value = 0;
6126 	reg->umax_value = U64_MAX;
6127 	reg->u32_max_value = U32_MAX;
6128 	reg->var_off = tnum_unknown;
6129 }
6130 
6131 static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
6132 {
6133 	s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval;
6134 	u64 top_smax_value, top_smin_value;
6135 	u64 num_bits = size * 8;
6136 
6137 	if (tnum_is_const(reg->var_off)) {
6138 		u64_cval = reg->var_off.value;
6139 		if (size == 1)
6140 			reg->var_off = tnum_const((s8)u64_cval);
6141 		else if (size == 2)
6142 			reg->var_off = tnum_const((s16)u64_cval);
6143 		else
6144 			/* size == 4 */
6145 			reg->var_off = tnum_const((s32)u64_cval);
6146 
6147 		u64_cval = reg->var_off.value;
6148 		reg->smax_value = reg->smin_value = u64_cval;
6149 		reg->umax_value = reg->umin_value = u64_cval;
6150 		reg->s32_max_value = reg->s32_min_value = u64_cval;
6151 		reg->u32_max_value = reg->u32_min_value = u64_cval;
6152 		return;
6153 	}
6154 
6155 	top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
6156 	top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;
6157 
6158 	if (top_smax_value != top_smin_value)
6159 		goto out;
6160 
6161 	/* find the s64_max and s64_min after sign extension */
6162 	if (size == 1) {
6163 		init_s64_max = (s8)reg->smax_value;
6164 		init_s64_min = (s8)reg->smin_value;
6165 	} else if (size == 2) {
6166 		init_s64_max = (s16)reg->smax_value;
6167 		init_s64_min = (s16)reg->smin_value;
6168 	} else {
6169 		init_s64_max = (s32)reg->smax_value;
6170 		init_s64_min = (s32)reg->smin_value;
6171 	}
6172 
6173 	s64_max = max(init_s64_max, init_s64_min);
6174 	s64_min = min(init_s64_max, init_s64_min);
6175 
6176 	/* both of s64_max/s64_min positive or negative */
6177 	if ((s64_max >= 0) == (s64_min >= 0)) {
6178 		reg->smin_value = reg->s32_min_value = s64_min;
6179 		reg->smax_value = reg->s32_max_value = s64_max;
6180 		reg->umin_value = reg->u32_min_value = s64_min;
6181 		reg->umax_value = reg->u32_max_value = s64_max;
6182 		reg->var_off = tnum_range(s64_min, s64_max);
6183 		return;
6184 	}
6185 
6186 out:
6187 	set_sext64_default_val(reg, size);
6188 }
6189 
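/* Worked example (bounds assumed) for size = 1: a register in
 * [0x10, 0x20] has identical top bits, and (s8)0x10 = 16 and
 * (s8)0x20 = 32 share a sign, so the sign-extended range stays
 * [16, 32]. A register in [0x70, 0x90] yields (s8)0x70 = 112 but
 * (s8)0x90 = -112; the signs differ, so the conservative
 * set_sext64_default_val() bounds are used instead.
 */
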
6190 static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
6191 {
6192 	if (size == 1) {
6193 		reg->s32_min_value = S8_MIN;
6194 		reg->s32_max_value = S8_MAX;
6195 	} else {
6196 		/* size == 2 */
6197 		reg->s32_min_value = S16_MIN;
6198 		reg->s32_max_value = S16_MAX;
6199 	}
6200 	reg->u32_min_value = 0;
6201 	reg->u32_max_value = U32_MAX;
6202 }
6203 
6204 static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
6205 {
6206 	s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val;
6207 	u32 top_smax_value, top_smin_value;
6208 	u32 num_bits = size * 8;
6209 
6210 	if (tnum_is_const(reg->var_off)) {
6211 		u32_val = reg->var_off.value;
6212 		if (size == 1)
6213 			reg->var_off = tnum_const((s8)u32_val);
6214 		else
6215 			reg->var_off = tnum_const((s16)u32_val);
6216 
6217 		u32_val = reg->var_off.value;
6218 		reg->s32_min_value = reg->s32_max_value = u32_val;
6219 		reg->u32_min_value = reg->u32_max_value = u32_val;
6220 		return;
6221 	}
6222 
6223 	top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits;
6224 	top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits;
6225 
6226 	if (top_smax_value != top_smin_value)
6227 		goto out;
6228 
6229 	/* find the s32_max and s32_min after sign extension */
6230 	if (size == 1) {
6231 		init_s32_max = (s8)reg->s32_max_value;
6232 		init_s32_min = (s8)reg->s32_min_value;
6233 	} else {
6234 		/* size == 2 */
6235 		init_s32_max = (s16)reg->s32_max_value;
6236 		init_s32_min = (s16)reg->s32_min_value;
6237 	}
6238 	s32_max = max(init_s32_max, init_s32_min);
6239 	s32_min = min(init_s32_max, init_s32_min);
6240 
6241 	if ((s32_min >= 0) == (s32_max >= 0)) {
6242 		reg->s32_min_value = s32_min;
6243 		reg->s32_max_value = s32_max;
6244 		reg->u32_min_value = (u32)s32_min;
6245 		reg->u32_max_value = (u32)s32_max;
6246 		return;
6247 	}
6248 
6249 out:
6250 	set_sext32_default_val(reg, size);
6251 }
6252 
6253 static bool bpf_map_is_rdonly(const struct bpf_map *map)
6254 {
6255 	/* A map is considered read-only if the following conditions are true:
6256 	 *
6257 	 * 1) BPF program side cannot change any of the map content. The
6258 	 *    BPF_F_RDONLY_PROG flag was set at map creation time and stays
6259 	 *    set throughout the map's lifetime.
6260 	 * 2) The map value(s) have been initialized from user space by a
6261 	 *    loader and then "frozen", such that no new map update/delete
6262 	 *    operations from syscall side are possible for the rest of
6263 	 *    the map's lifetime from that point onwards.
6264 	 * 3) Any parallel/pending map update/delete operations from syscall
6265 	 *    side have been completed. Only after that point, it's safe to
6266 	 *    assume that map value(s) are immutable.
6267 	 */
6268 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
6269 	       READ_ONCE(map->frozen) &&
6270 	       !bpf_map_write_active(map);
6271 }
6272 
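/* Illustrative userspace sequence (assuming libbpf) that satisfies all
 * three conditions above:
 *
 *   LIBBPF_OPTS(bpf_map_create_opts, opts,
 *               .map_flags = BPF_F_RDONLY_PROG);
 *   int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "ro_map",
 *                           sizeof(__u32), sizeof(__u64), 1, &opts);
 *   __u32 key = 0; __u64 val = 42;
 *   bpf_map_update_elem(fd, &key, &val, BPF_ANY); // initialize...
 *   bpf_map_freeze(fd);                           // ...then freeze
 *
 * Once frozen, loads from such a map value can be constant-folded by
 * the verifier via bpf_map_direct_read() below.
 */
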
6273 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
6274 			       bool is_ldsx)
6275 {
6276 	void *ptr;
6277 	u64 addr;
6278 	int err;
6279 
6280 	err = map->ops->map_direct_value_addr(map, &addr, off);
6281 	if (err)
6282 		return err;
6283 	ptr = (void *)(long)addr + off;
6284 
6285 	switch (size) {
6286 	case sizeof(u8):
6287 		*val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr;
6288 		break;
6289 	case sizeof(u16):
6290 		*val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr;
6291 		break;
6292 	case sizeof(u32):
6293 		*val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr;
6294 		break;
6295 	case sizeof(u64):
6296 		*val = *(u64 *)ptr;
6297 		break;
6298 	default:
6299 		return -EINVAL;
6300 	}
6301 	return 0;
6302 }
6303 
6304 #define BTF_TYPE_SAFE_RCU(__type)  __PASTE(__type, __safe_rcu)
6305 #define BTF_TYPE_SAFE_RCU_OR_NULL(__type)  __PASTE(__type, __safe_rcu_or_null)
6306 #define BTF_TYPE_SAFE_TRUSTED(__type)  __PASTE(__type, __safe_trusted)
6307 
6308 /*
6309  * Allow-list a few fields as RCU-trusted or fully trusted.
6310  * This logic doesn't allow mixed tagging and will be removed once GCC supports
6311  * btf_type_tag.
6312  */
6313 
6314 /* RCU trusted: these fields are trusted in RCU CS and never NULL */
6315 BTF_TYPE_SAFE_RCU(struct task_struct) {
6316 	const cpumask_t *cpus_ptr;
6317 	struct css_set __rcu *cgroups;
6318 	struct task_struct __rcu *real_parent;
6319 	struct task_struct *group_leader;
6320 };
6321 
6322 BTF_TYPE_SAFE_RCU(struct cgroup) {
6323 	/* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */
6324 	struct kernfs_node *kn;
6325 };
6326 
6327 BTF_TYPE_SAFE_RCU(struct css_set) {
6328 	struct cgroup *dfl_cgrp;
6329 };
6330 
6331 /* RCU trusted: these fields are trusted in RCU CS and can be NULL */
6332 BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) {
6333 	struct file __rcu *exe_file;
6334 };
6335 
6336 /* skb->sk, req->sk are not RCU protected, but we mark them as such
6337  * because sockets accessible to bpf progs are SOCK_RCU_FREE.
6338  */
6339 BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) {
6340 	struct sock *sk;
6341 };
6342 
6343 BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) {
6344 	struct sock *sk;
6345 };
6346 
6347 /* fully trusted: these fields are trusted even outside of RCU CS and never NULL */
6348 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) {
6349 	struct seq_file *seq;
6350 };
6351 
6352 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) {
6353 	struct bpf_iter_meta *meta;
6354 	struct task_struct *task;
6355 };
6356 
6357 BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) {
6358 	struct file *file;
6359 };
6360 
6361 BTF_TYPE_SAFE_TRUSTED(struct file) {
6362 	struct inode *f_inode;
6363 };
6364 
6365 BTF_TYPE_SAFE_TRUSTED(struct dentry) {
6366 	/* no negative dentries in places where bpf can see them */
6367 	struct inode *d_inode;
6368 };
6369 
6370 BTF_TYPE_SAFE_TRUSTED(struct socket) {
6371 	struct sock *sk;
6372 };
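
/* Illustrative program fragment relying on the allow-lists above
 * (sketch; not tied to a specific program type): task->real_parent is
 * listed in BTF_TYPE_SAFE_RCU(struct task_struct), so the walked
 * pointer stays usable inside an RCU critical section:
 *
 *	struct task_struct *t = bpf_get_current_task_btf();
 *
 *	bpf_rcu_read_lock();
 *	t = t->real_parent;         becomes PTR_TO_BTF_ID | MEM_RCU
 *	... read t->pid or pass t to a KF_RCU kfunc ...
 *	bpf_rcu_read_unlock();      MEM_RCU pointers turn PTR_UNTRUSTED
 *
 * A field not listed here would instead yield an untrusted or plain
 * PTR_TO_BTF_ID pointer, per the logic in check_ptr_to_btf_access().
 */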
6373 
6374 static bool type_is_rcu(struct bpf_verifier_env *env,
6375 			struct bpf_reg_state *reg,
6376 			const char *field_name, u32 btf_id)
6377 {
6378 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct));
6379 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup));
6380 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set));
6381 
6382 	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu");
6383 }
6384 
6385 static bool type_is_rcu_or_null(struct bpf_verifier_env *env,
6386 				struct bpf_reg_state *reg,
6387 				const char *field_name, u32 btf_id)
6388 {
6389 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct));
6390 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff));
6391 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock));
6392 
6393 	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null");
6394 }
6395 
6396 static bool type_is_trusted(struct bpf_verifier_env *env,
6397 			    struct bpf_reg_state *reg,
6398 			    const char *field_name, u32 btf_id)
6399 {
6400 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta));
6401 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task));
6402 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
6403 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
6404 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
6405 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));
6406 
6407 	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
6408 }
6409 
6410 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
6411 				   struct bpf_reg_state *regs,
6412 				   int regno, int off, int size,
6413 				   enum bpf_access_type atype,
6414 				   int value_regno)
6415 {
6416 	struct bpf_reg_state *reg = regs + regno;
6417 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
6418 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
6419 	const char *field_name = NULL;
6420 	enum bpf_type_flag flag = 0;
6421 	u32 btf_id = 0;
6422 	int ret;
6423 
6424 	if (!env->allow_ptr_leaks) {
6425 		verbose(env,
6426 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
6427 			tname);
6428 		return -EPERM;
6429 	}
6430 	if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) {
6431 		verbose(env,
6432 			"Cannot access kernel 'struct %s' from non-GPL compatible program\n",
6433 			tname);
6434 		return -EINVAL;
6435 	}
6436 	if (off < 0) {
6437 		verbose(env,
6438 			"R%d is ptr_%s invalid negative access: off=%d\n",
6439 			regno, tname, off);
6440 		return -EACCES;
6441 	}
6442 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
6443 		char tn_buf[48];
6444 
6445 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6446 		verbose(env,
6447 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
6448 			regno, tname, off, tn_buf);
6449 		return -EACCES;
6450 	}
6451 
6452 	if (reg->type & MEM_USER) {
6453 		verbose(env,
6454 			"R%d is ptr_%s access user memory: off=%d\n",
6455 			regno, tname, off);
6456 		return -EACCES;
6457 	}
6458 
6459 	if (reg->type & MEM_PERCPU) {
6460 		verbose(env,
6461 			"R%d is ptr_%s access percpu memory: off=%d\n",
6462 			regno, tname, off);
6463 		return -EACCES;
6464 	}
6465 
6466 	if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) {
6467 		if (!btf_is_kernel(reg->btf)) {
6468 			verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
6469 			return -EFAULT;
6470 		}
6471 		ret = env->ops->btf_struct_access(&env->log, reg, off, size);
6472 	} else {
6473 		/* Writes are permitted with default btf_struct_access for
6474 		 * program allocated objects (which always have ref_obj_id > 0),
6475 		 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
6476 		 */
6477 		if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) {
6478 			verbose(env, "only read is supported\n");
6479 			return -EACCES;
6480 		}
6481 
6482 		if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
6483 		    !(reg->type & MEM_RCU) && !reg->ref_obj_id) {
6484 			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
6485 			return -EFAULT;
6486 		}
6487 
6488 		ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name);
6489 	}
6490 
6491 	if (ret < 0)
6492 		return ret;
6493 
6494 	if (ret != PTR_TO_BTF_ID) {
6495 		/* not PTR_TO_BTF_ID; nothing to adjust, just mark the register below */
6496 
6497 	} else if (type_flag(reg->type) & PTR_UNTRUSTED) {
6498 		/* If this is an untrusted pointer, all pointers formed by walking it
6499 		 * also inherit the untrusted flag.
6500 		 */
6501 		flag = PTR_UNTRUSTED;
6502 
6503 	} else if (is_trusted_reg(reg) || is_rcu_reg(reg)) {
6504 		/* By default any pointer obtained from walking a trusted pointer is no
6505 		 * longer trusted, unless the field being accessed has explicitly been
6506 		 * marked as inheriting its parent's state of trust (either full or RCU).
6507 		 * For example:
6508 		 * 'cgroups' pointer is untrusted if task->cgroups dereference
6509 		 * happened in a sleepable program outside of bpf_rcu_read_lock()
6510 		 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU).
6511 		 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED.
6512 		 *
6513 		 * A regular RCU-protected pointer with __rcu tag can also be deemed
6514 		 * trusted if we are in an RCU CS. Such pointer can be NULL.
6515 		 */
6516 		if (type_is_trusted(env, reg, field_name, btf_id)) {
6517 			flag |= PTR_TRUSTED;
6518 		} else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
6519 			if (type_is_rcu(env, reg, field_name, btf_id)) {
6520 				/* ignore __rcu tag and mark it MEM_RCU */
6521 				flag |= MEM_RCU;
6522 			} else if (flag & MEM_RCU ||
6523 				   type_is_rcu_or_null(env, reg, field_name, btf_id)) {
6524 				/* __rcu tagged pointers can be NULL */
6525 				flag |= MEM_RCU | PTR_MAYBE_NULL;
6526 
6527 				/* We always trust them */
6528 				if (type_is_rcu_or_null(env, reg, field_name, btf_id) &&
6529 				    flag & PTR_UNTRUSTED)
6530 					flag &= ~PTR_UNTRUSTED;
6531 			} else if (flag & (MEM_PERCPU | MEM_USER)) {
6532 				/* keep as-is */
6533 			} else {
6534 				/* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */
6535 				clear_trusted_flags(&flag);
6536 			}
6537 		} else {
6538 			/*
6539 			 * If not in RCU CS or MEM_RCU pointer can be NULL then
6540 			 * If not in an RCU CS, or the MEM_RCU pointer can be NULL,
6541 			 * then aggressively mark as untrusted; otherwise such
6542 			 * and will be allowed to be passed into helpers for
6543 			 * compat reasons.
6544 			 */
6545 			flag = PTR_UNTRUSTED;
6546 		}
6547 	} else {
6548 		/* Old compat. Deprecated */
6549 		clear_trusted_flags(&flag);
6550 	}
6551 
6552 	if (atype == BPF_READ && value_regno >= 0)
6553 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
6554 
6555 	return 0;
6556 }
6557 
6558 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
6559 				   struct bpf_reg_state *regs,
6560 				   int regno, int off, int size,
6561 				   enum bpf_access_type atype,
6562 				   int value_regno)
6563 {
6564 	struct bpf_reg_state *reg = regs + regno;
6565 	struct bpf_map *map = reg->map_ptr;
6566 	struct bpf_reg_state map_reg;
6567 	enum bpf_type_flag flag = 0;
6568 	const struct btf_type *t;
6569 	const char *tname;
6570 	u32 btf_id;
6571 	int ret;
6572 
6573 	if (!btf_vmlinux) {
6574 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
6575 		return -ENOTSUPP;
6576 	}
6577 
6578 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
6579 		verbose(env, "map_ptr access not supported for map type %d\n",
6580 			map->map_type);
6581 		return -ENOTSUPP;
6582 	}
6583 
6584 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
6585 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
6586 
6587 	if (!env->allow_ptr_leaks) {
6588 		verbose(env,
6589 			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
6590 			tname);
6591 		return -EPERM;
6592 	}
6593 
6594 	if (off < 0) {
6595 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
6596 			regno, tname, off);
6597 		return -EACCES;
6598 	}
6599 
6600 	if (atype != BPF_READ) {
6601 		verbose(env, "only read from %s is supported\n", tname);
6602 		return -EACCES;
6603 	}
6604 
6605 	/* Simulate access to a PTR_TO_BTF_ID */
6606 	memset(&map_reg, 0, sizeof(map_reg));
6607 	mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
6608 	ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL);
6609 	if (ret < 0)
6610 		return ret;
6611 
6612 	if (value_regno >= 0)
6613 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
6614 
6615 	return 0;
6616 }
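
/* This is what lets a program read the kernel-side struct bpf_map
 * through a map pseudo pointer, e.g. (sketch; 'my_map' is a made-up
 * map definition):
 *
 *	struct bpf_map *m = (struct bpf_map *)&my_map;
 *	__u32 max = m->max_entries;
 *
 * The load is checked as a read-only BTF access against the
 * map-type-specific kernel struct named by map_btf_id.
 */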
6617 
6618 /* Check that the stack access at the given offset is within bounds. The
6619  * maximum valid offset is -1.
6620  *
6621  * The minimum valid offset is -MAX_BPF_STACK for writes (and for reads
6622  * with allow_uninit_stack), and -state->allocated_stack for other reads.
6623  */
6624 static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
6625                                           s64 off,
6626                                           struct bpf_func_state *state,
6627                                           enum bpf_access_type t)
6628 {
6629 	int min_valid_off;
6630 
6631 	if (t == BPF_WRITE || env->allow_uninit_stack)
6632 		min_valid_off = -MAX_BPF_STACK;
6633 	else
6634 		min_valid_off = -state->allocated_stack;
6635 
6636 	if (off < min_valid_off || off > -1)
6637 		return -EACCES;
6638 	return 0;
6639 }
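
/* For example, with allocated_stack == 16: a write at off == -8 is
 * accepted, a read at off == -24 is rejected unless uninitialized
 * stack reads are allowed (then the write bound -MAX_BPF_STACK
 * applies), and any access at off >= 0 is rejected since the highest
 * valid offset below the frame pointer is -1.
 */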
6640 
6641 /* Check that the stack access at 'regno + off' falls within the maximum stack
6642  * bounds.
6643  *
6644  * 'off' includes the register's fixed offset (reg->off), but not its variable part (if any).
6645  */
6646 static int check_stack_access_within_bounds(
6647 		struct bpf_verifier_env *env,
6648 		int regno, int off, int access_size,
6649 		enum bpf_access_src src, enum bpf_access_type type)
6650 {
6651 	struct bpf_reg_state *regs = cur_regs(env);
6652 	struct bpf_reg_state *reg = regs + regno;
6653 	struct bpf_func_state *state = func(env, reg);
6654 	s64 min_off, max_off;
6655 	int err;
6656 	char *err_extra;
6657 
6658 	if (src == ACCESS_HELPER)
6659 		/* We don't know if helpers are reading or writing (or both). */
6660 		err_extra = " indirect access to";
6661 	else if (type == BPF_READ)
6662 		err_extra = " read from";
6663 	else
6664 		err_extra = " write to";
6665 
6666 	if (tnum_is_const(reg->var_off)) {
6667 		min_off = (s64)reg->var_off.value + off;
6668 		max_off = min_off + access_size;
6669 	} else {
6670 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
6671 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
6672 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
6673 				err_extra, regno);
6674 			return -EACCES;
6675 		}
6676 		min_off = reg->smin_value + off;
6677 		max_off = reg->smax_value + off + access_size;
6678 	}
6679 
6680 	err = check_stack_slot_within_bounds(env, min_off, state, type);
6681 	if (!err && max_off > 0)
6682 		err = -EINVAL; /* out of stack access into non-negative offsets */
6683 
6684 	if (err) {
6685 		if (tnum_is_const(reg->var_off)) {
6686 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
6687 				err_extra, regno, off, access_size);
6688 		} else {
6689 			char tn_buf[48];
6690 
6691 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6692 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n",
6693 				err_extra, regno, tn_buf, off, access_size);
6694 		}
6695 		return err;
6696 	}
6697 
6698 	/* Note that there is no stack access with offset zero, so the needed stack
6699 	 * size is -min_off, not -min_off+1.
6700 	 */
6701 	return grow_stack_state(env, state, -min_off /* size */);
6702 }
6703 
6704 /* check whether memory at (regno + off) is accessible for t = (read | write)
6705  * if t==write, value_regno is the register whose value is stored into memory
6706  * if t==read, value_regno is the register which will receive the value from memory
6707  * if t==write && value_regno==-1, some unknown value is stored into memory
6708  * if t==read && value_regno==-1, don't care what we read from memory
6709  */
6710 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
6711 			    int off, int bpf_size, enum bpf_access_type t,
6712 			    int value_regno, bool strict_alignment_once, bool is_ldsx)
6713 {
6714 	struct bpf_reg_state *regs = cur_regs(env);
6715 	struct bpf_reg_state *reg = regs + regno;
6716 	int size, err = 0;
6717 
6718 	size = bpf_size_to_bytes(bpf_size);
6719 	if (size < 0)
6720 		return size;
6721 
6722 	/* alignment checks will add in reg->off themselves */
6723 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
6724 	if (err)
6725 		return err;
6726 
6727 	/* for access checks, reg->off is just part of off */
6728 	off += reg->off;
6729 
6730 	if (reg->type == PTR_TO_MAP_KEY) {
6731 		if (t == BPF_WRITE) {
6732 			verbose(env, "write to change key R%d not allowed\n", regno);
6733 			return -EACCES;
6734 		}
6735 
6736 		err = check_mem_region_access(env, regno, off, size,
6737 					      reg->map_ptr->key_size, false);
6738 		if (err)
6739 			return err;
6740 		if (value_regno >= 0)
6741 			mark_reg_unknown(env, regs, value_regno);
6742 	} else if (reg->type == PTR_TO_MAP_VALUE) {
6743 		struct btf_field *kptr_field = NULL;
6744 
6745 		if (t == BPF_WRITE && value_regno >= 0 &&
6746 		    is_pointer_value(env, value_regno)) {
6747 			verbose(env, "R%d leaks addr into map\n", value_regno);
6748 			return -EACCES;
6749 		}
6750 		err = check_map_access_type(env, regno, off, size, t);
6751 		if (err)
6752 			return err;
6753 		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
6754 		if (err)
6755 			return err;
6756 		if (tnum_is_const(reg->var_off))
6757 			kptr_field = btf_record_find(reg->map_ptr->record,
6758 						     off + reg->var_off.value, BPF_KPTR);
6759 		if (kptr_field) {
6760 			err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
6761 		} else if (t == BPF_READ && value_regno >= 0) {
6762 			struct bpf_map *map = reg->map_ptr;
6763 
6764 			/* if map is read-only, track its contents as scalars */
6765 			if (tnum_is_const(reg->var_off) &&
6766 			    bpf_map_is_rdonly(map) &&
6767 			    map->ops->map_direct_value_addr) {
6768 				int map_off = off + reg->var_off.value;
6769 				u64 val = 0;
6770 
6771 				err = bpf_map_direct_read(map, map_off, size,
6772 							  &val, is_ldsx);
6773 				if (err)
6774 					return err;
6775 
6776 				regs[value_regno].type = SCALAR_VALUE;
6777 				__mark_reg_known(&regs[value_regno], val);
6778 			} else {
6779 				mark_reg_unknown(env, regs, value_regno);
6780 			}
6781 		}
6782 	} else if (base_type(reg->type) == PTR_TO_MEM) {
6783 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
6784 
6785 		if (type_may_be_null(reg->type)) {
6786 			verbose(env, "R%d invalid mem access '%s'\n", regno,
6787 				reg_type_str(env, reg->type));
6788 			return -EACCES;
6789 		}
6790 
6791 		if (t == BPF_WRITE && rdonly_mem) {
6792 			verbose(env, "R%d cannot write into %s\n",
6793 				regno, reg_type_str(env, reg->type));
6794 			return -EACCES;
6795 		}
6796 
6797 		if (t == BPF_WRITE && value_regno >= 0 &&
6798 		    is_pointer_value(env, value_regno)) {
6799 			verbose(env, "R%d leaks addr into mem\n", value_regno);
6800 			return -EACCES;
6801 		}
6802 
6803 		err = check_mem_region_access(env, regno, off, size,
6804 					      reg->mem_size, false);
6805 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
6806 			mark_reg_unknown(env, regs, value_regno);
6807 	} else if (reg->type == PTR_TO_CTX) {
6808 		enum bpf_reg_type reg_type = SCALAR_VALUE;
6809 		struct btf *btf = NULL;
6810 		u32 btf_id = 0;
6811 
6812 		if (t == BPF_WRITE && value_regno >= 0 &&
6813 		    is_pointer_value(env, value_regno)) {
6814 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
6815 			return -EACCES;
6816 		}
6817 
6818 		err = check_ptr_off_reg(env, reg, regno);
6819 		if (err < 0)
6820 			return err;
6821 
6822 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
6823 				       &btf_id);
6824 		if (err)
6825 			verbose_linfo(env, insn_idx, "; ");
6826 		if (!err && t == BPF_READ && value_regno >= 0) {
6827 			/* ctx access returns either a scalar, or a
6828 			 * PTR_TO_PACKET[_META,_END]. In the latter
6829 			 * case, we know the offset is zero.
6830 			 */
6831 			if (reg_type == SCALAR_VALUE) {
6832 				mark_reg_unknown(env, regs, value_regno);
6833 			} else {
6834 				mark_reg_known_zero(env, regs,
6835 						    value_regno);
6836 				if (type_may_be_null(reg_type))
6837 					regs[value_regno].id = ++env->id_gen;
6838 				/* A load of ctx field could have different
6839 				 * actual load size with the one encoded in the
6840 				 * insn. When the dst is PTR, it is for sure not
6841 				 * a sub-register.
6842 				 */
6843 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
6844 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
6845 					regs[value_regno].btf = btf;
6846 					regs[value_regno].btf_id = btf_id;
6847 				}
6848 			}
6849 			regs[value_regno].type = reg_type;
6850 		}
6851 
6852 	} else if (reg->type == PTR_TO_STACK) {
6853 		/* Basic bounds checks. */
6854 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
6855 		if (err)
6856 			return err;
6857 
6858 		if (t == BPF_READ)
6859 			err = check_stack_read(env, regno, off, size,
6860 					       value_regno);
6861 		else
6862 			err = check_stack_write(env, regno, off, size,
6863 						value_regno, insn_idx);
6864 	} else if (reg_is_pkt_pointer(reg)) {
6865 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
6866 			verbose(env, "cannot write into packet\n");
6867 			return -EACCES;
6868 		}
6869 		if (t == BPF_WRITE && value_regno >= 0 &&
6870 		    is_pointer_value(env, value_regno)) {
6871 			verbose(env, "R%d leaks addr into packet\n",
6872 				value_regno);
6873 			return -EACCES;
6874 		}
6875 		err = check_packet_access(env, regno, off, size, false);
6876 		if (!err && t == BPF_READ && value_regno >= 0)
6877 			mark_reg_unknown(env, regs, value_regno);
6878 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
6879 		if (t == BPF_WRITE && value_regno >= 0 &&
6880 		    is_pointer_value(env, value_regno)) {
6881 			verbose(env, "R%d leaks addr into flow keys\n",
6882 				value_regno);
6883 			return -EACCES;
6884 		}
6885 
6886 		err = check_flow_keys_access(env, off, size);
6887 		if (!err && t == BPF_READ && value_regno >= 0)
6888 			mark_reg_unknown(env, regs, value_regno);
6889 	} else if (type_is_sk_pointer(reg->type)) {
6890 		if (t == BPF_WRITE) {
6891 			verbose(env, "R%d cannot write into %s\n",
6892 				regno, reg_type_str(env, reg->type));
6893 			return -EACCES;
6894 		}
6895 		err = check_sock_access(env, insn_idx, regno, off, size, t);
6896 		if (!err && value_regno >= 0)
6897 			mark_reg_unknown(env, regs, value_regno);
6898 	} else if (reg->type == PTR_TO_TP_BUFFER) {
6899 		err = check_tp_buffer_access(env, reg, regno, off, size);
6900 		if (!err && t == BPF_READ && value_regno >= 0)
6901 			mark_reg_unknown(env, regs, value_regno);
6902 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
6903 		   !type_may_be_null(reg->type)) {
6904 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
6905 					      value_regno);
6906 	} else if (reg->type == CONST_PTR_TO_MAP) {
6907 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
6908 					      value_regno);
6909 	} else if (base_type(reg->type) == PTR_TO_BUF) {
6910 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
6911 		u32 *max_access;
6912 
6913 		if (rdonly_mem) {
6914 			if (t == BPF_WRITE) {
6915 				verbose(env, "R%d cannot write into %s\n",
6916 					regno, reg_type_str(env, reg->type));
6917 				return -EACCES;
6918 			}
6919 			max_access = &env->prog->aux->max_rdonly_access;
6920 		} else {
6921 			max_access = &env->prog->aux->max_rdwr_access;
6922 		}
6923 
6924 		err = check_buffer_access(env, reg, regno, off, size, false,
6925 					  max_access);
6926 
6927 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
6928 			mark_reg_unknown(env, regs, value_regno);
6929 	} else {
6930 		verbose(env, "R%d invalid mem access '%s'\n", regno,
6931 			reg_type_str(env, reg->type));
6932 		return -EACCES;
6933 	}
6934 
6935 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
6936 	    regs[value_regno].type == SCALAR_VALUE) {
6937 		if (!is_ldsx)
6938 			/* b/h/w load zero-extends, mark upper bits as known 0 */
6939 			coerce_reg_to_size(&regs[value_regno], size);
6940 		else
6941 			coerce_reg_to_size_sx(&regs[value_regno], size);
6942 	}
6943 	return err;
6944 }
6945 
6946 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
6947 {
6948 	int load_reg;
6949 	int err;
6950 
6951 	switch (insn->imm) {
6952 	case BPF_ADD:
6953 	case BPF_ADD | BPF_FETCH:
6954 	case BPF_AND:
6955 	case BPF_AND | BPF_FETCH:
6956 	case BPF_OR:
6957 	case BPF_OR | BPF_FETCH:
6958 	case BPF_XOR:
6959 	case BPF_XOR | BPF_FETCH:
6960 	case BPF_XCHG:
6961 	case BPF_CMPXCHG:
6962 		break;
6963 	default:
6964 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
6965 		return -EINVAL;
6966 	}
6967 
6968 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
6969 		verbose(env, "invalid atomic operand size\n");
6970 		return -EINVAL;
6971 	}
6972 
6973 	/* check src1 operand */
6974 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
6975 	if (err)
6976 		return err;
6977 
6978 	/* check src2 operand */
6979 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
6980 	if (err)
6981 		return err;
6982 
6983 	if (insn->imm == BPF_CMPXCHG) {
6984 		/* Check comparison of R0 with memory location */
6985 		const u32 aux_reg = BPF_REG_0;
6986 
6987 		err = check_reg_arg(env, aux_reg, SRC_OP);
6988 		if (err)
6989 			return err;
6990 
6991 		if (is_pointer_value(env, aux_reg)) {
6992 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
6993 			return -EACCES;
6994 		}
6995 	}
6996 
6997 	if (is_pointer_value(env, insn->src_reg)) {
6998 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6999 		return -EACCES;
7000 	}
7001 
7002 	if (is_ctx_reg(env, insn->dst_reg) ||
7003 	    is_pkt_reg(env, insn->dst_reg) ||
7004 	    is_flow_key_reg(env, insn->dst_reg) ||
7005 	    is_sk_reg(env, insn->dst_reg)) {
7006 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
7007 			insn->dst_reg,
7008 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
7009 		return -EACCES;
7010 	}
7011 
7012 	if (insn->imm & BPF_FETCH) {
7013 		if (insn->imm == BPF_CMPXCHG)
7014 			load_reg = BPF_REG_0;
7015 		else
7016 			load_reg = insn->src_reg;
7017 
7018 		/* check and record load of old value */
7019 		err = check_reg_arg(env, load_reg, DST_OP);
7020 		if (err)
7021 			return err;
7022 	} else {
7023 		/* This instruction accesses a memory location but doesn't
7024 		 * actually load it into a register.
7025 		 */
7026 		load_reg = -1;
7027 	}
7028 
7029 	/* Check whether we can read the memory, with second call for fetch
7030 	 * case to simulate the register fill.
7031 	 */
7032 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
7033 			       BPF_SIZE(insn->code), BPF_READ, -1, true, false);
7034 	if (!err && load_reg >= 0)
7035 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
7036 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
7037 				       true, false);
7038 	if (err)
7039 		return err;
7040 
7041 	/* Check whether we can write into the same memory. */
7042 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
7043 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
7044 	if (err)
7045 		return err;
7046 	return 0;
7047 }
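
/* Example of an instruction accepted by the checks above, assuming R1
 * is PTR_TO_MAP_VALUE and R2 a scalar (BPF_ATOMIC_OP() as defined in
 * include/linux/filter.h):
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0),
 *
 * It atomically performs *(u64 *)(R1 + 0) += R2 and fetches the old
 * value into R2, hence R2 is checked both as SRC_OP and, as load_reg,
 * as DST_OP, and the memory is checked for read and write access.
 */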
7048 
7049 /* When register 'regno' is used to read the stack (either directly or through
7050  * a helper function) make sure that it's within stack boundary and, depending
7051  * on the access type and privileges, that all elements of the stack are
7052  * initialized.
7053  *
7054  * 'off' includes the register's fixed offset (reg->off), but not its variable part (if any).
7055  *
7056  * All registers that have been spilled on the stack in the slots within the
7057  * read offsets are marked as read.
7058  */
7059 static int check_stack_range_initialized(
7060 		struct bpf_verifier_env *env, int regno, int off,
7061 		int access_size, bool zero_size_allowed,
7062 		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
7063 {
7064 	struct bpf_reg_state *reg = reg_state(env, regno);
7065 	struct bpf_func_state *state = func(env, reg);
7066 	int err, min_off, max_off, i, j, slot, spi;
7067 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
7068 	enum bpf_access_type bounds_check_type;
7069 	/* Some accesses can write anything into the stack, others are
7070 	 * read-only.
7071 	 */
7072 	bool clobber = false;
7073 
7074 	if (access_size == 0 && !zero_size_allowed) {
7075 		verbose(env, "invalid zero-sized read\n");
7076 		return -EACCES;
7077 	}
7078 
7079 	if (type == ACCESS_HELPER) {
7080 		/* The bounds checks for writes are more permissive than for
7081 		 * reads. However, if raw_mode is not set, we'll do extra
7082 		 * checks below.
7083 		 */
7084 		bounds_check_type = BPF_WRITE;
7085 		clobber = true;
7086 	} else {
7087 		bounds_check_type = BPF_READ;
7088 	}
7089 	err = check_stack_access_within_bounds(env, regno, off, access_size,
7090 					       type, bounds_check_type);
7091 	if (err)
7092 		return err;
7093 
7094 
7096 		min_off = max_off = reg->var_off.value + off;
7097 	} else {
7098 		/* Variable offset is prohibited for unprivileged mode for
7099 		 * simplicity since it requires corresponding support in
7100 		 * Spectre masking for stack ALU.
7101 		 * See also retrieve_ptr_limit().
7102 		 */
7103 		if (!env->bypass_spec_v1) {
7104 			char tn_buf[48];
7105 
7106 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7107 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
7108 				regno, err_extra, tn_buf);
7109 			return -EACCES;
7110 		}
7111 		/* Only an initialized buffer on the stack may be accessed with
7112 		 * a variable offset. With an uninitialized buffer it's hard to
7113 		 * guarantee that the whole memory is marked as initialized on
7114 		 * helper return, since the exact bounds are unknown, which may
7115 		 * cause uninitialized stack memory to leak.
7116 		 */
7117 		if (meta && meta->raw_mode)
7118 			meta = NULL;
7119 
7120 		min_off = reg->smin_value + off;
7121 		max_off = reg->smax_value + off;
7122 	}
7123 
7124 	if (meta && meta->raw_mode) {
7125 		/* Ensure we won't be overwriting dynptrs when simulating byte
7126 		 * by byte access in check_helper_call using meta.access_size.
7127 		 * This would be a problem if we have a helper in the future
7128 		 * which takes:
7129 		 *
7130 		 *	helper(uninit_mem, len, dynptr)
7131 		 *
7132 		 * Now, uninit_mem may overlap with the dynptr pointer. Hence, it
7133 		 * may end up writing to the dynptr itself when touching memory from
7134 		 * arg 1. This can be relaxed on a case by case basis for known
7135 		 * safe cases, but reject due to the possibility of aliasing by
7136 		 * default.
7137 		 */
7138 		for (i = min_off; i < max_off + access_size; i++) {
7139 			int stack_off = -i - 1;
7140 
7141 			spi = __get_spi(i);
7142 			/* raw_mode may write past allocated_stack */
7143 			if (state->allocated_stack <= stack_off)
7144 				continue;
7145 			if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
7146 				verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
7147 				return -EACCES;
7148 			}
7149 		}
7150 		meta->access_size = access_size;
7151 		meta->regno = regno;
7152 		return 0;
7153 	}
7154 
7155 	for (i = min_off; i < max_off + access_size; i++) {
7156 		u8 *stype;
7157 
7158 		slot = -i - 1;
7159 		spi = slot / BPF_REG_SIZE;
7160 		if (state->allocated_stack <= slot) {
7161 			verbose(env, "verifier bug: allocated_stack too small");
7162 			verbose(env, "verifier bug: allocated_stack too small\n");
7163 		}
7164 
7165 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
7166 		if (*stype == STACK_MISC)
7167 			goto mark;
7168 		if ((*stype == STACK_ZERO) ||
7169 		    (*stype == STACK_INVALID && env->allow_uninit_stack)) {
7170 			if (clobber) {
7171 				/* helper can write anything into the stack */
7172 				*stype = STACK_MISC;
7173 			}
7174 			goto mark;
7175 		}
7176 
7177 		if (is_spilled_reg(&state->stack[spi]) &&
7178 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
7179 		     env->allow_ptr_leaks)) {
7180 			if (clobber) {
7181 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
7182 				for (j = 0; j < BPF_REG_SIZE; j++)
7183 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
7184 			}
7185 			goto mark;
7186 		}
7187 
7188 		if (tnum_is_const(reg->var_off)) {
7189 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
7190 				err_extra, regno, min_off, i - min_off, access_size);
7191 		} else {
7192 			char tn_buf[48];
7193 
7194 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7195 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
7196 				err_extra, regno, tn_buf, i - min_off, access_size);
7197 		}
7198 		return -EACCES;
7199 mark:
7200 		/* reading any byte out of 8-byte 'spill_slot' will cause
7201 		 * the whole slot to be marked as 'read'
7202 		 */
7203 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
7204 			      state->stack[spi].spilled_ptr.parent,
7205 			      REG_LIVE_READ64);
7206 		/* We do not set REG_LIVE_WRITTEN for the stack slot, as we cannot
7207 		 * be sure whether the stack slot is written to or not. Hence,
7208 		 * we must still conservatively propagate reads upwards even if
7209 		 * the helper may write to the entire memory range.
7210 		 */
7211 	}
7212 	return 0;
7213 }
7214 
7215 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
7216 				   int access_size, bool zero_size_allowed,
7217 				   struct bpf_call_arg_meta *meta)
7218 {
7219 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7220 	u32 *max_access;
7221 
7222 	switch (base_type(reg->type)) {
7223 	case PTR_TO_PACKET:
7224 	case PTR_TO_PACKET_META:
7225 		return check_packet_access(env, regno, reg->off, access_size,
7226 					   zero_size_allowed);
7227 	case PTR_TO_MAP_KEY:
7228 		if (meta && meta->raw_mode) {
7229 			verbose(env, "R%d cannot write into %s\n", regno,
7230 				reg_type_str(env, reg->type));
7231 			return -EACCES;
7232 		}
7233 		return check_mem_region_access(env, regno, reg->off, access_size,
7234 					       reg->map_ptr->key_size, false);
7235 	case PTR_TO_MAP_VALUE:
7236 		if (check_map_access_type(env, regno, reg->off, access_size,
7237 					  meta && meta->raw_mode ? BPF_WRITE :
7238 					  BPF_READ))
7239 			return -EACCES;
7240 		return check_map_access(env, regno, reg->off, access_size,
7241 					zero_size_allowed, ACCESS_HELPER);
7242 	case PTR_TO_MEM:
7243 		if (type_is_rdonly_mem(reg->type)) {
7244 			if (meta && meta->raw_mode) {
7245 				verbose(env, "R%d cannot write into %s\n", regno,
7246 					reg_type_str(env, reg->type));
7247 				return -EACCES;
7248 			}
7249 		}
7250 		return check_mem_region_access(env, regno, reg->off,
7251 					       access_size, reg->mem_size,
7252 					       zero_size_allowed);
7253 	case PTR_TO_BUF:
7254 		if (type_is_rdonly_mem(reg->type)) {
7255 			if (meta && meta->raw_mode) {
7256 				verbose(env, "R%d cannot write into %s\n", regno,
7257 					reg_type_str(env, reg->type));
7258 				return -EACCES;
7259 			}
7260 
7261 			max_access = &env->prog->aux->max_rdonly_access;
7262 		} else {
7263 			max_access = &env->prog->aux->max_rdwr_access;
7264 		}
7265 		return check_buffer_access(env, reg, regno, reg->off,
7266 					   access_size, zero_size_allowed,
7267 					   max_access);
7268 	case PTR_TO_STACK:
7269 		return check_stack_range_initialized(
7270 				env,
7271 				regno, reg->off, access_size,
7272 				zero_size_allowed, ACCESS_HELPER, meta);
7273 	case PTR_TO_BTF_ID:
7274 		return check_ptr_to_btf_access(env, regs, regno, reg->off,
7275 					       access_size, BPF_READ, -1);
7276 	case PTR_TO_CTX:
7277 		/* in case the function doesn't know how to access the context
7278 		 * (because we are in a program of type SYSCALL for example), we
7279 		 * cannot statically check its size.
7280 		 * Dynamically check it now.
7281 		 */
7282 		if (!env->ops->convert_ctx_access) {
7283 			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
7284 			int offset = access_size - 1;
7285 
7286 			/* Allow zero-byte read from PTR_TO_CTX */
7287 			if (access_size == 0)
7288 				return zero_size_allowed ? 0 : -EACCES;
7289 
7290 			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
7291 						atype, -1, false, false);
7292 		}
7293 
7294 		fallthrough;
7295 	default: /* scalar_value or invalid ptr */
7296 		/* Allow zero-byte read from NULL, regardless of pointer type */
7297 		if (zero_size_allowed && access_size == 0 &&
7298 		    register_is_null(reg))
7299 			return 0;
7300 
7301 		verbose(env, "R%d type=%s ", regno,
7302 			reg_type_str(env, reg->type));
7303 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
7304 		return -EACCES;
7305 	}
7306 }
7307 
7308 /* verify arguments to helpers or kfuncs consisting of a pointer and an access
7309  * size.
7310  *
7311  * @regno is the register containing the access size. regno-1 is the register
7312  * containing the pointer.
7313  */
7314 static int check_mem_size_reg(struct bpf_verifier_env *env,
7315 			      struct bpf_reg_state *reg, u32 regno,
7316 			      bool zero_size_allowed,
7317 			      struct bpf_call_arg_meta *meta)
7318 {
7319 	int err;
7320 
7321 	/* This is used to refine r0 return value bounds for helpers
7322 	 * that enforce this value as an upper bound on return values.
7323 	 * See do_refine_retval_range() for helpers that can refine
7324 	 * the return value. The C type of the helper argument is u32 so we
7325 	 * pull the register bound from umax_value; if it may be negative the
7326 	 * verifier errors out. Only upper bounds can be learned because retval is an
7327 	 * int type and negative retvals are allowed.
7328 	 */
7329 	meta->msize_max_value = reg->umax_value;
7330 
7331 	/* The register is SCALAR_VALUE; the access check
7332 	 * happens using its boundaries.
7333 	 */
7334 	if (!tnum_is_const(reg->var_off))
7335 		/* For unprivileged variable accesses, disable raw
7336 		 * mode so that the program is required to
7337 		 * initialize all the memory that the helper could
7338 		 * just partially fill up.
7339 		 */
7340 		meta = NULL;
7341 
7342 	if (reg->smin_value < 0) {
7343 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
7344 			regno);
7345 		return -EACCES;
7346 	}
7347 
7348 	if (reg->umin_value == 0 && !zero_size_allowed) {
7349 		verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n",
7350 			regno, reg->umin_value, reg->umax_value);
7351 		return -EACCES;
7352 	}
7353 
7354 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
7355 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
7356 			regno);
7357 		return -EACCES;
7358 	}
7359 	err = check_helper_mem_access(env, regno - 1,
7360 				      reg->umax_value,
7361 				      zero_size_allowed, meta);
7362 	if (!err)
7363 		err = mark_chain_precision(env, regno);
7364 	return err;
7365 }
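
/* From the program side the size register must carry provable bounds
 * before the call, e.g. (sketch; get_len() is a made-up stand-in for
 * any unbounded source):
 *
 *	char buf[64];
 *	u32 n = get_len();               unbounded SCALAR_VALUE
 *	if (n > sizeof(buf))
 *		return 0;                now umax_value == 64
 *	bpf_probe_read_kernel(buf, n, src);
 *
 * Without the bound the call fails with the "unbounded memory access"
 * error above; n == 0 additionally requires the helper to declare
 * ARG_CONST_SIZE_OR_ZERO.
 */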
7366 
7367 static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
7368 			 u32 regno, u32 mem_size)
7369 {
7370 	bool may_be_null = type_may_be_null(reg->type);
7371 	struct bpf_reg_state saved_reg;
7372 	struct bpf_call_arg_meta meta;
7373 	int err;
7374 
7375 	if (register_is_null(reg))
7376 		return 0;
7377 
7378 	memset(&meta, 0, sizeof(meta));
7379 	/* Assuming that the register contains a value, check if the memory
7380 	 * access is safe. Temporarily save and restore the register's state as
7381 	 * the conversion shouldn't be visible to a caller.
7382 	 */
7383 	if (may_be_null) {
7384 		saved_reg = *reg;
7385 		mark_ptr_not_null_reg(reg);
7386 	}
7387 
7388 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
7389 	/* Check access for BPF_WRITE */
7390 	meta.raw_mode = true;
7391 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
7392 
7393 	if (may_be_null)
7394 		*reg = saved_reg;
7395 
7396 	return err;
7397 }
7398 
7399 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
7400 				    u32 regno)
7401 {
7402 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
7403 	bool may_be_null = type_may_be_null(mem_reg->type);
7404 	struct bpf_reg_state saved_reg;
7405 	struct bpf_call_arg_meta meta;
7406 	int err;
7407 
7408 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
7409 
7410 	memset(&meta, 0, sizeof(meta));
7411 
7412 	if (may_be_null) {
7413 		saved_reg = *mem_reg;
7414 		mark_ptr_not_null_reg(mem_reg);
7415 	}
7416 
7417 	err = check_mem_size_reg(env, reg, regno, true, &meta);
7418 	/* Check access for BPF_WRITE */
7419 	meta.raw_mode = true;
7420 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
7421 
7422 	if (may_be_null)
7423 		*mem_reg = saved_reg;
7424 	return err;
7425 }
7426 
7427 /* Implementation details:
7428  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
7429  * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
7430  * Two bpf_map_lookups (even with the same key) will have different reg->id.
7431  * Two separate bpf_obj_new will also have different reg->id.
7432  * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
7433  * clears reg->id after value_or_null->value transition, since the verifier only
7434  * cares about the range of access to valid map value pointer and doesn't care
7435  * about actual address of the map element.
7436  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
7437  * reg->id > 0 after value_or_null->value transition. By doing so
7438  * two bpf_map_lookups will be considered two different pointers that
7439  * point to different bpf_spin_locks. Likewise for pointers to allocated objects
7440  * returned from bpf_obj_new.
7441  * The verifier allows taking only one bpf_spin_lock at a time to avoid
7442  * dead-locks.
7443  * deadlocks.
7444  * reg_is_refcounted() logic. The verifier needs to remember only
7445  * one spin_lock instead of array of acquired_refs.
7446  * cur_state->active_lock remembers which map value element or allocated
7447  * object got locked and clears it after bpf_spin_unlock.
7448  */
7449 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
7450 			     bool is_lock)
7451 {
7452 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7453 	struct bpf_verifier_state *cur = env->cur_state;
7454 	bool is_const = tnum_is_const(reg->var_off);
7455 	u64 val = reg->var_off.value;
7456 	struct bpf_map *map = NULL;
7457 	struct btf *btf = NULL;
7458 	struct btf_record *rec;
7459 
7460 	if (!is_const) {
7461 		verbose(env,
7462 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
7463 			regno);
7464 		return -EINVAL;
7465 	}
7466 	if (reg->type == PTR_TO_MAP_VALUE) {
7467 		map = reg->map_ptr;
7468 		if (!map->btf) {
7469 			verbose(env,
7470 				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
7471 				map->name);
7472 			return -EINVAL;
7473 		}
7474 	} else {
7475 		btf = reg->btf;
7476 	}
7477 
7478 	rec = reg_btf_record(reg);
7479 	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
7480 		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
7481 			map ? map->name : "kptr");
7482 		return -EINVAL;
7483 	}
7484 	if (rec->spin_lock_off != val + reg->off) {
7485 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
7486 			val + reg->off, rec->spin_lock_off);
7487 		return -EINVAL;
7488 	}
7489 	if (is_lock) {
7490 		if (cur->active_lock.ptr) {
7491 			verbose(env,
7492 				"Locking two bpf_spin_locks is not allowed\n");
7493 			return -EINVAL;
7494 		}
7495 		if (map)
7496 			cur->active_lock.ptr = map;
7497 		else
7498 			cur->active_lock.ptr = btf;
7499 		cur->active_lock.id = reg->id;
7500 	} else {
7501 		void *ptr;
7502 
7503 		if (map)
7504 			ptr = map;
7505 		else
7506 			ptr = btf;
7507 
7508 		if (!cur->active_lock.ptr) {
7509 			verbose(env, "bpf_spin_unlock without taking a lock\n");
7510 			return -EINVAL;
7511 		}
7512 		if (cur->active_lock.ptr != ptr ||
7513 		    cur->active_lock.id != reg->id) {
7514 			verbose(env, "bpf_spin_unlock of different lock\n");
7515 			return -EINVAL;
7516 		}
7517 
7518 		invalidate_non_owning_refs(env);
7519 
7520 		cur->active_lock.ptr = NULL;
7521 		cur->active_lock.id = 0;
7522 	}
7523 	return 0;
7524 }
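
/* Typical pattern validated by this function (sketch; the map and
 * value layout are made up):
 *
 *	struct val {
 *		int data;
 *		struct bpf_spin_lock lock;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&map, &key);
 *	if (!v)
 *		return 0;
 *	bpf_spin_lock(&v->lock);        reg->id names this element
 *	v->data = 1;
 *	bpf_spin_unlock(&v->lock);      must match the same ptr/id pair
 *
 * Unlocking the lock of a different lookup result trips the
 * "bpf_spin_unlock of different lock" check, since the two lookups
 * carry different reg->id.
 */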
7525 
7526 static int process_timer_func(struct bpf_verifier_env *env, int regno,
7527 			      struct bpf_call_arg_meta *meta)
7528 {
7529 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7530 	bool is_const = tnum_is_const(reg->var_off);
7531 	struct bpf_map *map = reg->map_ptr;
7532 	u64 val = reg->var_off.value;
7533 
7534 	if (!is_const) {
7535 		verbose(env,
7536 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
7537 			regno);
7538 		return -EINVAL;
7539 	}
7540 	if (!map->btf) {
7541 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
7542 			map->name);
7543 		return -EINVAL;
7544 	}
7545 	if (!btf_record_has_field(map->record, BPF_TIMER)) {
7546 		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
7547 		return -EINVAL;
7548 	}
7549 	if (map->record->timer_off != val + reg->off) {
7550 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
7551 			val + reg->off, map->record->timer_off);
7552 		return -EINVAL;
7553 	}
7554 	if (meta->map_ptr) {
7555 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
7556 		return -EFAULT;
7557 	}
7558 	meta->map_uid = reg->map_uid;
7559 	meta->map_ptr = map;
7560 	return 0;
7561 }
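
/* Sketch of the map value layout this validates (names made up):
 *
 *	struct elem {
 *		__u64 counter;
 *		struct bpf_timer t;      record->timer_off == 8
 *	};
 *
 * The register passed to bpf_timer_init()/bpf_timer_start() must point
 * exactly at that offset within the map value, with a constant offset.
 */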
7562 
7563 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
7564 			     struct bpf_call_arg_meta *meta)
7565 {
7566 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7567 	struct bpf_map *map_ptr = reg->map_ptr;
7568 	struct btf_field *kptr_field;
7569 	u32 kptr_off;
7570 
7571 	if (!tnum_is_const(reg->var_off)) {
7572 		verbose(env,
7573 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
7574 			regno);
7575 		return -EINVAL;
7576 	}
7577 	if (!map_ptr->btf) {
7578 		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
7579 			map_ptr->name);
7580 		return -EINVAL;
7581 	}
7582 	if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
7583 		verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
7584 		return -EINVAL;
7585 	}
7586 
7587 	meta->map_ptr = map_ptr;
7588 	kptr_off = reg->off + reg->var_off.value;
7589 	kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
7590 	if (!kptr_field) {
7591 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
7592 		return -EACCES;
7593 	}
7594 	if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) {
7595 		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
7596 		return -EACCES;
7597 	}
7598 	meta->kptr_field = kptr_field;
7599 	return 0;
7600 }
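
/* Sketch of a kptr field accepted here for bpf_kptr_xchg() (the value
 * layout and pointee type are made up):
 *
 *	struct elem {
 *		struct some_kernel_type __kptr *ptr;
 *	};
 *
 *	old = bpf_kptr_xchg(&v->ptr, new);
 *
 * The __kptr tag yields a BPF_KPTR_REF field in the map's btf_record;
 * an __kptr_untrusted field would fail the referenced-kptr check
 * above.
 */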
7601 
7602 /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
7603  * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
7604  *
7605  * In both cases we deal with the first 8 bytes, but need to mark the next 8
7606  * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
7607  * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
7608  *
7609  * Mutability of bpf_dynptr is at two levels, one is at the level of struct
7610  * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
7611  * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
7612  * mutate the view of the dynptr and also possibly destroy it. In the latter
7613  * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
7614  * memory that dynptr points to.
7615  *
7616  * The verifier will keep track of both levels of mutation (bpf_dynptr's in
7617  * reg->type and the memory's in reg->dynptr.type), but there is no support for
7618  * readonly dynptr view yet, hence only the first case is tracked and checked.
7619  *
7620  * This is consistent with how C applies the const modifier to a struct object,
7621  * where the pointer itself inside bpf_dynptr becomes const but not what it
7622  * points to.
7623  *
7624  * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
7625  * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
7626  */
7627 static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
7628 			       enum bpf_arg_type arg_type, int clone_ref_obj_id)
7629 {
7630 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7631 	int err;
7632 
7633 	/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
7634 	 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
7635 	 */
7636 	if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
7637 		verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
7638 		return -EFAULT;
7639 	}
7640 
7641 	/*  MEM_UNINIT - Points to memory that is an appropriate candidate for
7642 	 *		 constructing a mutable bpf_dynptr object.
7643 	 *
7644 	 *		 Currently, this is only possible with PTR_TO_STACK
7645 	 *		 pointing to a region of at least 16 bytes which doesn't
7646 	 *		 contain an existing bpf_dynptr.
7647 	 *
7648 	 *  MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
7649 	 *		 mutated or destroyed. However, the memory it points to
7650 	 *		 may be mutated.
7651 	 *
7652 	 *  None       - Points to an initialized dynptr that can be mutated and
7653 	 *		 destroyed, including mutation of the memory it points
7654 	 *		 to.
7655 	 */
7656 	if (arg_type & MEM_UNINIT) {
7657 		int i;
7658 
7659 		if (!is_dynptr_reg_valid_uninit(env, reg)) {
7660 			verbose(env, "Dynptr has to be an uninitialized dynptr\n");
7661 			return -EINVAL;
7662 		}
7663 
7664 		/* we write BPF_DW bits (8 bytes) at a time */
7665 		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7666 			err = check_mem_access(env, insn_idx, regno,
7667 					       i, BPF_DW, BPF_WRITE, -1, false, false);
7668 			if (err)
7669 				return err;
7670 		}
7671 
7672 		err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id);
7673 	} else /* MEM_RDONLY and None case from above */ {
7674 		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
7675 		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
7676 			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
7677 			return -EINVAL;
7678 		}
7679 
7680 		if (!is_dynptr_reg_valid_init(env, reg)) {
7681 			verbose(env,
7682 				"Expected an initialized dynptr as arg #%d\n",
7683 				regno);
7684 			return -EINVAL;
7685 		}
7686 
7687 		/* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
7688 		if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
7689 			verbose(env,
7690 				"Expected a dynptr of type %s as arg #%d\n",
7691 				dynptr_type_str(arg_to_dynptr_type(arg_type)), regno);
7692 			return -EINVAL;
7693 		}
7694 
7695 		err = mark_dynptr_read(env, reg);
7696 	}
7697 	return err;
7698 }
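
/* Example of the argument flavors above (sketch; the ringbuf map 'rb'
 * is made up):
 *
 *	struct bpf_dynptr ptr;                         two stack slots
 *
 *	bpf_ringbuf_reserve_dynptr(&rb, 8, 0, &ptr);   MEM_UNINIT: the
 *	                                               slots become STACK_DYNPTR
 *	bpf_dynptr_write(&ptr, 0, &data, 8, 0);        MEM_RDONLY: mutates
 *	                                               the memory, not the dynptr
 *	bpf_ringbuf_submit_dynptr(&ptr, 0);            unmarked arg: may
 *	                                               destroy the dynptr
 */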
7699 
7700 static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
7701 {
7702 	struct bpf_func_state *state = func(env, reg);
7703 
7704 	return state->stack[spi].spilled_ptr.ref_obj_id;
7705 }
7706 
7707 static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7708 {
7709 	return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
7710 }
7711 
7712 static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7713 {
7714 	return meta->kfunc_flags & KF_ITER_NEW;
7715 }
7716 
7717 static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7718 {
7719 	return meta->kfunc_flags & KF_ITER_NEXT;
7720 }
7721 
7722 static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
7723 {
7724 	return meta->kfunc_flags & KF_ITER_DESTROY;
7725 }
7726 
7727 static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
7728 {
7729 	/* btf_check_iter_kfuncs() guarantees that the first argument of any iter
7730 	 * kfunc is the iter state pointer
7731 	 */
7732 	return arg == 0 && is_iter_kfunc(meta);
7733 }
7734 
7735 static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
7736 			    struct bpf_kfunc_call_arg_meta *meta)
7737 {
7738 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7739 	const struct btf_type *t;
7740 	const struct btf_param *arg;
7741 	int spi, err, i, nr_slots;
7742 	u32 btf_id;
7743 
7744 	/* btf_check_iter_kfuncs() ensures we don't need to validate anything here */
7745 	arg = &btf_params(meta->func_proto)[0];
7746 	t = btf_type_skip_modifiers(meta->btf, arg->type, NULL);	/* PTR */
7747 	t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id);	/* STRUCT */
7748 	nr_slots = t->size / BPF_REG_SIZE;
7749 
7750 	if (is_iter_new_kfunc(meta)) {
7751 		/* bpf_iter_<type>_new() expects pointer to uninit iter state */
7752 		if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
7753 			verbose(env, "expected uninitialized iter_%s as arg #%d\n",
7754 				iter_type_str(meta->btf, btf_id), regno);
7755 			return -EINVAL;
7756 		}
7757 
7758 		for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
7759 			err = check_mem_access(env, insn_idx, regno,
7760 					       i, BPF_DW, BPF_WRITE, -1, false, false);
7761 			if (err)
7762 				return err;
7763 		}
7764 
7765 		err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots);
7766 		if (err)
7767 			return err;
7768 	} else {
7769 		/* iter_next() or iter_destroy() expect an initialized iter state */
7770 		err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots);
7771 		switch (err) {
7772 		case 0:
7773 			break;
7774 		case -EINVAL:
7775 			verbose(env, "expected an initialized iter_%s as arg #%d\n",
7776 				iter_type_str(meta->btf, btf_id), regno);
7777 			return err;
7778 		case -EPROTO:
7779 			verbose(env, "expected an RCU CS when using %s\n", meta->func_name);
7780 			return err;
7781 		default:
7782 			return err;
7783 		}
7784 
7785 		spi = iter_get_spi(env, reg, nr_slots);
7786 		if (spi < 0)
7787 			return spi;
7788 
7789 		err = mark_iter_read(env, reg, spi, nr_slots);
7790 		if (err)
7791 			return err;
7792 
7793 		/* remember meta->iter info for process_iter_next_call() */
7794 		meta->iter.spi = spi;
7795 		meta->iter.frameno = reg->frameno;
7796 		meta->ref_obj_id = iter_ref_obj_id(env, reg, spi);
7797 
7798 		if (is_iter_destroy_kfunc(meta)) {
7799 			err = unmark_stack_slots_iter(env, reg, nr_slots);
7800 			if (err)
7801 				return err;
7802 		}
7803 	}
7804 
7805 	return 0;
7806 }
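
/* Canonical open-coded iterator use checked above (numbers iterator
 * shown; any iter kfunc family follows the same shape):
 *
 *	struct bpf_iter_num it;
 *	int *v;
 *
 *	bpf_iter_num_new(&it, 0, 100);           KF_ITER_NEW: uninit slots
 *	while ((v = bpf_iter_num_next(&it))) {   KF_ITER_NEXT: init state
 *		... use *v ...
 *	}
 *	bpf_iter_num_destroy(&it);               KF_ITER_DESTROY: unmark slots
 */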
7807 
7808 /* Look for a previous loop entry at insn_idx: nearest parent state
7809  * stopped at insn_idx with callsites matching those in cur->frame.
7810  */
7811 static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
7812 						  struct bpf_verifier_state *cur,
7813 						  int insn_idx)
7814 {
7815 	struct bpf_verifier_state_list *sl;
7816 	struct bpf_verifier_state *st;
7817 
7818 	/* Explored states are pushed in stack order, most recent states come first */
7819 	sl = *explored_state(env, insn_idx);
7820 	for (; sl; sl = sl->next) {
7821 		/* If st->branches != 0 state is a part of current DFS verification path,
7822 		/* If st->branches != 0, the state is a part of the current DFS
7823 		 * verification path, hence cur and st form a loop.
7824 		st = &sl->state;
7825 		if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) &&
7826 		    st->dfs_depth < cur->dfs_depth)
7827 			return st;
7828 	}
7829 
7830 	return NULL;
7831 }
7832 
7833 static void reset_idmap_scratch(struct bpf_verifier_env *env);
7834 static bool regs_exact(const struct bpf_reg_state *rold,
7835 		       const struct bpf_reg_state *rcur,
7836 		       struct bpf_idmap *idmap);
7837 
7838 static void maybe_widen_reg(struct bpf_verifier_env *env,
7839 			    struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
7840 			    struct bpf_idmap *idmap)
7841 {
7842 	if (rold->type != SCALAR_VALUE)
7843 		return;
7844 	if (rold->type != rcur->type)
7845 		return;
7846 	if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap))
7847 		return;
7848 	__mark_reg_unknown(env, rcur);
7849 }
7850 
7851 static int widen_imprecise_scalars(struct bpf_verifier_env *env,
7852 				   struct bpf_verifier_state *old,
7853 				   struct bpf_verifier_state *cur)
7854 {
7855 	struct bpf_func_state *fold, *fcur;
7856 	int i, fr;
7857 
7858 	reset_idmap_scratch(env);
7859 	for (fr = old->curframe; fr >= 0; fr--) {
7860 		fold = old->frame[fr];
7861 		fcur = cur->frame[fr];
7862 
7863 		for (i = 0; i < MAX_BPF_REG; i++)
7864 			maybe_widen_reg(env,
7865 					&fold->regs[i],
7866 					&fcur->regs[i],
7867 					&env->idmap_scratch);
7868 
7869 		for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) {
7870 			if (!is_spilled_reg(&fold->stack[i]) ||
7871 			    !is_spilled_reg(&fcur->stack[i]))
7872 				continue;
7873 
7874 			maybe_widen_reg(env,
7875 					&fold->stack[i].spilled_ptr,
7876 					&fcur->stack[i].spilled_ptr,
7877 					&env->idmap_scratch);
7878 		}
7879 	}
7880 	return 0;
7881 }
7882 
7883 /* process_iter_next_call() is called when verifier gets to iterator's next
7884  * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
7885  * to it as just "iter_next()" in comments below.
7886  *
7887  * BPF verifier relies on a crucial contract for any iter_next()
7888  * implementation: it should *eventually* return NULL, and once that happens
7889  * it should keep returning NULL. That is, once iterator exhausts elements to
7890  * it should keep returning NULL. That is, once the iterator exhausts elements to
7891  *
7892  * With the assumption of such contract, process_iter_next_call() simulates
7893  * a fork in the verifier state to validate loop logic correctness and safety
7894  * without having to simulate an infinite number of iterations.
7895  *
7896  * In the current state, we first assume that iter_next() returned NULL and
7897  * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such
7898  * conditions we should not form an infinite loop and should eventually reach
7899  * exit.
7900  *
7901  * Besides that, we also fork current state and enqueue it for later
7902  * verification. In a forked state we keep iterator state as ACTIVE
7903  * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
7904  * also bump iteration depth to prevent erroneous infinite loop detection
7905  * later on (see iter_active_depths_differ() comment for details). In this
7906  * state we assume that we'll eventually loop back to another iter_next()
7907  * call (it could be in exactly the same location or in some other instruction,
7908  * it doesn't matter, we don't make any unnecessary assumptions about this,
7909  * everything revolves around iterator state in a stack slot, not which
7910  * instruction is calling iter_next()). When that happens, we either will come
7911  * to iter_next() with equivalent state and can conclude that next iteration
7912  * will proceed in exactly the same way as we just verified, so it's safe to
7913  * assume that loop converges. If not, we'll go on another iteration
7914  * simulation with a different input state, until all possible starting states
7915  * are validated or we reach maximum number of instructions limit.
7916  *
7917  * This way, we will either exhaustively discover all possible input states
7918  * that the iterator loop can start with and will eventually converge, or
7919  * we'll effectively regress into bounded loop simulation logic and either
7920  * reach the maximum number of instructions if the loop is not provably
7921  * convergent, or rely on some statically known limit on the number of
7922  * iterations (e.g., an explicit `if n > 100 then break;` statement somewhere in the loop).
7923  *
7924  * Iteration convergence logic in is_state_visited() relies on exact
7925  * state comparison, which ignores read and precision marks.
7926  * This is necessary because read and precision marks are not finalized
7927  * while in the loop. Exact comparison might preclude convergence for
7928  * simple programs like below:
7929  *
7930  *     i = 0;
7931  *     while(iter_next(&it))
7932  *       i++;
7933  *
7934  * At each iteration step i++ would produce a new distinct state and
7935  * eventually instruction processing limit would be reached.
7936  *
7937  * To avoid such behavior, speculatively forget (widen) the range of
7938  * imprecise scalar registers, if those registers were not precise at the
7939  * end of the previous iteration and do not match exactly.
7940  *
7941  * This is a conservative heuristic that allows a wide range of programs
7942  * to be verified; however, it precludes verification of programs that
7943  * conjure an imprecise value on the first loop iteration and use it as precise on the second.
7944  * For example, the following safe program would fail to verify:
7945  *
7946  *     struct bpf_num_iter it;
7947  *     int arr[10];
7948  *     int i = 0, a = 0;
7949  *     bpf_iter_num_new(&it, 0, 10);
7950  *     while (bpf_iter_num_next(&it)) {
7951  *       if (a == 0) {
7952  *         a = 1;
7953  *         i = 7; // Because i changed, the verifier would forget
7954  *                // its range on second loop entry.
7955  *       } else {
7956  *         arr[i] = 42; // This would fail to verify.
7957  *       }
7958  *     }
7959  *     bpf_iter_num_destroy(&it);
7960  */
7961 static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
7962 				  struct bpf_kfunc_call_arg_meta *meta)
7963 {
7964 	struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
7965 	struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
7966 	struct bpf_reg_state *cur_iter, *queued_iter;
7967 	int iter_frameno = meta->iter.frameno;
7968 	int iter_spi = meta->iter.spi;
7969 
7970 	BTF_TYPE_EMIT(struct bpf_iter);
7971 
7972 	cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
7973 
7974 	if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
7975 	    cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
7976 		verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n",
7977 			cur_iter->iter.state, iter_state_str(cur_iter->iter.state));
7978 		return -EFAULT;
7979 	}
7980 
7981 	if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
7982 		/* Because an iter_next() call is a checkpoint, is_state_visited()
7983 		 * should guarantee a parent state with the same call sites and insn_idx.
7984 		 */
7985 		if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx ||
7986 		    !same_callsites(cur_st->parent, cur_st)) {
7987 			verbose(env, "bug: bad parent state for iter next call\n");
7988 			return -EFAULT;
7989 		}
7990 		/* Note cur_st->parent in the call below, it is necessary to skip
7991 		 * checkpoint created for cur_st by is_state_visited()
7992 		 * right at this instruction.
7993 		 */
7994 		prev_st = find_prev_entry(env, cur_st->parent, insn_idx);
7995 		/* branch out active iter state */
7996 		queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
7997 		if (!queued_st)
7998 			return -ENOMEM;
7999 
8000 		queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
8001 		queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
8002 		queued_iter->iter.depth++;
8003 		if (prev_st)
8004 			widen_imprecise_scalars(env, prev_st, queued_st);
8005 
8006 		queued_fr = queued_st->frame[queued_st->curframe];
8007 		mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]);
8008 	}
8009 
8010 	/* mark current iter state as DRAINED (i.e., assume iter_next() returned
8011 	 * NULL), but keep the iteration depth unchanged */
8012 	cur_iter->iter.state = BPF_ITER_STATE_DRAINED;
8013 	__mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]);
8014 
8015 	return 0;
8016 }
8017 
8018 static bool arg_type_is_mem_size(enum bpf_arg_type type)
8019 {
8020 	return type == ARG_CONST_SIZE ||
8021 	       type == ARG_CONST_SIZE_OR_ZERO;
8022 }
8023 
8024 static bool arg_type_is_release(enum bpf_arg_type type)
8025 {
8026 	return type & OBJ_RELEASE;
8027 }
8028 
8029 static bool arg_type_is_dynptr(enum bpf_arg_type type)
8030 {
8031 	return base_type(type) == ARG_PTR_TO_DYNPTR;
8032 }
8033 
8034 static int int_ptr_type_to_size(enum bpf_arg_type type)
8035 {
8036 	if (type == ARG_PTR_TO_INT)
8037 		return sizeof(u32);
8038 	else if (type == ARG_PTR_TO_LONG)
8039 		return sizeof(u64);
8040 
8041 	return -EINVAL;
8042 }
8043 
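/* Some helpers accept ARG_PTR_TO_MAP_VALUE arguments whose real expectations
 * depend on the map type they operate on (e.g. sockmap/sockhash values are
 * sockets), so rewrite the expected arg type based on meta->map_ptr.
 */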
8044 static int resolve_map_arg_type(struct bpf_verifier_env *env,
8045 				 const struct bpf_call_arg_meta *meta,
8046 				 enum bpf_arg_type *arg_type)
8047 {
8048 	if (!meta->map_ptr) {
8049 		/* kernel subsystem misconfigured verifier */
8050 		verbose(env, "invalid map_ptr to access map->type\n");
8051 		return -EACCES;
8052 	}
8053 
8054 	switch (meta->map_ptr->map_type) {
8055 	case BPF_MAP_TYPE_SOCKMAP:
8056 	case BPF_MAP_TYPE_SOCKHASH:
8057 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
8058 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
8059 		} else {
8060 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
8061 			return -EINVAL;
8062 		}
8063 		break;
8064 	case BPF_MAP_TYPE_BLOOM_FILTER:
8065 		if (meta->func_id == BPF_FUNC_map_peek_elem)
8066 			*arg_type = ARG_PTR_TO_MAP_VALUE;
8067 		break;
8068 	default:
8069 		break;
8070 	}
8071 	return 0;
8072 }
8073 
8074 struct bpf_reg_types {
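/* Per-arg-type tables of acceptable register types, consulted by
 * check_reg_type() below. Type-flag bits (MEM_RDONLY, PTR_MAYBE_NULL, ...)
 * are folded off the register type before matching, and a zero (NOT_INIT)
 * entry terminates lists shorter than the array.
 */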
8075 	const enum bpf_reg_type types[10];
8076 	u32 *btf_id;
8077 };
8078 
8079 static const struct bpf_reg_types sock_types = {
8080 	.types = {
8081 		PTR_TO_SOCK_COMMON,
8082 		PTR_TO_SOCKET,
8083 		PTR_TO_TCP_SOCK,
8084 		PTR_TO_XDP_SOCK,
8085 	},
8086 };
8087 
8088 #ifdef CONFIG_NET
8089 static const struct bpf_reg_types btf_id_sock_common_types = {
8090 	.types = {
8091 		PTR_TO_SOCK_COMMON,
8092 		PTR_TO_SOCKET,
8093 		PTR_TO_TCP_SOCK,
8094 		PTR_TO_XDP_SOCK,
8095 		PTR_TO_BTF_ID,
8096 		PTR_TO_BTF_ID | PTR_TRUSTED,
8097 	},
8098 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
8099 };
8100 #endif
8101 
8102 static const struct bpf_reg_types mem_types = {
8103 	.types = {
8104 		PTR_TO_STACK,
8105 		PTR_TO_PACKET,
8106 		PTR_TO_PACKET_META,
8107 		PTR_TO_MAP_KEY,
8108 		PTR_TO_MAP_VALUE,
8109 		PTR_TO_MEM,
8110 		PTR_TO_MEM | MEM_RINGBUF,
8111 		PTR_TO_BUF,
8112 		PTR_TO_BTF_ID | PTR_TRUSTED,
8113 	},
8114 };
8115 
8116 static const struct bpf_reg_types int_ptr_types = {
8117 	.types = {
8118 		PTR_TO_STACK,
8119 		PTR_TO_PACKET,
8120 		PTR_TO_PACKET_META,
8121 		PTR_TO_MAP_KEY,
8122 		PTR_TO_MAP_VALUE,
8123 	},
8124 };
8125 
8126 static const struct bpf_reg_types spin_lock_types = {
8127 	.types = {
8128 		PTR_TO_MAP_VALUE,
8129 		PTR_TO_BTF_ID | MEM_ALLOC,
8130 	}
8131 };
8132 
8133 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
8134 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
8135 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
8136 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
8137 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
8138 static const struct bpf_reg_types btf_ptr_types = {
8139 	.types = {
8140 		PTR_TO_BTF_ID,
8141 		PTR_TO_BTF_ID | PTR_TRUSTED,
8142 		PTR_TO_BTF_ID | MEM_RCU,
8143 	},
8144 };
8145 static const struct bpf_reg_types percpu_btf_ptr_types = {
8146 	.types = {
8147 		PTR_TO_BTF_ID | MEM_PERCPU,
8148 		PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU,
8149 		PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
8150 	}
8151 };
8152 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
8153 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
8154 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
8155 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
8156 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
8157 static const struct bpf_reg_types dynptr_types = {
8158 	.types = {
8159 		PTR_TO_STACK,
8160 		CONST_PTR_TO_DYNPTR,
8161 	}
8162 };
8163 
8164 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
8165 	[ARG_PTR_TO_MAP_KEY]		= &mem_types,
8166 	[ARG_PTR_TO_MAP_VALUE]		= &mem_types,
8167 	[ARG_CONST_SIZE]		= &scalar_types,
8168 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
8169 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
8170 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
8171 	[ARG_PTR_TO_CTX]		= &context_types,
8172 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
8173 #ifdef CONFIG_NET
8174 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
8175 #endif
8176 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
8177 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
8178 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
8179 	[ARG_PTR_TO_MEM]		= &mem_types,
8180 	[ARG_PTR_TO_RINGBUF_MEM]	= &ringbuf_mem_types,
8181 	[ARG_PTR_TO_INT]		= &int_ptr_types,
8182 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
8183 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
8184 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
8185 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
8186 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
8187 	[ARG_PTR_TO_TIMER]		= &timer_types,
8188 	[ARG_PTR_TO_KPTR]		= &kptr_types,
8189 	[ARG_PTR_TO_DYNPTR]		= &dynptr_types,
8190 };
8191 
8192 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
8193 			  enum bpf_arg_type arg_type,
8194 			  const u32 *arg_btf_id,
8195 			  struct bpf_call_arg_meta *meta)
8196 {
8197 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
8198 	enum bpf_reg_type expected, type = reg->type;
8199 	const struct bpf_reg_types *compatible;
8200 	int i, j;
8201 
8202 	compatible = compatible_reg_types[base_type(arg_type)];
8203 	if (!compatible) {
8204 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
8205 		return -EFAULT;
8206 	}
8207 
8208 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
8209 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
8210 	 *
8211 	 * Same for MAYBE_NULL:
8212 	 *
8213 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
8214 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + MAYBE_NULL
8215 	 *
8216 	 * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type.
8217 	 *
8218 	 * Therefore we fold these flags depending on the arg_type before comparison.
8219 	 */
8220 	if (arg_type & MEM_RDONLY)
8221 		type &= ~MEM_RDONLY;
8222 	if (arg_type & PTR_MAYBE_NULL)
8223 		type &= ~PTR_MAYBE_NULL;
8224 	if (base_type(arg_type) == ARG_PTR_TO_MEM)
8225 		type &= ~DYNPTR_TYPE_FLAG_MASK;
8226 
8227 	if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) {
8228 		type &= ~MEM_ALLOC;
8229 		type &= ~MEM_PERCPU;
8230 	}
8231 
8232 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
8233 		expected = compatible->types[i];
8234 		if (expected == NOT_INIT)
8235 			break;
8236 
8237 		if (type == expected)
8238 			goto found;
8239 	}
8240 
8241 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
8242 	for (j = 0; j + 1 < i; j++)
8243 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
8244 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
8245 	return -EACCES;
8246 
8247 found:
8248 	if (base_type(reg->type) != PTR_TO_BTF_ID)
8249 		return 0;
8250 
8251 	if (compatible == &mem_types) {
8252 		if (!(arg_type & MEM_RDONLY)) {
8253 			verbose(env,
8254 				"%s() may write into memory pointed by R%d type=%s\n",
8255 				func_id_name(meta->func_id),
8256 				regno, reg_type_str(env, reg->type));
8257 			return -EACCES;
8258 		}
8259 		return 0;
8260 	}
8261 
8262 	switch ((int)reg->type) {
8263 	case PTR_TO_BTF_ID:
8264 	case PTR_TO_BTF_ID | PTR_TRUSTED:
8265 	case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
8266 	case PTR_TO_BTF_ID | MEM_RCU:
8267 	case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
8268 	case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
8269 	{
8270 		/* For bpf_sk_release, it needs to match against first member
8271 		 * 'struct sock_common', hence make an exception for it. This
8272 		 * allows bpf_sk_release to work for multiple socket types.
8273 		 */
8274 		bool strict_type_match = arg_type_is_release(arg_type) &&
8275 					 meta->func_id != BPF_FUNC_sk_release;
8276 
8277 		if (type_may_be_null(reg->type) &&
8278 		    (!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) {
8279 			verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno);
8280 			return -EACCES;
8281 		}
8282 
8283 		if (!arg_btf_id) {
8284 			if (!compatible->btf_id) {
8285 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
8286 				return -EFAULT;
8287 			}
8288 			arg_btf_id = compatible->btf_id;
8289 		}
8290 
8291 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
8292 			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
8293 				return -EACCES;
8294 		} else {
8295 			if (arg_btf_id == BPF_PTR_POISON) {
8296 				verbose(env,
8297 					"verifier internal error: R%d has non-overwritten BPF_PTR_POISON type\n",
8298 					regno);
8299 				return -EACCES;
8300 			}
8301 
8302 			if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
8303 						  btf_vmlinux, *arg_btf_id,
8304 						  strict_type_match)) {
8305 				verbose(env, "R%d is of type %s but %s is expected\n",
8306 					regno, btf_type_name(reg->btf, reg->btf_id),
8307 					btf_type_name(btf_vmlinux, *arg_btf_id));
8308 				return -EACCES;
8309 			}
8310 		}
8311 		break;
8312 	}
8313 	case PTR_TO_BTF_ID | MEM_ALLOC:
8314 	case PTR_TO_BTF_ID | MEM_PERCPU | MEM_ALLOC:
8315 		if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
8316 		    meta->func_id != BPF_FUNC_kptr_xchg) {
8317 			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
8318 			return -EFAULT;
8319 		}
8320 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
8321 			if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
8322 				return -EACCES;
8323 		}
8324 		break;
8325 	case PTR_TO_BTF_ID | MEM_PERCPU:
8326 	case PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU:
8327 	case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
8328 		/* Handled by helper specific checks */
8329 		break;
8330 	default:
8331 		verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n");
8332 		return -EFAULT;
8333 	}
8334 	return 0;
8335 }
8336 
8337 static struct btf_field *
8338 reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
8339 {
8340 	struct btf_field *field;
8341 	struct btf_record *rec;
8342 
8343 	rec = reg_btf_record(reg);
8344 	if (!rec)
8345 		return NULL;
8346 
8347 	field = btf_record_find(rec, off, fields);
8348 	if (!field)
8349 		return NULL;
8350 
8351 	return field;
8352 }
8353 
8354 static int check_func_arg_reg_off(struct bpf_verifier_env *env,
8355 				  const struct bpf_reg_state *reg, int regno,
8356 				  enum bpf_arg_type arg_type)
8357 {
8358 	u32 type = reg->type;
8359 
8360 	/* When referenced register is passed to release function, its fixed
8361 	 * offset must be 0.
8362 	 *
8363 	 * We will check that a reg matching arg_type_is_release() has
8364 	 * ref_obj_id when storing meta->release_regno.
8365 	 */
8366 	if (arg_type_is_release(arg_type)) {
8367 		/* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
8368 		 * may not directly point to the object being released, but to
8369 		 * a dynptr pointing to such an object, which might be at some offset
8370 		 * on the stack. In that case, we simply fall back to the
8371 		 * default handling.
8372 		 */
8373 		if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
8374 			return 0;
8375 
8376 		/* The check_ptr_off_reg() check on the offset would catch this
8377 		 * because fixed_off_ok is false, but checking here allows us
8378 		 * to give the user a better error message.
8379 		 */
8380 		if (reg->off) {
8381 			verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
8382 				regno);
8383 			return -EINVAL;
8384 		}
8385 		return __check_ptr_off_reg(env, reg, regno, false);
8386 	}
8387 
8388 	switch (type) {
8389 	/* Pointer types where both fixed and variable offset is explicitly allowed: */
8390 	case PTR_TO_STACK:
8391 	case PTR_TO_PACKET:
8392 	case PTR_TO_PACKET_META:
8393 	case PTR_TO_MAP_KEY:
8394 	case PTR_TO_MAP_VALUE:
8395 	case PTR_TO_MEM:
8396 	case PTR_TO_MEM | MEM_RDONLY:
8397 	case PTR_TO_MEM | MEM_RINGBUF:
8398 	case PTR_TO_BUF:
8399 	case PTR_TO_BUF | MEM_RDONLY:
8400 	case SCALAR_VALUE:
8401 		return 0;
8402 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
8403 	 * fixed offset.
8404 	 */
8405 	case PTR_TO_BTF_ID:
8406 	case PTR_TO_BTF_ID | MEM_ALLOC:
8407 	case PTR_TO_BTF_ID | PTR_TRUSTED:
8408 	case PTR_TO_BTF_ID | MEM_RCU:
8409 	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
8410 	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU:
8411 		/* When referenced PTR_TO_BTF_ID is passed to release function,
8412 		 * its fixed offset must be 0. In the other cases, fixed offset
8413 		 * can be non-zero. This was already checked above. So pass
8414 		 * fixed_off_ok as true to allow fixed offset for all other
8415 		 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
8416 		 * still need to do checks instead of returning.
8417 		 */
8418 		return __check_ptr_off_reg(env, reg, regno, true);
8419 	default:
8420 		return __check_ptr_off_reg(env, reg, regno, false);
8421 	}
8422 }
8423 
8424 static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
8425 						const struct bpf_func_proto *fn,
8426 						struct bpf_reg_state *regs)
8427 {
8428 	struct bpf_reg_state *state = NULL;
8429 	int i;
8430 
8431 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
8432 		if (arg_type_is_dynptr(fn->arg_type[i])) {
8433 			if (state) {
8434 				verbose(env, "verifier internal error: multiple dynptr args\n");
8435 				return NULL;
8436 			}
8437 			state = &regs[BPF_REG_1 + i];
8438 		}
8439 
8440 	if (!state)
8441 		verbose(env, "verifier internal error: no dynptr arg found\n");
8442 
8443 	return state;
8444 }
8445 
8446 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
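/* For a dynptr that lives on the stack, its id/ref_obj_id/type are stored in
 * the spilled_ptr of the corresponding stack slot; a CONST_PTR_TO_DYNPTR
 * register carries them directly. The accessors below hide this distinction.
 */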
8447 {
8448 	struct bpf_func_state *state = func(env, reg);
8449 	int spi;
8450 
8451 	if (reg->type == CONST_PTR_TO_DYNPTR)
8452 		return reg->id;
8453 	spi = dynptr_get_spi(env, reg);
8454 	if (spi < 0)
8455 		return spi;
8456 	return state->stack[spi].spilled_ptr.id;
8457 }
8458 
8459 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
8460 {
8461 	struct bpf_func_state *state = func(env, reg);
8462 	int spi;
8463 
8464 	if (reg->type == CONST_PTR_TO_DYNPTR)
8465 		return reg->ref_obj_id;
8466 	spi = dynptr_get_spi(env, reg);
8467 	if (spi < 0)
8468 		return spi;
8469 	return state->stack[spi].spilled_ptr.ref_obj_id;
8470 }
8471 
8472 static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
8473 					    struct bpf_reg_state *reg)
8474 {
8475 	struct bpf_func_state *state = func(env, reg);
8476 	int spi;
8477 
8478 	if (reg->type == CONST_PTR_TO_DYNPTR)
8479 		return reg->dynptr.type;
8480 
8481 	spi = __get_spi(reg->off);
8482 	if (spi < 0) {
8483 		verbose(env, "verifier internal error: invalid spi when querying dynptr type\n");
8484 		return BPF_DYNPTR_TYPE_INVALID;
8485 	}
8486 
8487 	return state->stack[spi].spilled_ptr.dynptr.type;
8488 }
8489 
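/* Validate an ARG_PTR_TO_CONST_STR argument, e.g. the format string of
 * bpf_snprintf(): it must live in a read-only map, at an offset known at
 * verification time, and be NUL-terminated within the map value, so that
 * the verifier can safely read the string itself.
 */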
8490 static int check_reg_const_str(struct bpf_verifier_env *env,
8491 			       struct bpf_reg_state *reg, u32 regno)
8492 {
8493 	struct bpf_map *map = reg->map_ptr;
8494 	int err;
8495 	int map_off;
8496 	u64 map_addr;
8497 	char *str_ptr;
8498 
8499 	if (reg->type != PTR_TO_MAP_VALUE)
8500 		return -EINVAL;
8501 
8502 	if (!bpf_map_is_rdonly(map)) {
8503 		verbose(env, "R%d does not point to a readonly map\n", regno);
8504 		return -EACCES;
8505 	}
8506 
8507 	if (!tnum_is_const(reg->var_off)) {
8508 		verbose(env, "R%d is not a constant address\n", regno);
8509 		return -EACCES;
8510 	}
8511 
8512 	if (!map->ops->map_direct_value_addr) {
8513 		verbose(env, "no direct value access support for this map type\n");
8514 		return -EACCES;
8515 	}
8516 
8517 	err = check_map_access(env, regno, reg->off,
8518 			       map->value_size - reg->off, false,
8519 			       ACCESS_HELPER);
8520 	if (err)
8521 		return err;
8522 
8523 	map_off = reg->off + reg->var_off.value;
8524 	err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
8525 	if (err) {
8526 		verbose(env, "direct value access on string failed\n");
8527 		return err;
8528 	}
8529 
8530 	str_ptr = (char *)(long)(map_addr);
8531 	if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
8532 		verbose(env, "string is not zero-terminated\n");
8533 		return -EINVAL;
8534 	}
8535 	return 0;
8536 }
8537 
8538 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
8539 			  struct bpf_call_arg_meta *meta,
8540 			  const struct bpf_func_proto *fn,
8541 			  int insn_idx)
8542 {
8543 	u32 regno = BPF_REG_1 + arg;
8544 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
8545 	enum bpf_arg_type arg_type = fn->arg_type[arg];
8546 	enum bpf_reg_type type = reg->type;
8547 	u32 *arg_btf_id = NULL;
8548 	int err = 0;
8549 
8550 	if (arg_type == ARG_DONTCARE)
8551 		return 0;
8552 
8553 	err = check_reg_arg(env, regno, SRC_OP);
8554 	if (err)
8555 		return err;
8556 
8557 	if (arg_type == ARG_ANYTHING) {
8558 		if (is_pointer_value(env, regno)) {
8559 			verbose(env, "R%d leaks addr into helper function\n",
8560 				regno);
8561 			return -EACCES;
8562 		}
8563 		return 0;
8564 	}
8565 
8566 	if (type_is_pkt_pointer(type) &&
8567 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
8568 		verbose(env, "helper access to the packet is not allowed\n");
8569 		return -EACCES;
8570 	}
8571 
8572 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
8573 		err = resolve_map_arg_type(env, meta, &arg_type);
8574 		if (err)
8575 			return err;
8576 	}
8577 
8578 	if (register_is_null(reg) && type_may_be_null(arg_type))
8579 		/* A NULL register has a SCALAR_VALUE type, so skip
8580 		 * type checking.
8581 		 */
8582 		goto skip_type_check;
8583 
8584 	/* arg_btf_id and arg_size are in a union. */
8585 	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
8586 	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
8587 		arg_btf_id = fn->arg_btf_id[arg];
8588 
8589 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
8590 	if (err)
8591 		return err;
8592 
8593 	err = check_func_arg_reg_off(env, reg, regno, arg_type);
8594 	if (err)
8595 		return err;
8596 
8597 skip_type_check:
8598 	if (arg_type_is_release(arg_type)) {
8599 		if (arg_type_is_dynptr(arg_type)) {
8600 			struct bpf_func_state *state = func(env, reg);
8601 			int spi;
8602 
8603 			/* Only dynptr created on stack can be released, thus
8604 			 * the get_spi and stack state checks for spilled_ptr
8605 			 * should only be done before process_dynptr_func for
8606 			 * PTR_TO_STACK.
8607 			 */
8608 			if (reg->type == PTR_TO_STACK) {
8609 				spi = dynptr_get_spi(env, reg);
8610 				if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
8611 					verbose(env, "arg %d is an unacquired reference\n", regno);
8612 					return -EINVAL;
8613 				}
8614 			} else {
8615 				verbose(env, "cannot release unowned const bpf_dynptr\n");
8616 				return -EINVAL;
8617 			}
8618 		} else if (!reg->ref_obj_id && !register_is_null(reg)) {
8619 			verbose(env, "R%d must be referenced when passed to release function\n",
8620 				regno);
8621 			return -EINVAL;
8622 		}
8623 		if (meta->release_regno) {
8624 			verbose(env, "verifier internal error: more than one release argument\n");
8625 			return -EFAULT;
8626 		}
8627 		meta->release_regno = regno;
8628 	}
8629 
8630 	if (reg->ref_obj_id) {
8631 		if (meta->ref_obj_id) {
8632 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
8633 				regno, reg->ref_obj_id,
8634 				meta->ref_obj_id);
8635 			return -EFAULT;
8636 		}
8637 		meta->ref_obj_id = reg->ref_obj_id;
8638 	}
8639 
8640 	switch (base_type(arg_type)) {
8641 	case ARG_CONST_MAP_PTR:
8642 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
8643 		if (meta->map_ptr) {
8644 			/* Use map_uid (which is unique id of inner map) to reject:
8645 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
8646 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
8647 			 * if (inner_map1 && inner_map2) {
8648 			 *     timer = bpf_map_lookup_elem(inner_map1);
8649 			 *     if (timer)
8650 			 *         // mismatch would have been allowed
8651 			 *         bpf_timer_init(timer, inner_map2);
8652 			 * }
8653 			 *
8654 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
8655 			 */
8656 			if (meta->map_ptr != reg->map_ptr ||
8657 			    meta->map_uid != reg->map_uid) {
8658 				verbose(env,
8659 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
8660 					meta->map_uid, reg->map_uid);
8661 				return -EINVAL;
8662 			}
8663 		}
8664 		meta->map_ptr = reg->map_ptr;
8665 		meta->map_uid = reg->map_uid;
8666 		break;
8667 	case ARG_PTR_TO_MAP_KEY:
8668 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
8669 		 * check that [key, key + map->key_size) are within
8670 		 * stack limits and initialized
8671 		 */
8672 		if (!meta->map_ptr) {
8673 			/* In the function declaration map_ptr must come before
8674 			 * map_key, so that it's verified and known before
8675 			 * we have to check map_key here. Otherwise it means
8676 			 * that the kernel subsystem misconfigured the verifier.
8677 			 */
8678 			verbose(env, "invalid map_ptr to access map->key\n");
8679 			return -EACCES;
8680 		}
8681 		err = check_helper_mem_access(env, regno,
8682 					      meta->map_ptr->key_size, false,
8683 					      NULL);
8684 		break;
8685 	case ARG_PTR_TO_MAP_VALUE:
8686 		if (type_may_be_null(arg_type) && register_is_null(reg))
8687 			return 0;
8688 
8689 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
8690 		 * check [value, value + map->value_size) validity
8691 		 */
8692 		if (!meta->map_ptr) {
8693 			/* kernel subsystem misconfigured verifier */
8694 			verbose(env, "invalid map_ptr to access map->value\n");
8695 			return -EACCES;
8696 		}
8697 		meta->raw_mode = arg_type & MEM_UNINIT;
8698 		err = check_helper_mem_access(env, regno,
8699 					      meta->map_ptr->value_size, false,
8700 					      meta);
8701 		break;
8702 	case ARG_PTR_TO_PERCPU_BTF_ID:
8703 		if (!reg->btf_id) {
8704 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
8705 			return -EACCES;
8706 		}
8707 		meta->ret_btf = reg->btf;
8708 		meta->ret_btf_id = reg->btf_id;
8709 		break;
8710 	case ARG_PTR_TO_SPIN_LOCK:
8711 		if (in_rbtree_lock_required_cb(env)) {
8712 			verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
8713 			return -EACCES;
8714 		}
8715 		if (meta->func_id == BPF_FUNC_spin_lock) {
8716 			err = process_spin_lock(env, regno, true);
8717 			if (err)
8718 				return err;
8719 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
8720 			err = process_spin_lock(env, regno, false);
8721 			if (err)
8722 				return err;
8723 		} else {
8724 			verbose(env, "verifier internal error\n");
8725 			return -EFAULT;
8726 		}
8727 		break;
8728 	case ARG_PTR_TO_TIMER:
8729 		err = process_timer_func(env, regno, meta);
8730 		if (err)
8731 			return err;
8732 		break;
8733 	case ARG_PTR_TO_FUNC:
8734 		meta->subprogno = reg->subprogno;
8735 		break;
8736 	case ARG_PTR_TO_MEM:
8737 		/* The access to this pointer is only checked when we hit the
8738 		 * next is_mem_size argument below.
8739 		 */
8740 		meta->raw_mode = arg_type & MEM_UNINIT;
8741 		if (arg_type & MEM_FIXED_SIZE) {
8742 			err = check_helper_mem_access(env, regno,
8743 						      fn->arg_size[arg], false,
8744 						      meta);
8745 		}
8746 		break;
8747 	case ARG_CONST_SIZE:
8748 		err = check_mem_size_reg(env, reg, regno, false, meta);
8749 		break;
8750 	case ARG_CONST_SIZE_OR_ZERO:
8751 		err = check_mem_size_reg(env, reg, regno, true, meta);
8752 		break;
8753 	case ARG_PTR_TO_DYNPTR:
8754 		err = process_dynptr_func(env, regno, insn_idx, arg_type, 0);
8755 		if (err)
8756 			return err;
8757 		break;
8758 	case ARG_CONST_ALLOC_SIZE_OR_ZERO:
8759 		if (!tnum_is_const(reg->var_off)) {
8760 			verbose(env, "R%d is not a known constant\n",
8761 				regno);
8762 			return -EACCES;
8763 		}
8764 		meta->mem_size = reg->var_off.value;
8765 		err = mark_chain_precision(env, regno);
8766 		if (err)
8767 			return err;
8768 		break;
8769 	case ARG_PTR_TO_INT:
8770 	case ARG_PTR_TO_LONG:
8771 	{
8772 		int size = int_ptr_type_to_size(arg_type);
8773 
8774 		err = check_helper_mem_access(env, regno, size, false, meta);
8775 		if (err)
8776 			return err;
8777 		err = check_ptr_alignment(env, reg, 0, size, true);
8778 		break;
8779 	}
8780 	case ARG_PTR_TO_CONST_STR:
8781 	{
8782 		err = check_reg_const_str(env, reg, regno);
8783 		if (err)
8784 			return err;
8785 		break;
8786 	}
8787 	case ARG_PTR_TO_KPTR:
8788 		err = process_kptr_func(env, regno, meta);
8789 		if (err)
8790 			return err;
8791 		break;
8792 	}
8793 
8794 	return err;
8795 }
8796 
8797 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
8798 {
8799 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
8800 	enum bpf_prog_type type = resolve_prog_type(env->prog);
8801 
8802 	if (func_id != BPF_FUNC_map_update_elem)
8803 		return false;
8804 
8805 	/* It's not possible to get access to a locked struct sock in these
8806 	 * contexts, so updating is safe.
8807 	 */
8808 	switch (type) {
8809 	case BPF_PROG_TYPE_TRACING:
8810 		if (eatype == BPF_TRACE_ITER)
8811 			return true;
8812 		break;
8813 	case BPF_PROG_TYPE_SOCKET_FILTER:
8814 	case BPF_PROG_TYPE_SCHED_CLS:
8815 	case BPF_PROG_TYPE_SCHED_ACT:
8816 	case BPF_PROG_TYPE_XDP:
8817 	case BPF_PROG_TYPE_SK_REUSEPORT:
8818 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
8819 	case BPF_PROG_TYPE_SK_LOOKUP:
8820 		return true;
8821 	default:
8822 		break;
8823 	}
8824 
8825 	verbose(env, "cannot update sockmap in this context\n");
8826 	return false;
8827 }
8828 
8829 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
8830 {
8831 	return env->prog->jit_requested &&
8832 	       bpf_jit_supports_subprog_tailcalls();
8833 }
8834 
8835 static int check_map_func_compatibility(struct bpf_verifier_env *env,
8836 					struct bpf_map *map, int func_id)
8837 {
8838 	if (!map)
8839 		return 0;
8840 
8841 	/* We need a two way check, first is from map perspective ... */
8842 	switch (map->map_type) {
8843 	case BPF_MAP_TYPE_PROG_ARRAY:
8844 		if (func_id != BPF_FUNC_tail_call)
8845 			goto error;
8846 		break;
8847 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
8848 		if (func_id != BPF_FUNC_perf_event_read &&
8849 		    func_id != BPF_FUNC_perf_event_output &&
8850 		    func_id != BPF_FUNC_skb_output &&
8851 		    func_id != BPF_FUNC_perf_event_read_value &&
8852 		    func_id != BPF_FUNC_xdp_output)
8853 			goto error;
8854 		break;
8855 	case BPF_MAP_TYPE_RINGBUF:
8856 		if (func_id != BPF_FUNC_ringbuf_output &&
8857 		    func_id != BPF_FUNC_ringbuf_reserve &&
8858 		    func_id != BPF_FUNC_ringbuf_query &&
8859 		    func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
8860 		    func_id != BPF_FUNC_ringbuf_submit_dynptr &&
8861 		    func_id != BPF_FUNC_ringbuf_discard_dynptr)
8862 			goto error;
8863 		break;
8864 	case BPF_MAP_TYPE_USER_RINGBUF:
8865 		if (func_id != BPF_FUNC_user_ringbuf_drain)
8866 			goto error;
8867 		break;
8868 	case BPF_MAP_TYPE_STACK_TRACE:
8869 		if (func_id != BPF_FUNC_get_stackid)
8870 			goto error;
8871 		break;
8872 	case BPF_MAP_TYPE_CGROUP_ARRAY:
8873 		if (func_id != BPF_FUNC_skb_under_cgroup &&
8874 		    func_id != BPF_FUNC_current_task_under_cgroup)
8875 			goto error;
8876 		break;
8877 	case BPF_MAP_TYPE_CGROUP_STORAGE:
8878 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
8879 		if (func_id != BPF_FUNC_get_local_storage)
8880 			goto error;
8881 		break;
8882 	case BPF_MAP_TYPE_DEVMAP:
8883 	case BPF_MAP_TYPE_DEVMAP_HASH:
8884 		if (func_id != BPF_FUNC_redirect_map &&
8885 		    func_id != BPF_FUNC_map_lookup_elem)
8886 			goto error;
8887 		break;
8888 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
8889 	 * appear.
8890 	 */
8891 	case BPF_MAP_TYPE_CPUMAP:
8892 		if (func_id != BPF_FUNC_redirect_map)
8893 			goto error;
8894 		break;
8895 	case BPF_MAP_TYPE_XSKMAP:
8896 		if (func_id != BPF_FUNC_redirect_map &&
8897 		    func_id != BPF_FUNC_map_lookup_elem)
8898 			goto error;
8899 		break;
8900 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
8901 	case BPF_MAP_TYPE_HASH_OF_MAPS:
8902 		if (func_id != BPF_FUNC_map_lookup_elem)
8903 			goto error;
8904 		break;
8905 	case BPF_MAP_TYPE_SOCKMAP:
8906 		if (func_id != BPF_FUNC_sk_redirect_map &&
8907 		    func_id != BPF_FUNC_sock_map_update &&
8908 		    func_id != BPF_FUNC_map_delete_elem &&
8909 		    func_id != BPF_FUNC_msg_redirect_map &&
8910 		    func_id != BPF_FUNC_sk_select_reuseport &&
8911 		    func_id != BPF_FUNC_map_lookup_elem &&
8912 		    !may_update_sockmap(env, func_id))
8913 			goto error;
8914 		break;
8915 	case BPF_MAP_TYPE_SOCKHASH:
8916 		if (func_id != BPF_FUNC_sk_redirect_hash &&
8917 		    func_id != BPF_FUNC_sock_hash_update &&
8918 		    func_id != BPF_FUNC_map_delete_elem &&
8919 		    func_id != BPF_FUNC_msg_redirect_hash &&
8920 		    func_id != BPF_FUNC_sk_select_reuseport &&
8921 		    func_id != BPF_FUNC_map_lookup_elem &&
8922 		    !may_update_sockmap(env, func_id))
8923 			goto error;
8924 		break;
8925 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
8926 		if (func_id != BPF_FUNC_sk_select_reuseport)
8927 			goto error;
8928 		break;
8929 	case BPF_MAP_TYPE_QUEUE:
8930 	case BPF_MAP_TYPE_STACK:
8931 		if (func_id != BPF_FUNC_map_peek_elem &&
8932 		    func_id != BPF_FUNC_map_pop_elem &&
8933 		    func_id != BPF_FUNC_map_push_elem)
8934 			goto error;
8935 		break;
8936 	case BPF_MAP_TYPE_SK_STORAGE:
8937 		if (func_id != BPF_FUNC_sk_storage_get &&
8938 		    func_id != BPF_FUNC_sk_storage_delete &&
8939 		    func_id != BPF_FUNC_kptr_xchg)
8940 			goto error;
8941 		break;
8942 	case BPF_MAP_TYPE_INODE_STORAGE:
8943 		if (func_id != BPF_FUNC_inode_storage_get &&
8944 		    func_id != BPF_FUNC_inode_storage_delete &&
8945 		    func_id != BPF_FUNC_kptr_xchg)
8946 			goto error;
8947 		break;
8948 	case BPF_MAP_TYPE_TASK_STORAGE:
8949 		if (func_id != BPF_FUNC_task_storage_get &&
8950 		    func_id != BPF_FUNC_task_storage_delete &&
8951 		    func_id != BPF_FUNC_kptr_xchg)
8952 			goto error;
8953 		break;
8954 	case BPF_MAP_TYPE_CGRP_STORAGE:
8955 		if (func_id != BPF_FUNC_cgrp_storage_get &&
8956 		    func_id != BPF_FUNC_cgrp_storage_delete &&
8957 		    func_id != BPF_FUNC_kptr_xchg)
8958 			goto error;
8959 		break;
8960 	case BPF_MAP_TYPE_BLOOM_FILTER:
8961 		if (func_id != BPF_FUNC_map_peek_elem &&
8962 		    func_id != BPF_FUNC_map_push_elem)
8963 			goto error;
8964 		break;
8965 	default:
8966 		break;
8967 	}
8968 
8969 	/* ... and second from the function itself. */
8970 	switch (func_id) {
8971 	case BPF_FUNC_tail_call:
8972 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
8973 			goto error;
8974 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
8975 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
8976 			return -EINVAL;
8977 		}
8978 		break;
8979 	case BPF_FUNC_perf_event_read:
8980 	case BPF_FUNC_perf_event_output:
8981 	case BPF_FUNC_perf_event_read_value:
8982 	case BPF_FUNC_skb_output:
8983 	case BPF_FUNC_xdp_output:
8984 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
8985 			goto error;
8986 		break;
8987 	case BPF_FUNC_ringbuf_output:
8988 	case BPF_FUNC_ringbuf_reserve:
8989 	case BPF_FUNC_ringbuf_query:
8990 	case BPF_FUNC_ringbuf_reserve_dynptr:
8991 	case BPF_FUNC_ringbuf_submit_dynptr:
8992 	case BPF_FUNC_ringbuf_discard_dynptr:
8993 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
8994 			goto error;
8995 		break;
8996 	case BPF_FUNC_user_ringbuf_drain:
8997 		if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
8998 			goto error;
8999 		break;
9000 	case BPF_FUNC_get_stackid:
9001 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
9002 			goto error;
9003 		break;
9004 	case BPF_FUNC_current_task_under_cgroup:
9005 	case BPF_FUNC_skb_under_cgroup:
9006 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
9007 			goto error;
9008 		break;
9009 	case BPF_FUNC_redirect_map:
9010 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
9011 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
9012 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
9013 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
9014 			goto error;
9015 		break;
9016 	case BPF_FUNC_sk_redirect_map:
9017 	case BPF_FUNC_msg_redirect_map:
9018 	case BPF_FUNC_sock_map_update:
9019 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
9020 			goto error;
9021 		break;
9022 	case BPF_FUNC_sk_redirect_hash:
9023 	case BPF_FUNC_msg_redirect_hash:
9024 	case BPF_FUNC_sock_hash_update:
9025 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
9026 			goto error;
9027 		break;
9028 	case BPF_FUNC_get_local_storage:
9029 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
9030 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
9031 			goto error;
9032 		break;
9033 	case BPF_FUNC_sk_select_reuseport:
9034 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
9035 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
9036 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
9037 			goto error;
9038 		break;
9039 	case BPF_FUNC_map_pop_elem:
9040 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
9041 		    map->map_type != BPF_MAP_TYPE_STACK)
9042 			goto error;
9043 		break;
9044 	case BPF_FUNC_map_peek_elem:
9045 	case BPF_FUNC_map_push_elem:
9046 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
9047 		    map->map_type != BPF_MAP_TYPE_STACK &&
9048 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
9049 			goto error;
9050 		break;
9051 	case BPF_FUNC_map_lookup_percpu_elem:
9052 		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
9053 		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
9054 		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
9055 			goto error;
9056 		break;
9057 	case BPF_FUNC_sk_storage_get:
9058 	case BPF_FUNC_sk_storage_delete:
9059 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
9060 			goto error;
9061 		break;
9062 	case BPF_FUNC_inode_storage_get:
9063 	case BPF_FUNC_inode_storage_delete:
9064 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
9065 			goto error;
9066 		break;
9067 	case BPF_FUNC_task_storage_get:
9068 	case BPF_FUNC_task_storage_delete:
9069 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
9070 			goto error;
9071 		break;
9072 	case BPF_FUNC_cgrp_storage_get:
9073 	case BPF_FUNC_cgrp_storage_delete:
9074 		if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
9075 			goto error;
9076 		break;
9077 	default:
9078 		break;
9079 	}
9080 
9081 	return 0;
9082 error:
9083 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
9084 		map->map_type, func_id_name(func_id), func_id);
9085 	return -EINVAL;
9086 }
9087 
9088 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
9089 {
9090 	int count = 0;
9091 
9092 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
9093 		count++;
9094 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
9095 		count++;
9096 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
9097 		count++;
9098 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
9099 		count++;
9100 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
9101 		count++;
9102 
9103 	/* We only support one arg being in raw mode at the moment,
9104 	 * which is sufficient for the helper functions we have
9105 	 * right now.
9106 	 */
9107 	return count <= 1;
9108 }
9109 
9110 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
9111 {
9112 	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
9113 	bool has_size = fn->arg_size[arg] != 0;
9114 	bool is_next_size = false;
9115 
9116 	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
9117 		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
9118 
9119 	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
9120 		return is_next_size;
9121 
9122 	return has_size == is_next_size || is_next_size == is_fixed;
9123 }
9124 
9125 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
9126 {
9127 	/* bpf_xxx(..., buf, len) call will access 'len'
9128 	 * bytes from memory 'buf'. Both arg types need
9129 	 * to be paired, so make sure there's no buggy
9130 	 * helper function specification.
9131 	 */
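	/* For example, bpf_probe_read_kernel(dst, size, ptr) declares arg1 as
	 * ARG_PTR_TO_UNINIT_MEM and arg2 as ARG_CONST_SIZE_OR_ZERO, so arg2
	 * serves as the mem-size bound for the buffer passed in arg1.
	 */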
9132 	if (arg_type_is_mem_size(fn->arg1_type) ||
9133 	    check_args_pair_invalid(fn, 0) ||
9134 	    check_args_pair_invalid(fn, 1) ||
9135 	    check_args_pair_invalid(fn, 2) ||
9136 	    check_args_pair_invalid(fn, 3) ||
9137 	    check_args_pair_invalid(fn, 4))
9138 		return false;
9139 
9140 	return true;
9141 }
9142 
9143 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
9144 {
9145 	int i;
9146 
9147 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
9148 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
9149 			return !!fn->arg_btf_id[i];
9150 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
9151 			return fn->arg_btf_id[i] == BPF_PTR_POISON;
9152 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
9153 		    /* arg_btf_id and arg_size are in a union. */
9154 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
9155 		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
9156 			return false;
9157 	}
9158 
9159 	return true;
9160 }
9161 
9162 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
9163 {
9164 	return check_raw_mode_ok(fn) &&
9165 	       check_arg_pair_ok(fn) &&
9166 	       check_btf_id_ok(fn) ? 0 : -EINVAL;
9167 }
9168 
9169 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
9170  * are now invalid, so turn them into unknown SCALAR_VALUE.
9171  *
9172  * This also applies to dynptr slices belonging to skb and xdp dynptrs,
9173  * since these slices point to packet data.
9174  */
9175 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
9176 {
9177 	struct bpf_func_state *state;
9178 	struct bpf_reg_state *reg;
9179 
9180 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
9181 		if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg))
9182 			mark_reg_invalid(env, reg);
9183 	}));
9184 }
9185 
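/* Sentinel 'range' values for a packet pointer that has been compared
 * against pkt_end and found to be at or beyond it; see mark_pkt_end().
 */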
9186 enum {
9187 	AT_PKT_END = -1,
9188 	BEYOND_PKT_END = -2,
9189 };
9190 
9191 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
9192 {
9193 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9194 	struct bpf_reg_state *reg = &state->regs[regn];
9195 
9196 	if (reg->type != PTR_TO_PACKET)
9197 		/* PTR_TO_PACKET_META is not supported yet */
9198 		return;
9199 
9200 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
9201 	 * How far beyond pkt_end it goes is unknown.
9202 	 * if (!range_open) it's the case of pkt >= pkt_end
9203 	 * if (range_open) it's the case of pkt > pkt_end
9204 	 * hence this pointer is at least 1 byte bigger than pkt_end
9205 	 */
9206 	if (range_open)
9207 		reg->range = BEYOND_PKT_END;
9208 	else
9209 		reg->range = AT_PKT_END;
9210 }
9211 
9212 /* The pointer with the specified id has released its reference to kernel
9213  * resources. Identify all copies of the same pointer and clear the reference.
9214  */
9215 static int release_reference(struct bpf_verifier_env *env,
9216 			     int ref_obj_id)
9217 {
9218 	struct bpf_func_state *state;
9219 	struct bpf_reg_state *reg;
9220 	int err;
9221 
9222 	err = release_reference_state(cur_func(env), ref_obj_id);
9223 	if (err)
9224 		return err;
9225 
9226 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
9227 		if (reg->ref_obj_id == ref_obj_id)
9228 			mark_reg_invalid(env, reg);
9229 	}));
9230 
9231 	return 0;
9232 }
9233 
9234 static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
9235 {
9236 	struct bpf_func_state *unused;
9237 	struct bpf_reg_state *reg;
9238 
9239 	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
9240 		if (type_is_non_owning_ref(reg->type))
9241 			mark_reg_invalid(env, reg);
9242 	}));
9243 }
9244 
9245 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
9246 				    struct bpf_reg_state *regs)
9247 {
9248 	int i;
9249 
9250 	/* after the call registers r0 - r5 were scratched */
9251 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
9252 		mark_reg_not_init(env, regs, caller_saved[i]);
9253 		__check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK);
9254 	}
9255 }
9256 
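/* Callback used to set up the callee's entry registers when the verifier
 * descends into a subprog or a helper/kfunc callback; each call site
 * installs its own variant (e.g. set_map_elem_callback_state() below).
 */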
9257 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
9258 				   struct bpf_func_state *caller,
9259 				   struct bpf_func_state *callee,
9260 				   int insn_idx);
9261 
9262 static int set_callee_state(struct bpf_verifier_env *env,
9263 			    struct bpf_func_state *caller,
9264 			    struct bpf_func_state *callee, int insn_idx);
9265 
9266 static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite,
9267 			    set_callee_state_fn set_callee_state_cb,
9268 			    struct bpf_verifier_state *state)
9269 {
9270 	struct bpf_func_state *caller, *callee;
9271 	int err;
9272 
9273 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
9274 		verbose(env, "the call stack of %d frames is too deep\n",
9275 			state->curframe + 2);
9276 		return -E2BIG;
9277 	}
9278 
9279 	if (state->frame[state->curframe + 1]) {
9280 		verbose(env, "verifier bug. Frame %d already allocated\n",
9281 			state->curframe + 1);
9282 		return -EFAULT;
9283 	}
9284 
9285 	caller = state->frame[state->curframe];
9286 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
9287 	if (!callee)
9288 		return -ENOMEM;
9289 	state->frame[state->curframe + 1] = callee;
9290 
9291 	/* callee cannot access r0, r6 - r9 for reading and has to write
9292 	 * into its own stack before reading from it.
9293 	 * callee can read/write into caller's stack
9294 	 */
9295 	init_func_state(env, callee,
9296 			/* remember the callsite, it will be used by bpf_exit */
9297 			callsite,
9298 			state->curframe + 1 /* frameno within this callchain */,
9299 			subprog /* subprog number within this prog */);
9300 	/* Transfer references to the callee */
9301 	err = copy_reference_state(callee, caller);
9302 	err = err ?: set_callee_state_cb(env, caller, callee, callsite);
9303 	if (err)
9304 		goto err_out;
9305 
9306 	/* only increment it after check_reg_arg() finished */
9307 	state->curframe++;
9308 
9309 	return 0;
9310 
9311 err_out:
9312 	free_func_state(callee);
9313 	state->frame[state->curframe + 1] = NULL;
9314 	return err;
9315 }
9316 
9317 static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
9318 				    const struct btf *btf,
9319 				    struct bpf_reg_state *regs)
9320 {
9321 	struct bpf_subprog_info *sub = subprog_info(env, subprog);
9322 	struct bpf_verifier_log *log = &env->log;
9323 	u32 i;
9324 	int ret;
9325 
9326 	ret = btf_prepare_func_args(env, subprog);
9327 	if (ret)
9328 		return ret;
9329 
9330 	/* check that BTF function arguments match actual types that the
9331 	 * verifier sees.
9332 	 */
9333 	for (i = 0; i < sub->arg_cnt; i++) {
9334 		u32 regno = i + 1;
9335 		struct bpf_reg_state *reg = &regs[regno];
9336 		struct bpf_subprog_arg_info *arg = &sub->args[i];
9337 
9338 		if (arg->arg_type == ARG_ANYTHING) {
9339 			if (reg->type != SCALAR_VALUE) {
9340 				bpf_log(log, "R%d is not a scalar\n", regno);
9341 				return -EINVAL;
9342 			}
9343 		} else if (arg->arg_type == ARG_PTR_TO_CTX) {
9344 			ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
9345 			if (ret < 0)
9346 				return ret;
9347 			/* If function expects ctx type in BTF check that caller
9348 			 * is passing PTR_TO_CTX.
9349 			 */
9350 			if (reg->type != PTR_TO_CTX) {
9351 				bpf_log(log, "arg#%d expects pointer to ctx\n", i);
9352 				return -EINVAL;
9353 			}
9354 		} else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) {
9355 			ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
9356 			if (ret < 0)
9357 				return ret;
9358 			if (check_mem_reg(env, reg, regno, arg->mem_size))
9359 				return -EINVAL;
9360 			if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) {
9361 				bpf_log(log, "arg#%d is expected to be non-NULL\n", i);
9362 				return -EINVAL;
9363 			}
9364 		} else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
9365 			ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0);
9366 			if (ret)
9367 				return ret;
9368 		} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
9369 			struct bpf_call_arg_meta meta;
9370 			int err;
9371 
9372 			if (register_is_null(reg) && type_may_be_null(arg->arg_type))
9373 				continue;
9374 
9375 			memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
9376 			err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
9377 			err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
9378 			if (err)
9379 				return err;
9380 		} else {
9381 			bpf_log(log, "verifier bug: unrecognized arg#%d type %d\n",
9382 				i, arg->arg_type);
9383 			return -EFAULT;
9384 		}
9385 	}
9386 
9387 	return 0;
9388 }
9389 
9390 /* Compare BTF of a function call with given bpf_reg_state.
9391  * Returns:
9392  * -EFAULT - there is a verifier bug. Abort verification.
9393  * -EINVAL - there is a type mismatch or BTF is not available.
9394  * 0 - BTF matches with what bpf_reg_state expects.
9395  * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
9396  */
9397 static int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
9398 				  struct bpf_reg_state *regs)
9399 {
9400 	struct bpf_prog *prog = env->prog;
9401 	struct btf *btf = prog->aux->btf;
9402 	u32 btf_id;
9403 	int err;
9404 
9405 	if (!prog->aux->func_info)
9406 		return -EINVAL;
9407 
9408 	btf_id = prog->aux->func_info[subprog].type_id;
9409 	if (!btf_id)
9410 		return -EFAULT;
9411 
9412 	if (prog->aux->func_info_aux[subprog].unreliable)
9413 		return -EINVAL;
9414 
9415 	err = btf_check_func_arg_match(env, subprog, btf, regs);
9416 	/* Compiler optimizations can remove arguments from static functions
9417 	 * or mismatched type can be passed into a global function.
9418 	 * In such cases mark the function as unreliable from BTF point of view.
9419 	 */
9420 	if (err)
9421 		prog->aux->func_info_aux[subprog].unreliable = true;
9422 	return err;
9423 }
9424 
9425 static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
9426 			      int insn_idx, int subprog,
9427 			      set_callee_state_fn set_callee_state_cb)
9428 {
9429 	struct bpf_verifier_state *state = env->cur_state, *callback_state;
9430 	struct bpf_func_state *caller, *callee;
9431 	int err;
9432 
9433 	caller = state->frame[state->curframe];
9434 	err = btf_check_subprog_call(env, subprog, caller->regs);
9435 	if (err == -EFAULT)
9436 		return err;
9437 
9438 	/* set_callee_state is used for direct subprog calls, but we are
9439 	 * interested in validating only BPF helpers that can call subprogs as
9440 	 * callbacks
9441 	 */
9442 	env->subprog_info[subprog].is_cb = true;
9443 	if (bpf_pseudo_kfunc_call(insn) &&
9444 	    !is_sync_callback_calling_kfunc(insn->imm)) {
9445 		verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
9446 			func_id_name(insn->imm), insn->imm);
9447 		return -EFAULT;
9448 	} else if (!bpf_pseudo_kfunc_call(insn) &&
9449 		   !is_callback_calling_function(insn->imm)) { /* helper */
9450 		verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
9451 			func_id_name(insn->imm), insn->imm);
9452 		return -EFAULT;
9453 	}
9454 
9455 	if (is_async_callback_calling_insn(insn)) {
9456 		struct bpf_verifier_state *async_cb;
9457 
9458 		/* there is no real recursion here. timer callbacks are async */
9459 		env->subprog_info[subprog].is_async_cb = true;
9460 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
9461 					 insn_idx, subprog);
9462 		if (!async_cb)
9463 			return -EFAULT;
9464 		callee = async_cb->frame[0];
9465 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
9466 
9467 		/* Convert bpf_timer_set_callback() args into timer callback args */
9468 		err = set_callee_state_cb(env, caller, callee, insn_idx);
9469 		if (err)
9470 			return err;
9471 
9472 		return 0;
9473 	}
9474 
9475 	/* for callback functions enqueue entry to callback and
9476 	 * proceed with next instruction within current frame.
9477 	 */
9478 	callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
9479 	if (!callback_state)
9480 		return -ENOMEM;
9481 
9482 	err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,
9483 			       callback_state);
9484 	if (err)
9485 		return err;
9486 
9487 	callback_state->callback_unroll_depth++;
9488 	callback_state->frame[callback_state->curframe - 1]->callback_depth++;
9489 	caller->callback_depth = 0;
9490 	return 0;
9491 }
9492 
9493 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
9494 			   int *insn_idx)
9495 {
9496 	struct bpf_verifier_state *state = env->cur_state;
9497 	struct bpf_func_state *caller;
9498 	int err, subprog, target_insn;
9499 
9500 	target_insn = *insn_idx + insn->imm + 1;
9501 	subprog = find_subprog(env, target_insn);
9502 	if (subprog < 0) {
9503 		verbose(env, "verifier bug. No program starts at insn %d\n", target_insn);
9504 		return -EFAULT;
9505 	}
9506 
9507 	caller = state->frame[state->curframe];
9508 	err = btf_check_subprog_call(env, subprog, caller->regs);
9509 	if (err == -EFAULT)
9510 		return err;
9511 	if (subprog_is_global(env, subprog)) {
9512 		const char *sub_name = subprog_name(env, subprog);
9513 
9514 		/* Only global subprogs cannot be called with a lock held. */
9515 		if (env->cur_state->active_lock.ptr) {
9516 			verbose(env, "global function calls are not allowed while holding a lock,\n"
9517 				     "use static function instead\n");
9518 			return -EINVAL;
9519 		}
9520 
9521 		if (err) {
9522 			verbose(env, "Caller passes invalid args into func#%d ('%s')\n",
9523 				subprog, sub_name);
9524 			return err;
9525 		}
9526 
9527 		verbose(env, "Func#%d ('%s') is global and assumed valid.\n",
9528 			subprog, sub_name);
9529 		/* mark global subprog for verifying after main prog */
9530 		subprog_aux(env, subprog)->called = true;
9531 		clear_caller_saved_regs(env, caller->regs);
9532 
9533 		/* All global functions return a 64-bit SCALAR_VALUE */
9534 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
9535 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
9536 
9537 		/* continue with next insn after call */
9538 		return 0;
9539 	}
9540 
9541 	/* for regular function entry setup new frame and continue
9542 	 * from that frame.
9543 	 */
9544 	err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state);
9545 	if (err)
9546 		return err;
9547 
9548 	clear_caller_saved_regs(env, caller->regs);
9549 
9550 	/* and go analyze first insn of the callee */
9551 	*insn_idx = env->subprog_info[subprog].start - 1;
9552 
9553 	if (env->log.level & BPF_LOG_LEVEL) {
9554 		verbose(env, "caller:\n");
9555 		print_verifier_state(env, caller, true);
9556 		verbose(env, "callee:\n");
9557 		print_verifier_state(env, state->frame[state->curframe], true);
9558 	}
9559 
9560 	return 0;
9561 }
9562 
9563 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
9564 				   struct bpf_func_state *caller,
9565 				   struct bpf_func_state *callee)
9566 {
9567 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
9568 	 *      void *callback_ctx, u64 flags);
9569 	 * callback_fn(struct bpf_map *map, void *key, void *value,
9570 	 *      void *callback_ctx);
9571 	 */
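	/* For illustration, a hypothetical BPF-program-side use matching the
	 * register setup below (names are examples, not from this file):
	 *
	 *   static long cb(struct bpf_map *map, u32 *key, u64 *val, void *ctx)
	 *   {
	 *           return 0;	// 0 = continue iterating, 1 = stop
	 *   }
	 *   ...
	 *   bpf_for_each_map_elem(&my_map, cb, &my_ctx, 0);
	 */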
9572 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
9573 
9574 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
9575 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9576 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
9577 
9578 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
9579 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
9580 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
9581 
9582 	/* pointer to stack or null */
9583 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
9584 
9585 	/* unused */
9586 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9587 	return 0;
9588 }
9589 
9590 static int set_callee_state(struct bpf_verifier_env *env,
9591 			    struct bpf_func_state *caller,
9592 			    struct bpf_func_state *callee, int insn_idx)
9593 {
9594 	int i;
9595 
9596 	/* copy r1 - r5 args that the callee can access.  The copy includes parent
9597 	 * pointers, which connect the callee up to the liveness chain.
9598 	 */
9599 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
9600 		callee->regs[i] = caller->regs[i];
9601 	return 0;
9602 }
9603 
9604 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
9605 				       struct bpf_func_state *caller,
9606 				       struct bpf_func_state *callee,
9607 				       int insn_idx)
9608 {
9609 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
9610 	struct bpf_map *map;
9611 	int err;
9612 
9613 	if (bpf_map_ptr_poisoned(insn_aux)) {
9614 		verbose(env, "tail_call abusing map_ptr\n");
9615 		return -EINVAL;
9616 	}
9617 
9618 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
9619 	if (!map->ops->map_set_for_each_callback_args ||
9620 	    !map->ops->map_for_each_callback) {
9621 		verbose(env, "callback function not allowed for map\n");
9622 		return -ENOTSUPP;
9623 	}
9624 
9625 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
9626 	if (err)
9627 		return err;
9628 
9629 	callee->in_callback_fn = true;
9630 	callee->callback_ret_range = retval_range(0, 1);
9631 	return 0;
9632 }
9633 
9634 static int set_loop_callback_state(struct bpf_verifier_env *env,
9635 				   struct bpf_func_state *caller,
9636 				   struct bpf_func_state *callee,
9637 				   int insn_idx)
9638 {
9639 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
9640 	 *	    u64 flags);
9641 	 * callback_fn(u32 index, void *callback_ctx);
9642 	 */
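	/* A usage sketch from the BPF side (illustrative names, not from this
	 * file):
	 *
	 *   static long cb(u32 index, void *ctx) { ...; return 0; }
	 *   ...
	 *   bpf_loop(nr, cb, &my_ctx, 0);	// flags must currently be 0
	 *
	 * Returning 1 from cb stops the iteration early.
	 */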
9643 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
9644 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
9645 
9646 	/* unused */
9647 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9648 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9649 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9650 
9651 	callee->in_callback_fn = true;
9652 	callee->callback_ret_range = retval_range(0, 1);
9653 	return 0;
9654 }
9655 
9656 static int set_timer_callback_state(struct bpf_verifier_env *env,
9657 				    struct bpf_func_state *caller,
9658 				    struct bpf_func_state *callee,
9659 				    int insn_idx)
9660 {
9661 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
9662 
9663 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
9664 	 * callback_fn(struct bpf_map *map, void *key, void *value);
9665 	 */
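	/* Illustrative BPF-side pairing for the setup below (example names,
	 * not from this file):
	 *
	 *   static int timer_cb(void *map, int *key, struct map_elem *val)
	 *   { ...; return 0; }
	 *   ...
	 *   bpf_timer_set_callback(&val->timer, timer_cb);
	 *
	 * The callback later runs asynchronously, which is why it is verified
	 * via push_async_cb() rather than as a synchronous callback.
	 */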
9666 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
9667 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
9668 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
9669 
9670 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
9671 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9672 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
9673 
9674 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
9675 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
9676 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
9677 
9678 	/* unused */
9679 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9680 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9681 	callee->in_async_callback_fn = true;
9682 	callee->callback_ret_range = retval_range(0, 1);
9683 	return 0;
9684 }
9685 
9686 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
9687 				       struct bpf_func_state *caller,
9688 				       struct bpf_func_state *callee,
9689 				       int insn_idx)
9690 {
9691 	/* bpf_find_vma(struct task_struct *task, u64 addr,
9692 	 *               void *callback_fn, void *callback_ctx, u64 flags)
9693 	 * (callback_fn)(struct task_struct *task,
9694 	 *               struct vm_area_struct *vma, void *callback_ctx);
9695 	 */
9696 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
9697 
9698 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
9699 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9700 	callee->regs[BPF_REG_2].btf = btf_vmlinux;
9701 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
9702 
9703 	/* pointer to stack or null */
9704 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
9705 
9706 	/* unused */
9707 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9708 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9709 	callee->in_callback_fn = true;
9710 	callee->callback_ret_range = retval_range(0, 1);
9711 	return 0;
9712 }
9713 
9714 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
9715 					   struct bpf_func_state *caller,
9716 					   struct bpf_func_state *callee,
9717 					   int insn_idx)
9718 {
9719 	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void
9720 	 *			  callback_ctx, u64 flags);
9721 	 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
9722 	 */
9723 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
9724 	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
9725 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
9726 
9727 	/* unused */
9728 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9729 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9730 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9731 
9732 	callee->in_callback_fn = true;
9733 	callee->callback_ret_range = retval_range(0, 1);
9734 	return 0;
9735 }
9736 
9737 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
9738 					 struct bpf_func_state *caller,
9739 					 struct bpf_func_state *callee,
9740 					 int insn_idx)
9741 {
9742 	/* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
9743 	 *                     bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
9744 	 *
9745 	 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset
9746 	 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
9747 	 * by this point, so look at 'root'
9748 	 */
9749 	struct btf_field *field;
9750 
9751 	field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
9752 				      BPF_RB_ROOT);
9753 	if (!field || !field->graph_root.value_btf_id)
9754 		return -EFAULT;
9755 
9756 	mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
9757 	ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
9758 	mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
9759 	ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
9760 
9761 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9762 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9763 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9764 	callee->in_callback_fn = true;
9765 	callee->callback_ret_range = retval_range(0, 1);
9766 	return 0;
9767 }
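/* A 'less' callback matching the register state set up above might look
 * like this on the BPF side (illustrative sketch; 'struct node_data' is
 * an example type, not from this file):
 *
 *   static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *   {
 *           struct node_data *na = container_of(a, struct node_data, node);
 *           const struct node_data *nb = container_of(b, struct node_data, node);
 *
 *           return na->key < nb->key;
 *   }
 */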
9768 
9769 static bool is_rbtree_lock_required_kfunc(u32 btf_id);
9770 
9771 /* Are we currently verifying the callback for an rbtree helper that must
9772  * be called with the lock held? If so, there is no need to complain about
9773  * an unreleased lock.
9774  */
9775 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
9776 {
9777 	struct bpf_verifier_state *state = env->cur_state;
9778 	struct bpf_insn *insn = env->prog->insnsi;
9779 	struct bpf_func_state *callee;
9780 	int kfunc_btf_id;
9781 
9782 	if (!state->curframe)
9783 		return false;
9784 
9785 	callee = state->frame[state->curframe];
9786 
9787 	if (!callee->in_callback_fn)
9788 		return false;
9789 
9790 	kfunc_btf_id = insn[callee->callsite].imm;
9791 	return is_rbtree_lock_required_kfunc(kfunc_btf_id);
9792 }
9793 
9794 static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
9795 {
9796 	return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
9797 }
9798 
9799 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
9800 {
9801 	struct bpf_verifier_state *state = env->cur_state, *prev_st;
9802 	struct bpf_func_state *caller, *callee;
9803 	struct bpf_reg_state *r0;
9804 	bool in_callback_fn;
9805 	int err;
9806 
9807 	callee = state->frame[state->curframe];
9808 	r0 = &callee->regs[BPF_REG_0];
9809 	if (r0->type == PTR_TO_STACK) {
9810 		/* technically it's ok to return the caller's stack pointer
9811 		 * (or the caller's caller's pointer) back to the caller,
9812 		 * since these pointers are valid. Only the current stack
9813 		 * pointer becomes invalid as soon as the function exits,
9814 		 * but let's be conservative.
9815 		 */
9816 		verbose(env, "cannot return stack pointer to the caller\n");
9817 		return -EINVAL;
9818 	}
9819 
9820 	caller = state->frame[state->curframe - 1];
9821 	if (callee->in_callback_fn) {
9822 		if (r0->type != SCALAR_VALUE) {
9823 			verbose(env, "R0 not a scalar value\n");
9824 			return -EACCES;
9825 		}
9826 
9827 		/* we are going to rely on register's precise value */
9828 		err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
9829 		err = err ?: mark_chain_precision(env, BPF_REG_0);
9830 		if (err)
9831 			return err;
9832 
9833 		/* enforce R0 return value range */
9834 		if (!retval_range_within(callee->callback_ret_range, r0)) {
9835 			verbose_invalid_scalar(env, r0, callee->callback_ret_range,
9836 					       "At callback return", "R0");
9837 			return -EINVAL;
9838 		}
9839 		if (!calls_callback(env, callee->callsite)) {
9840 			verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n",
9841 				*insn_idx, callee->callsite);
9842 			return -EFAULT;
9843 		}
9844 	} else {
9845 		/* return to the caller whatever r0 had in the callee */
9846 		caller->regs[BPF_REG_0] = *r0;
9847 	}
9848 
9849 	/* callback_fn frame should have released its own additions to parent's
9850 	 * reference state at this point, or check_reference_leak would
9851 	 * complain, hence it must be the same as the caller. There is no need
9852 	 * to copy it back.
9853 	 */
9854 	if (!callee->in_callback_fn) {
9855 		/* Transfer references to the caller */
9856 		err = copy_reference_state(caller, callee);
9857 		if (err)
9858 			return err;
9859 	}
9860 
9861 	/* for callbacks like bpf_loop or bpf_for_each_map_elem go back to the
9862 	 * callsite, where the function call logic will reschedule the callback
9863 	 * visit. If the iteration converges, is_state_visited() eventually prunes it.
9864 	 */
9865 	in_callback_fn = callee->in_callback_fn;
9866 	if (in_callback_fn)
9867 		*insn_idx = callee->callsite;
9868 	else
9869 		*insn_idx = callee->callsite + 1;
9870 
9871 	if (env->log.level & BPF_LOG_LEVEL) {
9872 		verbose(env, "returning from callee:\n");
9873 		print_verifier_state(env, callee, true);
9874 		verbose(env, "to caller at %d:\n", *insn_idx);
9875 		print_verifier_state(env, caller, true);
9876 	}
9877 	/* clear everything in the callee. In case of exceptional exits using
9878 	 * bpf_throw, this will be done by copy_verifier_state for extra frames.
	 */
9879 	free_func_state(callee);
9880 	state->frame[state->curframe--] = NULL;
9881 
9882 	/* for callbacks widen imprecise scalars to make programs like below verify:
9883 	 *
9884 	 *   struct ctx { int i; };
9885 	 *   long cb(u32 idx, struct ctx *ctx) { ctx->i++; ...; return 0; }
9886 	 *   ...
9887 	 *   struct ctx ctx = { .i = 0 };
9888 	 *   bpf_loop(100, cb, &ctx, 0);
9889 	 *
9890 	 * This is similar to what is done in process_iter_next_call() for open
9891 	 * coded iterators.
9892 	 */
9893 	prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL;
9894 	if (prev_st) {
9895 		err = widen_imprecise_scalars(env, prev_st, state);
9896 		if (err)
9897 			return err;
9898 	}
9899 	return 0;
9900 }
9901 
9902 static int do_refine_retval_range(struct bpf_verifier_env *env,
9903 				  struct bpf_reg_state *regs, int ret_type,
9904 				  int func_id,
9905 				  struct bpf_call_arg_meta *meta)
9906 {
9907 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
9908 
9909 	if (ret_type != RET_INTEGER)
9910 		return 0;
9911 
9912 	switch (func_id) {
9913 	case BPF_FUNC_get_stack:
9914 	case BPF_FUNC_get_task_stack:
9915 	case BPF_FUNC_probe_read_str:
9916 	case BPF_FUNC_probe_read_kernel_str:
9917 	case BPF_FUNC_probe_read_user_str:
9918 		ret_reg->smax_value = meta->msize_max_value;
9919 		ret_reg->s32_max_value = meta->msize_max_value;
9920 		ret_reg->smin_value = -MAX_ERRNO;
9921 		ret_reg->s32_min_value = -MAX_ERRNO;
9922 		reg_bounds_sync(ret_reg);
9923 		break;
9924 	case BPF_FUNC_get_smp_processor_id:
9925 		ret_reg->umax_value = nr_cpu_ids - 1;
9926 		ret_reg->u32_max_value = nr_cpu_ids - 1;
9927 		ret_reg->smax_value = nr_cpu_ids - 1;
9928 		ret_reg->s32_max_value = nr_cpu_ids - 1;
9929 		ret_reg->umin_value = 0;
9930 		ret_reg->u32_min_value = 0;
9931 		ret_reg->smin_value = 0;
9932 		ret_reg->s32_min_value = 0;
9933 		reg_bounds_sync(ret_reg);
9934 		break;
9935 	}
9936 
9937 	return reg_bounds_sanity_check(env, ret_reg, "retval");
9938 }
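/* Editorial illustration of the refinement above (not code from this file):
 * after
 *
 *   long n = bpf_probe_read_kernel_str(buf, sizeof(buf), unsafe_ptr);
 *
 * R0 is known to lie in [-MAX_ERRNO, sizeof(buf)], since msize_max_value
 * recorded the maximum size argument. A later access guarded by
 * 'if (n > 0) buf[n - 1] = 0;' can then be proven in-bounds.
 */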
9939 
9940 static int
9941 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
9942 		int func_id, int insn_idx)
9943 {
9944 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
9945 	struct bpf_map *map = meta->map_ptr;
9946 
9947 	if (func_id != BPF_FUNC_tail_call &&
9948 	    func_id != BPF_FUNC_map_lookup_elem &&
9949 	    func_id != BPF_FUNC_map_update_elem &&
9950 	    func_id != BPF_FUNC_map_delete_elem &&
9951 	    func_id != BPF_FUNC_map_push_elem &&
9952 	    func_id != BPF_FUNC_map_pop_elem &&
9953 	    func_id != BPF_FUNC_map_peek_elem &&
9954 	    func_id != BPF_FUNC_for_each_map_elem &&
9955 	    func_id != BPF_FUNC_redirect_map &&
9956 	    func_id != BPF_FUNC_map_lookup_percpu_elem)
9957 		return 0;
9958 
9959 	if (map == NULL) {
9960 		verbose(env, "kernel subsystem misconfigured verifier\n");
9961 		return -EINVAL;
9962 	}
9963 
9964 	/* In the case of read-only maps, some additional restrictions
9965 	 * need to be applied in order to prevent the program from
9966 	 * altering the state of the map.
9967 	 */
9968 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
9969 	    (func_id == BPF_FUNC_map_delete_elem ||
9970 	     func_id == BPF_FUNC_map_update_elem ||
9971 	     func_id == BPF_FUNC_map_push_elem ||
9972 	     func_id == BPF_FUNC_map_pop_elem)) {
9973 		verbose(env, "write into map forbidden\n");
9974 		return -EACCES;
9975 	}
9976 
9977 	if (!BPF_MAP_PTR(aux->map_ptr_state))
9978 		bpf_map_ptr_store(aux, meta->map_ptr,
9979 				  !meta->map_ptr->bypass_spec_v1);
9980 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
9981 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
9982 				  !meta->map_ptr->bypass_spec_v1);
9983 	return 0;
9984 }
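/* Illustration of the poisoning above (sketch, not code from this file):
 * if two paths reach the same call site with different maps, e.g.
 *
 *   map = cond ? (void *)&map_a : (void *)&map_b;
 *   bpf_map_lookup_elem(map, &key);
 *
 * map_ptr_state becomes BPF_MAP_PTR_POISON, so later passes cannot
 * specialize the call for one known map.
 */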
9985 
9986 static int
9987 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
9988 		int func_id, int insn_idx)
9989 {
9990 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
9991 	struct bpf_reg_state *regs = cur_regs(env), *reg;
9992 	struct bpf_map *map = meta->map_ptr;
9993 	u64 val, max;
9994 	int err;
9995 
9996 	if (func_id != BPF_FUNC_tail_call)
9997 		return 0;
9998 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
9999 		verbose(env, "kernel subsystem misconfigured verifier\n");
10000 		return -EINVAL;
10001 	}
10002 
10003 	reg = &regs[BPF_REG_3];
10004 	val = reg->var_off.value;
10005 	max = map->max_entries;
10006 
10007 	if (!(is_reg_const(reg, false) && val < max)) {
10008 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
10009 		return 0;
10010 	}
10011 
10012 	err = mark_chain_precision(env, BPF_REG_3);
10013 	if (err)
10014 		return err;
10015 	if (bpf_map_key_unseen(aux))
10016 		bpf_map_key_store(aux, val);
10017 	else if (!bpf_map_key_poisoned(aux) &&
10018 		  bpf_map_key_immediate(aux) != val)
10019 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
10020 	return 0;
10021 }
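/* Illustrative effect of the key tracking above: with a constant,
 * in-range index such as
 *
 *   bpf_tail_call(ctx, &jmp_table, 2);
 *
 * key 2 is recorded for this call site, enabling a direct-jump
 * optimization later; a variable or out-of-range index stores
 * BPF_MAP_KEY_POISON and keeps the generic tail call path.
 */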
10022 
10023 static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
10024 {
10025 	struct bpf_func_state *state = cur_func(env);
10026 	bool refs_lingering = false;
10027 	int i;
10028 
10029 	if (!exception_exit && state->frameno && !state->in_callback_fn)
10030 		return 0;
10031 
10032 	for (i = 0; i < state->acquired_refs; i++) {
10033 		if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
10034 			continue;
10035 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
10036 			state->refs[i].id, state->refs[i].insn_idx);
10037 		refs_lingering = true;
10038 	}
10039 	return refs_lingering ? -EINVAL : 0;
10040 }
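/* e.g. (illustrative) a program doing
 *
 *   struct bpf_sock *sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple),
 *                                           BPF_F_CURRENT_NETNS, 0);
 *
 * and exiting without calling bpf_sk_release(sk) leaves an acquired
 * reference lingering and is rejected by the check above.
 */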
10041 
10042 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
10043 				   struct bpf_reg_state *regs)
10044 {
10045 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
10046 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
10047 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
10048 	struct bpf_bprintf_data data = {};
10049 	int err, fmt_map_off, num_args;
10050 	u64 fmt_addr;
10051 	char *fmt;
10052 
10053 	/* data must be an array of u64 */
10054 	if (data_len_reg->var_off.value % 8)
10055 		return -EINVAL;
10056 	num_args = data_len_reg->var_off.value / 8;
10057 
10058 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
10059 	 * and map_direct_value_addr is set.
10060 	 */
10061 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
10062 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
10063 						  fmt_map_off);
10064 	if (err) {
10065 		verbose(env, "verifier bug\n");
10066 		return -EFAULT;
10067 	}
10068 	fmt = (char *)(long)fmt_addr + fmt_map_off;
10069 
10070 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so we
10071 	 * can focus on validating the format specifiers.
10072 	 */
10073 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
10074 	if (err < 0)
10075 		verbose(env, "Invalid format string\n");
10076 
10077 	return err;
10078 }
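/* The BPF-side contract being validated above, as a sketch (names are
 * illustrative):
 *
 *   static const char fmt[] = "pid=%d addr=%llx";	// read-only map data
 *   u64 args[2] = { (u64)pid, (u64)addr };
 *
 *   bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * sizeof(args) must be a multiple of 8; num_args above is sizeof(args)/8,
 * and the constant fmt lets the verifier read the string at verify time.
 */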
10079 
10080 static int check_get_func_ip(struct bpf_verifier_env *env)
10081 {
10082 	enum bpf_prog_type type = resolve_prog_type(env->prog);
10083 	int func_id = BPF_FUNC_get_func_ip;
10084 
10085 	if (type == BPF_PROG_TYPE_TRACING) {
10086 		if (!bpf_prog_has_trampoline(env->prog)) {
10087 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
10088 				func_id_name(func_id), func_id);
10089 			return -ENOTSUPP;
10090 		}
10091 		return 0;
10092 	} else if (type == BPF_PROG_TYPE_KPROBE) {
10093 		return 0;
10094 	}
10095 
10096 	verbose(env, "func %s#%d not supported for program type %d\n",
10097 		func_id_name(func_id), func_id, type);
10098 	return -ENOTSUPP;
10099 }
10100 
10101 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
10102 {
10103 	return &env->insn_aux_data[env->insn_idx];
10104 }
10105 
10106 static bool loop_flag_is_zero(struct bpf_verifier_env *env)
10107 {
10108 	struct bpf_reg_state *regs = cur_regs(env);
10109 	struct bpf_reg_state *reg = &regs[BPF_REG_4];
10110 	bool reg_is_null = register_is_null(reg);
10111 
10112 	if (reg_is_null)
10113 		mark_chain_precision(env, BPF_REG_4);
10114 
10115 	return reg_is_null;
10116 }
10117 
10118 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
10119 {
10120 	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
10121 
10122 	if (!state->initialized) {
10123 		state->initialized = 1;
10124 		state->fit_for_inline = loop_flag_is_zero(env);
10125 		state->callback_subprogno = subprogno;
10126 		return;
10127 	}
10128 
10129 	if (!state->fit_for_inline)
10130 		return;
10131 
10132 	state->fit_for_inline = (loop_flag_is_zero(env) &&
10133 				 state->callback_subprogno == subprogno);
10134 }
10135 
10136 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
10137 			     int *insn_idx_p)
10138 {
10139 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
10140 	bool returns_cpu_specific_alloc_ptr = false;
10141 	const struct bpf_func_proto *fn = NULL;
10142 	enum bpf_return_type ret_type;
10143 	enum bpf_type_flag ret_flag;
10144 	struct bpf_reg_state *regs;
10145 	struct bpf_call_arg_meta meta;
10146 	int insn_idx = *insn_idx_p;
10147 	bool changes_data;
10148 	int i, err, func_id;
10149 
10150 	/* find function prototype */
10151 	func_id = insn->imm;
10152 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
10153 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
10154 			func_id);
10155 		return -EINVAL;
10156 	}
10157 
10158 	if (env->ops->get_func_proto)
10159 		fn = env->ops->get_func_proto(func_id, env->prog);
10160 	if (!fn) {
10161 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
10162 			func_id);
10163 		return -EINVAL;
10164 	}
10165 
10166 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
10167 	if (!env->prog->gpl_compatible && fn->gpl_only) {
10168 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
10169 		return -EINVAL;
10170 	}
10171 
10172 	if (fn->allowed && !fn->allowed(env->prog)) {
10173 		verbose(env, "helper call is not allowed in probe\n");
10174 		return -EINVAL;
10175 	}
10176 
10177 	if (!in_sleepable(env) && fn->might_sleep) {
10178 		verbose(env, "helper call might sleep in a non-sleepable prog\n");
10179 		return -EINVAL;
10180 	}
10181 
10182 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
10183 	changes_data = bpf_helper_changes_pkt_data(fn->func);
10184 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
10185 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
10186 			func_id_name(func_id), func_id);
10187 		return -EINVAL;
10188 	}
10189 
10190 	memset(&meta, 0, sizeof(meta));
10191 	meta.pkt_access = fn->pkt_access;
10192 
10193 	err = check_func_proto(fn, func_id);
10194 	if (err) {
10195 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
10196 			func_id_name(func_id), func_id);
10197 		return err;
10198 	}
10199 
10200 	if (env->cur_state->active_rcu_lock) {
10201 		if (fn->might_sleep) {
10202 			verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
10203 				func_id_name(func_id), func_id);
10204 			return -EINVAL;
10205 		}
10206 
10207 		if (in_sleepable(env) && is_storage_get_function(func_id))
10208 			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
10209 	}
10210 
10211 	meta.func_id = func_id;
10212 	/* check args */
10213 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
10214 		err = check_func_arg(env, i, &meta, fn, insn_idx);
10215 		if (err)
10216 			return err;
10217 	}
10218 
10219 	err = record_func_map(env, &meta, func_id, insn_idx);
10220 	if (err)
10221 		return err;
10222 
10223 	err = record_func_key(env, &meta, func_id, insn_idx);
10224 	if (err)
10225 		return err;
10226 
10227 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
10228 	 * is inferred from the register state.
10229 	 */
10230 	for (i = 0; i < meta.access_size; i++) {
10231 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
10232 				       BPF_WRITE, -1, false, false);
10233 		if (err)
10234 			return err;
10235 	}
10236 
10237 	regs = cur_regs(env);
10238 
10239 	if (meta.release_regno) {
10240 		err = -EINVAL;
10241 		/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
10242 		 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
10243 		 * is safe to do directly.
10244 		 */
10245 		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
10246 			if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
10247 				verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
10248 				return -EFAULT;
10249 			}
10250 			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
10251 		} else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) {
10252 			u32 ref_obj_id = meta.ref_obj_id;
10253 			bool in_rcu = in_rcu_cs(env);
10254 			struct bpf_func_state *state;
10255 			struct bpf_reg_state *reg;
10256 
10257 			err = release_reference_state(cur_func(env), ref_obj_id);
10258 			if (!err) {
10259 				bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
10260 					if (reg->ref_obj_id == ref_obj_id) {
10261 						if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) {
10262 							reg->ref_obj_id = 0;
10263 							reg->type &= ~MEM_ALLOC;
10264 							reg->type |= MEM_RCU;
10265 						} else {
10266 							mark_reg_invalid(env, reg);
10267 						}
10268 					}
10269 				}));
10270 			}
10271 		} else if (meta.ref_obj_id) {
10272 			err = release_reference(env, meta.ref_obj_id);
10273 		} else if (register_is_null(&regs[meta.release_regno])) {
10274 			/* meta.ref_obj_id can only be 0 if the register that is meant to
10275 			 * be released is NULL, and that register must be > R0.
10276 			 */
10277 			err = 0;
10278 		}
10279 		if (err) {
10280 			verbose(env, "func %s#%d reference has not been acquired before\n",
10281 				func_id_name(func_id), func_id);
10282 			return err;
10283 		}
10284 	}
10285 
10286 	switch (func_id) {
10287 	case BPF_FUNC_tail_call:
10288 		err = check_reference_leak(env, false);
10289 		if (err) {
10290 			verbose(env, "tail_call would lead to reference leak\n");
10291 			return err;
10292 		}
10293 		break;
10294 	case BPF_FUNC_get_local_storage:
10295 		/* check that the flags argument in get_local_storage(map, flags) is 0;
10296 		 * this is required because get_local_storage() can't return an error.
10297 		 */
10298 		if (!register_is_null(&regs[BPF_REG_2])) {
10299 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
10300 			return -EINVAL;
10301 		}
10302 		break;
10303 	case BPF_FUNC_for_each_map_elem:
10304 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
10305 					 set_map_elem_callback_state);
10306 		break;
10307 	case BPF_FUNC_timer_set_callback:
10308 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
10309 					 set_timer_callback_state);
10310 		break;
10311 	case BPF_FUNC_find_vma:
10312 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
10313 					 set_find_vma_callback_state);
10314 		break;
10315 	case BPF_FUNC_snprintf:
10316 		err = check_bpf_snprintf_call(env, regs);
10317 		break;
10318 	case BPF_FUNC_loop:
10319 		update_loop_inline_state(env, meta.subprogno);
10320 		/* Verifier relies on R1 value to determine if bpf_loop() iteration
10321 		 * is finished, thus mark it precise.
10322 		 */
10323 		err = mark_chain_precision(env, BPF_REG_1);
10324 		if (err)
10325 			return err;
10326 		if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) {
10327 			err = push_callback_call(env, insn, insn_idx, meta.subprogno,
10328 						 set_loop_callback_state);
10329 		} else {
10330 			cur_func(env)->callback_depth = 0;
10331 			if (env->log.level & BPF_LOG_LEVEL2)
10332 				verbose(env, "frame%d bpf_loop iteration limit reached\n",
10333 					env->cur_state->curframe);
10334 		}
10335 		break;
10336 	case BPF_FUNC_dynptr_from_mem:
10337 		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
10338 			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
10339 				reg_type_str(env, regs[BPF_REG_1].type));
10340 			return -EACCES;
10341 		}
10342 		break;
10343 	case BPF_FUNC_set_retval:
10344 		if (prog_type == BPF_PROG_TYPE_LSM &&
10345 		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
10346 			if (!env->prog->aux->attach_func_proto->type) {
10347 				/* Make sure programs that attach to void
10348 				 * hooks don't try to modify return value.
10349 				 */
10350 				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
10351 				return -EINVAL;
10352 			}
10353 		}
10354 		break;
10355 	case BPF_FUNC_dynptr_data:
10356 	{
10357 		struct bpf_reg_state *reg;
10358 		int id, ref_obj_id;
10359 
10360 		reg = get_dynptr_arg_reg(env, fn, regs);
10361 		if (!reg)
10362 			return -EFAULT;
10363 
10365 		if (meta.dynptr_id) {
10366 			verbose(env, "verifier internal error: meta.dynptr_id already set\n");
10367 			return -EFAULT;
10368 		}
10369 		if (meta.ref_obj_id) {
10370 			verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
10371 			return -EFAULT;
10372 		}
10373 
10374 		id = dynptr_id(env, reg);
10375 		if (id < 0) {
10376 			verbose(env, "verifier internal error: failed to obtain dynptr id\n");
10377 			return id;
10378 		}
10379 
10380 		ref_obj_id = dynptr_ref_obj_id(env, reg);
10381 		if (ref_obj_id < 0) {
10382 			verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
10383 			return ref_obj_id;
10384 		}
10385 
10386 		meta.dynptr_id = id;
10387 		meta.ref_obj_id = ref_obj_id;
10388 
10389 		break;
10390 	}
10391 	case BPF_FUNC_dynptr_write:
10392 	{
10393 		enum bpf_dynptr_type dynptr_type;
10394 		struct bpf_reg_state *reg;
10395 
10396 		reg = get_dynptr_arg_reg(env, fn, regs);
10397 		if (!reg)
10398 			return -EFAULT;
10399 
10400 		dynptr_type = dynptr_get_type(env, reg);
10401 		if (dynptr_type == BPF_DYNPTR_TYPE_INVALID)
10402 			return -EFAULT;
10403 
10404 		if (dynptr_type == BPF_DYNPTR_TYPE_SKB)
10405 			/* this will trigger clear_all_pkt_pointers(), which will
10406 			 * invalidate all dynptr slices associated with the skb
10407 			 */
10408 			changes_data = true;
10409 
10410 		break;
10411 	}
10412 	case BPF_FUNC_per_cpu_ptr:
10413 	case BPF_FUNC_this_cpu_ptr:
10414 	{
10415 		struct bpf_reg_state *reg = &regs[BPF_REG_1];
10416 		const struct btf_type *type;
10417 
10418 		if (reg->type & MEM_RCU) {
10419 			type = btf_type_by_id(reg->btf, reg->btf_id);
10420 			if (!type || !btf_type_is_struct(type)) {
10421 				verbose(env, "Helper has invalid btf/btf_id in R1\n");
10422 				return -EFAULT;
10423 			}
10424 			returns_cpu_specific_alloc_ptr = true;
10425 			env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true;
10426 		}
10427 		break;
10428 	}
10429 	case BPF_FUNC_user_ringbuf_drain:
10430 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
10431 					 set_user_ringbuf_callback_state);
10432 		break;
10433 	}
10434 
10435 	if (err)
10436 		return err;
10437 
10438 	/* reset caller saved regs */
10439 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
10440 		mark_reg_not_init(env, regs, caller_saved[i]);
10441 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
10442 	}
10443 
10444 	/* helper call returns 64-bit value. */
10445 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
10446 
10447 	/* update return register (already marked as written above) */
10448 	ret_type = fn->ret_type;
10449 	ret_flag = type_flag(ret_type);
10450 
10451 	switch (base_type(ret_type)) {
10452 	case RET_INTEGER:
10453 		/* sets type to SCALAR_VALUE */
10454 		mark_reg_unknown(env, regs, BPF_REG_0);
10455 		break;
10456 	case RET_VOID:
10457 		regs[BPF_REG_0].type = NOT_INIT;
10458 		break;
10459 	case RET_PTR_TO_MAP_VALUE:
10460 		/* No offset, variable or fixed, has been applied yet */
10461 		mark_reg_known_zero(env, regs, BPF_REG_0);
10462 		/* remember map_ptr, so that check_map_access()
10463 		 * can check 'value_size' boundary of memory access
10464 		 * to map element returned from bpf_map_lookup_elem()
10465 		 */
10466 		if (meta.map_ptr == NULL) {
10467 			verbose(env,
10468 				"kernel subsystem misconfigured verifier\n");
10469 			return -EINVAL;
10470 		}
10471 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
10472 		regs[BPF_REG_0].map_uid = meta.map_uid;
10473 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
10474 		if (!type_may_be_null(ret_type) &&
10475 		    btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
10476 			regs[BPF_REG_0].id = ++env->id_gen;
10477 		}
10478 		break;
10479 	case RET_PTR_TO_SOCKET:
10480 		mark_reg_known_zero(env, regs, BPF_REG_0);
10481 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
10482 		break;
10483 	case RET_PTR_TO_SOCK_COMMON:
10484 		mark_reg_known_zero(env, regs, BPF_REG_0);
10485 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
10486 		break;
10487 	case RET_PTR_TO_TCP_SOCK:
10488 		mark_reg_known_zero(env, regs, BPF_REG_0);
10489 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
10490 		break;
10491 	case RET_PTR_TO_MEM:
10492 		mark_reg_known_zero(env, regs, BPF_REG_0);
10493 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
10494 		regs[BPF_REG_0].mem_size = meta.mem_size;
10495 		break;
10496 	case RET_PTR_TO_MEM_OR_BTF_ID:
10497 	{
10498 		const struct btf_type *t;
10499 
10500 		mark_reg_known_zero(env, regs, BPF_REG_0);
10501 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
10502 		if (!btf_type_is_struct(t)) {
10503 			u32 tsize;
10504 			const struct btf_type *ret;
10505 			const char *tname;
10506 
10507 			/* resolve the type size of ksym. */
10508 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
10509 			if (IS_ERR(ret)) {
10510 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
10511 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
10512 					tname, PTR_ERR(ret));
10513 				return -EINVAL;
10514 			}
10515 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
10516 			regs[BPF_REG_0].mem_size = tsize;
10517 		} else {
10518 			if (returns_cpu_specific_alloc_ptr) {
10519 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU;
10520 			} else {
10521 				/* MEM_RDONLY may be carried from ret_flag, but it
10522 				 * doesn't apply to PTR_TO_BTF_ID. Fold it, otherwise
10523 				 * it will confuse the check of PTR_TO_BTF_ID in
10524 				 * check_mem_access().
10525 				 */
10526 				ret_flag &= ~MEM_RDONLY;
10527 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
10528 			}
10529 
10530 			regs[BPF_REG_0].btf = meta.ret_btf;
10531 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
10532 		}
10533 		break;
10534 	}
10535 	case RET_PTR_TO_BTF_ID:
10536 	{
10537 		struct btf *ret_btf;
10538 		int ret_btf_id;
10539 
10540 		mark_reg_known_zero(env, regs, BPF_REG_0);
10541 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
10542 		if (func_id == BPF_FUNC_kptr_xchg) {
10543 			ret_btf = meta.kptr_field->kptr.btf;
10544 			ret_btf_id = meta.kptr_field->kptr.btf_id;
10545 			if (!btf_is_kernel(ret_btf)) {
10546 				regs[BPF_REG_0].type |= MEM_ALLOC;
10547 				if (meta.kptr_field->type == BPF_KPTR_PERCPU)
10548 					regs[BPF_REG_0].type |= MEM_PERCPU;
10549 			}
10550 		} else {
10551 			if (fn->ret_btf_id == BPF_PTR_POISON) {
10552 				verbose(env, "verifier internal error:");
10553 				verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
10554 					func_id_name(func_id));
10555 				return -EINVAL;
10556 			}
10557 			ret_btf = btf_vmlinux;
10558 			ret_btf_id = *fn->ret_btf_id;
10559 		}
10560 		if (ret_btf_id == 0) {
10561 			verbose(env, "invalid return type %u of func %s#%d\n",
10562 				base_type(ret_type), func_id_name(func_id),
10563 				func_id);
10564 			return -EINVAL;
10565 		}
10566 		regs[BPF_REG_0].btf = ret_btf;
10567 		regs[BPF_REG_0].btf_id = ret_btf_id;
10568 		break;
10569 	}
10570 	default:
10571 		verbose(env, "unknown return type %u of func %s#%d\n",
10572 			base_type(ret_type), func_id_name(func_id), func_id);
10573 		return -EINVAL;
10574 	}
10575 
10576 	if (type_may_be_null(regs[BPF_REG_0].type))
10577 		regs[BPF_REG_0].id = ++env->id_gen;
10578 
10579 	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
10580 		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
10581 			func_id_name(func_id), func_id);
10582 		return -EFAULT;
10583 	}
10584 
10585 	if (is_dynptr_ref_function(func_id))
10586 		regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
10587 
10588 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
10589 		/* For release_reference() */
10590 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
10591 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
10592 		int id = acquire_reference_state(env, insn_idx);
10593 
10594 		if (id < 0)
10595 			return id;
10596 		/* For mark_ptr_or_null_reg() */
10597 		regs[BPF_REG_0].id = id;
10598 		/* For release_reference() */
10599 		regs[BPF_REG_0].ref_obj_id = id;
10600 	}
10601 
10602 	err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
10603 	if (err)
10604 		return err;
10605 
10606 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
10607 	if (err)
10608 		return err;
10609 
10610 	if ((func_id == BPF_FUNC_get_stack ||
10611 	     func_id == BPF_FUNC_get_task_stack) &&
10612 	    !env->prog->has_callchain_buf) {
10613 		const char *err_str;
10614 
10615 #ifdef CONFIG_PERF_EVENTS
10616 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
10617 		err_str = "cannot get callchain buffer for func %s#%d\n";
10618 #else
10619 		err = -ENOTSUPP;
10620 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
10621 #endif
10622 		if (err) {
10623 			verbose(env, err_str, func_id_name(func_id), func_id);
10624 			return err;
10625 		}
10626 
10627 		env->prog->has_callchain_buf = true;
10628 	}
10629 
10630 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
10631 		env->prog->call_get_stack = true;
10632 
10633 	if (func_id == BPF_FUNC_get_func_ip) {
10634 		if (check_get_func_ip(env))
10635 			return -ENOTSUPP;
10636 		env->prog->call_get_func_ip = true;
10637 	}
10638 
10639 	if (changes_data)
10640 		clear_all_pkt_pointers(env);
10641 	return 0;
10642 }
10643 
10644 /* mark_btf_func_reg_size() is used when the reg size is determined by
10645  * the size of the BTF func_proto's return value or argument.
10646  */
10647 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
10648 				   size_t reg_size)
10649 {
10650 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
10651 
10652 	if (regno == BPF_REG_0) {
10653 		/* Function return value */
10654 		reg->live |= REG_LIVE_WRITTEN;
10655 		reg->subreg_def = reg_size == sizeof(u64) ?
10656 			DEF_NOT_SUBREG : env->insn_idx + 1;
10657 	} else {
10658 		/* Function argument */
10659 		if (reg_size == sizeof(u64)) {
10660 			mark_insn_zext(env, reg);
10661 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
10662 		} else {
10663 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
10664 		}
10665 	}
10666 }
10667 
10668 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
10669 {
10670 	return meta->kfunc_flags & KF_ACQUIRE;
10671 }
10672 
10673 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
10674 {
10675 	return meta->kfunc_flags & KF_RELEASE;
10676 }
10677 
10678 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
10679 {
10680 	return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta);
10681 }
10682 
10683 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
10684 {
10685 	return meta->kfunc_flags & KF_SLEEPABLE;
10686 }
10687 
10688 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
10689 {
10690 	return meta->kfunc_flags & KF_DESTRUCTIVE;
10691 }
10692 
10693 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
10694 {
10695 	return meta->kfunc_flags & KF_RCU;
10696 }
10697 
10698 static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta)
10699 {
10700 	return meta->kfunc_flags & KF_RCU_PROTECTED;
10701 }
10702 
10703 static bool is_kfunc_arg_mem_size(const struct btf *btf,
10704 				  const struct btf_param *arg,
10705 				  const struct bpf_reg_state *reg)
10706 {
10707 	const struct btf_type *t;
10708 
10709 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
10710 	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
10711 		return false;
10712 
10713 	return btf_param_match_suffix(btf, arg, "__sz");
10714 }
10715 
10716 static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
10717 					const struct btf_param *arg,
10718 					const struct bpf_reg_state *reg)
10719 {
10720 	const struct btf_type *t;
10721 
10722 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
10723 	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
10724 		return false;
10725 
10726 	return btf_param_match_suffix(btf, arg, "__szk");
10727 }
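/* The suffix conventions matched above and below are encoded in kfunc
 * parameter names. A hypothetical kfunc declaration:
 *
 *   __bpf_kfunc int my_kfunc(void *mem, u32 mem__sz);
 *
 * tells the verifier that mem__sz is the size of the mem buffer; __szk
 * additionally requires that size to be a constant known at verification
 * time, and suffixes like __opt, __k and __ign below work the same way.
 */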
10728 
10729 static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
10730 {
10731 	return btf_param_match_suffix(btf, arg, "__opt");
10732 }
10733 
10734 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
10735 {
10736 	return btf_param_match_suffix(btf, arg, "__k");
10737 }
10738 
10739 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
10740 {
10741 	return btf_param_match_suffix(btf, arg, "__ign");
10742 }
10743 
10744 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
10745 {
10746 	return btf_param_match_suffix(btf, arg, "__alloc");
10747 }
10748 
10749 static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
10750 {
10751 	return btf_param_match_suffix(btf, arg, "__uninit");
10752 }
10753 
10754 static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
10755 {
10756 	return btf_param_match_suffix(btf, arg, "__refcounted_kptr");
10757 }
10758 
10759 static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg)
10760 {
10761 	return btf_param_match_suffix(btf, arg, "__nullable");
10762 }
10763 
10764 static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
10765 {
10766 	return btf_param_match_suffix(btf, arg, "__str");
10767 }
10768 
10769 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
10770 					  const struct btf_param *arg,
10771 					  const char *name)
10772 {
10773 	int len, target_len = strlen(name);
10774 	const char *param_name;
10775 
10776 	param_name = btf_name_by_offset(btf, arg->name_off);
10777 	if (str_is_empty(param_name))
10778 		return false;
10779 	len = strlen(param_name);
10780 	if (len != target_len)
10781 		return false;
10782 	if (strcmp(param_name, name))
10783 		return false;
10784 
10785 	return true;
10786 }
10787 
10788 enum {
10789 	KF_ARG_DYNPTR_ID,
10790 	KF_ARG_LIST_HEAD_ID,
10791 	KF_ARG_LIST_NODE_ID,
10792 	KF_ARG_RB_ROOT_ID,
10793 	KF_ARG_RB_NODE_ID,
10794 };
10795 
10796 BTF_ID_LIST(kf_arg_btf_ids)
10797 BTF_ID(struct, bpf_dynptr_kern)
10798 BTF_ID(struct, bpf_list_head)
10799 BTF_ID(struct, bpf_list_node)
10800 BTF_ID(struct, bpf_rb_root)
10801 BTF_ID(struct, bpf_rb_node)
10802 
10803 static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
10804 				    const struct btf_param *arg, int type)
10805 {
10806 	const struct btf_type *t;
10807 	u32 res_id;
10808 
10809 	t = btf_type_skip_modifiers(btf, arg->type, NULL);
10810 	if (!t)
10811 		return false;
10812 	if (!btf_type_is_ptr(t))
10813 		return false;
10814 	t = btf_type_skip_modifiers(btf, t->type, &res_id);
10815 	if (!t)
10816 		return false;
10817 	return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
10818 }
10819 
10820 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
10821 {
10822 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
10823 }
10824 
10825 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
10826 {
10827 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
10828 }
10829 
10830 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
10831 {
10832 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
10833 }
10834 
10835 static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
10836 {
10837 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
10838 }
10839 
10840 static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
10841 {
10842 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
10843 }
10844 
10845 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
10846 				  const struct btf_param *arg)
10847 {
10848 	const struct btf_type *t;
10849 
10850 	t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
10851 	if (!t)
10852 		return false;
10853 
10854 	return true;
10855 }
10856 
10857 /* Returns true if the struct is composed of scalars; up to 4 levels of nesting are allowed */
10858 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
10859 					const struct btf *btf,
10860 					const struct btf_type *t, int rec)
10861 {
10862 	const struct btf_type *member_type;
10863 	const struct btf_member *member;
10864 	u32 i;
10865 
10866 	if (!btf_type_is_struct(t))
10867 		return false;
10868 
10869 	for_each_member(i, t, member) {
10870 		const struct btf_array *array;
10871 
10872 		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
10873 		if (btf_type_is_struct(member_type)) {
10874 			if (rec >= 3) {
10875 				verbose(env, "max struct nesting depth exceeded\n");
10876 				return false;
10877 			}
10878 			if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))
10879 				return false;
10880 			continue;
10881 		}
10882 		if (btf_type_is_array(member_type)) {
10883 			array = btf_array(member_type);
10884 			if (!array->nelems)
10885 				return false;
10886 			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
10887 			if (!btf_type_is_scalar(member_type))
10888 				return false;
10889 			continue;
10890 		}
10891 		if (!btf_type_is_scalar(member_type))
10892 			return false;
10893 	}
10894 	return true;
10895 }
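/* e.g. (illustrative) this check accepts
 *
 *   struct plain { u32 a; struct { u8 b[8]; } inner; };
 *
 * but rejects structs containing pointers, zero-sized arrays, or struct
 * members nested more than four levels deep.
 */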
10896 
10897 enum kfunc_ptr_arg_type {
10898 	KF_ARG_PTR_TO_CTX,
10899 	KF_ARG_PTR_TO_ALLOC_BTF_ID,    /* Allocated object */
10900 	KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */
10901 	KF_ARG_PTR_TO_DYNPTR,
10902 	KF_ARG_PTR_TO_ITER,
10903 	KF_ARG_PTR_TO_LIST_HEAD,
10904 	KF_ARG_PTR_TO_LIST_NODE,
10905 	KF_ARG_PTR_TO_BTF_ID,	       /* Also covers reg2btf_ids conversions */
10906 	KF_ARG_PTR_TO_MEM,
10907 	KF_ARG_PTR_TO_MEM_SIZE,	       /* Size derived from next argument, skip it */
10908 	KF_ARG_PTR_TO_CALLBACK,
10909 	KF_ARG_PTR_TO_RB_ROOT,
10910 	KF_ARG_PTR_TO_RB_NODE,
10911 	KF_ARG_PTR_TO_NULL,
10912 	KF_ARG_PTR_TO_CONST_STR,
10913 };
10914 
10915 enum special_kfunc_type {
10916 	KF_bpf_obj_new_impl,
10917 	KF_bpf_obj_drop_impl,
10918 	KF_bpf_refcount_acquire_impl,
10919 	KF_bpf_list_push_front_impl,
10920 	KF_bpf_list_push_back_impl,
10921 	KF_bpf_list_pop_front,
10922 	KF_bpf_list_pop_back,
10923 	KF_bpf_cast_to_kern_ctx,
10924 	KF_bpf_rdonly_cast,
10925 	KF_bpf_rcu_read_lock,
10926 	KF_bpf_rcu_read_unlock,
10927 	KF_bpf_rbtree_remove,
10928 	KF_bpf_rbtree_add_impl,
10929 	KF_bpf_rbtree_first,
10930 	KF_bpf_dynptr_from_skb,
10931 	KF_bpf_dynptr_from_xdp,
10932 	KF_bpf_dynptr_slice,
10933 	KF_bpf_dynptr_slice_rdwr,
10934 	KF_bpf_dynptr_clone,
10935 	KF_bpf_percpu_obj_new_impl,
10936 	KF_bpf_percpu_obj_drop_impl,
10937 	KF_bpf_throw,
10938 	KF_bpf_iter_css_task_new,
10939 };
10940 
10941 BTF_SET_START(special_kfunc_set)
10942 BTF_ID(func, bpf_obj_new_impl)
10943 BTF_ID(func, bpf_obj_drop_impl)
10944 BTF_ID(func, bpf_refcount_acquire_impl)
10945 BTF_ID(func, bpf_list_push_front_impl)
10946 BTF_ID(func, bpf_list_push_back_impl)
10947 BTF_ID(func, bpf_list_pop_front)
10948 BTF_ID(func, bpf_list_pop_back)
10949 BTF_ID(func, bpf_cast_to_kern_ctx)
10950 BTF_ID(func, bpf_rdonly_cast)
10951 BTF_ID(func, bpf_rbtree_remove)
10952 BTF_ID(func, bpf_rbtree_add_impl)
10953 BTF_ID(func, bpf_rbtree_first)
10954 BTF_ID(func, bpf_dynptr_from_skb)
10955 BTF_ID(func, bpf_dynptr_from_xdp)
10956 BTF_ID(func, bpf_dynptr_slice)
10957 BTF_ID(func, bpf_dynptr_slice_rdwr)
10958 BTF_ID(func, bpf_dynptr_clone)
10959 BTF_ID(func, bpf_percpu_obj_new_impl)
10960 BTF_ID(func, bpf_percpu_obj_drop_impl)
10961 BTF_ID(func, bpf_throw)
10962 #ifdef CONFIG_CGROUPS
10963 BTF_ID(func, bpf_iter_css_task_new)
10964 #endif
10965 BTF_SET_END(special_kfunc_set)
10966 
10967 BTF_ID_LIST(special_kfunc_list)
10968 BTF_ID(func, bpf_obj_new_impl)
10969 BTF_ID(func, bpf_obj_drop_impl)
10970 BTF_ID(func, bpf_refcount_acquire_impl)
10971 BTF_ID(func, bpf_list_push_front_impl)
10972 BTF_ID(func, bpf_list_push_back_impl)
10973 BTF_ID(func, bpf_list_pop_front)
10974 BTF_ID(func, bpf_list_pop_back)
10975 BTF_ID(func, bpf_cast_to_kern_ctx)
10976 BTF_ID(func, bpf_rdonly_cast)
10977 BTF_ID(func, bpf_rcu_read_lock)
10978 BTF_ID(func, bpf_rcu_read_unlock)
10979 BTF_ID(func, bpf_rbtree_remove)
10980 BTF_ID(func, bpf_rbtree_add_impl)
10981 BTF_ID(func, bpf_rbtree_first)
10982 BTF_ID(func, bpf_dynptr_from_skb)
10983 BTF_ID(func, bpf_dynptr_from_xdp)
10984 BTF_ID(func, bpf_dynptr_slice)
10985 BTF_ID(func, bpf_dynptr_slice_rdwr)
10986 BTF_ID(func, bpf_dynptr_clone)
10987 BTF_ID(func, bpf_percpu_obj_new_impl)
10988 BTF_ID(func, bpf_percpu_obj_drop_impl)
10989 BTF_ID(func, bpf_throw)
10990 #ifdef CONFIG_CGROUPS
10991 BTF_ID(func, bpf_iter_css_task_new)
10992 #else
10993 BTF_ID_UNUSED
10994 #endif
10995 
10996 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
10997 {
10998 	if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
10999 	    meta->arg_owning_ref) {
11000 		return false;
11001 	}
11002 
11003 	return meta->kfunc_flags & KF_RET_NULL;
11004 }
11005 
11006 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
11007 {
11008 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
11009 }
11010 
11011 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
11012 {
11013 	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
11014 }
11015 
11016 static enum kfunc_ptr_arg_type
11017 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
11018 		       struct bpf_kfunc_call_arg_meta *meta,
11019 		       const struct btf_type *t, const struct btf_type *ref_t,
11020 		       const char *ref_tname, const struct btf_param *args,
11021 		       int argno, int nargs)
11022 {
11023 	u32 regno = argno + 1;
11024 	struct bpf_reg_state *regs = cur_regs(env);
11025 	struct bpf_reg_state *reg = &regs[regno];
11026 	bool arg_mem_size = false;
11027 
11028 	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
11029 		return KF_ARG_PTR_TO_CTX;
11030 
11031 	/* In this function, we verify the kfunc's BTF as per the argument type,
11032 	 * leaving the rest of the verification with respect to the register
11033 	 * type to our caller. When a set of conditions hold in the BTF type of
11034 	 * arguments, we resolve it to a known kfunc_ptr_arg_type.
11035 	 */
11036 	if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
11037 		return KF_ARG_PTR_TO_CTX;
11038 
11039 	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
11040 		return KF_ARG_PTR_TO_ALLOC_BTF_ID;
11041 
11042 	if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno]))
11043 		return KF_ARG_PTR_TO_REFCOUNTED_KPTR;
11044 
11045 	if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
11046 		return KF_ARG_PTR_TO_DYNPTR;
11047 
11048 	if (is_kfunc_arg_iter(meta, argno))
11049 		return KF_ARG_PTR_TO_ITER;
11050 
11051 	if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
11052 		return KF_ARG_PTR_TO_LIST_HEAD;
11053 
11054 	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
11055 		return KF_ARG_PTR_TO_LIST_NODE;
11056 
11057 	if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
11058 		return KF_ARG_PTR_TO_RB_ROOT;
11059 
11060 	if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
11061 		return KF_ARG_PTR_TO_RB_NODE;
11062 
11063 	if (is_kfunc_arg_const_str(meta->btf, &args[argno]))
11064 		return KF_ARG_PTR_TO_CONST_STR;
11065 
11066 	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
11067 		if (!btf_type_is_struct(ref_t)) {
11068 			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
11069 				meta->func_name, argno, btf_type_str(ref_t), ref_tname);
11070 			return -EINVAL;
11071 		}
11072 		return KF_ARG_PTR_TO_BTF_ID;
11073 	}
11074 
11075 	if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
11076 		return KF_ARG_PTR_TO_CALLBACK;
11077 
11078 	if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg))
11079 		return KF_ARG_PTR_TO_NULL;
11080 
11081 	if (argno + 1 < nargs &&
11082 	    (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
11083 	     is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
11084 		arg_mem_size = true;
11085 
11086 	/* This is the catch-all argument type for register types supported by
11087 	 * check_helper_mem_access. However, we only allow it when the argument
11088 	 * type is a pointer to a scalar, or a struct composed (recursively) of
11089 	 * scalars. When arg_mem_size is true, the pointer can also be void *.
11090 	 */
11091 	if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
11092 	    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
11093 		verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
11094 			argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
11095 		return -EINVAL;
11096 	}
11097 	return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
11098 }
11099 
11100 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
11101 					struct bpf_reg_state *reg,
11102 					const struct btf_type *ref_t,
11103 					const char *ref_tname, u32 ref_id,
11104 					struct bpf_kfunc_call_arg_meta *meta,
11105 					int argno)
11106 {
11107 	const struct btf_type *reg_ref_t;
11108 	bool strict_type_match = false;
11109 	const struct btf *reg_btf;
11110 	const char *reg_ref_tname;
11111 	u32 reg_ref_id;
11112 
11113 	if (base_type(reg->type) == PTR_TO_BTF_ID) {
11114 		reg_btf = reg->btf;
11115 		reg_ref_id = reg->btf_id;
11116 	} else {
11117 		reg_btf = btf_vmlinux;
11118 		reg_ref_id = *reg2btf_ids[base_type(reg->type)];
11119 	}
11120 
11121 	/* Enforce strict type matching for calls to kfuncs that are acquiring
11122 	 * or releasing a reference, or are no-cast aliases. We do _not_
11123 	 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
11124 	 * as we want to enable BPF programs to pass types that are bitwise
11125 	 * equivalent without forcing them to explicitly cast with something
11126 	 * like bpf_cast_to_kern_ctx().
11127 	 *
11128 	 * For example, say we had a type like the following:
11129 	 *
11130 	 * struct bpf_cpumask {
11131 	 *	cpumask_t cpumask;
11132 	 *	refcount_t usage;
11133 	 * };
11134 	 *
11135 	 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
11136 	 * to a struct cpumask, so it would be safe to pass a struct
11137 	 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
11138 	 *
11139 	 * The philosophy here is similar to how we allow scalars of different
11140 	 * types to be passed to kfuncs as long as the size is the same. The
11141 	 * only difference here is that we're simply allowing
11142 	 * btf_struct_ids_match() to walk the struct at the 0th offset, and
11143 	 * resolve types.
11144 	 */
11145 	if (is_kfunc_acquire(meta) ||
11146 	    (is_kfunc_release(meta) && reg->ref_obj_id) ||
11147 	    btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
11148 		strict_type_match = true;
11149 
11150 	WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
11151 
11152 	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
11153 	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
11154 	if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
11155 		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
11156 			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
11157 			btf_type_str(reg_ref_t), reg_ref_tname);
11158 		return -EINVAL;
11159 	}
11160 	return 0;
11161 }
11162 
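/* A sketch of what the relaxed matching above permits (kfunc names from
 * kernel/bpf/cpumask.c): because btf_struct_ids_match() may walk the struct
 * at offset 0, a program may pass a struct bpf_cpumask * where a kfunc
 * expects a const struct cpumask *, without needing a cast kfunc:
 *
 *	struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *	if (mask) {
 *		bpf_cpumask_set_cpu(0, mask);
 *		if (bpf_cpumask_test_cpu(0, (const struct cpumask *)mask))
 *			...;
 *		bpf_cpumask_release(mask);
 *	}
 *
 * Strict matching would only apply if the callee were an acquire/release
 * kfunc or the types were no-cast aliases.
 */
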
11163 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
11164 {
11165 	struct bpf_verifier_state *state = env->cur_state;
11166 	struct btf_record *rec = reg_btf_record(reg);
11167 
11168 	if (!state->active_lock.ptr) {
11169 		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
11170 		return -EFAULT;
11171 	}
11172 
11173 	if (type_flag(reg->type) & NON_OWN_REF) {
11174 		verbose(env, "verifier internal error: NON_OWN_REF already set\n");
11175 		return -EFAULT;
11176 	}
11177 
11178 	reg->type |= NON_OWN_REF;
11179 	if (rec->refcount_off >= 0)
11180 		reg->type |= MEM_RCU;
11181 
11182 	return 0;
11183 }
11184 
11185 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
11186 {
11187 	struct bpf_func_state *state, *unused;
11188 	struct bpf_reg_state *reg;
11189 	int i;
11190 
11191 	state = cur_func(env);
11192 
11193 	if (!ref_obj_id) {
11194 		verbose(env, "verifier internal error: ref_obj_id is zero for "
11195 			     "owning -> non-owning conversion\n");
11196 		return -EFAULT;
11197 	}
11198 
11199 	for (i = 0; i < state->acquired_refs; i++) {
11200 		if (state->refs[i].id != ref_obj_id)
11201 			continue;
11202 
11203 		/* Clear ref_obj_id here so release_reference doesn't clobber
11204 		 * the whole reg
11205 		 */
11206 		bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
11207 			if (reg->ref_obj_id == ref_obj_id) {
11208 				reg->ref_obj_id = 0;
11209 				ref_set_non_owning(env, reg);
11210 			}
11211 		}));
11212 		return 0;
11213 	}
11214 
11215 	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
11216 	return -EFAULT;
11217 }
11218 
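/* A sketch of the conversion performed by the helper above (using the
 * bpf_experimental.h wrappers from the BPF selftests): pushing an owning
 * reference into a collection releases its ref_obj_id and downgrades every
 * copy of the register to a non-owning reference:
 *
 *	n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_back(&head, &n->node);
 *	// n is now non-owning: readable, but only while the lock is held
 *	bpf_spin_unlock(&lock);
 */
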
11219 /* Implementation details:
11220  *
11221  * Each register points to some region of memory, which we define as an
11222  * allocation. Each allocation may embed a bpf_spin_lock which protects any
11223  * special BPF objects (bpf_list_head, bpf_rb_root, etc.) that are part of the
11224  * same allocation. The lock and the data it protects are colocated in the same
11225  * memory region.
11226  *
11227  * Hence, every time a register holds a pointer value pointing to such an
11228  * allocation, the verifier preserves a unique reg->id for it.
11229  *
11230  * The verifier remembers the lock 'ptr' and the lock 'id' whenever
11231  * bpf_spin_lock is called.
11232  *
11233  * To enable this, lock state in the verifier captures two values:
11234  *	active_lock.ptr = Register's type specific pointer
11235  *	active_lock.id  = A unique ID for each register pointer value
11236  *
11237  * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
11238  * supported register types.
11239  *
11240  * For map values, the active_lock.ptr is the reg->map_ptr; for allocated
11241  * objects, it is the reg->btf pointer.
11242  *
11243  * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
11244  * can establish the provenance of the map value statically for each distinct
11245  * lookup into such maps. They always contain a single map value, so assigning a
11246  * unique ID to each pseudo load would pessimize the algorithm and reject valid
11247  * programs.
11247  *
11248  * So, in the case of global variables, which use array maps with max_entries =
11249  * 1, the active_lock.ptr becomes the map_ptr and id = 0 (since they all point
11250  * into the same map value, as described above).
11251  *
11252  * In the case of inner map lookups, the inner map pointer has the same map_ptr
11253  * as the outer map pointer (in the verifier's context), but each lookup into an
11254  * inner map is assigned a fresh reg->id. So while lookups into distinct inner
11255  * maps from the same outer map share the same map_ptr as active_lock.ptr, each
11256  * lookup gets a different reg->id, and hence a different active_lock.id.
11258  *
11259  * In the case of allocated objects, active_lock.ptr is the reg->btf, and the
11260  * reg->id is a unique ID preserved after the NULL pointer check on the pointer
11261  * returned from bpf_obj_new. Each allocation receives a new reg->id.
11262  */
11263 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
11264 {
11265 	void *ptr;
11266 	u32 id;
11267 
11268 	switch ((int)reg->type) {
11269 	case PTR_TO_MAP_VALUE:
11270 		ptr = reg->map_ptr;
11271 		break;
11272 	case PTR_TO_BTF_ID | MEM_ALLOC:
11273 		ptr = reg->btf;
11274 		break;
11275 	default:
11276 		verbose(env, "verifier internal error: unknown reg type for lock check\n");
11277 		return -EFAULT;
11278 	}
11279 	id = reg->id;
11280 
11281 	if (!env->cur_state->active_lock.ptr)
11282 		return -EINVAL;
11283 	if (env->cur_state->active_lock.ptr != ptr ||
11284 	    env->cur_state->active_lock.id != id) {
11285 		verbose(env, "held lock and object are not in the same allocation\n");
11286 		return -EINVAL;
11287 	}
11288 	return 0;
11289 }
11290 
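/* Example of a program this check rejects (a sketch using the selftests'
 * private()/__contains() conventions; 'foo' is a made-up node type): lock_b
 * lives in a different map value (different datasec) than head_a, so
 * active_lock.ptr cannot match:
 *
 *	private(A) struct bpf_spin_lock lock_a;
 *	private(A) struct bpf_list_head head_a __contains(foo, node);
 *	private(B) struct bpf_spin_lock lock_b;
 *
 *	bpf_spin_lock(&lock_b);
 *	bpf_list_push_back(&head_a, &n->node);	// "held lock and object are
 *						//  not in the same allocation"
 *	bpf_spin_unlock(&lock_b);
 */
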
11291 static bool is_bpf_list_api_kfunc(u32 btf_id)
11292 {
11293 	return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
11294 	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
11295 	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
11296 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
11297 }
11298 
11299 static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
11300 {
11301 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
11302 	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
11303 	       btf_id == special_kfunc_list[KF_bpf_rbtree_first];
11304 }
11305 
11306 static bool is_bpf_graph_api_kfunc(u32 btf_id)
11307 {
11308 	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
11309 	       btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
11310 }
11311 
11312 static bool is_sync_callback_calling_kfunc(u32 btf_id)
11313 {
11314 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
11315 }
11316 
11317 static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
11318 {
11319 	return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
11320 	       insn->imm == special_kfunc_list[KF_bpf_throw];
11321 }
11322 
11323 static bool is_rbtree_lock_required_kfunc(u32 btf_id)
11324 {
11325 	return is_bpf_rbtree_api_kfunc(btf_id);
11326 }
11327 
11328 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
11329 					  enum btf_field_type head_field_type,
11330 					  u32 kfunc_btf_id)
11331 {
11332 	bool ret;
11333 
11334 	switch (head_field_type) {
11335 	case BPF_LIST_HEAD:
11336 		ret = is_bpf_list_api_kfunc(kfunc_btf_id);
11337 		break;
11338 	case BPF_RB_ROOT:
11339 		ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
11340 		break;
11341 	default:
11342 		verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
11343 			btf_field_type_name(head_field_type));
11344 		return false;
11345 	}
11346 
11347 	if (!ret)
11348 		verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
11349 			btf_field_type_name(head_field_type));
11350 	return ret;
11351 }
11352 
11353 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
11354 					  enum btf_field_type node_field_type,
11355 					  u32 kfunc_btf_id)
11356 {
11357 	bool ret;
11358 
11359 	switch (node_field_type) {
11360 	case BPF_LIST_NODE:
11361 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
11362 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]);
11363 		break;
11364 	case BPF_RB_NODE:
11365 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
11366 		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]);
11367 		break;
11368 	default:
11369 		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
11370 			btf_field_type_name(node_field_type));
11371 		return false;
11372 	}
11373 
11374 	if (!ret)
11375 		verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
11376 			btf_field_type_name(node_field_type));
11377 	return ret;
11378 }
11379 
11380 static int
11381 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
11382 				   struct bpf_reg_state *reg, u32 regno,
11383 				   struct bpf_kfunc_call_arg_meta *meta,
11384 				   enum btf_field_type head_field_type,
11385 				   struct btf_field **head_field)
11386 {
11387 	const char *head_type_name;
11388 	struct btf_field *field;
11389 	struct btf_record *rec;
11390 	u32 head_off;
11391 
11392 	if (meta->btf != btf_vmlinux) {
11393 		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
11394 		return -EFAULT;
11395 	}
11396 
11397 	if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
11398 		return -EFAULT;
11399 
11400 	head_type_name = btf_field_type_name(head_field_type);
11401 	if (!tnum_is_const(reg->var_off)) {
11402 		verbose(env,
11403 			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
11404 			regno, head_type_name);
11405 		return -EINVAL;
11406 	}
11407 
11408 	rec = reg_btf_record(reg);
11409 	head_off = reg->off + reg->var_off.value;
11410 	field = btf_record_find(rec, head_off, head_field_type);
11411 	if (!field) {
11412 		verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
11413 		return -EINVAL;
11414 	}
11415 
11416 	/* All functions require the graph root to be protected by a bpf_spin_lock */
11417 	if (check_reg_allocation_locked(env, reg)) {
11418 		verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
11419 			rec->spin_lock_off, head_type_name);
11420 		return -EINVAL;
11421 	}
11422 
11423 	if (*head_field) {
11424 		verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
11425 		return -EFAULT;
11426 	}
11427 	*head_field = field;
11428 	return 0;
11429 }
11430 
11431 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
11432 					   struct bpf_reg_state *reg, u32 regno,
11433 					   struct bpf_kfunc_call_arg_meta *meta)
11434 {
11435 	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
11436 							  &meta->arg_list_head.field);
11437 }
11438 
11439 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
11440 					     struct bpf_reg_state *reg, u32 regno,
11441 					     struct bpf_kfunc_call_arg_meta *meta)
11442 {
11443 	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
11444 							  &meta->arg_rbtree_root.field);
11445 }
11446 
11447 static int
11448 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
11449 				   struct bpf_reg_state *reg, u32 regno,
11450 				   struct bpf_kfunc_call_arg_meta *meta,
11451 				   enum btf_field_type head_field_type,
11452 				   enum btf_field_type node_field_type,
11453 				   struct btf_field **node_field)
11454 {
11455 	const char *node_type_name;
11456 	const struct btf_type *et, *t;
11457 	struct btf_field *field;
11458 	u32 node_off;
11459 
11460 	if (meta->btf != btf_vmlinux) {
11461 		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
11462 		return -EFAULT;
11463 	}
11464 
11465 	if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
11466 		return -EFAULT;
11467 
11468 	node_type_name = btf_field_type_name(node_field_type);
11469 	if (!tnum_is_const(reg->var_off)) {
11470 		verbose(env,
11471 			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
11472 			regno, node_type_name);
11473 		return -EINVAL;
11474 	}
11475 
11476 	node_off = reg->off + reg->var_off.value;
11477 	field = reg_find_field_offset(reg, node_off, node_field_type);
11478 	if (!field || field->offset != node_off) {
11479 		verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
11480 		return -EINVAL;
11481 	}
11482 
11483 	field = *node_field;
11484 
11485 	et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
11486 	t = btf_type_by_id(reg->btf, reg->btf_id);
11487 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
11488 				  field->graph_root.value_btf_id, true)) {
11489 		verbose(env, "operation on %s expects arg#1 %s at offset=%d "
11490 			"in struct %s, but arg is at offset=%d in struct %s\n",
11491 			btf_field_type_name(head_field_type),
11492 			btf_field_type_name(node_field_type),
11493 			field->graph_root.node_offset,
11494 			btf_name_by_offset(field->graph_root.btf, et->name_off),
11495 			node_off, btf_name_by_offset(reg->btf, t->name_off));
11496 		return -EINVAL;
11497 	}
11498 	meta->arg_btf = reg->btf;
11499 	meta->arg_btf_id = reg->btf_id;
11500 
11501 	if (node_off != field->graph_root.node_offset) {
11502 		verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
11503 			node_off, btf_field_type_name(node_field_type),
11504 			field->graph_root.node_offset,
11505 			btf_name_by_offset(field->graph_root.btf, et->name_off));
11506 		return -EINVAL;
11507 	}
11508 
11509 	return 0;
11510 }
11511 
11512 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
11513 					   struct bpf_reg_state *reg, u32 regno,
11514 					   struct bpf_kfunc_call_arg_meta *meta)
11515 {
11516 	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
11517 						  BPF_LIST_HEAD, BPF_LIST_NODE,
11518 						  &meta->arg_list_head.field);
11519 }
11520 
11521 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
11522 					     struct bpf_reg_state *reg, u32 regno,
11523 					     struct bpf_kfunc_call_arg_meta *meta)
11524 {
11525 	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
11526 						  BPF_RB_ROOT, BPF_RB_NODE,
11527 						  &meta->arg_rbtree_root.field);
11528 }
11529 
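/* Usage sketch tying the root and node checks together (declarations as in
 * the selftests' bpf_experimental.h):
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_rb_root groot __contains(node_data, node);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na, *nb;
 *
 *		na = container_of(a, struct node_data, node);
 *		nb = container_of(b, struct node_data, node);
 *		return na->key < nb->key;
 *	}
 *
 *	n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);
 *	bpf_spin_unlock(&glock);
 *
 * groot is checked by process_kf_arg_ptr_to_rbtree_root() and &n->node by
 * process_kf_arg_ptr_to_rbtree_node() above.
 */
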
11530 /*
11531  * The css_task iter allowlist is needed to avoid deadlocking on css_set_lock.
11532  * LSM hooks and iters (both sleepable and non-sleepable) are safe.
11533  * Any sleepable progs are also safe since bpf_check_attach_target() enforces
11534  * that they can only be attached to some specific hook points.
11535  */
11536 static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
11537 {
11538 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
11539 
11540 	switch (prog_type) {
11541 	case BPF_PROG_TYPE_LSM:
11542 		return true;
11543 	case BPF_PROG_TYPE_TRACING:
11544 		if (env->prog->expected_attach_type == BPF_TRACE_ITER)
11545 			return true;
11546 		fallthrough;
11547 	default:
11548 		return in_sleepable(env);
11549 	}
11550 }
11551 
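/* Usage sketch for an allowlisted program type (open-coded css_task iterator
 * kfunc names as used by the selftests; 'css' is assumed to be a valid
 * struct cgroup_subsys_state * obtained elsewhere):
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(count_tasks, struct file *file)
 *	{
 *		struct bpf_iter_css_task it;
 *		struct task_struct *task;
 *
 *		bpf_iter_css_task_new(&it, css, CSS_TASK_ITER_PROCS);
 *		while ((task = bpf_iter_css_task_next(&it)))
 *			...;
 *		bpf_iter_css_task_destroy(&it);
 *		return 0;
 *	}
 */
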
11552 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
11553 			    int insn_idx)
11554 {
11555 	const char *func_name = meta->func_name, *ref_tname;
11556 	const struct btf *btf = meta->btf;
11557 	const struct btf_param *args;
11558 	struct btf_record *rec;
11559 	u32 i, nargs;
11560 	int ret;
11561 
11562 	args = (const struct btf_param *)(meta->func_proto + 1);
11563 	nargs = btf_type_vlen(meta->func_proto);
11564 	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
11565 		verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
11566 			MAX_BPF_FUNC_REG_ARGS);
11567 		return -EINVAL;
11568 	}
11569 
11570 	/* Check that BTF function arguments match actual types that the
11571 	 * verifier sees.
11572 	 */
11573 	for (i = 0; i < nargs; i++) {
11574 		struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
11575 		const struct btf_type *t, *ref_t, *resolve_ret;
11576 		enum bpf_arg_type arg_type = ARG_DONTCARE;
11577 		u32 regno = i + 1, ref_id, type_size;
11578 		bool is_ret_buf_sz = false;
11579 		int kf_arg_type;
11580 
11581 		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
11582 
11583 		if (is_kfunc_arg_ignore(btf, &args[i]))
11584 			continue;
11585 
11586 		if (btf_type_is_scalar(t)) {
11587 			if (reg->type != SCALAR_VALUE) {
11588 				verbose(env, "R%d is not a scalar\n", regno);
11589 				return -EINVAL;
11590 			}
11591 
11592 			if (is_kfunc_arg_constant(meta->btf, &args[i])) {
11593 				if (meta->arg_constant.found) {
11594 					verbose(env, "verifier internal error: only one constant argument permitted\n");
11595 					return -EFAULT;
11596 				}
11597 				if (!tnum_is_const(reg->var_off)) {
11598 					verbose(env, "R%d must be a known constant\n", regno);
11599 					return -EINVAL;
11600 				}
11601 				ret = mark_chain_precision(env, regno);
11602 				if (ret < 0)
11603 					return ret;
11604 				meta->arg_constant.found = true;
11605 				meta->arg_constant.value = reg->var_off.value;
11606 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
11607 				meta->r0_rdonly = true;
11608 				is_ret_buf_sz = true;
11609 			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
11610 				is_ret_buf_sz = true;
11611 			}
11612 
11613 			if (is_ret_buf_sz) {
11614 				if (meta->r0_size) {
11615 					verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
11616 					return -EINVAL;
11617 				}
11618 
11619 				if (!tnum_is_const(reg->var_off)) {
11620 					verbose(env, "R%d is not a const\n", regno);
11621 					return -EINVAL;
11622 				}
11623 
11624 				meta->r0_size = reg->var_off.value;
11625 				ret = mark_chain_precision(env, regno);
11626 				if (ret)
11627 					return ret;
11628 			}
11629 			continue;
11630 		}
11631 
11632 		if (!btf_type_is_ptr(t)) {
11633 			verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
11634 			return -EINVAL;
11635 		}
11636 
11637 		if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
11638 		    (register_is_null(reg) || type_may_be_null(reg->type)) &&
11639 		    !is_kfunc_arg_nullable(meta->btf, &args[i])) {
11640 			verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
11641 			return -EACCES;
11642 		}
11643 
11644 		if (reg->ref_obj_id) {
11645 			if (is_kfunc_release(meta) && meta->ref_obj_id) {
11646 				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
11647 					regno, reg->ref_obj_id,
11648 					meta->ref_obj_id);
11649 				return -EFAULT;
11650 			}
11651 			meta->ref_obj_id = reg->ref_obj_id;
11652 			if (is_kfunc_release(meta))
11653 				meta->release_regno = regno;
11654 		}
11655 
11656 		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
11657 		ref_tname = btf_name_by_offset(btf, ref_t->name_off);
11658 
11659 		kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs);
11660 		if (kf_arg_type < 0)
11661 			return kf_arg_type;
11662 
11663 		switch (kf_arg_type) {
11664 		case KF_ARG_PTR_TO_NULL:
11665 			continue;
11666 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
11667 		case KF_ARG_PTR_TO_BTF_ID:
11668 			if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
11669 				break;
11670 
11671 			if (!is_trusted_reg(reg)) {
11672 				if (!is_kfunc_rcu(meta)) {
11673 					verbose(env, "R%d must be referenced or trusted\n", regno);
11674 					return -EINVAL;
11675 				}
11676 				if (!is_rcu_reg(reg)) {
11677 					verbose(env, "R%d must be a rcu pointer\n", regno);
11678 					return -EINVAL;
11679 				}
11680 			}
11681 
11682 			fallthrough;
11683 		case KF_ARG_PTR_TO_CTX:
11684 			/* Trusted arguments have the same offset checks as release arguments */
11685 			arg_type |= OBJ_RELEASE;
11686 			break;
11687 		case KF_ARG_PTR_TO_DYNPTR:
11688 		case KF_ARG_PTR_TO_ITER:
11689 		case KF_ARG_PTR_TO_LIST_HEAD:
11690 		case KF_ARG_PTR_TO_LIST_NODE:
11691 		case KF_ARG_PTR_TO_RB_ROOT:
11692 		case KF_ARG_PTR_TO_RB_NODE:
11693 		case KF_ARG_PTR_TO_MEM:
11694 		case KF_ARG_PTR_TO_MEM_SIZE:
11695 		case KF_ARG_PTR_TO_CALLBACK:
11696 		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
11697 		case KF_ARG_PTR_TO_CONST_STR:
11698 			/* Trusted by default */
11699 			break;
11700 		default:
11701 			WARN_ON_ONCE(1);
11702 			return -EFAULT;
11703 		}
11704 
11705 		if (is_kfunc_release(meta) && reg->ref_obj_id)
11706 			arg_type |= OBJ_RELEASE;
11707 		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
11708 		if (ret < 0)
11709 			return ret;
11710 
11711 		switch (kf_arg_type) {
11712 		case KF_ARG_PTR_TO_CTX:
11713 			if (reg->type != PTR_TO_CTX) {
11714 				verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
11715 				return -EINVAL;
11716 			}
11717 
11718 			if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
11719 				ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog));
11720 				if (ret < 0)
11721 					return -EINVAL;
11722 				meta->ret_btf_id = ret;
11723 			}
11724 			break;
11725 		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
11726 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) {
11727 				if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) {
11728 					verbose(env, "arg#%d expected for bpf_obj_drop_impl()\n", i);
11729 					return -EINVAL;
11730 				}
11731 			} else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) {
11732 				if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
11733 					verbose(env, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i);
11734 					return -EINVAL;
11735 				}
11736 			} else {
11737 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
11738 				return -EINVAL;
11739 			}
11740 			if (!reg->ref_obj_id) {
11741 				verbose(env, "allocated object must be referenced\n");
11742 				return -EINVAL;
11743 			}
11744 			if (meta->btf == btf_vmlinux) {
11745 				meta->arg_btf = reg->btf;
11746 				meta->arg_btf_id = reg->btf_id;
11747 			}
11748 			break;
11749 		case KF_ARG_PTR_TO_DYNPTR:
11750 		{
11751 			enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;
11752 			int clone_ref_obj_id = 0;
11753 
11754 			if (reg->type != PTR_TO_STACK &&
11755 			    reg->type != CONST_PTR_TO_DYNPTR) {
11756 				verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
11757 				return -EINVAL;
11758 			}
11759 
11760 			if (reg->type == CONST_PTR_TO_DYNPTR)
11761 				dynptr_arg_type |= MEM_RDONLY;
11762 
11763 			if (is_kfunc_arg_uninit(btf, &args[i]))
11764 				dynptr_arg_type |= MEM_UNINIT;
11765 
11766 			if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
11767 				dynptr_arg_type |= DYNPTR_TYPE_SKB;
11768 			} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) {
11769 				dynptr_arg_type |= DYNPTR_TYPE_XDP;
11770 			} else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] &&
11771 				   (dynptr_arg_type & MEM_UNINIT)) {
11772 				enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type;
11773 
11774 				if (parent_type == BPF_DYNPTR_TYPE_INVALID) {
11775 					verbose(env, "verifier internal error: no dynptr type for parent of clone\n");
11776 					return -EFAULT;
11777 				}
11778 
11779 				dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type);
11780 				clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id;
11781 				if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) {
11782 					verbose(env, "verifier internal error: missing ref obj id for parent of clone\n");
11783 					return -EFAULT;
11784 				}
11785 			}
11786 
11787 			ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id);
11788 			if (ret < 0)
11789 				return ret;
11790 
11791 			if (!(dynptr_arg_type & MEM_UNINIT)) {
11792 				int id = dynptr_id(env, reg);
11793 
11794 				if (id < 0) {
11795 					verbose(env, "verifier internal error: failed to obtain dynptr id\n");
11796 					return id;
11797 				}
11798 				meta->initialized_dynptr.id = id;
11799 				meta->initialized_dynptr.type = dynptr_get_type(env, reg);
11800 				meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg);
11801 			}
11802 
11803 			break;
11804 		}
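		/* A sketch of the clone path handled above (kfunc
		 * declarations from bpf_kfuncs.h): a clone inherits the
		 * parent dynptr's type flag and ref_obj_id, so invalidating
		 * the parent also invalidates the clone:
		 *
		 *	struct bpf_dynptr ptr, copy;
		 *
		 *	bpf_dynptr_from_skb(skb, 0, &ptr);	// DYNPTR_TYPE_SKB
		 *	bpf_dynptr_clone(&ptr, &copy);		// SKB-typed too
		 */
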
11805 		case KF_ARG_PTR_TO_ITER:
11806 			if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) {
11807 				if (!check_css_task_iter_allowlist(env)) {
11808 					verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n");
11809 					return -EINVAL;
11810 				}
11811 			}
11812 			ret = process_iter_arg(env, regno, insn_idx, meta);
11813 			if (ret < 0)
11814 				return ret;
11815 			break;
11816 		case KF_ARG_PTR_TO_LIST_HEAD:
11817 			if (reg->type != PTR_TO_MAP_VALUE &&
11818 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11819 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
11820 				return -EINVAL;
11821 			}
11822 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
11823 				verbose(env, "allocated object must be referenced\n");
11824 				return -EINVAL;
11825 			}
11826 			ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
11827 			if (ret < 0)
11828 				return ret;
11829 			break;
11830 		case KF_ARG_PTR_TO_RB_ROOT:
11831 			if (reg->type != PTR_TO_MAP_VALUE &&
11832 			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11833 				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
11834 				return -EINVAL;
11835 			}
11836 			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
11837 				verbose(env, "allocated object must be referenced\n");
11838 				return -EINVAL;
11839 			}
11840 			ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
11841 			if (ret < 0)
11842 				return ret;
11843 			break;
11844 		case KF_ARG_PTR_TO_LIST_NODE:
11845 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11846 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
11847 				return -EINVAL;
11848 			}
11849 			if (!reg->ref_obj_id) {
11850 				verbose(env, "allocated object must be referenced\n");
11851 				return -EINVAL;
11852 			}
11853 			ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
11854 			if (ret < 0)
11855 				return ret;
11856 			break;
11857 		case KF_ARG_PTR_TO_RB_NODE:
11858 			if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
11859 				if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
11860 					verbose(env, "rbtree_remove node input must be non-owning ref\n");
11861 					return -EINVAL;
11862 				}
11863 				if (in_rbtree_lock_required_cb(env)) {
11864 					verbose(env, "rbtree_remove not allowed in rbtree cb\n");
11865 					return -EINVAL;
11866 				}
11867 			} else {
11868 				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11869 					verbose(env, "arg#%d expected pointer to allocated object\n", i);
11870 					return -EINVAL;
11871 				}
11872 				if (!reg->ref_obj_id) {
11873 					verbose(env, "allocated object must be referenced\n");
11874 					return -EINVAL;
11875 				}
11876 			}
11877 
11878 			ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
11879 			if (ret < 0)
11880 				return ret;
11881 			break;
11882 		case KF_ARG_PTR_TO_BTF_ID:
11883 			/* Only base_type is checked, further checks are done here */
11884 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
11885 			     (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
11886 			    !reg2btf_ids[base_type(reg->type)]) {
11887 				verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
11888 				verbose(env, "expected %s or socket\n",
11889 					reg_type_str(env, base_type(reg->type) |
11890 							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
11891 				return -EINVAL;
11892 			}
11893 			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
11894 			if (ret < 0)
11895 				return ret;
11896 			break;
11897 		case KF_ARG_PTR_TO_MEM:
11898 			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
11899 			if (IS_ERR(resolve_ret)) {
11900 				verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
11901 					i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
11902 				return -EINVAL;
11903 			}
11904 			ret = check_mem_reg(env, reg, regno, type_size);
11905 			if (ret < 0)
11906 				return ret;
11907 			break;
11908 		case KF_ARG_PTR_TO_MEM_SIZE:
11909 		{
11910 			struct bpf_reg_state *buff_reg = &regs[regno];
11911 			const struct btf_param *buff_arg = &args[i];
11912 			struct bpf_reg_state *size_reg = &regs[regno + 1];
11913 			const struct btf_param *size_arg = &args[i + 1];
11914 
11915 			if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) {
11916 				ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
11917 				if (ret < 0) {
11918 					verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
11919 					return ret;
11920 				}
11921 			}
11922 
11923 			if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) {
11924 				if (meta->arg_constant.found) {
11925 					verbose(env, "verifier internal error: only one constant argument permitted\n");
11926 					return -EFAULT;
11927 				}
11928 				if (!tnum_is_const(size_reg->var_off)) {
11929 					verbose(env, "R%d must be a known constant\n", regno + 1);
11930 					return -EINVAL;
11931 				}
11932 				meta->arg_constant.found = true;
11933 				meta->arg_constant.value = size_reg->var_off.value;
11934 			}
11935 
11936 			/* Skip next '__sz' or '__szk' argument */
11937 			i++;
11938 			break;
11939 		}
11940 		case KF_ARG_PTR_TO_CALLBACK:
11941 			if (reg->type != PTR_TO_FUNC) {
11942 				verbose(env, "arg%d expected pointer to func\n", i);
11943 				return -EINVAL;
11944 			}
11945 			meta->subprogno = reg->subprogno;
11946 			break;
11947 		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
11948 			if (!type_is_ptr_alloc_obj(reg->type)) {
11949 				verbose(env, "arg#%d is neither owning or non-owning ref\n", i);
11950 				return -EINVAL;
11951 			}
11952 			if (!type_is_non_owning_ref(reg->type))
11953 				meta->arg_owning_ref = true;
11954 
11955 			rec = reg_btf_record(reg);
11956 			if (!rec) {
11957 				verbose(env, "verifier internal error: Couldn't find btf_record\n");
11958 				return -EFAULT;
11959 			}
11960 
11961 			if (rec->refcount_off < 0) {
11962 				verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
11963 				return -EINVAL;
11964 			}
11965 
11966 			meta->arg_btf = reg->btf;
11967 			meta->arg_btf_id = reg->btf_id;
11968 			break;
11969 		case KF_ARG_PTR_TO_CONST_STR:
11970 			if (reg->type != PTR_TO_MAP_VALUE) {
11971 				verbose(env, "arg#%d doesn't point to a const string\n", i);
11972 				return -EINVAL;
11973 			}
11974 			ret = check_reg_const_str(env, reg, regno);
11975 			if (ret)
11976 				return ret;
11977 			break;
11978 		}
11979 	}
11980 
11981 	if (is_kfunc_release(meta) && !meta->release_regno) {
11982 		verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
11983 			func_name);
11984 		return -EINVAL;
11985 	}
11986 
11987 	return 0;
11988 }
11989 
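/* The per-argument classification in check_kfunc_args() is driven largely by
 * suffixes in the kfunc's BTF parameter names (a non-exhaustive list):
 *
 *	arg__sz / arg__szk	(const) size of the previous memory argument
 *	arg__opt		pointer may be NULL (used with __szk sizes)
 *	arg__uninit		dynptr initialized by the kfunc
 *	arg__ign		argument ignored by the verifier
 *	arg__k			must be a known constant scalar
 *	arg__nullable		trusted argument may be NULL
 *	arg__str		must point to a constant string
 *
 * For example, bpf_dynptr_slice() declares an optional buffer with a
 * constant size:
 *
 *	void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
 *			       void *buffer__opt, u32 buffer__szk);
 */
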
11990 static int fetch_kfunc_meta(struct bpf_verifier_env *env,
11991 			    struct bpf_insn *insn,
11992 			    struct bpf_kfunc_call_arg_meta *meta,
11993 			    const char **kfunc_name)
11994 {
11995 	const struct btf_type *func, *func_proto;
11996 	u32 func_id, *kfunc_flags;
11997 	const char *func_name;
11998 	struct btf *desc_btf;
11999 
12000 	if (kfunc_name)
12001 		*kfunc_name = NULL;
12002 
12003 	if (!insn->imm)
12004 		return -EINVAL;
12005 
12006 	desc_btf = find_kfunc_desc_btf(env, insn->off);
12007 	if (IS_ERR(desc_btf))
12008 		return PTR_ERR(desc_btf);
12009 
12010 	func_id = insn->imm;
12011 	func = btf_type_by_id(desc_btf, func_id);
12012 	func_name = btf_name_by_offset(desc_btf, func->name_off);
12013 	if (kfunc_name)
12014 		*kfunc_name = func_name;
12015 	func_proto = btf_type_by_id(desc_btf, func->type);
12016 
12017 	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog);
12018 	if (!kfunc_flags)
12019 		return -EACCES;
12021 
12022 	memset(meta, 0, sizeof(*meta));
12023 	meta->btf = desc_btf;
12024 	meta->func_id = func_id;
12025 	meta->kfunc_flags = *kfunc_flags;
12026 	meta->func_proto = func_proto;
12027 	meta->func_name = func_name;
12028 
12029 	return 0;
12030 }
12031 
12032 static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name);
12033 
12034 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
12035 			    int *insn_idx_p)
12036 {
12037 	const struct btf_type *t, *ptr_type;
12038 	u32 i, nargs, ptr_type_id, release_ref_obj_id;
12039 	struct bpf_reg_state *regs = cur_regs(env);
12040 	const char *func_name, *ptr_type_name;
12041 	bool sleepable, rcu_lock, rcu_unlock;
12042 	struct bpf_kfunc_call_arg_meta meta;
12043 	struct bpf_insn_aux_data *insn_aux;
12044 	int err, insn_idx = *insn_idx_p;
12045 	const struct btf_param *args;
12046 	const struct btf_type *ret_t;
12047 	struct btf *desc_btf;
12048 
12049 	/* skip for now, but return error when we find this in fixup_kfunc_call */
12050 	if (!insn->imm)
12051 		return 0;
12052 
12053 	err = fetch_kfunc_meta(env, insn, &meta, &func_name);
12054 	if (err == -EACCES && func_name)
12055 		verbose(env, "calling kernel function %s is not allowed\n", func_name);
12056 	if (err)
12057 		return err;
12058 	desc_btf = meta.btf;
12059 	insn_aux = &env->insn_aux_data[insn_idx];
12060 
12061 	insn_aux->is_iter_next = is_iter_next_kfunc(&meta);
12062 
12063 	if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
12064 		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
12065 		return -EACCES;
12066 	}
12067 
12068 	sleepable = is_kfunc_sleepable(&meta);
12069 	if (sleepable && !in_sleepable(env)) {
12070 		verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
12071 		return -EACCES;
12072 	}
12073 
12074 	/* Check the arguments */
12075 	err = check_kfunc_args(env, &meta, insn_idx);
12076 	if (err < 0)
12077 		return err;
12078 
12079 	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
12080 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
12081 					 set_rbtree_add_callback_state);
12082 		if (err) {
12083 			verbose(env, "kfunc %s#%d failed callback verification\n",
12084 				func_name, meta.func_id);
12085 			return err;
12086 		}
12087 	}
12088 
12089 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
12090 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
12091 
12092 	if (env->cur_state->active_rcu_lock) {
12093 		struct bpf_func_state *state;
12094 		struct bpf_reg_state *reg;
12095 		u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER);
12096 
12097 		if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) {
12098 			verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n");
12099 			return -EACCES;
12100 		}
12101 
12102 		if (rcu_lock) {
12103 			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
12104 			return -EINVAL;
12105 		} else if (rcu_unlock) {
12106 			bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({
12107 				if (reg->type & MEM_RCU) {
12108 					reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
12109 					reg->type |= PTR_UNTRUSTED;
12110 				}
12111 			}));
12112 			env->cur_state->active_rcu_lock = false;
12113 		} else if (sleepable) {
12114 			verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name);
12115 			return -EACCES;
12116 		}
12117 	} else if (rcu_lock) {
12118 		env->cur_state->active_rcu_lock = true;
12119 	} else if (rcu_unlock) {
12120 		verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
12121 		return -EINVAL;
12122 	}
12123 
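	/* A sketch of the invalidation above: a MEM_RCU pointer obtained
	 * under bpf_rcu_read_lock() turns PTR_UNTRUSTED once the matching
	 * bpf_rcu_read_unlock() is processed:
	 *
	 *	bpf_rcu_read_lock();
	 *	parent = task->real_parent;	// MEM_RCU tagged
	 *	... deref parent ...		// ok
	 *	bpf_rcu_read_unlock();
	 *	... deref parent ...		// rejected: untrusted
	 */
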
12124 	/* In case of release function, we get register number of refcounted
12125 	 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
12126 	 */
12127 	if (meta.release_regno) {
12128 		err = release_reference(env, regs[meta.release_regno].ref_obj_id);
12129 		if (err) {
12130 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
12131 				func_name, meta.func_id);
12132 			return err;
12133 		}
12134 	}
12135 
12136 	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
12137 	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
12138 	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
12139 		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
12140 		insn_aux->insert_off = regs[BPF_REG_2].off;
12141 		insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id);
12142 		err = ref_convert_owning_non_owning(env, release_ref_obj_id);
12143 		if (err) {
12144 			verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
12145 				func_name, meta.func_id);
12146 			return err;
12147 		}
12148 
12149 		err = release_reference(env, release_ref_obj_id);
12150 		if (err) {
12151 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
12152 				func_name, meta.func_id);
12153 			return err;
12154 		}
12155 	}
12156 
12157 	if (meta.func_id == special_kfunc_list[KF_bpf_throw]) {
12158 		if (!bpf_jit_supports_exceptions()) {
12159 			verbose(env, "JIT does not support calling kfunc %s#%d\n",
12160 				func_name, meta.func_id);
12161 			return -ENOTSUPP;
12162 		}
12163 		env->seen_exception = true;
12164 
12165 		/* In the case of the default callback, the cookie value passed
12166 		 * to bpf_throw becomes the return value of the program.
12167 		 */
12168 		if (!env->exception_callback_subprog) {
12169 			err = check_return_code(env, BPF_REG_1, "R1");
12170 			if (err < 0)
12171 				return err;
12172 		}
12173 	}
12174 
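	/* Example (sketch): with no user-defined exception callback, the
	 * cookie passed to bpf_throw() is what the program effectively
	 * returns, hence the check_return_code() call on R1 above:
	 *
	 *	if (bad_state)
	 *		bpf_throw(0);	// unwind; program "returns" 0
	 */
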
12175 	for (i = 0; i < CALLER_SAVED_REGS; i++)
12176 		mark_reg_not_init(env, regs, caller_saved[i]);
12177 
12178 	/* Check return type */
12179 	t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);
12180 
12181 	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
12182 		/* Only exception is bpf_obj_new_impl */
12183 		if (meta.btf != btf_vmlinux ||
12184 		    (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
12185 		     meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] &&
12186 		     meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
12187 			verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
12188 			return -EINVAL;
12189 		}
12190 	}
12191 
12192 	if (btf_type_is_scalar(t)) {
12193 		mark_reg_unknown(env, regs, BPF_REG_0);
12194 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
12195 	} else if (btf_type_is_ptr(t)) {
12196 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);
12197 
12198 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
12199 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
12200 			    meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
12201 				struct btf_struct_meta *struct_meta;
12202 				struct btf *ret_btf;
12203 				u32 ret_btf_id;
12204 
12205 				if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set)
12206 					return -ENOMEM;
12207 
12208 				if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
12209 					verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
12210 					return -EINVAL;
12211 				}
12212 
12213 				ret_btf = env->prog->aux->btf;
12214 				ret_btf_id = meta.arg_constant.value;
12215 
12216 				/* This may be NULL due to the user not supplying a BTF */
12217 				if (!ret_btf) {
12218 					verbose(env, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n");
12219 					return -EINVAL;
12220 				}
12221 
12222 				ret_t = btf_type_by_id(ret_btf, ret_btf_id);
12223 				if (!ret_t || !__btf_type_is_struct(ret_t)) {
12224 					verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n");
12225 					return -EINVAL;
12226 				}
12227 
12228 				if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
12229 					if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) {
12230 						verbose(env, "bpf_percpu_obj_new type size (%d) is greater than %d\n",
12231 							ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE);
12232 						return -EINVAL;
12233 					}
12234 
12235 					if (!bpf_global_percpu_ma_set) {
12236 						mutex_lock(&bpf_percpu_ma_lock);
12237 						if (!bpf_global_percpu_ma_set) {
12238 							/* Charge memory allocated with bpf_global_percpu_ma to
12239 							 * root memcg. The obj_cgroup for root memcg is NULL.
12240 							 */
12241 							err = bpf_mem_alloc_percpu_init(&bpf_global_percpu_ma, NULL);
12242 							if (!err)
12243 								bpf_global_percpu_ma_set = true;
12244 						}
12245 						mutex_unlock(&bpf_percpu_ma_lock);
12246 						if (err)
12247 							return err;
12248 					}
12249 
12250 					mutex_lock(&bpf_percpu_ma_lock);
12251 					err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size);
12252 					mutex_unlock(&bpf_percpu_ma_lock);
12253 					if (err)
12254 						return err;
12255 				}
12256 
12257 				struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id);
12258 				if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
12259 					if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) {
12260 						verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n");
12261 						return -EINVAL;
12262 					}
12263 
12264 					if (struct_meta) {
12265 						verbose(env, "bpf_percpu_obj_new type ID argument must not contain special fields\n");
12266 						return -EINVAL;
12267 					}
12268 				}
12269 
12270 				mark_reg_known_zero(env, regs, BPF_REG_0);
12271 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
12272 				regs[BPF_REG_0].btf = ret_btf;
12273 				regs[BPF_REG_0].btf_id = ret_btf_id;
12274 				if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl])
12275 					regs[BPF_REG_0].type |= MEM_PERCPU;
12276 
12277 				insn_aux->obj_new_size = ret_t->size;
12278 				insn_aux->kptr_struct_meta = struct_meta;
12279 			} else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
12280 				mark_reg_known_zero(env, regs, BPF_REG_0);
12281 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
12282 				regs[BPF_REG_0].btf = meta.arg_btf;
12283 				regs[BPF_REG_0].btf_id = meta.arg_btf_id;
12284 
12285 				insn_aux->kptr_struct_meta =
12286 					btf_find_struct_meta(meta.arg_btf,
12287 							     meta.arg_btf_id);
12288 			} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
12289 				   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
12290 				struct btf_field *field = meta.arg_list_head.field;
12291 
12292 				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
12293 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
12294 				   meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
12295 				struct btf_field *field = meta.arg_rbtree_root.field;
12296 
12297 				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
12298 			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
12299 				mark_reg_known_zero(env, regs, BPF_REG_0);
12300 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
12301 				regs[BPF_REG_0].btf = desc_btf;
12302 				regs[BPF_REG_0].btf_id = meta.ret_btf_id;
12303 			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
12304 				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
12305 				if (!ret_t || !btf_type_is_struct(ret_t)) {
12306 					verbose(env,
12307 						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
12308 					return -EINVAL;
12309 				}
12310 
12311 				mark_reg_known_zero(env, regs, BPF_REG_0);
12312 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
12313 				regs[BPF_REG_0].btf = desc_btf;
12314 				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
12315 			} else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] ||
12316 				   meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
12317 				enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type);
12318 
12319 				mark_reg_known_zero(env, regs, BPF_REG_0);
12320 
12321 				if (!meta.arg_constant.found) {
12322 					verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n");
12323 					return -EFAULT;
12324 				}
12325 
12326 				regs[BPF_REG_0].mem_size = meta.arg_constant.value;
12327 
12328 				/* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */
12329 				regs[BPF_REG_0].type = PTR_TO_MEM | type_flag;
12330 
12331 				if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) {
12332 					regs[BPF_REG_0].type |= MEM_RDONLY;
12333 				} else {
12334 					/* this will set env->seen_direct_write to true */
12335 					if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) {
12336 						verbose(env, "the prog does not allow writes to packet data\n");
12337 						return -EINVAL;
12338 					}
12339 				}
12340 
12341 				if (!meta.initialized_dynptr.id) {
12342 					verbose(env, "verifier internal error: no dynptr id\n");
12343 					return -EFAULT;
12344 				}
12345 				regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id;
12346 
12347 				/* we don't need to set BPF_REG_0's ref obj id
12348 				 * because packet slices are not refcounted (see
12349 				 * dynptr_type_refcounted)
12350 				 */
12351 			} else {
12352 				verbose(env, "kernel function %s unhandled dynamic return type\n",
12353 					meta.func_name);
12354 				return -EFAULT;
12355 			}
12356 		} else if (!__btf_type_is_struct(ptr_type)) {
12357 			if (!meta.r0_size) {
12358 				__u32 sz;
12359 
12360 				if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) {
12361 					meta.r0_size = sz;
12362 					meta.r0_rdonly = true;
12363 				}
12364 			}
12365 			if (!meta.r0_size) {
12366 				ptr_type_name = btf_name_by_offset(desc_btf,
12367 								   ptr_type->name_off);
12368 				verbose(env,
12369 					"kernel function %s returns pointer type %s %s is not supported\n",
12370 					func_name,
12371 					btf_type_str(ptr_type),
12372 					ptr_type_name);
12373 				return -EINVAL;
12374 			}
12375 
12376 			mark_reg_known_zero(env, regs, BPF_REG_0);
12377 			regs[BPF_REG_0].type = PTR_TO_MEM;
12378 			regs[BPF_REG_0].mem_size = meta.r0_size;
12379 
12380 			if (meta.r0_rdonly)
12381 				regs[BPF_REG_0].type |= MEM_RDONLY;
12382 
12383 			/* Ensures we don't access the memory after a release_reference() */
12384 			if (meta.ref_obj_id)
12385 				regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
12386 		} else {
12387 			mark_reg_known_zero(env, regs, BPF_REG_0);
12388 			regs[BPF_REG_0].btf = desc_btf;
12389 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
12390 			regs[BPF_REG_0].btf_id = ptr_type_id;
12391 		}
12392 
12393 		if (is_kfunc_ret_null(&meta)) {
12394 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
12395 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
12396 			regs[BPF_REG_0].id = ++env->id_gen;
12397 		}
12398 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
12399 		if (is_kfunc_acquire(&meta)) {
12400 			int id = acquire_reference_state(env, insn_idx);
12401 
12402 			if (id < 0)
12403 				return id;
12404 			if (is_kfunc_ret_null(&meta))
12405 				regs[BPF_REG_0].id = id;
12406 			regs[BPF_REG_0].ref_obj_id = id;
12407 		} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
12408 			ref_set_non_owning(env, &regs[BPF_REG_0]);
12409 		}
12410 
12411 		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
12412 			regs[BPF_REG_0].id = ++env->id_gen;
12413 	} else if (btf_type_is_void(t)) {
12414 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
12415 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
12416 			    meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
12417 				insn_aux->kptr_struct_meta =
12418 					btf_find_struct_meta(meta.arg_btf,
12419 							     meta.arg_btf_id);
12420 			}
12421 		}
12422 	}
12423 
12424 	nargs = btf_type_vlen(meta.func_proto);
12425 	args = (const struct btf_param *)(meta.func_proto + 1);
12426 	for (i = 0; i < nargs; i++) {
12427 		u32 regno = i + 1;
12428 
12429 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
12430 		if (btf_type_is_ptr(t))
12431 			mark_btf_func_reg_size(env, regno, sizeof(void *));
12432 		else
12433 			/* scalar. ensured by btf_check_kfunc_arg_match() */
12434 			mark_btf_func_reg_size(env, regno, t->size);
12435 	}
12436 
12437 	if (is_iter_next_kfunc(&meta)) {
12438 		err = process_iter_next_call(env, insn_idx, &meta);
12439 		if (err)
12440 			return err;
12441 	}
12442 
12443 	return 0;
12444 }
12445 
12446 static bool signed_add_overflows(s64 a, s64 b)
12447 {
12448 	/* Do the add in u64, where overflow is well-defined */
12449 	s64 res = (s64)((u64)a + (u64)b);
12450 
12451 	if (b < 0)
12452 		return res > a;
12453 	return res < a;
12454 }
12455 
12456 static bool signed_add32_overflows(s32 a, s32 b)
12457 {
12458 	/* Do the add in u32, where overflow is well-defined */
12459 	s32 res = (s32)((u32)a + (u32)b);
12460 
12461 	if (b < 0)
12462 		return res > a;
12463 	return res < a;
12464 }
12465 
12466 static bool signed_sub_overflows(s64 a, s64 b)
12467 {
12468 	/* Do the sub in u64, where overflow is well-defined */
12469 	s64 res = (s64)((u64)a - (u64)b);
12470 
12471 	if (b < 0)
12472 		return res < a;
12473 	return res > a;
12474 }
12475 
12476 static bool signed_sub32_overflows(s32 a, s32 b)
12477 {
12478 	/* Do the sub in u32, where overflow is well-defined */
12479 	s32 res = (s32)((u32)a - (u32)b);
12480 
12481 	if (b < 0)
12482 		return res < a;
12483 	return res > a;
12484 }
12485 
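/* Worked example for the helpers above: signed_add_overflows(S64_MAX, 1)
 * computes res = (s64)((u64)S64_MAX + 1) = S64_MIN; since b > 0 and res < a,
 * the addition overflowed. Likewise signed_sub_overflows(S64_MIN, 1) yields
 * res = S64_MAX; since b > 0 and res > a, the subtraction wrapped.
 */
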
12486 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
12487 				  const struct bpf_reg_state *reg,
12488 				  enum bpf_reg_type type)
12489 {
12490 	bool known = tnum_is_const(reg->var_off);
12491 	s64 val = reg->var_off.value;
12492 	s64 smin = reg->smin_value;
12493 
12494 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
12495 		verbose(env, "math between %s pointer and %lld is not allowed\n",
12496 			reg_type_str(env, type), val);
12497 		return false;
12498 	}
12499 
12500 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
12501 		verbose(env, "%s pointer offset %d is not allowed\n",
12502 			reg_type_str(env, type), reg->off);
12503 		return false;
12504 	}
12505 
12506 	if (smin == S64_MIN) {
12507 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
12508 			reg_type_str(env, type));
12509 		return false;
12510 	}
12511 
12512 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
12513 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
12514 			smin, reg_type_str(env, type));
12515 		return false;
12516 	}
12517 
12518 	return true;
12519 }
12520 
12521 enum {
12522 	REASON_BOUNDS	= -1,
12523 	REASON_TYPE	= -2,
12524 	REASON_PATHS	= -3,
12525 	REASON_LIMIT	= -4,
12526 	REASON_STACK	= -5,
12527 };
12528 
12529 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
12530 			      u32 *alu_limit, bool mask_to_left)
12531 {
12532 	u32 max = 0, ptr_limit = 0;
12533 
12534 	switch (ptr_reg->type) {
12535 	case PTR_TO_STACK:
12536 		/* Offset 0 is out-of-bounds, but an acceptable start for the
12537 		 * left direction, see BPF_REG_FP. Also, an unknown scalar
12538 		 * offset where we would need to deal with min/max bounds is
12539 		 * currently prohibited for unprivileged.
12540 		 */
12541 		max = MAX_BPF_STACK + mask_to_left;
12542 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
12543 		break;
12544 	case PTR_TO_MAP_VALUE:
12545 		max = ptr_reg->map_ptr->value_size;
12546 		ptr_limit = (mask_to_left ?
12547 			     ptr_reg->smin_value :
12548 			     ptr_reg->umax_value) + ptr_reg->off;
12549 		break;
12550 	default:
12551 		return REASON_TYPE;
12552 	}
12553 
12554 	if (ptr_limit >= max)
12555 		return REASON_LIMIT;
12556 	*alu_limit = ptr_limit;
12557 	return 0;
12558 }
12559 
12560 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
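/* Worked example: for PTR_TO_STACK at fp-16 (reg->off == -16, constant
 * var_off == 0), ptr_limit = -(0 + -16) = 16, i.e. the offset register will
 * later be masked so that the pointer can move at most 16 bytes in the
 * direction being sanitized. max is MAX_BPF_STACK (+1 when masking to the
 * left), so limits at or beyond the stack size fail with REASON_LIMIT.
 */
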
12561 				    const struct bpf_insn *insn)
12562 {
12563 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
12564 }
12565 
12566 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
12567 				       u32 alu_state, u32 alu_limit)
12568 {
12569 	/* If we arrived here from different branches with different
12570 	 * state or limits to sanitize, then this won't work.
12571 	 */
12572 	if (aux->alu_state &&
12573 	    (aux->alu_state != alu_state ||
12574 	     aux->alu_limit != alu_limit))
12575 		return REASON_PATHS;
12576 
12577 	/* Corresponding fixup done in do_misc_fixups(). */
12578 	aux->alu_state = alu_state;
12579 	aux->alu_limit = alu_limit;
12580 	return 0;
12581 }
12582 
12583 static int sanitize_val_alu(struct bpf_verifier_env *env,
12584 			    struct bpf_insn *insn)
12585 {
12586 	struct bpf_insn_aux_data *aux = cur_aux(env);
12587 
12588 	if (can_skip_alu_sanitation(env, insn))
12589 		return 0;
12590 
12591 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
12592 }
12593 
12594 static bool sanitize_needed(u8 opcode)
12595 {
12596 	return opcode == BPF_ADD || opcode == BPF_SUB;
12597 }
12598 
12599 struct bpf_sanitize_info {
12600 	struct bpf_insn_aux_data aux;
12601 	bool mask_to_left;
12602 };
12603 
12604 static struct bpf_verifier_state *
12605 sanitize_speculative_path(struct bpf_verifier_env *env,
12606 			  const struct bpf_insn *insn,
12607 			  u32 next_idx, u32 curr_idx)
12608 {
12609 	struct bpf_verifier_state *branch;
12610 	struct bpf_reg_state *regs;
12611 
12612 	branch = push_stack(env, next_idx, curr_idx, true);
12613 	if (branch && insn) {
12614 		regs = branch->frame[branch->curframe]->regs;
12615 		if (BPF_SRC(insn->code) == BPF_K) {
12616 			mark_reg_unknown(env, regs, insn->dst_reg);
12617 		} else if (BPF_SRC(insn->code) == BPF_X) {
12618 			mark_reg_unknown(env, regs, insn->dst_reg);
12619 			mark_reg_unknown(env, regs, insn->src_reg);
12620 		}
12621 	}
12622 	return branch;
12623 }
12624 
12625 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
12626 			    struct bpf_insn *insn,
12627 			    const struct bpf_reg_state *ptr_reg,
12628 			    const struct bpf_reg_state *off_reg,
12629 			    struct bpf_reg_state *dst_reg,
12630 			    struct bpf_sanitize_info *info,
12631 			    const bool commit_window)
12632 {
12633 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
12634 	struct bpf_verifier_state *vstate = env->cur_state;
12635 	bool off_is_imm = tnum_is_const(off_reg->var_off);
12636 	bool off_is_neg = off_reg->smin_value < 0;
12637 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
12638 	u8 opcode = BPF_OP(insn->code);
12639 	u32 alu_state, alu_limit;
12640 	struct bpf_reg_state tmp;
12641 	bool ret;
12642 	int err;
12643 
12644 	if (can_skip_alu_sanitation(env, insn))
12645 		return 0;
12646 
12647 	/* We already marked aux for masking from non-speculative
12648 	 * paths, thus we got here in the first place. We only care
12649 	 * to explore bad access from here.
12650 	 */
12651 	if (vstate->speculative)
12652 		goto do_sim;
12653 
12654 	if (!commit_window) {
12655 		if (!tnum_is_const(off_reg->var_off) &&
12656 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
12657 			return REASON_BOUNDS;
12658 
12659 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
12660 				     (opcode == BPF_SUB && !off_is_neg);
12661 	}
12662 
12663 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
12664 	if (err < 0)
12665 		return err;
12666 
12667 	if (commit_window) {
12668 		/* In commit phase we narrow the masking window based on
12669 		 * the observed pointer move after the simulated operation.
12670 		 */
12671 		alu_state = info->aux.alu_state;
12672 		alu_limit = abs(info->aux.alu_limit - alu_limit);
12673 	} else {
12674 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
12675 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
12676 		alu_state |= ptr_is_dst_reg ?
12677 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
12678 
12679 		/* Limit pruning on unknown scalars to enable deep search for
12680 		 * potential masking differences from other program paths.
12681 		 */
12682 		if (!off_is_imm)
12683 			env->explore_alu_limits = true;
12684 	}
12685 
12686 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
12687 	if (err < 0)
12688 		return err;
12689 do_sim:
12690 	/* If we're in commit phase, we're done here given we already
12691 	 * pushed the truncated dst_reg into the speculative verification
12692 	 * stack.
12693 	 *
12694 	 * Also, when the register is a known constant, we rewrite the register-
12695 	 * based operation to an immediate-based one, and thus do not need masking
12696 	 * (and as a consequence, do not need to simulate the zero-truncation either).
12697 	 */
12698 	if (commit_window || off_is_imm)
12699 		return 0;
12700 
12701 	/* Simulate and find potential out-of-bounds accesses under
12702 	 * speculative execution that result from truncation due to
12703 	 * masking when off was not within the expected range. If off
12704 	 * sits in dst, then we temporarily need to move ptr there
12705 	 * to simulate dst (== 0) +/-= ptr. This is needed, for example,
12706 	 * for cases where we use K-based arithmetic in one direction
12707 	 * and truncated reg-based arithmetic in the other in order to
12708 	 * explore a bad access.
12709 	 */
12710 	if (!ptr_is_dst_reg) {
12711 		tmp = *dst_reg;
12712 		copy_register_state(dst_reg, ptr_reg);
12713 	}
12714 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
12715 					env->insn_idx);
12716 	if (!ptr_is_dst_reg && ret)
12717 		*dst_reg = tmp;
12718 	return !ret ? REASON_STACK : 0;
12719 }
12720 
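/* Usage sketch (illustrative): adjust_ptr_min_max_vals() below calls this
 * helper in two phases around the simulated pointer arithmetic:
 *
 *   ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, &info, false);
 *   ...compute dst_reg bounds for BPF_ADD/BPF_SUB...
 *   ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, &info, true);
 *
 * The pre-phase records a conservative alu_limit and simulates the
 * speculative path; the commit phase narrows the limit to
 * abs(pre_limit - post_limit), i.e. the distance the pointer actually
 * moved, which the runtime masking rewrite then enforces.
 */
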
12721 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
12722 {
12723 	struct bpf_verifier_state *vstate = env->cur_state;
12724 
12725 	/* If we simulate paths under speculation, we don't mark the
12726 	 * insn as 'seen', so that when we later verify unreachable paths
12727 	 * in the non-speculative domain, sanitize_dead_code() can still
12728 	 * rewrite/sanitize them.
12729 	 */
12730 	if (!vstate->speculative)
12731 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
12732 }
12733 
12734 static int sanitize_err(struct bpf_verifier_env *env,
12735 			const struct bpf_insn *insn, int reason,
12736 			const struct bpf_reg_state *off_reg,
12737 			const struct bpf_reg_state *dst_reg)
12738 {
12739 	static const char *err = "pointer arithmetic with it prohibited for !root";
12740 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
12741 	u32 dst = insn->dst_reg, src = insn->src_reg;
12742 
12743 	switch (reason) {
12744 	case REASON_BOUNDS:
12745 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
12746 			off_reg == dst_reg ? dst : src, err);
12747 		break;
12748 	case REASON_TYPE:
12749 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
12750 			off_reg == dst_reg ? src : dst, err);
12751 		break;
12752 	case REASON_PATHS:
12753 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
12754 			dst, op, err);
12755 		break;
12756 	case REASON_LIMIT:
12757 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
12758 			dst, op, err);
12759 		break;
12760 	case REASON_STACK:
12761 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
12762 			dst, err);
12763 		break;
12764 	default:
12765 		verbose(env, "verifier internal error: unknown reason (%d)\n",
12766 			reason);
12767 		break;
12768 	}
12769 
12770 	return -EACCES;
12771 }
12772 
12773 /* check that stack access falls within stack limits and that 'reg' doesn't
12774  * have a variable offset.
12775  *
12776  * Variable offset is prohibited for unprivileged mode for simplicity since it
12777  * requires corresponding support in Spectre masking for stack ALU.  See also
12778  * retrieve_ptr_limit().
12779  *
12781  * 'off' includes 'reg->off'.
12782  */
12783 static int check_stack_access_for_ptr_arithmetic(
12784 				struct bpf_verifier_env *env,
12785 				int regno,
12786 				const struct bpf_reg_state *reg,
12787 				int off)
12788 {
12789 	if (!tnum_is_const(reg->var_off)) {
12790 		char tn_buf[48];
12791 
12792 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
12793 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
12794 			regno, tn_buf, off);
12795 		return -EACCES;
12796 	}
12797 
12798 	if (off >= 0 || off < -MAX_BPF_STACK) {
12799 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
12800 			"prohibited for !root; off=%d\n", regno, off);
12801 		return -EACCES;
12802 	}
12803 
12804 	return 0;
12805 }
12806 
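/* Worked example (illustrative), with MAX_BPF_STACK == 512:
 *
 *   BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),     // r1 = fp
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),    // off == -8:   allowed
 *   BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -520),  // off == -520: rejected,
 *                                             //   below -MAX_BPF_STACK
 *   BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),     // off >= 0:    rejected
 *
 * These rejections apply to unprivileged (!env->bypass_spec_v1) programs
 * via sanitize_check_bounds() below.
 */
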
12807 static int sanitize_check_bounds(struct bpf_verifier_env *env,
12808 				 const struct bpf_insn *insn,
12809 				 const struct bpf_reg_state *dst_reg)
12810 {
12811 	u32 dst = insn->dst_reg;
12812 
12813 	/* For unprivileged we require that resulting offset must be in bounds
12814 	 * in order to be able to sanitize access later on.
12815 	 */
12816 	if (env->bypass_spec_v1)
12817 		return 0;
12818 
12819 	switch (dst_reg->type) {
12820 	case PTR_TO_STACK:
12821 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
12822 					dst_reg->off + dst_reg->var_off.value))
12823 			return -EACCES;
12824 		break;
12825 	case PTR_TO_MAP_VALUE:
12826 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
12827 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
12828 				"prohibited for !root\n", dst);
12829 			return -EACCES;
12830 		}
12831 		break;
12832 	default:
12833 		break;
12834 	}
12835 
12836 	return 0;
12837 }
12838 
12839 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
12840  * Caller should also handle BPF_MOV case separately.
12841  * If we return -EACCES, caller may want to try again treating pointer as a
12842  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
12843  */
12844 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
12845 				   struct bpf_insn *insn,
12846 				   const struct bpf_reg_state *ptr_reg,
12847 				   const struct bpf_reg_state *off_reg)
12848 {
12849 	struct bpf_verifier_state *vstate = env->cur_state;
12850 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
12851 	struct bpf_reg_state *regs = state->regs, *dst_reg;
12852 	bool known = tnum_is_const(off_reg->var_off);
12853 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
12854 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
12855 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
12856 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
12857 	struct bpf_sanitize_info info = {};
12858 	u8 opcode = BPF_OP(insn->code);
12859 	u32 dst = insn->dst_reg;
12860 	int ret;
12861 
12862 	dst_reg = &regs[dst];
12863 
12864 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
12865 	    smin_val > smax_val || umin_val > umax_val) {
12866 		/* Taint dst register if offset had invalid bounds derived from
12867 		 * e.g. dead branches.
12868 		 */
12869 		__mark_reg_unknown(env, dst_reg);
12870 		return 0;
12871 	}
12872 
12873 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
12874 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
12875 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
12876 			__mark_reg_unknown(env, dst_reg);
12877 			return 0;
12878 		}
12879 
12880 		verbose(env,
12881 			"R%d 32-bit pointer arithmetic prohibited\n",
12882 			dst);
12883 		return -EACCES;
12884 	}
12885 
12886 	if (ptr_reg->type & PTR_MAYBE_NULL) {
12887 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
12888 			dst, reg_type_str(env, ptr_reg->type));
12889 		return -EACCES;
12890 	}
12891 
12892 	switch (base_type(ptr_reg->type)) {
12893 	case PTR_TO_CTX:
12894 	case PTR_TO_MAP_VALUE:
12895 	case PTR_TO_MAP_KEY:
12896 	case PTR_TO_STACK:
12897 	case PTR_TO_PACKET_META:
12898 	case PTR_TO_PACKET:
12899 	case PTR_TO_TP_BUFFER:
12900 	case PTR_TO_BTF_ID:
12901 	case PTR_TO_MEM:
12902 	case PTR_TO_BUF:
12903 	case PTR_TO_FUNC:
12904 	case CONST_PTR_TO_DYNPTR:
12905 		break;
12906 	case PTR_TO_FLOW_KEYS:
12907 		if (known)
12908 			break;
12909 		fallthrough;
12910 	case CONST_PTR_TO_MAP:
12911 		/* smin_val represents the known value */
12912 		if (known && smin_val == 0 && opcode == BPF_ADD)
12913 			break;
12914 		fallthrough;
12915 	default:
12916 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
12917 			dst, reg_type_str(env, ptr_reg->type));
12918 		return -EACCES;
12919 	}
12920 
12921 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
12922 	 * The id may be overwritten later if we create a new variable offset.
12923 	 */
12924 	dst_reg->type = ptr_reg->type;
12925 	dst_reg->id = ptr_reg->id;
12926 
12927 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
12928 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
12929 		return -EINVAL;
12930 
12931 	/* pointer types do not carry 32-bit bounds at the moment. */
12932 	__mark_reg32_unbounded(dst_reg);
12933 
12934 	if (sanitize_needed(opcode)) {
12935 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
12936 				       &info, false);
12937 		if (ret < 0)
12938 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
12939 	}
12940 
12941 	switch (opcode) {
12942 	case BPF_ADD:
12943 		/* We can take a fixed offset as long as it doesn't overflow
12944 		 * the s32 'off' field
12945 		 */
12946 		if (known && (ptr_reg->off + smin_val ==
12947 			      (s64)(s32)(ptr_reg->off + smin_val))) {
12948 			/* pointer += K.  Accumulate it into fixed offset */
12949 			dst_reg->smin_value = smin_ptr;
12950 			dst_reg->smax_value = smax_ptr;
12951 			dst_reg->umin_value = umin_ptr;
12952 			dst_reg->umax_value = umax_ptr;
12953 			dst_reg->var_off = ptr_reg->var_off;
12954 			dst_reg->off = ptr_reg->off + smin_val;
12955 			dst_reg->raw = ptr_reg->raw;
12956 			break;
12957 		}
12958 		/* A new variable offset is created.  Note that off_reg->off
12959 		 * == 0, since it's a scalar.
12960 		 * dst_reg gets the pointer type and since some unknown scalar
12961 		 * value was added to the pointer, give it a new 'id' if it's
12962 		 * a PTR_TO_PACKET.
12963 		 * This creates a new 'base' pointer, off_reg (variable) gets
12964 		 * added into the variable offset, and we copy the fixed offset
12965 		 * from ptr_reg.
12966 		 */
12967 		if (signed_add_overflows(smin_ptr, smin_val) ||
12968 		    signed_add_overflows(smax_ptr, smax_val)) {
12969 			dst_reg->smin_value = S64_MIN;
12970 			dst_reg->smax_value = S64_MAX;
12971 		} else {
12972 			dst_reg->smin_value = smin_ptr + smin_val;
12973 			dst_reg->smax_value = smax_ptr + smax_val;
12974 		}
12975 		if (umin_ptr + umin_val < umin_ptr ||
12976 		    umax_ptr + umax_val < umax_ptr) {
12977 			dst_reg->umin_value = 0;
12978 			dst_reg->umax_value = U64_MAX;
12979 		} else {
12980 			dst_reg->umin_value = umin_ptr + umin_val;
12981 			dst_reg->umax_value = umax_ptr + umax_val;
12982 		}
12983 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
12984 		dst_reg->off = ptr_reg->off;
12985 		dst_reg->raw = ptr_reg->raw;
12986 		if (reg_is_pkt_pointer(ptr_reg)) {
12987 			dst_reg->id = ++env->id_gen;
12988 			/* something was added to pkt_ptr, set range to zero */
12989 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
12990 		}
12991 		break;
12992 	case BPF_SUB:
12993 		if (dst_reg == off_reg) {
12994 			/* scalar -= pointer.  Creates an unknown scalar */
12995 			verbose(env, "R%d tried to subtract pointer from scalar\n",
12996 				dst);
12997 			return -EACCES;
12998 		}
12999 		/* We don't allow subtraction from FP, because (according to
13000 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
13001 		 * be able to deal with it.
13002 		 */
13003 		if (ptr_reg->type == PTR_TO_STACK) {
13004 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
13005 				dst);
13006 			return -EACCES;
13007 		}
13008 		if (known && (ptr_reg->off - smin_val ==
13009 			      (s64)(s32)(ptr_reg->off - smin_val))) {
13010 			/* pointer -= K.  Subtract it from fixed offset */
13011 			dst_reg->smin_value = smin_ptr;
13012 			dst_reg->smax_value = smax_ptr;
13013 			dst_reg->umin_value = umin_ptr;
13014 			dst_reg->umax_value = umax_ptr;
13015 			dst_reg->var_off = ptr_reg->var_off;
13016 			dst_reg->id = ptr_reg->id;
13017 			dst_reg->off = ptr_reg->off - smin_val;
13018 			dst_reg->raw = ptr_reg->raw;
13019 			break;
13020 		}
13021 		/* A new variable offset is created.  If the subtrahend is known
13022 		 * nonnegative, then any reg->range we had before is still good.
13023 		 */
13024 		if (signed_sub_overflows(smin_ptr, smax_val) ||
13025 		    signed_sub_overflows(smax_ptr, smin_val)) {
13026 			/* Overflow possible, we know nothing */
13027 			dst_reg->smin_value = S64_MIN;
13028 			dst_reg->smax_value = S64_MAX;
13029 		} else {
13030 			dst_reg->smin_value = smin_ptr - smax_val;
13031 			dst_reg->smax_value = smax_ptr - smin_val;
13032 		}
13033 		if (umin_ptr < umax_val) {
13034 			/* Overflow possible, we know nothing */
13035 			dst_reg->umin_value = 0;
13036 			dst_reg->umax_value = U64_MAX;
13037 		} else {
13038 			/* Cannot overflow (as long as bounds are consistent) */
13039 			dst_reg->umin_value = umin_ptr - umax_val;
13040 			dst_reg->umax_value = umax_ptr - umin_val;
13041 		}
13042 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
13043 		dst_reg->off = ptr_reg->off;
13044 		dst_reg->raw = ptr_reg->raw;
13045 		if (reg_is_pkt_pointer(ptr_reg)) {
13046 			dst_reg->id = ++env->id_gen;
13047 			/* something may have been added to pkt_ptr, set range to zero */
13048 			if (smin_val < 0)
13049 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
13050 		}
13051 		break;
13052 	case BPF_AND:
13053 	case BPF_OR:
13054 	case BPF_XOR:
13055 		/* bitwise ops on pointers are troublesome, prohibit. */
13056 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
13057 			dst, bpf_alu_string[opcode >> 4]);
13058 		return -EACCES;
13059 	default:
13060 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
13061 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
13062 			dst, bpf_alu_string[opcode >> 4]);
13063 		return -EACCES;
13064 	}
13065 
13066 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
13067 		return -EINVAL;
13068 	reg_bounds_sync(dst_reg);
13069 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
13070 		return -EACCES;
13071 	if (sanitize_needed(opcode)) {
13072 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
13073 				       &info, true);
13074 		if (ret < 0)
13075 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
13076 	}
13077 
13078 	return 0;
13079 }
13080 
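/* Worked example (illustrative): let r2 be PTR_TO_MAP_VALUE with off == 4
 * and r3 an unknown scalar bounded to [0, 16]:
 *
 *   r2 += 8;     // known K: folded into the fixed offset, off becomes 12
 *   r2 += r3;    // unknown scalar: off stays 12, the variable-offset
 *                // bounds become [0, 16] and var_off is widened via
 *                // tnum_add()
 *
 * The fixed offset stays precise while the variable part accumulates in
 * the smin/smax/umin/umax bounds and the tnum, which later access checks
 * consume.
 */
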
13081 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
13082 				 struct bpf_reg_state *src_reg)
13083 {
13084 	s32 smin_val = src_reg->s32_min_value;
13085 	s32 smax_val = src_reg->s32_max_value;
13086 	u32 umin_val = src_reg->u32_min_value;
13087 	u32 umax_val = src_reg->u32_max_value;
13088 
13089 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
13090 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
13091 		dst_reg->s32_min_value = S32_MIN;
13092 		dst_reg->s32_max_value = S32_MAX;
13093 	} else {
13094 		dst_reg->s32_min_value += smin_val;
13095 		dst_reg->s32_max_value += smax_val;
13096 	}
13097 	if (dst_reg->u32_min_value + umin_val < umin_val ||
13098 	    dst_reg->u32_max_value + umax_val < umax_val) {
13099 		dst_reg->u32_min_value = 0;
13100 		dst_reg->u32_max_value = U32_MAX;
13101 	} else {
13102 		dst_reg->u32_min_value += umin_val;
13103 		dst_reg->u32_max_value += umax_val;
13104 	}
13105 }
13106 
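/* Worked example (illustrative) of the unsigned wrap check above: with
 * dst_reg->u32_max_value == 0xffffff00 and umax_val == 0x200,
 *
 *   0xffffff00 + 0x200 == 0x100 (mod 2^32), and 0x100 < 0x200,
 *
 * so the addition wrapped and the u32 bounds are reset to [0, U32_MAX].
 * When neither bound wraps, the bounds are added component-wise.
 */
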
13107 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
13108 			       struct bpf_reg_state *src_reg)
13109 {
13110 	s64 smin_val = src_reg->smin_value;
13111 	s64 smax_val = src_reg->smax_value;
13112 	u64 umin_val = src_reg->umin_value;
13113 	u64 umax_val = src_reg->umax_value;
13114 
13115 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
13116 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
13117 		dst_reg->smin_value = S64_MIN;
13118 		dst_reg->smax_value = S64_MAX;
13119 	} else {
13120 		dst_reg->smin_value += smin_val;
13121 		dst_reg->smax_value += smax_val;
13122 	}
13123 	if (dst_reg->umin_value + umin_val < umin_val ||
13124 	    dst_reg->umax_value + umax_val < umax_val) {
13125 		dst_reg->umin_value = 0;
13126 		dst_reg->umax_value = U64_MAX;
13127 	} else {
13128 		dst_reg->umin_value += umin_val;
13129 		dst_reg->umax_value += umax_val;
13130 	}
13131 }
13132 
13133 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
13134 				 struct bpf_reg_state *src_reg)
13135 {
13136 	s32 smin_val = src_reg->s32_min_value;
13137 	s32 smax_val = src_reg->s32_max_value;
13138 	u32 umin_val = src_reg->u32_min_value;
13139 	u32 umax_val = src_reg->u32_max_value;
13140 
13141 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
13142 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
13143 		/* Overflow possible, we know nothing */
13144 		dst_reg->s32_min_value = S32_MIN;
13145 		dst_reg->s32_max_value = S32_MAX;
13146 	} else {
13147 		dst_reg->s32_min_value -= smax_val;
13148 		dst_reg->s32_max_value -= smin_val;
13149 	}
13150 	if (dst_reg->u32_min_value < umax_val) {
13151 		/* Overflow possible, we know nothing */
13152 		dst_reg->u32_min_value = 0;
13153 		dst_reg->u32_max_value = U32_MAX;
13154 	} else {
13155 		/* Cannot overflow (as long as bounds are consistent) */
13156 		dst_reg->u32_min_value -= umax_val;
13157 		dst_reg->u32_max_value -= umin_val;
13158 	}
13159 }
13160 
13161 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
13162 			       struct bpf_reg_state *src_reg)
13163 {
13164 	s64 smin_val = src_reg->smin_value;
13165 	s64 smax_val = src_reg->smax_value;
13166 	u64 umin_val = src_reg->umin_value;
13167 	u64 umax_val = src_reg->umax_value;
13168 
13169 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
13170 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
13171 		/* Overflow possible, we know nothing */
13172 		dst_reg->smin_value = S64_MIN;
13173 		dst_reg->smax_value = S64_MAX;
13174 	} else {
13175 		dst_reg->smin_value -= smax_val;
13176 		dst_reg->smax_value -= smin_val;
13177 	}
13178 	if (dst_reg->umin_value < umax_val) {
13179 		/* Overflow possible, we know nothing */
13180 		dst_reg->umin_value = 0;
13181 		dst_reg->umax_value = U64_MAX;
13182 	} else {
13183 		/* Cannot overflow (as long as bounds are consistent) */
13184 		dst_reg->umin_value -= umax_val;
13185 		dst_reg->umax_value -= umin_val;
13186 	}
13187 }
13188 
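/* Worked example (illustrative) of why the bounds cross over under
 * subtraction: with dst in [10, 20] and src in [3, 5],
 *
 *   new smin = 10 - 5 == 5     (smallest dst minus largest src)
 *   new smax = 20 - 3 == 17    (largest dst minus smallest src)
 *
 * For the unsigned bounds, dst_reg->umin_value < umax_val means the
 * result could wrap below zero, so the u64 bounds are reset to
 * [0, U64_MAX].
 */
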
13189 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
13190 				 struct bpf_reg_state *src_reg)
13191 {
13192 	s32 smin_val = src_reg->s32_min_value;
13193 	u32 umin_val = src_reg->u32_min_value;
13194 	u32 umax_val = src_reg->u32_max_value;
13195 
13196 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
13197 		/* Ain't nobody got time to multiply that sign */
13198 		__mark_reg32_unbounded(dst_reg);
13199 		return;
13200 	}
13201 	/* Both values are positive, so we can work with unsigned and
13202 	 * copy the result to signed (unless it exceeds S32_MAX).
13203 	 */
13204 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
13205 		/* Potential overflow, we know nothing */
13206 		__mark_reg32_unbounded(dst_reg);
13207 		return;
13208 	}
13209 	dst_reg->u32_min_value *= umin_val;
13210 	dst_reg->u32_max_value *= umax_val;
13211 	if (dst_reg->u32_max_value > S32_MAX) {
13212 		/* Overflow possible, we know nothing */
13213 		dst_reg->s32_min_value = S32_MIN;
13214 		dst_reg->s32_max_value = S32_MAX;
13215 	} else {
13216 		dst_reg->s32_min_value = dst_reg->u32_min_value;
13217 		dst_reg->s32_max_value = dst_reg->u32_max_value;
13218 	}
13219 }
13220 
13221 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
13222 			       struct bpf_reg_state *src_reg)
13223 {
13224 	s64 smin_val = src_reg->smin_value;
13225 	u64 umin_val = src_reg->umin_value;
13226 	u64 umax_val = src_reg->umax_value;
13227 
13228 	if (smin_val < 0 || dst_reg->smin_value < 0) {
13229 		/* Ain't nobody got time to multiply that sign */
13230 		__mark_reg64_unbounded(dst_reg);
13231 		return;
13232 	}
13233 	/* Both values are positive, so we can work with unsigned and
13234 	 * copy the result to signed (unless it exceeds S64_MAX).
13235 	 */
13236 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
13237 		/* Potential overflow, we know nothing */
13238 		__mark_reg64_unbounded(dst_reg);
13239 		return;
13240 	}
13241 	dst_reg->umin_value *= umin_val;
13242 	dst_reg->umax_value *= umax_val;
13243 	if (dst_reg->umax_value > S64_MAX) {
13244 		/* Overflow possible, we know nothing */
13245 		dst_reg->smin_value = S64_MIN;
13246 		dst_reg->smax_value = S64_MAX;
13247 	} else {
13248 		dst_reg->smin_value = dst_reg->umin_value;
13249 		dst_reg->smax_value = dst_reg->umax_value;
13250 	}
13251 }
13252 
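/* Worked example (illustrative) of the conservative guards above: with
 * dst in [2, 3] and src in [4, 5], both umax values fit in 32 bits, so
 *
 *   umin = 2 * 4 == 8, umax = 3 * 5 == 15,
 *
 * and since 15 <= S64_MAX the signed bounds become [8, 15] as well. Had
 * either umax exceeded U32_MAX, the product could overflow u64 and the
 * register would be marked unbounded via __mark_reg64_unbounded().
 */
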
13253 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
13254 				 struct bpf_reg_state *src_reg)
13255 {
13256 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
13257 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
13258 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
13259 	s32 smin_val = src_reg->s32_min_value;
13260 	u32 umax_val = src_reg->u32_max_value;
13261 
13262 	if (src_known && dst_known) {
13263 		__mark_reg32_known(dst_reg, var32_off.value);
13264 		return;
13265 	}
13266 
13267 	/* We get our minimum from the var_off, since that's inherently
13268 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
13269 	 */
13270 	dst_reg->u32_min_value = var32_off.value;
13271 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
13272 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
13273 		/* Lose signed bounds when ANDing negative numbers,
13274 		 * ain't nobody got time for that.
13275 		 */
13276 		dst_reg->s32_min_value = S32_MIN;
13277 		dst_reg->s32_max_value = S32_MAX;
13278 	} else {
13279 		/* ANDing two positives gives a positive, so safe to
13280 		 * cast result into s64.
13281 		 */
13282 		dst_reg->s32_min_value = dst_reg->u32_min_value;
13283 		dst_reg->s32_max_value = dst_reg->u32_max_value;
13284 	}
13285 }
13286 
13287 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
13288 			       struct bpf_reg_state *src_reg)
13289 {
13290 	bool src_known = tnum_is_const(src_reg->var_off);
13291 	bool dst_known = tnum_is_const(dst_reg->var_off);
13292 	s64 smin_val = src_reg->smin_value;
13293 	u64 umax_val = src_reg->umax_value;
13294 
13295 	if (src_known && dst_known) {
13296 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
13297 		return;
13298 	}
13299 
13300 	/* We get our minimum from the var_off, since that's inherently
13301 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
13302 	 */
13303 	dst_reg->umin_value = dst_reg->var_off.value;
13304 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
13305 	if (dst_reg->smin_value < 0 || smin_val < 0) {
13306 		/* Lose signed bounds when ANDing negative numbers,
13307 		 * ain't nobody got time for that.
13308 		 */
13309 		dst_reg->smin_value = S64_MIN;
13310 		dst_reg->smax_value = S64_MAX;
13311 	} else {
13312 		/* ANDing two positives gives a positive, so safe to
13313 		 * cast result into s64.
13314 		 */
13315 		dst_reg->smin_value = dst_reg->umin_value;
13316 		dst_reg->smax_value = dst_reg->umax_value;
13317 	}
13318 	/* We may learn something more from the var_off */
13319 	__update_reg_bounds(dst_reg);
13320 }
13321 
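/* Worked example (illustrative): r1 in [0, 255] with var_off (0; 0xff),
 * ANDed with the constant 0xf0:
 *
 *   var_off = tnum_and((0; 0xff), (0xf0; 0)) == (0; 0xf0)
 *   umin    = var_off.value          == 0
 *   umax    = min(255, 0xf0)         == 0xf0
 *
 * Both operands are nonnegative, so the signed bounds mirror the
 * unsigned ones: [0, 0xf0].
 */
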
13322 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
13323 				struct bpf_reg_state *src_reg)
13324 {
13325 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
13326 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
13327 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
13328 	s32 smin_val = src_reg->s32_min_value;
13329 	u32 umin_val = src_reg->u32_min_value;
13330 
13331 	if (src_known && dst_known) {
13332 		__mark_reg32_known(dst_reg, var32_off.value);
13333 		return;
13334 	}
13335 
13336 	/* We get our maximum from the var_off, and our minimum is the
13337 	 * maximum of the operands' minima
13338 	 */
13339 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
13340 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
13341 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
13342 		/* Lose signed bounds when ORing negative numbers,
13343 		 * ain't nobody got time for that.
13344 		 */
13345 		dst_reg->s32_min_value = S32_MIN;
13346 		dst_reg->s32_max_value = S32_MAX;
13347 	} else {
13348 		/* ORing two positives gives a positive, so safe to
13349 		 * cast result into s32.
13350 		 */
13351 		dst_reg->s32_min_value = dst_reg->u32_min_value;
13352 		dst_reg->s32_max_value = dst_reg->u32_max_value;
13353 	}
13354 }
13355 
13356 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
13357 			      struct bpf_reg_state *src_reg)
13358 {
13359 	bool src_known = tnum_is_const(src_reg->var_off);
13360 	bool dst_known = tnum_is_const(dst_reg->var_off);
13361 	s64 smin_val = src_reg->smin_value;
13362 	u64 umin_val = src_reg->umin_value;
13363 
13364 	if (src_known && dst_known) {
13365 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
13366 		return;
13367 	}
13368 
13369 	/* We get our maximum from the var_off, and our minimum is the
13370 	 * maximum of the operands' minima
13371 	 */
13372 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
13373 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
13374 	if (dst_reg->smin_value < 0 || smin_val < 0) {
13375 		/* Lose signed bounds when ORing negative numbers,
13376 		 * ain't nobody got time for that.
13377 		 */
13378 		dst_reg->smin_value = S64_MIN;
13379 		dst_reg->smax_value = S64_MAX;
13380 	} else {
13381 		/* ORing two positives gives a positive, so safe to
13382 		 * cast result into s64.
13383 		 */
13384 		dst_reg->smin_value = dst_reg->umin_value;
13385 		dst_reg->smax_value = dst_reg->umax_value;
13386 	}
13387 	/* We may learn something more from the var_off */
13388 	__update_reg_bounds(dst_reg);
13389 }
13390 
13391 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
13392 				 struct bpf_reg_state *src_reg)
13393 {
13394 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
13395 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
13396 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
13397 	s32 smin_val = src_reg->s32_min_value;
13398 
13399 	if (src_known && dst_known) {
13400 		__mark_reg32_known(dst_reg, var32_off.value);
13401 		return;
13402 	}
13403 
13404 	/* We get both minimum and maximum from the var32_off. */
13405 	dst_reg->u32_min_value = var32_off.value;
13406 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
13407 
13408 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
13409 		/* XORing two positive sign numbers gives a positive,
13410 		 * so safe to cast u32 result into s32.
13411 		 */
13412 		dst_reg->s32_min_value = dst_reg->u32_min_value;
13413 		dst_reg->s32_max_value = dst_reg->u32_max_value;
13414 	} else {
13415 		dst_reg->s32_min_value = S32_MIN;
13416 		dst_reg->s32_max_value = S32_MAX;
13417 	}
13418 }
13419 
13420 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
13421 			       struct bpf_reg_state *src_reg)
13422 {
13423 	bool src_known = tnum_is_const(src_reg->var_off);
13424 	bool dst_known = tnum_is_const(dst_reg->var_off);
13425 	s64 smin_val = src_reg->smin_value;
13426 
13427 	if (src_known && dst_known) {
13428 		/* dst_reg->var_off.value has been updated earlier */
13429 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
13430 		return;
13431 	}
13432 
13433 	/* We get both minimum and maximum from the var_off. */
13434 	dst_reg->umin_value = dst_reg->var_off.value;
13435 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
13436 
13437 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
13438 		/* XORing two positive sign numbers gives a positive,
13439 		 * so safe to cast u64 result into s64.
13440 		 */
13441 		dst_reg->smin_value = dst_reg->umin_value;
13442 		dst_reg->smax_value = dst_reg->umax_value;
13443 	} else {
13444 		dst_reg->smin_value = S64_MIN;
13445 		dst_reg->smax_value = S64_MAX;
13446 	}
13447 
13448 	__update_reg_bounds(dst_reg);
13449 }
13450 
13451 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
13452 				   u64 umin_val, u64 umax_val)
13453 {
13454 	/* We lose all sign bit information (except what we can pick
13455 	 * up from var_off)
13456 	 */
13457 	dst_reg->s32_min_value = S32_MIN;
13458 	dst_reg->s32_max_value = S32_MAX;
13459 	/* If we might shift our top bit out, then we know nothing */
13460 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
13461 		dst_reg->u32_min_value = 0;
13462 		dst_reg->u32_max_value = U32_MAX;
13463 	} else {
13464 		dst_reg->u32_min_value <<= umin_val;
13465 		dst_reg->u32_max_value <<= umax_val;
13466 	}
13467 }
13468 
13469 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
13470 				 struct bpf_reg_state *src_reg)
13471 {
13472 	u32 umax_val = src_reg->u32_max_value;
13473 	u32 umin_val = src_reg->u32_min_value;
13474 	/* u32 alu operation will zext upper bits */
13475 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
13476 
13477 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
13478 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
13479 	/* Not strictly required, but to be careful mark the reg64 bounds as
13480 	 * unknown, so that we are forced to pick them up from the tnum and
13481 	 * zext later; if some path skips this step we are still safe.
13482 	 */
13483 	__mark_reg64_unbounded(dst_reg);
13484 	__update_reg32_bounds(dst_reg);
13485 }
13486 
13487 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
13488 				   u64 umin_val, u64 umax_val)
13489 {
13490 	/* Special case <<32 because it is a common compiler pattern to sign
13491 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
13492 	 * positive we know this shift will also be positive so we can track
13493 	 * bounds correctly. Otherwise we lose all sign bit information except
13494 	 * what we can pick up from var_off. Perhaps we can generalize this
13495 	 * later to shifts of any length.
13496 	 */
13497 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
13498 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
13499 	else
13500 		dst_reg->smax_value = S64_MAX;
13501 
13502 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
13503 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
13504 	else
13505 		dst_reg->smin_value = S64_MIN;
13506 
13507 	/* If we might shift our top bit out, then we know nothing */
13508 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
13509 		dst_reg->umin_value = 0;
13510 		dst_reg->umax_value = U64_MAX;
13511 	} else {
13512 		dst_reg->umin_value <<= umin_val;
13513 		dst_reg->umax_value <<= umax_val;
13514 	}
13515 }
13516 
13517 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
13518 			       struct bpf_reg_state *src_reg)
13519 {
13520 	u64 umax_val = src_reg->umax_value;
13521 	u64 umin_val = src_reg->umin_value;
13522 
13523 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
13524 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
13525 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
13526 
13527 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
13528 	/* We may learn something more from the var_off */
13529 	__update_reg_bounds(dst_reg);
13530 }
13531 
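/* Illustrative example of the <<32 special case handled above: compilers
 * commonly sign-extend a 32-bit subregister with a shift pair:
 *
 *   r1 <<= 32;     // s32 bounds known nonnegative, e.g. [0, 100]
 *   r1 s>>= 32;    // arithmetic shift back down
 *
 * With s32 bounds [0, 100], __scalar64_min_max_lsh() keeps the s64 bounds
 * as [0, (s64)100 << 32] instead of losing them to [S64_MIN, S64_MAX].
 */
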
13532 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
13533 				 struct bpf_reg_state *src_reg)
13534 {
13535 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
13536 	u32 umax_val = src_reg->u32_max_value;
13537 	u32 umin_val = src_reg->u32_min_value;
13538 
13539 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
13540 	 * be negative, then either:
13541 	 * 1) src_reg might be zero, so the sign bit of the result is
13542 	 *    unknown, so we lose our signed bounds
13543 	 * 2) it's known negative, thus the unsigned bounds capture the
13544 	 *    signed bounds
13545 	 * 3) the signed bounds cross zero, so they tell us nothing
13546 	 *    about the result
13547 	 * If the value in dst_reg is known nonnegative, then again the
13548 	 * unsigned bounds capture the signed bounds.
13549 	 * Thus, in all cases it suffices to blow away our signed bounds
13550 	 * and rely on inferring new ones from the unsigned bounds and
13551 	 * var_off of the result.
13552 	 */
13553 	dst_reg->s32_min_value = S32_MIN;
13554 	dst_reg->s32_max_value = S32_MAX;
13555 
13556 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
13557 	dst_reg->u32_min_value >>= umax_val;
13558 	dst_reg->u32_max_value >>= umin_val;
13559 
13560 	__mark_reg64_unbounded(dst_reg);
13561 	__update_reg32_bounds(dst_reg);
13562 }
13563 
13564 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
13565 			       struct bpf_reg_state *src_reg)
13566 {
13567 	u64 umax_val = src_reg->umax_value;
13568 	u64 umin_val = src_reg->umin_value;
13569 
13570 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
13571 	 * be negative, then either:
13572 	 * 1) src_reg might be zero, so the sign bit of the result is
13573 	 *    unknown, so we lose our signed bounds
13574 	 * 2) it's known negative, thus the unsigned bounds capture the
13575 	 *    signed bounds
13576 	 * 3) the signed bounds cross zero, so they tell us nothing
13577 	 *    about the result
13578 	 * If the value in dst_reg is known nonnegative, then again the
13579 	 * unsigned bounds capture the signed bounds.
13580 	 * Thus, in all cases it suffices to blow away our signed bounds
13581 	 * and rely on inferring new ones from the unsigned bounds and
13582 	 * var_off of the result.
13583 	 */
13584 	dst_reg->smin_value = S64_MIN;
13585 	dst_reg->smax_value = S64_MAX;
13586 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
13587 	dst_reg->umin_value >>= umax_val;
13588 	dst_reg->umax_value >>= umin_val;
13589 
13590 	/* It's not easy to operate on alu32 bounds here because it depends
13591 	 * on bits being shifted in. Take the easy way out and mark them
13592 	 * unbounded so we can recalculate later from the tnum.
13593 	 */
13594 	__mark_reg32_unbounded(dst_reg);
13595 	__update_reg_bounds(dst_reg);
13596 }
13597 
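/* Worked example (illustrative) of the unsigned shift bounds above: with
 * dst in [0x100, 0x1000] and a shift amount in [4, 8],
 *
 *   new umin = 0x100  >> 8 == 0x1     (smallest value, largest shift)
 *   new umax = 0x1000 >> 4 == 0x100   (largest value, smallest shift)
 *
 * matching dst_reg->umin_value >>= umax_val and
 * dst_reg->umax_value >>= umin_val.
 */
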
13598 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
13599 				  struct bpf_reg_state *src_reg)
13600 {
13601 	u64 umin_val = src_reg->u32_min_value;
13602 
13603 	/* Upon reaching here, src_known is true and
13604 	 * umax_val is equal to umin_val.
13605 	 */
13606 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
13607 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
13608 
13609 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
13610 
13611 	/* blow away the dst_reg umin_value/umax_value and rely on
13612 	 * dst_reg var_off to refine the result.
13613 	 */
13614 	dst_reg->u32_min_value = 0;
13615 	dst_reg->u32_max_value = U32_MAX;
13616 
13617 	__mark_reg64_unbounded(dst_reg);
13618 	__update_reg32_bounds(dst_reg);
13619 }
13620 
13621 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
13622 				struct bpf_reg_state *src_reg)
13623 {
13624 	u64 umin_val = src_reg->umin_value;
13625 
13626 	/* Upon reaching here, src_known is true and umax_val is equal
13627 	 * to umin_val.
13628 	 */
13629 	dst_reg->smin_value >>= umin_val;
13630 	dst_reg->smax_value >>= umin_val;
13631 
13632 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
13633 
13634 	/* blow away the dst_reg umin_value/umax_value and rely on
13635 	 * dst_reg var_off to refine the result.
13636 	 */
13637 	dst_reg->umin_value = 0;
13638 	dst_reg->umax_value = U64_MAX;
13639 
13640 	/* It's not easy to operate on alu32 bounds here because it depends
13641 	 * on bits being shifted in from the upper 32 bits. Take the easy way
13642 	 * out and mark them unbounded so we can recalculate later from tnum.
13643 	 */
13644 	__mark_reg32_unbounded(dst_reg);
13645 	__update_reg_bounds(dst_reg);
13646 }
13647 
13648 /* WARNING: This function does calculations on 64-bit values, but the actual
13649  * execution may occur on 32-bit values. Therefore, things like bitshifts
13650  * need extra checks in the 32-bit case.
13651  */
13652 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
13653 				      struct bpf_insn *insn,
13654 				      struct bpf_reg_state *dst_reg,
13655 				      struct bpf_reg_state src_reg)
13656 {
13657 	struct bpf_reg_state *regs = cur_regs(env);
13658 	u8 opcode = BPF_OP(insn->code);
13659 	bool src_known;
13660 	s64 smin_val, smax_val;
13661 	u64 umin_val, umax_val;
13662 	s32 s32_min_val, s32_max_val;
13663 	u32 u32_min_val, u32_max_val;
13664 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
13665 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
13666 	int ret;
13667 
13668 	smin_val = src_reg.smin_value;
13669 	smax_val = src_reg.smax_value;
13670 	umin_val = src_reg.umin_value;
13671 	umax_val = src_reg.umax_value;
13672 
13673 	s32_min_val = src_reg.s32_min_value;
13674 	s32_max_val = src_reg.s32_max_value;
13675 	u32_min_val = src_reg.u32_min_value;
13676 	u32_max_val = src_reg.u32_max_value;
13677 
13678 	if (alu32) {
13679 		src_known = tnum_subreg_is_const(src_reg.var_off);
13680 		if ((src_known &&
13681 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
13682 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
13683 			/* Taint dst register if offset had invalid bounds
13684 			 * derived from e.g. dead branches.
13685 			 */
13686 			__mark_reg_unknown(env, dst_reg);
13687 			return 0;
13688 		}
13689 	} else {
13690 		src_known = tnum_is_const(src_reg.var_off);
13691 		if ((src_known &&
13692 		     (smin_val != smax_val || umin_val != umax_val)) ||
13693 		    smin_val > smax_val || umin_val > umax_val) {
13694 			/* Taint dst register if offset had invalid bounds
13695 			 * derived from e.g. dead branches.
13696 			 */
13697 			__mark_reg_unknown(env, dst_reg);
13698 			return 0;
13699 		}
13700 	}
13701 
13702 	if (!src_known &&
13703 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
13704 		__mark_reg_unknown(env, dst_reg);
13705 		return 0;
13706 	}
13707 
13708 	if (sanitize_needed(opcode)) {
13709 		ret = sanitize_val_alu(env, insn);
13710 		if (ret < 0)
13711 			return sanitize_err(env, insn, ret, NULL, NULL);
13712 	}
13713 
13714 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
13715 	 * There are two classes of instructions: for the first class we track
13716 	 * both alu32 and alu64 sign/unsigned bounds independently; this gives
13717 	 * the greatest precision when alu operations are mixed with jmp32
13718 	 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
13719 	 * BPF_OR, and BPF_XOR. This is possible because these ops have fairly
13720 	 * easy to understand and calculate behavior in both 32-bit and 64-bit
13721 	 * alu ops. See the alu32 verifier tests for examples. The second
13722 	 * class, BPF_LSH, BPF_RSH, and BPF_ARSH, is not so easy with regard
13723 	 * to tracking sign/unsigned bounds, because the bits may cross subreg
13724 	 * boundaries in the alu64 case. When this happens we mark the reg
13725 	 * unbounded in the subreg bound space and use the resulting tnum to
13726 	 * calculate an approximation of the sign/unsigned bounds.
13727 	 */
13728 	switch (opcode) {
13729 	case BPF_ADD:
13730 		scalar32_min_max_add(dst_reg, &src_reg);
13731 		scalar_min_max_add(dst_reg, &src_reg);
13732 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
13733 		break;
13734 	case BPF_SUB:
13735 		scalar32_min_max_sub(dst_reg, &src_reg);
13736 		scalar_min_max_sub(dst_reg, &src_reg);
13737 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
13738 		break;
13739 	case BPF_MUL:
13740 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
13741 		scalar32_min_max_mul(dst_reg, &src_reg);
13742 		scalar_min_max_mul(dst_reg, &src_reg);
13743 		break;
13744 	case BPF_AND:
13745 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
13746 		scalar32_min_max_and(dst_reg, &src_reg);
13747 		scalar_min_max_and(dst_reg, &src_reg);
13748 		break;
13749 	case BPF_OR:
13750 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
13751 		scalar32_min_max_or(dst_reg, &src_reg);
13752 		scalar_min_max_or(dst_reg, &src_reg);
13753 		break;
13754 	case BPF_XOR:
13755 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
13756 		scalar32_min_max_xor(dst_reg, &src_reg);
13757 		scalar_min_max_xor(dst_reg, &src_reg);
13758 		break;
13759 	case BPF_LSH:
13760 		if (umax_val >= insn_bitness) {
13761 			/* Shifts greater than 31 or 63 are undefined.
13762 			 * This includes shifts by a negative number.
13763 			 */
13764 			mark_reg_unknown(env, regs, insn->dst_reg);
13765 			break;
13766 		}
13767 		if (alu32)
13768 			scalar32_min_max_lsh(dst_reg, &src_reg);
13769 		else
13770 			scalar_min_max_lsh(dst_reg, &src_reg);
13771 		break;
13772 	case BPF_RSH:
13773 		if (umax_val >= insn_bitness) {
13774 			/* Shifts greater than 31 or 63 are undefined.
13775 			 * This includes shifts by a negative number.
13776 			 */
13777 			mark_reg_unknown(env, regs, insn->dst_reg);
13778 			break;
13779 		}
13780 		if (alu32)
13781 			scalar32_min_max_rsh(dst_reg, &src_reg);
13782 		else
13783 			scalar_min_max_rsh(dst_reg, &src_reg);
13784 		break;
13785 	case BPF_ARSH:
13786 		if (umax_val >= insn_bitness) {
13787 			/* Shifts greater than 31 or 63 are undefined.
13788 			 * This includes shifts by a negative number.
13789 			 */
13790 			mark_reg_unknown(env, regs, insn->dst_reg);
13791 			break;
13792 		}
13793 		if (alu32)
13794 			scalar32_min_max_arsh(dst_reg, &src_reg);
13795 		else
13796 			scalar_min_max_arsh(dst_reg, &src_reg);
13797 		break;
13798 	default:
13799 		mark_reg_unknown(env, regs, insn->dst_reg);
13800 		break;
13801 	}
13802 
13803 	/* ALU32 ops are zero extended into 64bit register */
13804 	if (alu32)
13805 		zext_32_to_64(dst_reg);
13806 	reg_bounds_sync(dst_reg);
13807 	return 0;
13808 }
13809 
13810 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
13811  * and var_off.
13812  */
13813 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
13814 				   struct bpf_insn *insn)
13815 {
13816 	struct bpf_verifier_state *vstate = env->cur_state;
13817 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
13818 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
13819 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
13820 	u8 opcode = BPF_OP(insn->code);
13821 	int err;
13822 
13823 	dst_reg = &regs[insn->dst_reg];
13824 	src_reg = NULL;
13825 	if (dst_reg->type != SCALAR_VALUE)
13826 		ptr_reg = dst_reg;
13827 	else
13828 		/* Make sure ID is cleared otherwise dst_reg min/max could be
13829 		 * incorrectly propagated into other registers by find_equal_scalars()
13830 		 */
13831 		dst_reg->id = 0;
13832 	if (BPF_SRC(insn->code) == BPF_X) {
13833 		src_reg = &regs[insn->src_reg];
13834 		if (src_reg->type != SCALAR_VALUE) {
13835 			if (dst_reg->type != SCALAR_VALUE) {
13836 				/* Combining two pointers by any ALU op yields
13837 				 * an arbitrary scalar. Disallow all math except
13838 				 * pointer subtraction
13839 				 */
13840 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
13841 					mark_reg_unknown(env, regs, insn->dst_reg);
13842 					return 0;
13843 				}
13844 				verbose(env, "R%d pointer %s pointer prohibited\n",
13845 					insn->dst_reg,
13846 					bpf_alu_string[opcode >> 4]);
13847 				return -EACCES;
13848 			} else {
13849 				/* scalar += pointer
13850 				 * This is legal, but we have to reverse our
13851 				 * src/dest handling in computing the range
13852 				 */
13853 				err = mark_chain_precision(env, insn->dst_reg);
13854 				if (err)
13855 					return err;
13856 				return adjust_ptr_min_max_vals(env, insn,
13857 							       src_reg, dst_reg);
13858 			}
13859 		} else if (ptr_reg) {
13860 			/* pointer += scalar */
13861 			err = mark_chain_precision(env, insn->src_reg);
13862 			if (err)
13863 				return err;
13864 			return adjust_ptr_min_max_vals(env, insn,
13865 						       dst_reg, src_reg);
13866 		} else if (dst_reg->precise) {
13867 			/* if dst_reg is precise, src_reg should be precise as well */
13868 			err = mark_chain_precision(env, insn->src_reg);
13869 			if (err)
13870 				return err;
13871 		}
13872 	} else {
13873 		/* Pretend the src is a reg with a known value, since we only
13874 		 * need to be able to read from this state.
13875 		 */
13876 		off_reg.type = SCALAR_VALUE;
13877 		__mark_reg_known(&off_reg, insn->imm);
13878 		src_reg = &off_reg;
13879 		if (ptr_reg) /* pointer += K */
13880 			return adjust_ptr_min_max_vals(env, insn,
13881 						       ptr_reg, src_reg);
13882 	}
13883 
13884 	/* Got here implies adding two SCALAR_VALUEs */
13885 	if (WARN_ON_ONCE(ptr_reg)) {
13886 		print_verifier_state(env, state, true);
13887 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
13888 		return -EINVAL;
13889 	}
13890 	if (WARN_ON(!src_reg)) {
13891 		print_verifier_state(env, state, true);
13892 		verbose(env, "verifier internal error: no src_reg\n");
13893 		return -EINVAL;
13894 	}
13895 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
13896 }
13897 
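/* Illustrative sketch (editor's example) of the src/dst reversal handled
 * above for the commutative 'scalar += pointer' form:
 *
 *   r1 = 8;          // scalar
 *   r1 += r2;        // r2 is PTR_TO_MAP_VALUE
 *
 * Here dst_reg (r1) is the scalar and src_reg (r2) the pointer, so
 * adjust_ptr_min_max_vals() is called with the arguments swapped and r1
 * ends up as PTR_TO_MAP_VALUE with the scalar folded into its offset.
 */
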
13898 /* check validity of 32-bit and 64-bit arithmetic operations */
13899 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
13900 {
13901 	struct bpf_reg_state *regs = cur_regs(env);
13902 	u8 opcode = BPF_OP(insn->code);
13903 	int err;
13904 
13905 	if (opcode == BPF_END || opcode == BPF_NEG) {
13906 		if (opcode == BPF_NEG) {
13907 			if (BPF_SRC(insn->code) != BPF_K ||
13908 			    insn->src_reg != BPF_REG_0 ||
13909 			    insn->off != 0 || insn->imm != 0) {
13910 				verbose(env, "BPF_NEG uses reserved fields\n");
13911 				return -EINVAL;
13912 			}
13913 		} else {
13914 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
13915 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
13916 			    (BPF_CLASS(insn->code) == BPF_ALU64 &&
13917 			     BPF_SRC(insn->code) != BPF_TO_LE)) {
13918 				verbose(env, "BPF_END uses reserved fields\n");
13919 				return -EINVAL;
13920 			}
13921 		}
13922 
13923 		/* check src operand */
13924 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
13925 		if (err)
13926 			return err;
13927 
13928 		if (is_pointer_value(env, insn->dst_reg)) {
13929 			verbose(env, "R%d pointer arithmetic prohibited\n",
13930 				insn->dst_reg);
13931 			return -EACCES;
13932 		}
13933 
13934 		/* check dest operand */
13935 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
13936 		if (err)
13937 			return err;
13938 
13939 	} else if (opcode == BPF_MOV) {
13940 
13941 		if (BPF_SRC(insn->code) == BPF_X) {
13942 			if (insn->imm != 0) {
13943 				verbose(env, "BPF_MOV uses reserved fields\n");
13944 				return -EINVAL;
13945 			}
13946 
13947 			if (BPF_CLASS(insn->code) == BPF_ALU) {
13948 				if (insn->off != 0 && insn->off != 8 && insn->off != 16) {
13949 					verbose(env, "BPF_MOV uses reserved fields\n");
13950 					return -EINVAL;
13951 				}
13952 			} else {
13953 				if (insn->off != 0 && insn->off != 8 && insn->off != 16 &&
13954 				    insn->off != 32) {
13955 					verbose(env, "BPF_MOV uses reserved fields\n");
13956 					return -EINVAL;
13957 				}
13958 			}
13959 
13960 			/* check src operand */
13961 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
13962 			if (err)
13963 				return err;
13964 		} else {
13965 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
13966 				verbose(env, "BPF_MOV uses reserved fields\n");
13967 				return -EINVAL;
13968 			}
13969 		}
13970 
13971 		/* check dest operand, mark as required later */
13972 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
13973 		if (err)
13974 			return err;
13975 
13976 		if (BPF_SRC(insn->code) == BPF_X) {
13977 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
13978 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
13979 
13980 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
13981 				if (insn->off == 0) {
13982 					/* case: R1 = R2
13983 					 * copy register state to dest reg
13984 					 */
13985 					assign_scalar_id_before_mov(env, src_reg);
13986 					copy_register_state(dst_reg, src_reg);
13987 					dst_reg->live |= REG_LIVE_WRITTEN;
13988 					dst_reg->subreg_def = DEF_NOT_SUBREG;
13989 				} else {
13990 					/* case: R1 = (s8, s16, s32)R2 */
13991 					if (is_pointer_value(env, insn->src_reg)) {
13992 						verbose(env,
13993 							"R%d sign-extension part of pointer\n",
13994 							insn->src_reg);
13995 						return -EACCES;
13996 					} else if (src_reg->type == SCALAR_VALUE) {
13997 						bool no_sext;
13998 
13999 						no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
14000 						if (no_sext)
14001 							assign_scalar_id_before_mov(env, src_reg);
14002 						copy_register_state(dst_reg, src_reg);
14003 						if (!no_sext)
14004 							dst_reg->id = 0;
14005 						coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
14006 						dst_reg->live |= REG_LIVE_WRITTEN;
14007 						dst_reg->subreg_def = DEF_NOT_SUBREG;
14008 					} else {
14009 						mark_reg_unknown(env, regs, insn->dst_reg);
14010 					}
14011 				}
14012 			} else {
14013 				/* R1 = (u32) R2 */
14014 				if (is_pointer_value(env, insn->src_reg)) {
14015 					verbose(env,
14016 						"R%d partial copy of pointer\n",
14017 						insn->src_reg);
14018 					return -EACCES;
14019 				} else if (src_reg->type == SCALAR_VALUE) {
14020 					if (insn->off == 0) {
14021 						bool is_src_reg_u32 = get_reg_width(src_reg) <= 32;
14022 
14023 						if (is_src_reg_u32)
14024 							assign_scalar_id_before_mov(env, src_reg);
14025 						copy_register_state(dst_reg, src_reg);
14026 						/* Make sure ID is cleared if src_reg is not in u32
14027 						 * range otherwise dst_reg min/max could be incorrectly
14028 						 * propagated into src_reg by find_equal_scalars()
14029 						 */
14030 						if (!is_src_reg_u32)
14031 							dst_reg->id = 0;
14032 						dst_reg->live |= REG_LIVE_WRITTEN;
14033 						dst_reg->subreg_def = env->insn_idx + 1;
14034 					} else {
14035 						/* case: W1 = (s8, s16)W2 */
14036 						bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
14037 
14038 						if (no_sext)
14039 							assign_scalar_id_before_mov(env, src_reg);
14040 						copy_register_state(dst_reg, src_reg);
14041 						if (!no_sext)
14042 							dst_reg->id = 0;
14043 						dst_reg->live |= REG_LIVE_WRITTEN;
14044 						dst_reg->subreg_def = env->insn_idx + 1;
14045 						coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
14046 					}
14047 				} else {
14048 					mark_reg_unknown(env, regs,
14049 							 insn->dst_reg);
14050 				}
14051 				zext_32_to_64(dst_reg);
14052 				reg_bounds_sync(dst_reg);
14053 			}
14054 		} else {
14055 			/* case: R = imm
14056 			 * remember the value we stored into this reg
14057 			 */
14058 			/* clear any state __mark_reg_known doesn't set */
14059 			mark_reg_unknown(env, regs, insn->dst_reg);
14060 			regs[insn->dst_reg].type = SCALAR_VALUE;
14061 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
14062 				__mark_reg_known(regs + insn->dst_reg,
14063 						 insn->imm);
14064 			} else {
14065 				__mark_reg_known(regs + insn->dst_reg,
14066 						 (u32)insn->imm);
14067 			}
14068 		}
14069 
14070 	} else if (opcode > BPF_END) {
14071 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
14072 		return -EINVAL;
14073 
14074 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
14075 
14076 		if (BPF_SRC(insn->code) == BPF_X) {
14077 			if (insn->imm != 0 || insn->off > 1 ||
14078 			    (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
14079 				verbose(env, "BPF_ALU uses reserved fields\n");
14080 				return -EINVAL;
14081 			}
14082 			/* check src1 operand */
14083 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
14084 			if (err)
14085 				return err;
14086 		} else {
14087 			if (insn->src_reg != BPF_REG_0 || insn->off > 1 ||
14088 			    (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
14089 				verbose(env, "BPF_ALU uses reserved fields\n");
14090 				return -EINVAL;
14091 			}
14092 		}
14093 
14094 		/* check src2 operand */
14095 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14096 		if (err)
14097 			return err;
14098 
14099 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
14100 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
14101 			verbose(env, "div by zero\n");
14102 			return -EINVAL;
14103 		}
14104 
14105 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
14106 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
14107 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
14108 
14109 			if (insn->imm < 0 || insn->imm >= size) {
14110 				verbose(env, "invalid shift %d\n", insn->imm);
14111 				return -EINVAL;
14112 			}
14113 		}
14114 
14115 		/* check dest operand */
14116 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
14117 		err = err ?: adjust_reg_min_max_vals(env, insn);
14118 		if (err)
14119 			return err;
14120 	}
14121 
14122 	return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
14123 }
14124 
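/* Illustrative sketch (editor's example, BPF assembly syntax assumed) of
 * the sign-extending moves validated above, where insn->off selects the
 * source width in bits:
 *
 *   r1 = (s8)r2;     // BPF_ALU64 | BPF_MOV, off == 8
 *   w1 = (s16)w2;    // BPF_ALU   | BPF_MOV, off == 16
 *
 * For a scalar src whose umax_value lies below 1 << (off - 1), the sign
 * bit of the narrow type cannot be set, so the copy keeps the scalar ID;
 * otherwise the ID is cleared and the bounds are coerced by
 * coerce_reg_to_size_sx() or coerce_subreg_to_size_sx().
 */
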
14125 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
14126 				   struct bpf_reg_state *dst_reg,
14127 				   enum bpf_reg_type type,
14128 				   bool range_right_open)
14129 {
14130 	struct bpf_func_state *state;
14131 	struct bpf_reg_state *reg;
14132 	int new_range;
14133 
14134 	if (dst_reg->off < 0 ||
14135 	    (dst_reg->off == 0 && range_right_open))
14136 		/* This doesn't give us any range */
14137 		return;
14138 
14139 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
14140 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
14141 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
14142 		 * than pkt_end, but that's because it's also less than pkt.
14143 		 */
14144 		return;
14145 
14146 	new_range = dst_reg->off;
14147 	if (range_right_open)
14148 		new_range++;
14149 
14150 	/* Examples for register markings:
14151 	 *
14152 	 * pkt_data in dst register:
14153 	 *
14154 	 *   r2 = r3;
14155 	 *   r2 += 8;
14156 	 *   if (r2 > pkt_end) goto <handle exception>
14157 	 *   <access okay>
14158 	 *
14159 	 *   r2 = r3;
14160 	 *   r2 += 8;
14161 	 *   if (r2 < pkt_end) goto <access okay>
14162 	 *   <handle exception>
14163 	 *
14164 	 *   Where:
14165 	 *     r2 == dst_reg, pkt_end == src_reg
14166 	 *     r2=pkt(id=n,off=8,r=0)
14167 	 *     r3=pkt(id=n,off=0,r=0)
14168 	 *
14169 	 * pkt_data in src register:
14170 	 *
14171 	 *   r2 = r3;
14172 	 *   r2 += 8;
14173 	 *   if (pkt_end >= r2) goto <access okay>
14174 	 *   <handle exception>
14175 	 *
14176 	 *   r2 = r3;
14177 	 *   r2 += 8;
14178 	 *   if (pkt_end <= r2) goto <handle exception>
14179 	 *   <access okay>
14180 	 *
14181 	 *   Where:
14182 	 *     pkt_end == dst_reg, r2 == src_reg
14183 	 *     r2=pkt(id=n,off=8,r=0)
14184 	 *     r3=pkt(id=n,off=0,r=0)
14185 	 *
14186 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
14187 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
14188 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
14189 	 * the check.
14190 	 */
14191 
14192 	/* If our ids match, then we must have the same max_value.  And we
14193 	 * don't care about the other reg's fixed offset, since if it's too big
14194 	 * the range won't allow anything.
14195 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
14196 	 */
14197 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
14198 		if (reg->type == type && reg->id == dst_reg->id)
14199 			/* keep the maximum range already checked */
14200 			reg->range = max(reg->range, new_range);
14201 	}));
14202 }
14203 
14204 /*
14205  * <reg1> <op> <reg2>, currently assuming reg2 is a constant
14206  */
14207 static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
14208 				  u8 opcode, bool is_jmp32)
14209 {
14210 	struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off;
14211 	struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off;
14212 	u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value;
14213 	u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value;
14214 	s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value;
14215 	s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value;
14216 	u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value;
14217 	u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value;
14218 	s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value;
14219 	s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value;
14220 
14221 	switch (opcode) {
14222 	case BPF_JEQ:
14223 		/* constants, umin/umax and smin/smax checks would be
14224 		 * redundant in this case because they all should match
14225 		 */
14226 		if (tnum_is_const(t1) && tnum_is_const(t2))
14227 			return t1.value == t2.value;
14228 		/* non-overlapping ranges */
14229 		if (umin1 > umax2 || umax1 < umin2)
14230 			return 0;
14231 		if (smin1 > smax2 || smax1 < smin2)
14232 			return 0;
14233 		if (!is_jmp32) {
14234 			/* if 64-bit ranges are inconclusive, see if we can
14235 			 * utilize 32-bit subrange knowledge to eliminate
14236 			 * branches that can't be taken a priori
14237 			 */
14238 			if (reg1->u32_min_value > reg2->u32_max_value ||
14239 			    reg1->u32_max_value < reg2->u32_min_value)
14240 				return 0;
14241 			if (reg1->s32_min_value > reg2->s32_max_value ||
14242 			    reg1->s32_max_value < reg2->s32_min_value)
14243 				return 0;
14244 		}
14245 		break;
14246 	case BPF_JNE:
14247 		/* for constants, the umin/umax and smin/smax checks below
14248 		 * would be redundant because they would all match the constant
14249 		 */
14250 		if (tnum_is_const(t1) && tnum_is_const(t2))
14251 			return t1.value != t2.value;
14252 		/* non-overlapping ranges */
14253 		if (umin1 > umax2 || umax1 < umin2)
14254 			return 1;
14255 		if (smin1 > smax2 || smax1 < smin2)
14256 			return 1;
14257 		if (!is_jmp32) {
14258 			/* if 64-bit ranges are inconclusive, see if we can
14259 			 * utilize 32-bit subrange knowledge to eliminate
14260 			 * branches that can't be taken a priori
14261 			 */
14262 			if (reg1->u32_min_value > reg2->u32_max_value ||
14263 			    reg1->u32_max_value < reg2->u32_min_value)
14264 				return 1;
14265 			if (reg1->s32_min_value > reg2->s32_max_value ||
14266 			    reg1->s32_max_value < reg2->s32_min_value)
14267 				return 1;
14268 		}
14269 		break;
14270 	case BPF_JSET:
14271 		if (!is_reg_const(reg2, is_jmp32)) {
14272 			swap(reg1, reg2);
14273 			swap(t1, t2);
14274 		}
14275 		if (!is_reg_const(reg2, is_jmp32))
14276 			return -1;
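		/* Illustrative example (values assumed): with t1 = {value = 0x5,
		 * mask = 0x2} (bits 0 and 2 known set, bit 1 unknown) and t2 a
		 * constant 0x4, (~t1.mask & t1.value) & t2.value == 0x4, so
		 * "r1 & 0x4" is always non-zero and the branch is taken; with
		 * t2 == 0x8 instead, (t1.mask | t1.value) & t2.value == 0, so
		 * the branch is never taken.
		 */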
14277 		if ((~t1.mask & t1.value) & t2.value)
14278 			return 1;
14279 		if (!((t1.mask | t1.value) & t2.value))
14280 			return 0;
14281 		break;
14282 	case BPF_JGT:
14283 		if (umin1 > umax2)
14284 			return 1;
14285 		else if (umax1 <= umin2)
14286 			return 0;
14287 		break;
14288 	case BPF_JSGT:
14289 		if (smin1 > smax2)
14290 			return 1;
14291 		else if (smax1 <= smin2)
14292 			return 0;
14293 		break;
14294 	case BPF_JLT:
14295 		if (umax1 < umin2)
14296 			return 1;
14297 		else if (umin1 >= umax2)
14298 			return 0;
14299 		break;
14300 	case BPF_JSLT:
14301 		if (smax1 < smin2)
14302 			return 1;
14303 		else if (smin1 >= smax2)
14304 			return 0;
14305 		break;
14306 	case BPF_JGE:
14307 		if (umin1 >= umax2)
14308 			return 1;
14309 		else if (umax1 < umin2)
14310 			return 0;
14311 		break;
14312 	case BPF_JSGE:
14313 		if (smin1 >= smax2)
14314 			return 1;
14315 		else if (smax1 < smin2)
14316 			return 0;
14317 		break;
14318 	case BPF_JLE:
14319 		if (umax1 <= umin2)
14320 			return 1;
14321 		else if (umin1 > umax2)
14322 			return 0;
14323 		break;
14324 	case BPF_JSLE:
14325 		if (smax1 <= smin2)
14326 			return 1;
14327 		else if (smin1 > smax2)
14328 			return 0;
14329 		break;
14330 	}
14331 
14332 	return -1;
14333 }
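/* Example for is_scalar_branch_taken() above (illustrative): with reg1 known
 * to be in [16, 32] and reg2 a constant 8, BPF_JGT is predicted taken
 * (umin1 == 16 > umax2 == 8), while BPF_JLT is predicted not taken
 * (umin1 >= umax2).
 */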
14334 
14335 static int flip_opcode(u32 opcode)
14336 {
14337 	/* How can we transform "a <op> b" into "b <op> a"? */
14338 	static const u8 opcode_flip[16] = {
14339 		/* these stay the same */
14340 		[BPF_JEQ  >> 4] = BPF_JEQ,
14341 		[BPF_JNE  >> 4] = BPF_JNE,
14342 		[BPF_JSET >> 4] = BPF_JSET,
14343 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
14344 		[BPF_JGE  >> 4] = BPF_JLE,
14345 		[BPF_JGT  >> 4] = BPF_JLT,
14346 		[BPF_JLE  >> 4] = BPF_JGE,
14347 		[BPF_JLT  >> 4] = BPF_JGT,
14348 		[BPF_JSGE >> 4] = BPF_JSLE,
14349 		[BPF_JSGT >> 4] = BPF_JSLT,
14350 		[BPF_JSLE >> 4] = BPF_JSGE,
14351 		[BPF_JSLT >> 4] = BPF_JSGT
14352 	};
14353 	return opcode_flip[opcode >> 4];
14354 }
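/* E.g., flip_opcode(BPF_JGT) == BPF_JLT: "r1 > r2" holds iff "r2 < r1". */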
14355 
14356 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
14357 				   struct bpf_reg_state *src_reg,
14358 				   u8 opcode)
14359 {
14360 	struct bpf_reg_state *pkt;
14361 
14362 	if (src_reg->type == PTR_TO_PACKET_END) {
14363 		pkt = dst_reg;
14364 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
14365 		pkt = src_reg;
14366 		opcode = flip_opcode(opcode);
14367 	} else {
14368 		return -1;
14369 	}
14370 
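	/* mark_pkt_end() records the pkt vs. pkt_end relation as negative
	 * sentinel values (BEYOND_PKT_END, AT_PKT_END) in pkt->range; a
	 * non-negative range carries no such relation, so nothing can be
	 * predicted from it here.
	 */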
14371 	if (pkt->range >= 0)
14372 		return -1;
14373 
14374 	switch (opcode) {
14375 	case BPF_JLE:
14376 		/* pkt <= pkt_end */
14377 		fallthrough;
14378 	case BPF_JGT:
14379 		/* pkt > pkt_end */
14380 		if (pkt->range == BEYOND_PKT_END)
14381 			/* pkt has at least one extra byte beyond pkt_end */
14382 			return opcode == BPF_JGT;
14383 		break;
14384 	case BPF_JLT:
14385 		/* pkt < pkt_end */
14386 		fallthrough;
14387 	case BPF_JGE:
14388 		/* pkt >= pkt_end */
14389 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
14390 			return opcode == BPF_JGE;
14391 		break;
14392 	}
14393 	return -1;
14394 }
14395 
14396 /* compute branch direction of the expression "if (<reg1> opcode <reg2>) goto target;"
14397  * and return:
14398  *  1 - branch will be taken and "goto target" will be executed
14399  *  0 - branch will not be taken and fall-through to next insn
14400  * -1 - unknown. Example: "if (reg1 < 5)" is unknown when the register's
14401  *      value range is [0,10]
14402  */
14403 static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
14404 			   u8 opcode, bool is_jmp32)
14405 {
14406 	if (reg_is_pkt_pointer_any(reg1) && reg_is_pkt_pointer_any(reg2) && !is_jmp32)
14407 		return is_pkt_ptr_branch_taken(reg1, reg2, opcode);
14408 
14409 	if (__is_pointer_value(false, reg1) || __is_pointer_value(false, reg2)) {
14410 		u64 val;
14411 
14412 		/* arrange that reg2 is a scalar, and reg1 is a pointer */
14413 		if (!is_reg_const(reg2, is_jmp32)) {
14414 			opcode = flip_opcode(opcode);
14415 			swap(reg1, reg2);
14416 		}
14417 		/* and ensure that reg2 is a constant */
14418 		if (!is_reg_const(reg2, is_jmp32))
14419 			return -1;
14420 
14421 		if (!reg_not_null(reg1))
14422 			return -1;
14423 
14424 		/* If the pointer is valid, tests against zero will fail, so we
14425 		 * can use this to determine the branch taken.
14426 		 */
14427 		val = reg_const_value(reg2, is_jmp32);
14428 		if (val != 0)
14429 			return -1;
14430 
14431 		switch (opcode) {
14432 		case BPF_JEQ:
14433 			return 0;
14434 		case BPF_JNE:
14435 			return 1;
14436 		default:
14437 			return -1;
14438 		}
14439 	}
14440 
14441 	/* now deal with two scalars, but not necessarily constants */
14442 	return is_scalar_branch_taken(reg1, reg2, opcode, is_jmp32);
14443 }
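/* Example (illustrative): for "if r0 != 0" where r0 is a known non-null map
 * value pointer, the prediction is 1 (always taken); for "if r0 == 0" it is
 * 0 (never taken).
 */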
14444 
14445 /* Opcode that corresponds to a *false* branch condition.
14446  * E.g., if r1 < r2, then reverse (false) condition is r1 >= r2
14447  */
14448 static u8 rev_opcode(u8 opcode)
14449 {
14450 	switch (opcode) {
14451 	case BPF_JEQ:		return BPF_JNE;
14452 	case BPF_JNE:		return BPF_JEQ;
14453 	/* JSET doesn't have its reverse opcode in BPF, so add
14454 	 * BPF_X flag to denote the reverse of that operation
14455 	 */
14456 	case BPF_JSET:		return BPF_JSET | BPF_X;
14457 	case BPF_JSET | BPF_X:	return BPF_JSET;
14458 	case BPF_JGE:		return BPF_JLT;
14459 	case BPF_JGT:		return BPF_JLE;
14460 	case BPF_JLE:		return BPF_JGT;
14461 	case BPF_JLT:		return BPF_JGE;
14462 	case BPF_JSGE:		return BPF_JSLT;
14463 	case BPF_JSGT:		return BPF_JSLE;
14464 	case BPF_JSLE:		return BPF_JSGT;
14465 	case BPF_JSLT:		return BPF_JSGE;
14466 	default:		return 0;
14467 	}
14468 }
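/* E.g., for "if r1 s< r2 goto pc+N" the fall-through state is refined with
 * rev_opcode(BPF_JSLT) == BPF_JSGE, i.e. with the knowledge that r1 s>= r2.
 */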
14469 
14470 /* Refine range knowledge for <reg1> <op> <reg2> conditional operation. */
14471 static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
14472 				u8 opcode, bool is_jmp32)
14473 {
14474 	struct tnum t;
14475 	u64 val;
14476 
14477 again:
14478 	switch (opcode) {
14479 	case BPF_JEQ:
14480 		if (is_jmp32) {
14481 			reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
14482 			reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
14483 			reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
14484 			reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
14485 			reg2->u32_min_value = reg1->u32_min_value;
14486 			reg2->u32_max_value = reg1->u32_max_value;
14487 			reg2->s32_min_value = reg1->s32_min_value;
14488 			reg2->s32_max_value = reg1->s32_max_value;
14489 
14490 			t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off));
14491 			reg1->var_off = tnum_with_subreg(reg1->var_off, t);
14492 			reg2->var_off = tnum_with_subreg(reg2->var_off, t);
14493 		} else {
14494 			reg1->umin_value = max(reg1->umin_value, reg2->umin_value);
14495 			reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
14496 			reg1->smin_value = max(reg1->smin_value, reg2->smin_value);
14497 			reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
14498 			reg2->umin_value = reg1->umin_value;
14499 			reg2->umax_value = reg1->umax_value;
14500 			reg2->smin_value = reg1->smin_value;
14501 			reg2->smax_value = reg1->smax_value;
14502 
14503 			reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off);
14504 			reg2->var_off = reg1->var_off;
14505 		}
14506 		break;
14507 	case BPF_JNE:
14508 		if (!is_reg_const(reg2, is_jmp32))
14509 			swap(reg1, reg2);
14510 		if (!is_reg_const(reg2, is_jmp32))
14511 			break;
14512 
14513 		/* try to recompute the bounds of reg1 if reg2 is a const and
14514 		 * sits exactly at the edge of reg1's range.
14515 		 */
14516 		val = reg_const_value(reg2, is_jmp32);
14517 		if (is_jmp32) {
14518 			/* u32_min_value is not equal to 0xffffffff at this point,
14519 			 * because otherwise u32_max_value is 0xffffffff as well,
14520 			 * in such a case both reg1 and reg2 would be constants,
14521 			 * jump would be predicted and reg_set_min_max() won't
14522 			 * be called.
14523 			 *
14524 			 * Same reasoning works for all {u,s}{min,max}{32,64} cases
14525 			 * below.
14526 			 */
14527 			if (reg1->u32_min_value == (u32)val)
14528 				reg1->u32_min_value++;
14529 			if (reg1->u32_max_value == (u32)val)
14530 				reg1->u32_max_value--;
14531 			if (reg1->s32_min_value == (s32)val)
14532 				reg1->s32_min_value++;
14533 			if (reg1->s32_max_value == (s32)val)
14534 				reg1->s32_max_value--;
14535 		} else {
14536 			if (reg1->umin_value == (u64)val)
14537 				reg1->umin_value++;
14538 			if (reg1->umax_value == (u64)val)
14539 				reg1->umax_value--;
14540 			if (reg1->smin_value == (s64)val)
14541 				reg1->smin_value++;
14542 			if (reg1->smax_value == (s64)val)
14543 				reg1->smax_value--;
14544 		}
14545 		break;
14546 	case BPF_JSET:
14547 		if (!is_reg_const(reg2, is_jmp32))
14548 			swap(reg1, reg2);
14549 		if (!is_reg_const(reg2, is_jmp32))
14550 			break;
14551 		val = reg_const_value(reg2, is_jmp32);
14552 		/* BPF_JSET (i.e., TRUE branch, *not* BPF_JSET | BPF_X)
14553 		 * requires a single-bit constant to learn something useful. E.g., if we
14554 		 * know that `r1 & 0x3` is true, then which bits (0, 1, or both)
14555 		 * are actually set? We can learn something definite only if
14556 		 * it's a single-bit value to begin with.
14557 		 *
14558 		 * BPF_JSET | BPF_X (i.e., negation of BPF_JSET) doesn't have
14559 		 * this restriction. I.e., !(r1 & 0x3) means neither bit 0 nor
14560 		 * bit 1 is set, which we can readily use in adjustments.
14561 		 */
14562 		if (!is_power_of_2(val))
14563 			break;
14564 		if (is_jmp32) {
14565 			t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val));
14566 			reg1->var_off = tnum_with_subreg(reg1->var_off, t);
14567 		} else {
14568 			reg1->var_off = tnum_or(reg1->var_off, tnum_const(val));
14569 		}
14570 		break;
14571 	case BPF_JSET | BPF_X: /* reverse of BPF_JSET, see rev_opcode() */
14572 		if (!is_reg_const(reg2, is_jmp32))
14573 			swap(reg1, reg2);
14574 		if (!is_reg_const(reg2, is_jmp32))
14575 			break;
14576 		val = reg_const_value(reg2, is_jmp32);
14577 		if (is_jmp32) {
14578 			t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val));
14579 			reg1->var_off = tnum_with_subreg(reg1->var_off, t);
14580 		} else {
14581 			reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val));
14582 		}
14583 		break;
14584 	case BPF_JLE:
14585 		if (is_jmp32) {
14586 			reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
14587 			reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
14588 		} else {
14589 			reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
14590 			reg2->umin_value = max(reg1->umin_value, reg2->umin_value);
14591 		}
14592 		break;
14593 	case BPF_JLT:
14594 		if (is_jmp32) {
14595 			reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1);
14596 			reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value);
14597 		} else {
14598 			reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1);
14599 			reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value);
14600 		}
14601 		break;
14602 	case BPF_JSLE:
14603 		if (is_jmp32) {
14604 			reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
14605 			reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
14606 		} else {
14607 			reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
14608 			reg2->smin_value = max(reg1->smin_value, reg2->smin_value);
14609 		}
14610 		break;
14611 	case BPF_JSLT:
14612 		if (is_jmp32) {
14613 			reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1);
14614 			reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value);
14615 		} else {
14616 			reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1);
14617 			reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value);
14618 		}
14619 		break;
14620 	case BPF_JGE:
14621 	case BPF_JGT:
14622 	case BPF_JSGE:
14623 	case BPF_JSGT:
14624 		/* just reuse LE/LT logic above */
14625 		opcode = flip_opcode(opcode);
14626 		swap(reg1, reg2);
14627 		goto again;
14628 	default:
14629 		return;
14630 	}
14631 }
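/* Illustrative example (ranges assumed): for BPF_JLT with r1 in [0, 100] and
 * r2 in [10, 20], the taken branch refines r1->umax_value to
 * min(100, 20 - 1) == 19 and r2->umin_value to max(0 + 1, 10) == 10; the
 * fall-through branch is refined with rev_opcode(BPF_JLT) == BPF_JGE instead.
 */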
14632 
14633 /* Adjusts the register min/max values in the case that the dst_reg and
14634  * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K
14635  * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
14636  * Technically we can do similar adjustments for pointers to the same object,
14637  * but we don't support that right now.
14638  */
14639 static int reg_set_min_max(struct bpf_verifier_env *env,
14640 			   struct bpf_reg_state *true_reg1,
14641 			   struct bpf_reg_state *true_reg2,
14642 			   struct bpf_reg_state *false_reg1,
14643 			   struct bpf_reg_state *false_reg2,
14644 			   u8 opcode, bool is_jmp32)
14645 {
14646 	int err;
14647 
14648 	/* If either register is a pointer, we can't learn anything about its
14649 	 * variable offset from the compare (unless they were pointers into
14650 	 * the same object, but we don't bother with that).
14651 	 */
14652 	if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
14653 		return 0;
14654 
14655 	/* fallthrough (FALSE) branch */
14656 	regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
14657 	reg_bounds_sync(false_reg1);
14658 	reg_bounds_sync(false_reg2);
14659 
14660 	/* jump (TRUE) branch */
14661 	regs_refine_cond_op(true_reg1, true_reg2, opcode, is_jmp32);
14662 	reg_bounds_sync(true_reg1);
14663 	reg_bounds_sync(true_reg2);
14664 
14665 	err = reg_bounds_sanity_check(env, true_reg1, "true_reg1");
14666 	err = err ?: reg_bounds_sanity_check(env, true_reg2, "true_reg2");
14667 	err = err ?: reg_bounds_sanity_check(env, false_reg1, "false_reg1");
14668 	err = err ?: reg_bounds_sanity_check(env, false_reg2, "false_reg2");
14669 	return err;
14670 }
14671 
14672 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
14673 				 struct bpf_reg_state *reg, u32 id,
14674 				 bool is_null)
14675 {
14676 	if (type_may_be_null(reg->type) && reg->id == id &&
14677 	    (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) {
14678 		/* Old offset (both fixed and variable parts) should have been
14679 		 * known-zero, because we don't allow pointer arithmetic on
14680 		 * pointers that might be NULL. If we see this happening, don't
14681 		 * convert the register.
14682 		 *
14683 		 * But in some cases, some helpers that return local kptrs
14684 		 * advance offset for the returned pointer. In those cases, it
14685 		 * is fine to expect to see reg->off.
14686 		 */
14687 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
14688 			return;
14689 		if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
14690 		    WARN_ON_ONCE(reg->off))
14691 			return;
14692 
14693 		if (is_null) {
14694 			reg->type = SCALAR_VALUE;
14695 			/* We don't need id and ref_obj_id from this point
14696 			 * onwards anymore, thus we should better reset it,
14697 			 * so that state pruning has chances to take effect.
14698 			 */
14699 			reg->id = 0;
14700 			reg->ref_obj_id = 0;
14701 
14702 			return;
14703 		}
14704 
14705 		mark_ptr_not_null_reg(reg);
14706 
14707 		if (!reg_may_point_to_spin_lock(reg)) {
14708 			/* For not-NULL ptr, reg->ref_obj_id will be reset
14709 			 * in release_reference().
14710 			 *
14711 			 * reg->id is still used by spin_lock ptr. Other
14712 			 * than spin_lock ptr type, reg->id can be reset.
14713 			 */
14714 			reg->id = 0;
14715 		}
14716 	}
14717 }
14718 
14719 /* The logic is similar to find_good_pkt_pointers(), both could eventually
14720  * be folded together at some point.
14721  */
14722 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
14723 				  bool is_null)
14724 {
14725 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
14726 	struct bpf_reg_state *regs = state->regs, *reg;
14727 	u32 ref_obj_id = regs[regno].ref_obj_id;
14728 	u32 id = regs[regno].id;
14729 
14730 	if (ref_obj_id && ref_obj_id == id && is_null)
14731 		/* regs[regno] is in the " == NULL" branch.
14732 		 * No one could have freed the reference state before
14733 		 * doing the NULL check.
14734 		 */
14735 		WARN_ON_ONCE(release_reference_state(state, id));
14736 
14737 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
14738 		mark_ptr_or_null_reg(state, reg, id, is_null);
14739 	}));
14740 }
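/* Typical pattern handled here (illustrative):
 *   r0 = bpf_map_lookup_elem(...); // r0: PTR_TO_MAP_VALUE_OR_NULL, id == N
 *   r1 = r0;                       // r1 copies id == N
 *   if (r0 == 0) goto out;         // branch: r0 and r1 become SCALAR_VALUE
 *   ...                            // fall-through: both are valid
 *                                  // PTR_TO_MAP_VALUE pointers
 */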
14741 
14742 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
14743 				   struct bpf_reg_state *dst_reg,
14744 				   struct bpf_reg_state *src_reg,
14745 				   struct bpf_verifier_state *this_branch,
14746 				   struct bpf_verifier_state *other_branch)
14747 {
14748 	if (BPF_SRC(insn->code) != BPF_X)
14749 		return false;
14750 
14751 	/* Pointers are always 64-bit. */
14752 	if (BPF_CLASS(insn->code) == BPF_JMP32)
14753 		return false;
14754 
14755 	switch (BPF_OP(insn->code)) {
14756 	case BPF_JGT:
14757 		if ((dst_reg->type == PTR_TO_PACKET &&
14758 		     src_reg->type == PTR_TO_PACKET_END) ||
14759 		    (dst_reg->type == PTR_TO_PACKET_META &&
14760 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14761 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
14762 			find_good_pkt_pointers(this_branch, dst_reg,
14763 					       dst_reg->type, false);
14764 			mark_pkt_end(other_branch, insn->dst_reg, true);
14765 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
14766 			    src_reg->type == PTR_TO_PACKET) ||
14767 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14768 			    src_reg->type == PTR_TO_PACKET_META)) {
14769 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
14770 			find_good_pkt_pointers(other_branch, src_reg,
14771 					       src_reg->type, true);
14772 			mark_pkt_end(this_branch, insn->src_reg, false);
14773 		} else {
14774 			return false;
14775 		}
14776 		break;
14777 	case BPF_JLT:
14778 		if ((dst_reg->type == PTR_TO_PACKET &&
14779 		     src_reg->type == PTR_TO_PACKET_END) ||
14780 		    (dst_reg->type == PTR_TO_PACKET_META &&
14781 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14782 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
14783 			find_good_pkt_pointers(other_branch, dst_reg,
14784 					       dst_reg->type, true);
14785 			mark_pkt_end(this_branch, insn->dst_reg, false);
14786 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
14787 			    src_reg->type == PTR_TO_PACKET) ||
14788 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14789 			    src_reg->type == PTR_TO_PACKET_META)) {
14790 			/* pkt_end < pkt_data', pkt_data > pkt_meta' */
14791 			find_good_pkt_pointers(this_branch, src_reg,
14792 					       src_reg->type, false);
14793 			mark_pkt_end(other_branch, insn->src_reg, true);
14794 		} else {
14795 			return false;
14796 		}
14797 		break;
14798 	case BPF_JGE:
14799 		if ((dst_reg->type == PTR_TO_PACKET &&
14800 		     src_reg->type == PTR_TO_PACKET_END) ||
14801 		    (dst_reg->type == PTR_TO_PACKET_META &&
14802 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14803 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
14804 			find_good_pkt_pointers(this_branch, dst_reg,
14805 					       dst_reg->type, true);
14806 			mark_pkt_end(other_branch, insn->dst_reg, false);
14807 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
14808 			    src_reg->type == PTR_TO_PACKET) ||
14809 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14810 			    src_reg->type == PTR_TO_PACKET_META)) {
14811 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
14812 			find_good_pkt_pointers(other_branch, src_reg,
14813 					       src_reg->type, false);
14814 			mark_pkt_end(this_branch, insn->src_reg, true);
14815 		} else {
14816 			return false;
14817 		}
14818 		break;
14819 	case BPF_JLE:
14820 		if ((dst_reg->type == PTR_TO_PACKET &&
14821 		     src_reg->type == PTR_TO_PACKET_END) ||
14822 		    (dst_reg->type == PTR_TO_PACKET_META &&
14823 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14824 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
14825 			find_good_pkt_pointers(other_branch, dst_reg,
14826 					       dst_reg->type, false);
14827 			mark_pkt_end(this_branch, insn->dst_reg, true);
14828 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
14829 			    src_reg->type == PTR_TO_PACKET) ||
14830 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14831 			    src_reg->type == PTR_TO_PACKET_META)) {
14832 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
14833 			find_good_pkt_pointers(this_branch, src_reg,
14834 					       src_reg->type, true);
14835 			mark_pkt_end(other_branch, insn->src_reg, false);
14836 		} else {
14837 			return false;
14838 		}
14839 		break;
14840 	default:
14841 		return false;
14842 	}
14843 
14844 	return true;
14845 }
14846 
14847 static void find_equal_scalars(struct bpf_verifier_state *vstate,
14848 			       struct bpf_reg_state *known_reg)
14849 {
14850 	struct bpf_func_state *state;
14851 	struct bpf_reg_state *reg;
14852 
14853 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
14854 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
14855 			copy_register_state(reg, known_reg);
14856 	}));
14857 }
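/* E.g. (illustrative): after "r2 = r1" both registers carry the same id, so
 * once a conditional jump refines r1's bounds, copying r1's state to every
 * register with that id keeps r2 in sync.
 */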
14858 
14859 static int check_cond_jmp_op(struct bpf_verifier_env *env,
14860 			     struct bpf_insn *insn, int *insn_idx)
14861 {
14862 	struct bpf_verifier_state *this_branch = env->cur_state;
14863 	struct bpf_verifier_state *other_branch;
14864 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
14865 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
14866 	struct bpf_reg_state *eq_branch_regs;
14867 	struct bpf_reg_state fake_reg = {};
14868 	u8 opcode = BPF_OP(insn->code);
14869 	bool is_jmp32;
14870 	int pred = -1;
14871 	int err;
14872 
14873 	/* Only conditional jumps are expected to reach here. */
14874 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
14875 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
14876 		return -EINVAL;
14877 	}
14878 
14879 	/* check src2 operand */
14880 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14881 	if (err)
14882 		return err;
14883 
14884 	dst_reg = &regs[insn->dst_reg];
14885 	if (BPF_SRC(insn->code) == BPF_X) {
14886 		if (insn->imm != 0) {
14887 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
14888 			return -EINVAL;
14889 		}
14890 
14891 		/* check src1 operand */
14892 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
14893 		if (err)
14894 			return err;
14895 
14896 		src_reg = &regs[insn->src_reg];
14897 		if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
14898 		    is_pointer_value(env, insn->src_reg)) {
14899 			verbose(env, "R%d pointer comparison prohibited\n",
14900 				insn->src_reg);
14901 			return -EACCES;
14902 		}
14903 	} else {
14904 		if (insn->src_reg != BPF_REG_0) {
14905 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
14906 			return -EINVAL;
14907 		}
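		/* BPF_K case: synthesize a known scalar from insn->imm so that
		 * the same reg-vs-reg helpers below can be reused.
		 */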
14908 		src_reg = &fake_reg;
14909 		src_reg->type = SCALAR_VALUE;
14910 		__mark_reg_known(src_reg, insn->imm);
14911 	}
14912 
14913 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
14914 	pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
14915 	if (pred >= 0) {
14916 		/* If we get here with a dst_reg pointer type it is because
14917 		 * above is_branch_taken() special cased the 0 comparison.
14918 		 */
14919 		if (!__is_pointer_value(false, dst_reg))
14920 			err = mark_chain_precision(env, insn->dst_reg);
14921 		if (BPF_SRC(insn->code) == BPF_X && !err &&
14922 		    !__is_pointer_value(false, src_reg))
14923 			err = mark_chain_precision(env, insn->src_reg);
14924 		if (err)
14925 			return err;
14926 	}
14927 
14928 	if (pred == 1) {
14929 		/* Only follow the goto, ignore fall-through. If needed, push
14930 		 * the fall-through branch for simulation under speculative
14931 		 * execution.
14932 		 */
14933 		if (!env->bypass_spec_v1 &&
14934 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
14935 					       *insn_idx))
14936 			return -EFAULT;
14937 		if (env->log.level & BPF_LOG_LEVEL)
14938 			print_insn_state(env, this_branch->frame[this_branch->curframe]);
14939 		*insn_idx += insn->off;
14940 		return 0;
14941 	} else if (pred == 0) {
14942 		/* Only follow the fall-through branch, since that's where the
14943 		 * program will go. If needed, push the goto branch for
14944 		 * simulation under speculative execution.
14945 		 */
14946 		if (!env->bypass_spec_v1 &&
14947 		    !sanitize_speculative_path(env, insn,
14948 					       *insn_idx + insn->off + 1,
14949 					       *insn_idx))
14950 			return -EFAULT;
14951 		if (env->log.level & BPF_LOG_LEVEL)
14952 			print_insn_state(env, this_branch->frame[this_branch->curframe]);
14953 		return 0;
14954 	}
14955 
14956 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
14957 				  false);
14958 	if (!other_branch)
14959 		return -EFAULT;
14960 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
14961 
14962 	if (BPF_SRC(insn->code) == BPF_X) {
14963 		err = reg_set_min_max(env,
14964 				      &other_branch_regs[insn->dst_reg],
14965 				      &other_branch_regs[insn->src_reg],
14966 				      dst_reg, src_reg, opcode, is_jmp32);
14967 	} else /* BPF_SRC(insn->code) == BPF_K */ {
14968 		err = reg_set_min_max(env,
14969 				      &other_branch_regs[insn->dst_reg],
14970 				      src_reg /* fake one */,
14971 				      dst_reg, src_reg /* same fake one */,
14972 				      opcode, is_jmp32);
14973 	}
14974 	if (err)
14975 		return err;
14976 
14977 	if (BPF_SRC(insn->code) == BPF_X &&
14978 	    src_reg->type == SCALAR_VALUE && src_reg->id &&
14979 	    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
14980 		find_equal_scalars(this_branch, src_reg);
14981 		find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
14982 	}
14983 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
14984 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
14985 		find_equal_scalars(this_branch, dst_reg);
14986 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
14987 	}
14988 
14989 	/* if one pointer register is compared to another pointer
14990 	 * register check if PTR_MAYBE_NULL could be lifted.
14991 	 * E.g. register A - maybe null
14992 	 *      register B - not null
14993 	 * for JNE A, B, ... - A is not null in the false branch;
14994 	 * for JEQ A, B, ... - A is not null in the true branch.
14995 	 *
14996 	 * A PTR_TO_BTF_ID points to a kernel struct that does not
14997 	 * need to be null checked by the BPF program, i.e., it
14998 	 * could be null even without the PTR_MAYBE_NULL marking,
14999 	 * so only propagate nullness when neither reg is of that type.
15000 	 */
15001 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
15002 	    __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
15003 	    type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
15004 	    base_type(src_reg->type) != PTR_TO_BTF_ID &&
15005 	    base_type(dst_reg->type) != PTR_TO_BTF_ID) {
15006 		eq_branch_regs = NULL;
15007 		switch (opcode) {
15008 		case BPF_JEQ:
15009 			eq_branch_regs = other_branch_regs;
15010 			break;
15011 		case BPF_JNE:
15012 			eq_branch_regs = regs;
15013 			break;
15014 		default:
15015 			/* do nothing */
15016 			break;
15017 		}
15018 		if (eq_branch_regs) {
15019 			if (type_may_be_null(src_reg->type))
15020 				mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
15021 			else
15022 				mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
15023 		}
15024 	}
15025 
15026 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
15027 	 * NOTE: the optimizations below are related to pointer comparisons,
15028 	 *       which will never be JMP32.
15029 	 */
15030 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
15031 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
15032 	    type_may_be_null(dst_reg->type)) {
15033 		/* Mark all identical registers in each branch as either
15034 		 * safe or unknown depending on the R == 0 or R != 0 condition.
15035 		 */
15036 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
15037 				      opcode == BPF_JNE);
15038 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
15039 				      opcode == BPF_JEQ);
15040 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
15041 					   this_branch, other_branch) &&
15042 		   is_pointer_value(env, insn->dst_reg)) {
15043 		verbose(env, "R%d pointer comparison prohibited\n",
15044 			insn->dst_reg);
15045 		return -EACCES;
15046 	}
15047 	if (env->log.level & BPF_LOG_LEVEL)
15048 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
15049 	return 0;
15050 }
15051 
15052 /* verify BPF_LD_IMM64 instruction */
15053 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
15054 {
15055 	struct bpf_insn_aux_data *aux = cur_aux(env);
15056 	struct bpf_reg_state *regs = cur_regs(env);
15057 	struct bpf_reg_state *dst_reg;
15058 	struct bpf_map *map;
15059 	int err;
15060 
15061 	if (BPF_SIZE(insn->code) != BPF_DW) {
15062 		verbose(env, "invalid BPF_LD_IMM insn\n");
15063 		return -EINVAL;
15064 	}
15065 	if (insn->off != 0) {
15066 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
15067 		return -EINVAL;
15068 	}
15069 
15070 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
15071 	if (err)
15072 		return err;
15073 
15074 	dst_reg = &regs[insn->dst_reg];
15075 	if (insn->src_reg == 0) {
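		/* A BPF_LD_IMM64 spans two insns: the low 32 bits of the
		 * constant are in insn->imm, the high 32 bits in
		 * (insn + 1)->imm.
		 */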
15076 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
15077 
15078 		dst_reg->type = SCALAR_VALUE;
15079 		__mark_reg_known(&regs[insn->dst_reg], imm);
15080 		return 0;
15081 	}
15082 
15083 	/* All special src_reg cases are listed below. From this point onwards
15084 	 * we either succeed and assign a corresponding dst_reg->type after
15085 	 * zeroing the offset, or fail and reject the program.
15086 	 */
15087 	mark_reg_known_zero(env, regs, insn->dst_reg);
15088 
15089 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
15090 		dst_reg->type = aux->btf_var.reg_type;
15091 		switch (base_type(dst_reg->type)) {
15092 		case PTR_TO_MEM:
15093 			dst_reg->mem_size = aux->btf_var.mem_size;
15094 			break;
15095 		case PTR_TO_BTF_ID:
15096 			dst_reg->btf = aux->btf_var.btf;
15097 			dst_reg->btf_id = aux->btf_var.btf_id;
15098 			break;
15099 		default:
15100 			verbose(env, "bpf verifier is misconfigured\n");
15101 			return -EFAULT;
15102 		}
15103 		return 0;
15104 	}
15105 
15106 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
15107 		struct bpf_prog_aux *aux = env->prog->aux;
15108 		u32 subprogno = find_subprog(env,
15109 					     env->insn_idx + insn->imm + 1);
15110 
15111 		if (!aux->func_info) {
15112 			verbose(env, "missing btf func_info\n");
15113 			return -EINVAL;
15114 		}
15115 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
15116 			verbose(env, "callback function not static\n");
15117 			return -EINVAL;
15118 		}
15119 
15120 		dst_reg->type = PTR_TO_FUNC;
15121 		dst_reg->subprogno = subprogno;
15122 		return 0;
15123 	}
15124 
15125 	map = env->used_maps[aux->map_index];
15126 	dst_reg->map_ptr = map;
15127 
15128 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
15129 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
15130 		dst_reg->type = PTR_TO_MAP_VALUE;
15131 		dst_reg->off = aux->map_off;
15132 		WARN_ON_ONCE(map->max_entries != 1);
15133 		/* We want reg->id to be same (0) as map_value is not distinct */
15134 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
15135 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
15136 		dst_reg->type = CONST_PTR_TO_MAP;
15137 	} else {
15138 		verbose(env, "bpf verifier is misconfigured\n");
15139 		return -EINVAL;
15140 	}
15141 
15142 	return 0;
15143 }
15144 
15145 static bool may_access_skb(enum bpf_prog_type type)
15146 {
15147 	switch (type) {
15148 	case BPF_PROG_TYPE_SOCKET_FILTER:
15149 	case BPF_PROG_TYPE_SCHED_CLS:
15150 	case BPF_PROG_TYPE_SCHED_ACT:
15151 		return true;
15152 	default:
15153 		return false;
15154 	}
15155 }
15156 
15157 /* verify safety of LD_ABS|LD_IND instructions:
15158  * - they can only appear in the programs where ctx == skb
15159  * - since they are wrappers of function calls, they scratch R1-R5 registers,
15160  *   preserve R6-R9, and store return value into R0
15161  *
15162  * Implicit input:
15163  *   ctx == skb == R6 == CTX
15164  *
15165  * Explicit input:
15166  *   SRC == any register
15167  *   IMM == 32-bit immediate
15168  *
15169  * Output:
15170  *   R0 - 8/16/32-bit skb data converted to cpu endianness
15171  */
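/* Example (illustrative): BPF_LD_ABS(BPF_H, 12) loads the 16-bit half word at
 * skb->data + 12 (the EtherType of an Ethernet frame) into R0, converted to
 * CPU endianness.
 */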
15172 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
15173 {
15174 	struct bpf_reg_state *regs = cur_regs(env);
15175 	static const int ctx_reg = BPF_REG_6;
15176 	u8 mode = BPF_MODE(insn->code);
15177 	int i, err;
15178 
15179 	if (!may_access_skb(resolve_prog_type(env->prog))) {
15180 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
15181 		return -EINVAL;
15182 	}
15183 
15184 	if (!env->ops->gen_ld_abs) {
15185 		verbose(env, "bpf verifier is misconfigured\n");
15186 		return -EINVAL;
15187 	}
15188 
15189 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
15190 	    BPF_SIZE(insn->code) == BPF_DW ||
15191 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
15192 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
15193 		return -EINVAL;
15194 	}
15195 
15196 	/* check whether implicit source operand (register R6) is readable */
15197 	err = check_reg_arg(env, ctx_reg, SRC_OP);
15198 	if (err)
15199 		return err;
15200 
15201 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
15202 	 * gen_ld_abs() may terminate the program at runtime, leading to
15203 	 * reference leak.
15204 	 */
15205 	err = check_reference_leak(env, false);
15206 	if (err) {
15207 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
15208 		return err;
15209 	}
15210 
15211 	if (env->cur_state->active_lock.ptr) {
15212 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
15213 		return -EINVAL;
15214 	}
15215 
15216 	if (env->cur_state->active_rcu_lock) {
15217 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
15218 		return -EINVAL;
15219 	}
15220 
15221 	if (regs[ctx_reg].type != PTR_TO_CTX) {
15222 		verbose(env,
15223 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
15224 		return -EINVAL;
15225 	}
15226 
15227 	if (mode == BPF_IND) {
15228 		/* check explicit source operand */
15229 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
15230 		if (err)
15231 			return err;
15232 	}
15233 
15234 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
15235 	if (err < 0)
15236 		return err;
15237 
15238 	/* reset caller saved regs to unreadable */
15239 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
15240 		mark_reg_not_init(env, regs, caller_saved[i]);
15241 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
15242 	}
15243 
15244 	/* mark destination R0 register as readable, since it contains
15245 	 * the value fetched from the packet.
15246 	 * Already marked as written above.
15247 	 */
15248 	mark_reg_unknown(env, regs, BPF_REG_0);
15249 	/* ld_abs loads up to 32 bits of skb data. */
15250 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
15251 	return 0;
15252 }
15253 
15254 static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name)
15255 {
15256 	const char *exit_ctx = "At program exit";
15257 	struct tnum enforce_attach_type_range = tnum_unknown;
15258 	const struct bpf_prog *prog = env->prog;
15259 	struct bpf_reg_state *reg;
15260 	struct bpf_retval_range range = retval_range(0, 1);
15261 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
15262 	int err;
15263 	struct bpf_func_state *frame = env->cur_state->frame[0];
15264 	const bool is_subprog = frame->subprogno;
15265 
15266 	/* LSM and struct_ops func-ptr's return type could be "void" */
15267 	if (!is_subprog || frame->in_exception_callback_fn) {
15268 		switch (prog_type) {
15269 		case BPF_PROG_TYPE_LSM:
15270 			if (prog->expected_attach_type == BPF_LSM_CGROUP)
15271 				/* See below, can be 1 or 0-1 depending on hook. */
15272 				break;
15273 			fallthrough;
15274 		case BPF_PROG_TYPE_STRUCT_OPS:
15275 			if (!prog->aux->attach_func_proto->type)
15276 				return 0;
15277 			break;
15278 		default:
15279 			break;
15280 		}
15281 	}
15282 
15283 	/* eBPF calling convention is such that R0 is used
15284 	 * to return the value from the eBPF program.
15285 	 * Make sure that it's readable at the time
15286 	 * of bpf_exit, which means that the program wrote
15287 	 * something into it earlier
15288 	 */
15289 	err = check_reg_arg(env, regno, SRC_OP);
15290 	if (err)
15291 		return err;
15292 
15293 	if (is_pointer_value(env, regno)) {
15294 		verbose(env, "R%d leaks addr as return value\n", regno);
15295 		return -EACCES;
15296 	}
15297 
15298 	reg = cur_regs(env) + regno;
15299 
15300 	if (frame->in_async_callback_fn) {
15301 		/* enforce return zero from async callbacks like timer */
15302 		exit_ctx = "At async callback return";
15303 		range = retval_range(0, 0);
15304 		goto enforce_retval;
15305 	}
15306 
15307 	if (is_subprog && !frame->in_exception_callback_fn) {
15308 		if (reg->type != SCALAR_VALUE) {
15309 			verbose(env, "At subprogram exit the register R%d is not a scalar value (%s)\n",
15310 				regno, reg_type_str(env, reg->type));
15311 			return -EINVAL;
15312 		}
15313 		return 0;
15314 	}
15315 
15316 	switch (prog_type) {
15317 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
15318 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
15319 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
15320 		    env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG ||
15321 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
15322 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
15323 		    env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME ||
15324 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
15325 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME ||
15326 		    env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME)
15327 			range = retval_range(1, 1);
15328 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
15329 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
15330 			range = retval_range(0, 3);
15331 		break;
15332 	case BPF_PROG_TYPE_CGROUP_SKB:
15333 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
15334 			range = retval_range(0, 3);
15335 			enforce_attach_type_range = tnum_range(2, 3);
15336 		}
15337 		break;
15338 	case BPF_PROG_TYPE_CGROUP_SOCK:
15339 	case BPF_PROG_TYPE_SOCK_OPS:
15340 	case BPF_PROG_TYPE_CGROUP_DEVICE:
15341 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
15342 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
15343 		break;
15344 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
15345 		if (!env->prog->aux->attach_btf_id)
15346 			return 0;
15347 		range = retval_range(0, 0);
15348 		break;
15349 	case BPF_PROG_TYPE_TRACING:
15350 		switch (env->prog->expected_attach_type) {
15351 		case BPF_TRACE_FENTRY:
15352 		case BPF_TRACE_FEXIT:
15353 			range = retval_range(0, 0);
15354 			break;
15355 		case BPF_TRACE_RAW_TP:
15356 		case BPF_MODIFY_RETURN:
15357 			return 0;
15358 		case BPF_TRACE_ITER:
15359 			break;
15360 		default:
15361 			return -ENOTSUPP;
15362 		}
15363 		break;
15364 	case BPF_PROG_TYPE_SK_LOOKUP:
15365 		range = retval_range(SK_DROP, SK_PASS);
15366 		break;
15367 
15368 	case BPF_PROG_TYPE_LSM:
15369 		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
15370 			/* Regular BPF_PROG_TYPE_LSM programs can return
15371 			 * any value.
15372 			 */
15373 			return 0;
15374 		}
15375 		if (!env->prog->aux->attach_func_proto->type) {
15376 			/* Make sure programs that attach to void
15377 			 * hooks don't try to modify return value.
15378 			 */
15379 			range = retval_range(1, 1);
15380 		}
15381 		break;
15382 
15383 	case BPF_PROG_TYPE_NETFILTER:
15384 		range = retval_range(NF_DROP, NF_ACCEPT);
15385 		break;
15386 	case BPF_PROG_TYPE_EXT:
15387 		/* freplace program can return anything as its return value
15388 		 * depends on the to-be-replaced kernel func or bpf program.
15389 		 */
15390 	default:
15391 		return 0;
15392 	}
15393 
15394 enforce_retval:
15395 	if (reg->type != SCALAR_VALUE) {
15396 		verbose(env, "%s the register R%d is not a known value (%s)\n",
15397 			exit_ctx, regno, reg_type_str(env, reg->type));
15398 		return -EINVAL;
15399 	}
15400 
15401 	err = mark_chain_precision(env, regno);
15402 	if (err)
15403 		return err;
15404 
15405 	if (!retval_range_within(range, reg)) {
15406 		verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
15407 		if (!is_subprog &&
15408 		    prog->expected_attach_type == BPF_LSM_CGROUP &&
15409 		    prog_type == BPF_PROG_TYPE_LSM &&
15410 		    !prog->aux->attach_func_proto->type)
15411 			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
15412 		return -EINVAL;
15413 	}
15414 
15415 	if (!tnum_is_unknown(enforce_attach_type_range) &&
15416 	    tnum_in(enforce_attach_type_range, reg->var_off))
15417 		env->prog->enforce_expected_attach_type = 1;
15418 	return 0;
15419 }
15420 
15421 /* non-recursive DFS pseudo code
15422  * 1  procedure DFS-iterative(G,v):
15423  * 2      label v as discovered
15424  * 3      let S be a stack
15425  * 4      S.push(v)
15426  * 5      while S is not empty
15427  * 6            t <- S.peek()
15428  * 7            if t is what we're looking for:
15429  * 8                return t
15430  * 9            for all edges e in G.adjacentEdges(t) do
15431  * 10               if edge e is already labelled
15432  * 11                   continue with the next edge
15433  * 12               w <- G.adjacentVertex(t,e)
15434  * 13               if vertex w is not discovered and not explored
15435  * 14                   label e as tree-edge
15436  * 15                   label w as discovered
15437  * 16                   S.push(w)
15438  * 17                   continue at 5
15439  * 18               else if vertex w is discovered
15440  * 19                   label e as back-edge
15441  * 20               else
15442  * 21                   // vertex w is explored
15443  * 22                   label e as forward- or cross-edge
15444  * 23           label t as explored
15445  * 24           S.pop()
15446  *
15447  * convention:
15448  * 0x10 - discovered
15449  * 0x11 - discovered and fall-through edge labelled
15450  * 0x12 - discovered and fall-through and branch edges labelled
15451  * 0x20 - explored
15452  */
15453 
15454 enum {
15455 	DISCOVERED = 0x10,
15456 	EXPLORED = 0x20,
15457 	FALLTHROUGH = 1,
15458 	BRANCH = 2,
15459 };
15460 
15461 static void mark_prune_point(struct bpf_verifier_env *env, int idx)
15462 {
15463 	env->insn_aux_data[idx].prune_point = true;
15464 }
15465 
15466 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
15467 {
15468 	return env->insn_aux_data[insn_idx].prune_point;
15469 }
15470 
15471 static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
15472 {
15473 	env->insn_aux_data[idx].force_checkpoint = true;
15474 }
15475 
15476 static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
15477 {
15478 	return env->insn_aux_data[insn_idx].force_checkpoint;
15479 }
15480 
15481 static void mark_calls_callback(struct bpf_verifier_env *env, int idx)
15482 {
15483 	env->insn_aux_data[idx].calls_callback = true;
15484 }
15485 
15486 static bool calls_callback(struct bpf_verifier_env *env, int insn_idx)
15487 {
15488 	return env->insn_aux_data[insn_idx].calls_callback;
15489 }
15490 
15491 enum {
15492 	DONE_EXPLORING = 0,
15493 	KEEP_EXPLORING = 1,
15494 };
15495 
15496 /* t, w, e - match pseudo-code above:
15497  * t - index of current instruction
15498  * w - next instruction
15499  * e - edge
15500  */
15501 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
15502 {
15503 	int *insn_stack = env->cfg.insn_stack;
15504 	int *insn_state = env->cfg.insn_state;
15505 
15506 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
15507 		return DONE_EXPLORING;
15508 
15509 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
15510 		return DONE_EXPLORING;
15511 
15512 	if (w < 0 || w >= env->prog->len) {
15513 		verbose_linfo(env, t, "%d: ", t);
15514 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
15515 		return -EINVAL;
15516 	}
15517 
15518 	if (e == BRANCH) {
15519 		/* mark branch target for state pruning */
15520 		mark_prune_point(env, w);
15521 		mark_jmp_point(env, w);
15522 	}
15523 
15524 	if (insn_state[w] == 0) {
15525 		/* tree-edge */
15526 		insn_state[t] = DISCOVERED | e;
15527 		insn_state[w] = DISCOVERED;
15528 		if (env->cfg.cur_stack >= env->prog->len)
15529 			return -E2BIG;
15530 		insn_stack[env->cfg.cur_stack++] = w;
15531 		return KEEP_EXPLORING;
15532 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
15533 		if (env->bpf_capable)
15534 			return DONE_EXPLORING;
15535 		verbose_linfo(env, t, "%d: ", t);
15536 		verbose_linfo(env, w, "%d: ", w);
15537 		verbose(env, "back-edge from insn %d to %d\n", t, w);
15538 		return -EINVAL;
15539 	} else if (insn_state[w] == EXPLORED) {
15540 		/* forward- or cross-edge */
15541 		insn_state[t] = DISCOVERED | e;
15542 	} else {
15543 		verbose(env, "insn state internal bug\n");
15544 		return -EFAULT;
15545 	}
15546 	return DONE_EXPLORING;
15547 }
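/* Example (illustrative):
 *   0: r1 += 1
 *   1: if r1 < 10 goto -2    // target = 1 + (-2) + 1 = insn 0
 * visiting insn 1 pushes a BRANCH edge back to insn 0, which is still
 * DISCOVERED, so the edge is a back-edge; unless env->bpf_capable permits
 * bounded loops, the program is rejected.
 */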
15548 
15549 static int visit_func_call_insn(int t, struct bpf_insn *insns,
15550 				struct bpf_verifier_env *env,
15551 				bool visit_callee)
15552 {
15553 	int ret, insn_sz;
15554 
15555 	insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
15556 	ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
15557 	if (ret)
15558 		return ret;
15559 
15560 	mark_prune_point(env, t + insn_sz);
15561 	/* when we exit from subprog, we need to record non-linear history */
15562 	mark_jmp_point(env, t + insn_sz);
15563 
15564 	if (visit_callee) {
15565 		mark_prune_point(env, t);
15566 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
15567 	}
15568 	return ret;
15569 }
15570 
15571 /* Visits the instruction at index t and returns one of the following:
15572  *  < 0 - an error occurred
15573  *  DONE_EXPLORING - the instruction was fully explored
15574  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
15575  */
15576 static int visit_insn(int t, struct bpf_verifier_env *env)
15577 {
15578 	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
15579 	int ret, off, insn_sz;
15580 
15581 	if (bpf_pseudo_func(insn))
15582 		return visit_func_call_insn(t, insns, env, true);
15583 
15584 	/* All non-branch instructions have a single fall-through edge. */
15585 	if (BPF_CLASS(insn->code) != BPF_JMP &&
15586 	    BPF_CLASS(insn->code) != BPF_JMP32) {
15587 		insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
15588 		return push_insn(t, t + insn_sz, FALLTHROUGH, env);
15589 	}
15590 
15591 	switch (BPF_OP(insn->code)) {
15592 	case BPF_EXIT:
15593 		return DONE_EXPLORING;
15594 
15595 	case BPF_CALL:
15596 		if (is_async_callback_calling_insn(insn))
15597 			/* Mark this call insn as a prune point to trigger
15598 			 * is_state_visited() check before the call itself is
15599 			 * processed by __check_func_call(). Otherwise a new
15600 			 * async state will be pushed for further exploration.
15601 			 */
15602 			mark_prune_point(env, t);
15603 		/* For functions that invoke callbacks it is not known how many times
15604 		 * the callback would be called. The verifier models callback-calling
15605 		 * functions by repeatedly visiting callback bodies and returning to
15606 		 * the origin call instruction.
15607 		 * In order to stop such iteration the verifier needs to identify when
15608 		 * a state identical to some state from a previous iteration is reached.
15609 		 * The check below forces creation of a checkpoint before the callback-
15610 		 * calling instruction to allow searching for such identical states.
15611 		 */
15612 		if (is_sync_callback_calling_insn(insn)) {
15613 			mark_calls_callback(env, t);
15614 			mark_force_checkpoint(env, t);
15615 			mark_prune_point(env, t);
15616 			mark_jmp_point(env, t);
15617 		}
15618 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
15619 			struct bpf_kfunc_call_arg_meta meta;
15620 
15621 			ret = fetch_kfunc_meta(env, insn, &meta, NULL);
15622 			if (ret == 0 && is_iter_next_kfunc(&meta)) {
15623 				mark_prune_point(env, t);
15624 				/* Checking and saving state checkpoints at iter_next() call
15625 				 * is crucial for fast convergence of open-coded iterator loop
15626 				 * logic, so we need to force it. If we don't do that,
15627 				 * is_state_visited() might skip saving a checkpoint, causing
15628 				 * unnecessarily long sequence of not checkpointed
15629 				 * instructions and jumps, leading to exhaustion of jump
15630 				 * history buffer, and potentially other undesired outcomes.
15631 				 * It is expected that with correct open-coded iterators
15632 				 * convergence will happen quickly, so we don't run a risk of
15633 				 * exhausting memory.
15634 				 */
15635 				mark_force_checkpoint(env, t);
15636 			}
15637 		}
15638 		return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
15639 
15640 	case BPF_JA:
15641 		if (BPF_SRC(insn->code) != BPF_K)
15642 			return -EINVAL;
15643 
15644 		if (BPF_CLASS(insn->code) == BPF_JMP)
15645 			off = insn->off;
15646 		else
15647 			off = insn->imm;
15648 
15649 		/* unconditional jump with single edge */
15650 		ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
15651 		if (ret)
15652 			return ret;
15653 
15654 		mark_prune_point(env, t + off + 1);
15655 		mark_jmp_point(env, t + off + 1);
15656 
15657 		return ret;
15658 
15659 	default:
15660 		/* conditional jump with two edges */
15661 		mark_prune_point(env, t);
15662 
15663 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
15664 		if (ret)
15665 			return ret;
15666 
15667 		return push_insn(t, t + insn->off + 1, BRANCH, env);
15668 	}
15669 }
15670 
15671 /* non-recursive depth-first-search to detect loops in BPF program
15672  * loop == back-edge in directed graph
15673  */
15674 static int check_cfg(struct bpf_verifier_env *env)
15675 {
15676 	int insn_cnt = env->prog->len;
15677 	int *insn_stack, *insn_state;
15678 	int ex_insn_beg, i, ret = 0;
15679 	bool ex_done = false;
15680 
15681 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
15682 	if (!insn_state)
15683 		return -ENOMEM;
15684 
15685 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
15686 	if (!insn_stack) {
15687 		kvfree(insn_state);
15688 		return -ENOMEM;
15689 	}
15690 
15691 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
15692 	insn_stack[0] = 0; /* 0 is the first instruction */
15693 	env->cfg.cur_stack = 1;
15694 
15695 walk_cfg:
15696 	while (env->cfg.cur_stack > 0) {
15697 		int t = insn_stack[env->cfg.cur_stack - 1];
15698 
15699 		ret = visit_insn(t, env);
15700 		switch (ret) {
15701 		case DONE_EXPLORING:
15702 			insn_state[t] = EXPLORED;
15703 			env->cfg.cur_stack--;
15704 			break;
15705 		case KEEP_EXPLORING:
15706 			break;
15707 		default:
15708 			if (ret > 0) {
15709 				verbose(env, "visit_insn internal bug\n");
15710 				ret = -EFAULT;
15711 			}
15712 			goto err_free;
15713 		}
15714 	}
15715 
15716 	if (env->cfg.cur_stack < 0) {
15717 		verbose(env, "pop stack internal bug\n");
15718 		ret = -EFAULT;
15719 		goto err_free;
15720 	}
15721 
15722 	if (env->exception_callback_subprog && !ex_done) {
15723 		ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start;
15724 
15725 		insn_state[ex_insn_beg] = DISCOVERED;
15726 		insn_stack[0] = ex_insn_beg;
15727 		env->cfg.cur_stack = 1;
15728 		ex_done = true;
15729 		goto walk_cfg;
15730 	}
15731 
15732 	for (i = 0; i < insn_cnt; i++) {
15733 		struct bpf_insn *insn = &env->prog->insnsi[i];
15734 
15735 		if (insn_state[i] != EXPLORED) {
15736 			verbose(env, "unreachable insn %d\n", i);
15737 			ret = -EINVAL;
15738 			goto err_free;
15739 		}
15740 		if (bpf_is_ldimm64(insn)) {
15741 			if (insn_state[i + 1] != 0) {
15742 				verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
15743 				ret = -EINVAL;
15744 				goto err_free;
15745 			}
15746 			i++; /* skip second half of ldimm64 */
15747 		}
15748 	}
15749 	ret = 0; /* cfg looks good */
15750 
15751 err_free:
15752 	kvfree(insn_state);
15753 	kvfree(insn_stack);
15754 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
15755 	return ret;
15756 }
15757 
15758 static int check_abnormal_return(struct bpf_verifier_env *env)
15759 {
15760 	int i;
15761 
15762 	for (i = 1; i < env->subprog_cnt; i++) {
15763 		if (env->subprog_info[i].has_ld_abs) {
15764 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
15765 			return -EINVAL;
15766 		}
15767 		if (env->subprog_info[i].has_tail_call) {
15768 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
15769 			return -EINVAL;
15770 		}
15771 	}
15772 	return 0;
15773 }
15774 
15775 /* The minimum supported BTF func info size */
15776 #define MIN_BPF_FUNCINFO_SIZE	8
15777 #define MAX_FUNCINFO_REC_SIZE	252
15778 
15779 static int check_btf_func_early(struct bpf_verifier_env *env,
15780 				const union bpf_attr *attr,
15781 				bpfptr_t uattr)
15782 {
15783 	u32 krec_size = sizeof(struct bpf_func_info);
15784 	const struct btf_type *type, *func_proto;
15785 	u32 i, nfuncs, urec_size, min_size;
15786 	struct bpf_func_info *krecord;
15787 	struct bpf_prog *prog;
15788 	const struct btf *btf;
15789 	u32 prev_offset = 0;
15790 	bpfptr_t urecord;
15791 	int ret = -ENOMEM;
15792 
15793 	nfuncs = attr->func_info_cnt;
15794 	if (!nfuncs) {
15795 		if (check_abnormal_return(env))
15796 			return -EINVAL;
15797 		return 0;
15798 	}
15799 
15800 	urec_size = attr->func_info_rec_size;
15801 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
15802 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
15803 	    urec_size % sizeof(u32)) {
15804 		verbose(env, "invalid func info rec size %u\n", urec_size);
15805 		return -EINVAL;
15806 	}
15807 
15808 	prog = env->prog;
15809 	btf = prog->aux->btf;
15810 
15811 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
15812 	min_size = min_t(u32, krec_size, urec_size);
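	/* Copy only the prefix both the kernel and userspace agree on;
	 * bpf_check_uarg_tail_zero() below rejects records whose
	 * kernel-unknown tail is non-zero.
	 */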
15813 
15814 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
15815 	if (!krecord)
15816 		return -ENOMEM;
15817 
15818 	for (i = 0; i < nfuncs; i++) {
15819 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
15820 		if (ret) {
15821 			if (ret == -E2BIG) {
15822 				verbose(env, "nonzero trailing record in func info");
15823 				/* set the size the kernel expects so the loader can zero
15824 				 * out the rest of the record.
15825 				 */
15826 				if (copy_to_bpfptr_offset(uattr,
15827 							  offsetof(union bpf_attr, func_info_rec_size),
15828 							  &min_size, sizeof(min_size)))
15829 					ret = -EFAULT;
15830 			}
15831 			goto err_free;
15832 		}
15833 
15834 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
15835 			ret = -EFAULT;
15836 			goto err_free;
15837 		}
15838 
15839 		/* check insn_off */
15840 		ret = -EINVAL;
15841 		if (i == 0) {
15842 			if (krecord[i].insn_off) {
15843 				verbose(env,
15844 					"nonzero insn_off %u for the first func info record",
15845 					krecord[i].insn_off);
15846 				goto err_free;
15847 			}
15848 		} else if (krecord[i].insn_off <= prev_offset) {
15849 			verbose(env,
15850 				"same or smaller insn offset (%u) than previous func info record (%u)",
15851 				krecord[i].insn_off, prev_offset);
15852 			goto err_free;
15853 		}
15854 
15855 		/* check type_id */
15856 		type = btf_type_by_id(btf, krecord[i].type_id);
15857 		if (!type || !btf_type_is_func(type)) {
15858 			verbose(env, "invalid type id %d in func info",
15859 				krecord[i].type_id);
15860 			goto err_free;
15861 		}
15862 
15863 		func_proto = btf_type_by_id(btf, type->type);
15864 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
15865 			/* btf_func_check() already verified it during BTF load */
15866 			goto err_free;
15867 
15868 		prev_offset = krecord[i].insn_off;
15869 		bpfptr_add(&urecord, urec_size);
15870 	}
15871 
15872 	prog->aux->func_info = krecord;
15873 	prog->aux->func_info_cnt = nfuncs;
15874 	return 0;
15875 
15876 err_free:
15877 	kvfree(krecord);
15878 	return ret;
15879 }
15880 
15881 static int check_btf_func(struct bpf_verifier_env *env,
15882 			  const union bpf_attr *attr,
15883 			  bpfptr_t uattr)
15884 {
15885 	const struct btf_type *type, *func_proto, *ret_type;
15886 	u32 i, nfuncs, urec_size;
15887 	struct bpf_func_info *krecord;
15888 	struct bpf_func_info_aux *info_aux = NULL;
15889 	struct bpf_prog *prog;
15890 	const struct btf *btf;
15891 	bpfptr_t urecord;
15892 	bool scalar_return;
15893 	int ret = -ENOMEM;
15894 
15895 	nfuncs = attr->func_info_cnt;
15896 	if (!nfuncs) {
15897 		if (check_abnormal_return(env))
15898 			return -EINVAL;
15899 		return 0;
15900 	}
15901 	if (nfuncs != env->subprog_cnt) {
15902 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
15903 		return -EINVAL;
15904 	}
15905 
15906 	urec_size = attr->func_info_rec_size;
15907 
15908 	prog = env->prog;
15909 	btf = prog->aux->btf;
15910 
15911 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
15912 
15913 	krecord = prog->aux->func_info;
15914 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
15915 	if (!info_aux)
15916 		return -ENOMEM;
15917 
15918 	for (i = 0; i < nfuncs; i++) {
15919 		/* check insn_off */
15920 		ret = -EINVAL;
15921 
15922 		if (env->subprog_info[i].start != krecord[i].insn_off) {
15923 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
15924 			goto err_free;
15925 		}
15926 
15927 		/* Already checked type_id */
15928 		type = btf_type_by_id(btf, krecord[i].type_id);
15929 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
15930 		/* Already checked func_proto */
15931 		func_proto = btf_type_by_id(btf, type->type);
15932 
15933 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
15934 		scalar_return =
15935 			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
15936 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
15937 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
15938 			goto err_free;
15939 		}
15940 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
15941 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
15942 			goto err_free;
15943 		}
15944 
15945 		bpfptr_add(&urecord, urec_size);
15946 	}
15947 
15948 	prog->aux->func_info_aux = info_aux;
15949 	return 0;
15950 
15951 err_free:
15952 	kfree(info_aux);
15953 	return ret;
15954 }
15955 
15956 static void adjust_btf_func(struct bpf_verifier_env *env)
15957 {
15958 	struct bpf_prog_aux *aux = env->prog->aux;
15959 	int i;
15960 
15961 	if (!aux->func_info)
15962 		return;
15963 
15964 	/* func_info is not available for hidden subprogs */
15965 	for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++)
15966 		aux->func_info[i].insn_off = env->subprog_info[i].start;
15967 }
15968 
15969 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
15970 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
15971 
15972 static int check_btf_line(struct bpf_verifier_env *env,
15973 			  const union bpf_attr *attr,
15974 			  bpfptr_t uattr)
15975 {
15976 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
15977 	struct bpf_subprog_info *sub;
15978 	struct bpf_line_info *linfo;
15979 	struct bpf_prog *prog;
15980 	const struct btf *btf;
15981 	bpfptr_t ulinfo;
15982 	int err;
15983 
15984 	nr_linfo = attr->line_info_cnt;
15985 	if (!nr_linfo)
15986 		return 0;
15987 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
15988 		return -EINVAL;
15989 
15990 	rec_size = attr->line_info_rec_size;
15991 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
15992 	    rec_size > MAX_LINEINFO_REC_SIZE ||
15993 	    rec_size & (sizeof(u32) - 1))
15994 		return -EINVAL;
15995 
15996 	/* Need to zero it in case userspace passes
15997 	 * in a smaller bpf_line_info object.
15998 	 */
15999 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
16000 			 GFP_KERNEL | __GFP_NOWARN);
16001 	if (!linfo)
16002 		return -ENOMEM;
16003 
16004 	prog = env->prog;
16005 	btf = prog->aux->btf;
16006 
16007 	s = 0;
16008 	sub = env->subprog_info;
16009 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
16010 	expected_size = sizeof(struct bpf_line_info);
16011 	ncopy = min_t(u32, expected_size, rec_size);
16012 	for (i = 0; i < nr_linfo; i++) {
16013 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
16014 		if (err) {
16015 			if (err == -E2BIG) {
16016 				verbose(env, "nonzero trailing record in line_info");
16017 				if (copy_to_bpfptr_offset(uattr,
16018 							  offsetof(union bpf_attr, line_info_rec_size),
16019 							  &expected_size, sizeof(expected_size)))
16020 					err = -EFAULT;
16021 			}
16022 			goto err_free;
16023 		}
16024 
16025 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
16026 			err = -EFAULT;
16027 			goto err_free;
16028 		}
16029 
16030 		/*
16031 		 * Check insn_off to ensure
16032 		 * 1) strictly increasing AND
16033 		 * 2) bounded by prog->len
16034 		 *
16035 		 * The linfo[0].insn_off == 0 check logically falls into
16036 		 * the later "missing bpf_line_info for func..." case
16037 		 * because linfo[0].insn_off must match the start of the
16038 		 * first subprog, and the first subprog must have
16039 		 * subprog_info[0].start == 0.
16040 		 */
16041 		if ((i && linfo[i].insn_off <= prev_offset) ||
16042 		    linfo[i].insn_off >= prog->len) {
16043 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
16044 				i, linfo[i].insn_off, prev_offset,
16045 				prog->len);
16046 			err = -EINVAL;
16047 			goto err_free;
16048 		}
16049 
16050 		if (!prog->insnsi[linfo[i].insn_off].code) {
16051 			verbose(env,
16052 				"Invalid insn code at line_info[%u].insn_off\n",
16053 				i);
16054 			err = -EINVAL;
16055 			goto err_free;
16056 		}
16057 
16058 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
16059 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
16060 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
16061 			err = -EINVAL;
16062 			goto err_free;
16063 		}
16064 
16065 		if (s != env->subprog_cnt) {
16066 			if (linfo[i].insn_off == sub[s].start) {
16067 				sub[s].linfo_idx = i;
16068 				s++;
16069 			} else if (sub[s].start < linfo[i].insn_off) {
16070 				verbose(env, "missing bpf_line_info for func#%u\n", s);
16071 				err = -EINVAL;
16072 				goto err_free;
16073 			}
16074 		}
16075 
16076 		prev_offset = linfo[i].insn_off;
16077 		bpfptr_add(&ulinfo, rec_size);
16078 	}
16079 
16080 	if (s != env->subprog_cnt) {
16081 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
16082 			env->subprog_cnt - s, s);
16083 		err = -EINVAL;
16084 		goto err_free;
16085 	}
16086 
16087 	prog->aux->linfo = linfo;
16088 	prog->aux->nr_linfo = nr_linfo;
16089 
16090 	return 0;
16091 
16092 err_free:
16093 	kvfree(linfo);
16094 	return err;
16095 }
16096 
16097 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
16098 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
16099 
16100 static int check_core_relo(struct bpf_verifier_env *env,
16101 			   const union bpf_attr *attr,
16102 			   bpfptr_t uattr)
16103 {
16104 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
16105 	struct bpf_core_relo core_relo = {};
16106 	struct bpf_prog *prog = env->prog;
16107 	const struct btf *btf = prog->aux->btf;
16108 	struct bpf_core_ctx ctx = {
16109 		.log = &env->log,
16110 		.btf = btf,
16111 	};
16112 	bpfptr_t u_core_relo;
16113 	int err;
16114 
16115 	nr_core_relo = attr->core_relo_cnt;
16116 	if (!nr_core_relo)
16117 		return 0;
16118 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
16119 		return -EINVAL;
16120 
16121 	rec_size = attr->core_relo_rec_size;
16122 	if (rec_size < MIN_CORE_RELO_SIZE ||
16123 	    rec_size > MAX_CORE_RELO_SIZE ||
16124 	    rec_size % sizeof(u32))
16125 		return -EINVAL;
16126 
16127 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
16128 	expected_size = sizeof(struct bpf_core_relo);
16129 	ncopy = min_t(u32, expected_size, rec_size);
16130 
16131 	/* Unlike func_info and line_info, copy and apply each CO-RE
16132 	 * relocation record one at a time.
16133 	 */
16134 	for (i = 0; i < nr_core_relo; i++) {
16135 		/* future-proofing for when sizeof(bpf_core_relo) changes */
16136 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
16137 		if (err) {
16138 			if (err == -E2BIG) {
16139 				verbose(env, "nonzero trailing record in core_relo");
16140 				if (copy_to_bpfptr_offset(uattr,
16141 							  offsetof(union bpf_attr, core_relo_rec_size),
16142 							  &expected_size, sizeof(expected_size)))
16143 					err = -EFAULT;
16144 			}
16145 			break;
16146 		}
16147 
16148 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
16149 			err = -EFAULT;
16150 			break;
16151 		}
16152 
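		/* Unlike func_info/line_info, core_relo.insn_off is a byte
		 * offset, hence the 8-byte alignment check and the division
		 * by 8 below to index into prog->insnsi[].
		 */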
16153 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
16154 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
16155 				i, core_relo.insn_off, prog->len);
16156 			err = -EINVAL;
16157 			break;
16158 		}
16159 
16160 		err = bpf_core_apply(&ctx, &core_relo, i,
16161 				     &prog->insnsi[core_relo.insn_off / 8]);
16162 		if (err)
16163 			break;
16164 		bpfptr_add(&u_core_relo, rec_size);
16165 	}
16166 	return err;
16167 }
16168 
16169 static int check_btf_info_early(struct bpf_verifier_env *env,
16170 				const union bpf_attr *attr,
16171 				bpfptr_t uattr)
16172 {
16173 	struct btf *btf;
16174 	int err;
16175 
16176 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
16177 		if (check_abnormal_return(env))
16178 			return -EINVAL;
16179 		return 0;
16180 	}
16181 
16182 	btf = btf_get_by_fd(attr->prog_btf_fd);
16183 	if (IS_ERR(btf))
16184 		return PTR_ERR(btf);
16185 	if (btf_is_kernel(btf)) {
16186 		btf_put(btf);
16187 		return -EACCES;
16188 	}
16189 	env->prog->aux->btf = btf;
16190 
16191 	err = check_btf_func_early(env, attr, uattr);
16192 	if (err)
16193 		return err;
16194 	return 0;
16195 }
16196 
16197 static int check_btf_info(struct bpf_verifier_env *env,
16198 			  const union bpf_attr *attr,
16199 			  bpfptr_t uattr)
16200 {
16201 	int err;
16202 
16203 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
16204 		if (check_abnormal_return(env))
16205 			return -EINVAL;
16206 		return 0;
16207 	}
16208 
16209 	err = check_btf_func(env, attr, uattr);
16210 	if (err)
16211 		return err;
16212 
16213 	err = check_btf_line(env, attr, uattr);
16214 	if (err)
16215 		return err;
16216 
16217 	err = check_core_relo(env, attr, uattr);
16218 	if (err)
16219 		return err;
16220 
16221 	return 0;
16222 }
16223 
16224 /* check %cur's range satisfies %old's */
16225 static bool range_within(struct bpf_reg_state *old,
16226 			 struct bpf_reg_state *cur)
16227 {
16228 	return old->umin_value <= cur->umin_value &&
16229 	       old->umax_value >= cur->umax_value &&
16230 	       old->smin_value <= cur->smin_value &&
16231 	       old->smax_value >= cur->smax_value &&
16232 	       old->u32_min_value <= cur->u32_min_value &&
16233 	       old->u32_max_value >= cur->u32_max_value &&
16234 	       old->s32_min_value <= cur->s32_min_value &&
16235 	       old->s32_max_value >= cur->s32_max_value;
16236 }
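
/* E.g. an old range of [10, 20] is satisfied by a cur range of [12, 15]
 * (every value cur may take was already proven safe), but not by [5, 15]:
 * the value 5 was never validated in the old state.
 */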
16237 
16238 /* If in the old state two registers had the same id, then they need to have
16239  * the same id in the new state as well.  But that id could be different from
16240  * the old state, so we need to track the mapping from old to new ids.
16241  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
16242  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
16243  * regs with a different old id could still have new id 9, we don't care about
16244  * that.
16245  * So we look through our idmap to see if this old id has been seen before.  If
16246  * so, we require the new id to match; otherwise, we add the id pair to the map.
16247  */
16248 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
16249 {
16250 	struct bpf_id_pair *map = idmap->map;
16251 	unsigned int i;
16252 
16253 	/* either both IDs should be set or both should be zero */
16254 	if (!!old_id != !!cur_id)
16255 		return false;
16256 
16257 	if (old_id == 0) /* cur_id == 0 as well */
16258 		return true;
16259 
16260 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
16261 		if (!map[i].old) {
16262 			/* Reached an empty slot; haven't seen this id before */
16263 			map[i].old = old_id;
16264 			map[i].cur = cur_id;
16265 			return true;
16266 		}
16267 		if (map[i].old == old_id)
16268 			return map[i].cur == cur_id;
16269 		if (map[i].cur == cur_id)
16270 			return false;
16271 	}
16272 	/* We ran out of idmap slots, which should be impossible */
16273 	WARN_ON_ONCE(1);
16274 	return false;
16275 }
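
/* Example: if the old state has r1.id == r2.id == 5 and the current
 * state has r1.id == r2.id == 9, the first check_ids(5, 9, idmap) call
 * records the 5 -> 9 mapping and succeeds, and the second call matches
 * the recorded mapping. Had the current state used r2.id == 8 instead,
 * the second call would fail, since old id 5 is already bound to 9.
 */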
16276 
16277 /* Similar to check_ids(), but allocate a unique temporary ID
16278  * for 'old_id' or 'cur_id' of zero.
16279  * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid.
16280  */
16281 static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
16282 {
16283 	old_id = old_id ? old_id : ++idmap->tmp_id_gen;
16284 	cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen;
16285 
16286 	return check_ids(old_id, cur_id, idmap);
16287 }
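
/* E.g. an old scalar with no id may be matched against a current scalar
 * with id 7: the zero id is replaced by a fresh temporary id and the
 * pair is recorded like any other, so later occurrences of id 7 in the
 * current state are still checked for consistency.
 */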
16288 
16289 static void clean_func_state(struct bpf_verifier_env *env,
16290 			     struct bpf_func_state *st)
16291 {
16292 	enum bpf_reg_liveness live;
16293 	int i, j;
16294 
16295 	for (i = 0; i < BPF_REG_FP; i++) {
16296 		live = st->regs[i].live;
16297 		/* liveness must not touch this register anymore */
16298 		st->regs[i].live |= REG_LIVE_DONE;
16299 		if (!(live & REG_LIVE_READ))
16300 			/* since the register is unused, clear its state
16301 			 * to make further comparison simpler
16302 			 */
16303 			__mark_reg_not_init(env, &st->regs[i]);
16304 	}
16305 
16306 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
16307 		live = st->stack[i].spilled_ptr.live;
16308 		/* liveness must not touch this stack slot anymore */
16309 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
16310 		if (!(live & REG_LIVE_READ)) {
16311 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
16312 			for (j = 0; j < BPF_REG_SIZE; j++)
16313 				st->stack[i].slot_type[j] = STACK_INVALID;
16314 		}
16315 	}
16316 }
16317 
16318 static void clean_verifier_state(struct bpf_verifier_env *env,
16319 				 struct bpf_verifier_state *st)
16320 {
16321 	int i;
16322 
16323 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
16324 		/* all regs in this state in all frames were already marked */
16325 		return;
16326 
16327 	for (i = 0; i <= st->curframe; i++)
16328 		clean_func_state(env, st->frame[i]);
16329 }
16330 
16331 /* the parentage chains form a tree.
16332  * the verifier states are added to state lists at given insn and
16333  * pushed into state stack for future exploration.
16334  * when the verifier reaches bpf_exit insn some of the verifier states
16335  * stored in the state lists have their final liveness state already,
16336  * but a lot of states will get revised from liveness point of view when
16337  * the verifier explores other branches.
16338  * Example:
16339  * 1: r0 = 1
16340  * 2: if r1 == 100 goto pc+1
16341  * 3: r0 = 2
16342  * 4: exit
16343  * when the verifier reaches exit insn the register r0 in the state list of
16344  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
16345  * of insn 2 and goes exploring further. At the insn 4 it will walk the
16346  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
16347  *
16348  * Since the verifier pushes the branch states as it sees them while exploring
16349  * the program, walking the branch instruction for the second
16350  * time means that all states below this branch were already explored and
16351  * their final liveness marks are already propagated.
16352  * Hence when the verifier completes the search of the state list in is_state_visited()
16353  * we can call this clean_live_states() function to mark all liveness states
16354  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
16355  * will not be used.
16356  * This function also clears the registers and stack slots that were not
16357  * marked REG_LIVE_READ to simplify state merging.
16358  *
16359  * An important note: walking the same branch instruction in the callee
16360  * doesn't mean that the states are DONE. The verifier has to compare
16361  * the callsites as well.
16362  */
16363 static void clean_live_states(struct bpf_verifier_env *env, int insn,
16364 			      struct bpf_verifier_state *cur)
16365 {
16366 	struct bpf_verifier_state_list *sl;
16367 
16368 	sl = *explored_state(env, insn);
16369 	while (sl) {
16370 		if (sl->state.branches)
16371 			goto next;
16372 		if (sl->state.insn_idx != insn ||
16373 		    !same_callsites(&sl->state, cur))
16374 			goto next;
16375 		clean_verifier_state(env, &sl->state);
16376 next:
16377 		sl = sl->next;
16378 	}
16379 }
16380 
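/* Byte-compare every field laid out before 'id' in struct bpf_reg_state.
 * 'id' and 'ref_obj_id' cannot be compared directly, since equivalent
 * states may use different id values; they are checked through the
 * old->cur id mapping instead.
 */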
16381 static bool regs_exact(const struct bpf_reg_state *rold,
16382 		       const struct bpf_reg_state *rcur,
16383 		       struct bpf_idmap *idmap)
16384 {
16385 	return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
16386 	       check_ids(rold->id, rcur->id, idmap) &&
16387 	       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
16388 }
16389 
16390 /* Returns true if (rold safe implies rcur safe) */
16391 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
16392 		    struct bpf_reg_state *rcur, struct bpf_idmap *idmap, bool exact)
16393 {
16394 	if (exact)
16395 		return regs_exact(rold, rcur, idmap);
16396 
16397 	if (!(rold->live & REG_LIVE_READ))
16398 		/* explored state didn't use this */
16399 		return true;
16400 	if (rold->type == NOT_INIT)
16401 		/* explored state can't have used this */
16402 		return true;
16403 	if (rcur->type == NOT_INIT)
16404 		return false;
16405 
16406 	/* Enforce that register types have to match exactly, including their
16407 	 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
16408 	 * rule.
16409 	 *
16410 	 * One can make a point that using a pointer register as unbounded
16411 	 * SCALAR would be technically acceptable, but this could lead to
16412 	 * pointer leaks because scalars are allowed to leak while pointers
16413 	 * are not. We could make this safe in special cases if root is
16414 	 * calling us, but it's probably not worth the hassle.
16415 	 *
16416 	 * Also, register types that are *not* MAYBE_NULL could technically be
16417 	 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
16418 	 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
16419 	 * to the same map).
16420 	 * However, if the old MAYBE_NULL register then got NULL checked,
16421 	 * doing so could have affected others with the same id, and we can't
16422 	 * check for that because we lost the id when we converted to
16423 	 * a non-MAYBE_NULL variant.
16424 	 * So, as a general rule we don't allow mixing MAYBE_NULL and
16425 	 * non-MAYBE_NULL registers as well.
16426 	 */
16427 	if (rold->type != rcur->type)
16428 		return false;
16429 
16430 	switch (base_type(rold->type)) {
16431 	case SCALAR_VALUE:
16432 		if (env->explore_alu_limits) {
16433 			/* explore_alu_limits disables tnum_in() and range_within()
16434 			 * logic and requires everything to be strict
16435 			 */
16436 			return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
16437 			       check_scalar_ids(rold->id, rcur->id, idmap);
16438 		}
16439 		if (!rold->precise)
16440 			return true;
16441 		/* Why check_ids() for scalar registers?
16442 		 *
16443 		 * Consider the following BPF code:
16444 		 *   1: r6 = ... unbound scalar, ID=a ...
16445 		 *   2: r7 = ... unbound scalar, ID=b ...
16446 		 *   3: if (r6 > r7) goto +1
16447 		 *   4: r6 = r7
16448 		 *   5: if (r6 > X) goto ...
16449 		 *   6: ... memory operation using r7 ...
16450 		 *
16451 		 * First verification path is [1-6]:
16452 		 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7;
16453 		 * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark
16454 		 *   r7 <= X, because r6 and r7 share same id.
16455 		 * Next verification path is [1-4, 6].
16456 		 *
16457 		 * Instruction (6) would be reached in two states:
16458 		 *   I.  r6{.id=b}, r7{.id=b} via path 1-6;
16459 		 *   II. r6{.id=a}, r7{.id=b} via path 1-4, 6.
16460 		 *
16461 		 * Use check_ids() to distinguish these states.
16462 		 * ---
16463 		 * Also verify that new value satisfies old value range knowledge.
16464 		 */
16465 		return range_within(rold, rcur) &&
16466 		       tnum_in(rold->var_off, rcur->var_off) &&
16467 		       check_scalar_ids(rold->id, rcur->id, idmap);
16468 	case PTR_TO_MAP_KEY:
16469 	case PTR_TO_MAP_VALUE:
16470 	case PTR_TO_MEM:
16471 	case PTR_TO_BUF:
16472 	case PTR_TO_TP_BUFFER:
16473 		/* If the new min/max/var_off satisfy the old ones and
16474 		 * everything else matches, we are OK.
16475 		 */
16476 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
16477 		       range_within(rold, rcur) &&
16478 		       tnum_in(rold->var_off, rcur->var_off) &&
16479 		       check_ids(rold->id, rcur->id, idmap) &&
16480 		       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
16481 	case PTR_TO_PACKET_META:
16482 	case PTR_TO_PACKET:
16483 		/* We must have at least as much range as the old ptr
16484 		 * did, so that any accesses which were safe before are
16485 		 * still safe.  This is true even if old range < old off,
16486 		 * since someone could have accessed through (ptr - k), or
16487 		 * even done ptr -= k in a register, to get a safe access.
16488 		 */
16489 		if (rold->range > rcur->range)
16490 			return false;
16491 		/* If the offsets don't match, we can't trust our alignment;
16492 		 * nor can we be sure that we won't fall out of range.
16493 		 */
16494 		if (rold->off != rcur->off)
16495 			return false;
16496 		/* id relations must be preserved */
16497 		if (!check_ids(rold->id, rcur->id, idmap))
16498 			return false;
16499 		/* new val must satisfy old val knowledge */
16500 		return range_within(rold, rcur) &&
16501 		       tnum_in(rold->var_off, rcur->var_off);
16502 	case PTR_TO_STACK:
16503 		/* two stack pointers are equal only if they're pointing to
16504 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
16505 		 */
16506 		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
16507 	default:
16508 		return regs_exact(rold, rcur, idmap);
16509 	}
16510 }
16511 
16512 static struct bpf_reg_state unbound_reg;
16513 
16514 static __init int unbound_reg_init(void)
16515 {
16516 	__mark_reg_unknown_imprecise(&unbound_reg);
16517 	unbound_reg.live |= REG_LIVE_READ;
16518 	return 0;
16519 }
16520 late_initcall(unbound_reg_init);
16521 
16522 static bool is_stack_all_misc(struct bpf_verifier_env *env,
16523 			      struct bpf_stack_state *stack)
16524 {
16525 	u32 i;
16526 
16527 	for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) {
16528 		if ((stack->slot_type[i] == STACK_MISC) ||
16529 		    (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack))
16530 			continue;
16531 		return false;
16532 	}
16533 
16534 	return true;
16535 }
16536 
16537 static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
16538 						  struct bpf_stack_state *stack)
16539 {
16540 	if (is_spilled_scalar_reg64(stack))
16541 		return &stack->spilled_ptr;
16542 
16543 	if (is_stack_all_misc(env, stack))
16544 		return &unbound_reg;
16545 
16546 	return NULL;
16547 }
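
/* E.g. a 64-bit scalar spilled in a slot of the old state may be matched
 * against the same slot holding 8 STACK_MISC bytes in the current state:
 * a load from all-MISC slots yields an unbound scalar, which the fake
 * unbound_reg above stands in for.
 */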
16548 
16549 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
16550 		      struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
16551 {
16552 	int i, spi;
16553 
16554 	/* walk slots of the explored stack and ignore any additional
16555 	 * slots in the current stack, since explored(safe) state
16556 	 * didn't use them
16557 	 */
16558 	for (i = 0; i < old->allocated_stack; i++) {
16559 		struct bpf_reg_state *old_reg, *cur_reg;
16560 
16561 		spi = i / BPF_REG_SIZE;
16562 
16563 		if (exact &&
16564 		    old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
16565 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
16566 			return false;
16567 
16568 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) {
16569 			i += BPF_REG_SIZE - 1;
16570 			/* explored state didn't use this */
16571 			continue;
16572 		}
16573 
16574 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
16575 			continue;
16576 
16577 		if (env->allow_uninit_stack &&
16578 		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
16579 			continue;
16580 
16581 		/* explored stack has more populated slots than current stack
16582 		 * and these slots were used
16583 		 */
16584 		if (i >= cur->allocated_stack)
16585 			return false;
16586 
16587 		/* 64-bit scalar spill vs all slots MISC and vice versa.
16588 		 * A load from all-MISC slots produces an unbound scalar.
16589 		 * Construct a fake register for such a stack slot and call
16590 		 * regsafe() to ensure scalar ids are compared.
16591 		 */
16592 		old_reg = scalar_reg_for_stack(env, &old->stack[spi]);
16593 		cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]);
16594 		if (old_reg && cur_reg) {
16595 			if (!regsafe(env, old_reg, cur_reg, idmap, exact))
16596 				return false;
16597 			i += BPF_REG_SIZE - 1;
16598 			continue;
16599 		}
16600 
16601 		/* if the old state was safe with misc data in the stack,
16602 		 * it will be safe with a zero-initialized stack.
16603 		 * The opposite is not true.
16604 		 */
16605 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
16606 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
16607 			continue;
16608 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
16609 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
16610 			/* Ex: old explored (safe) state has STACK_SPILL in
16611 			 * this stack slot, but current has STACK_MISC ->
16612 			 * these verifier states are not equivalent;
16613 			 * return false to continue verification of this path
16614 			 */
16615 			return false;
16616 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
16617 			continue;
16618 		/* Both old and cur have the same slot_type */
16619 		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
16620 		case STACK_SPILL:
16621 			/* when explored and current stack slot are both storing
16622 			 * spilled registers, check that the stored pointer types
16623 			 * are the same as well.
16624 			 * Ex: explored safe path could have stored
16625 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
16626 			 * but current path has stored:
16627 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
16628 			 * such verifier states are not equivalent;
16629 			 * return false to continue verification of this path
16630 			 */
16631 			if (!regsafe(env, &old->stack[spi].spilled_ptr,
16632 				     &cur->stack[spi].spilled_ptr, idmap, exact))
16633 				return false;
16634 			break;
16635 		case STACK_DYNPTR:
16636 			old_reg = &old->stack[spi].spilled_ptr;
16637 			cur_reg = &cur->stack[spi].spilled_ptr;
16638 			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
16639 			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
16640 			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
16641 				return false;
16642 			break;
16643 		case STACK_ITER:
16644 			old_reg = &old->stack[spi].spilled_ptr;
16645 			cur_reg = &cur->stack[spi].spilled_ptr;
16646 			/* iter.depth is not compared between states as it
16647 			 * doesn't matter for correctness and would otherwise
16648 			 * prevent convergence; we maintain it only to prevent the
16649 			 * infinite loop check from triggering, see
16650 			 * iter_active_depths_differ()
16651 			 */
16652 			if (old_reg->iter.btf != cur_reg->iter.btf ||
16653 			    old_reg->iter.btf_id != cur_reg->iter.btf_id ||
16654 			    old_reg->iter.state != cur_reg->iter.state ||
16655 			    /* ignore {old_reg,cur_reg}->iter.depth, see above */
16656 			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
16657 				return false;
16658 			break;
16659 		case STACK_MISC:
16660 		case STACK_ZERO:
16661 		case STACK_INVALID:
16662 			continue;
16663 		/* Ensure that new unhandled slot types return false by default */
16664 		default:
16665 			return false;
16666 		}
16667 	}
16668 	return true;
16669 }
16670 
16671 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
16672 		    struct bpf_idmap *idmap)
16673 {
16674 	int i;
16675 
16676 	if (old->acquired_refs != cur->acquired_refs)
16677 		return false;
16678 
16679 	for (i = 0; i < old->acquired_refs; i++) {
16680 		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
16681 			return false;
16682 	}
16683 
16684 	return true;
16685 }
16686 
16687 /* compare two verifier states
16688  *
16689  * all states stored in the state_list are known to be valid, since
16690  * the verifier reached the 'bpf_exit' instruction through them
16691  *
16692  * this function is called when the verifier explores different branches of
16693  * execution popped from the state stack. If it sees an old state that has
16694  * a stricter register state and a stricter stack state, then this execution
16695  * branch doesn't need to be explored further, since verifier already
16696  * concluded that more strict state leads to valid finish.
16697  *
16698  * Therefore two states are equivalent if register state is more conservative
16699  * and explored stack state is more conservative than the current one.
16700  * Example:
16701  *       explored                   current
16702  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
16703  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
16704  *
16705  * In other words if current stack state (one being explored) has more
16706  * valid slots than old one that already passed validation, it means
16707  * the verifier can stop exploring and conclude that current state is valid too
16708  *
16709  * Similarly with registers. If explored state has register type as invalid
16710  * whereas register type in current state is meaningful, it means that
16711  * the current state will reach 'bpf_exit' instruction safely
16712  */
16713 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
16714 			      struct bpf_func_state *cur, bool exact)
16715 {
16716 	int i;
16717 
16718 	for (i = 0; i < MAX_BPF_REG; i++)
16719 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
16720 			     &env->idmap_scratch, exact))
16721 			return false;
16722 
16723 	if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
16724 		return false;
16725 
16726 	if (!refsafe(old, cur, &env->idmap_scratch))
16727 		return false;
16728 
16729 	return true;
16730 }
16731 
16732 static void reset_idmap_scratch(struct bpf_verifier_env *env)
16733 {
16734 	env->idmap_scratch.tmp_id_gen = env->id_gen;
16735 	memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map));
16736 }
16737 
16738 static bool states_equal(struct bpf_verifier_env *env,
16739 			 struct bpf_verifier_state *old,
16740 			 struct bpf_verifier_state *cur,
16741 			 bool exact)
16742 {
16743 	int i;
16744 
16745 	if (old->curframe != cur->curframe)
16746 		return false;
16747 
16748 	reset_idmap_scratch(env);
16749 
16750 	/* Verification state from speculative execution simulation
16751 	 * must never prune a non-speculative execution one.
16752 	 */
16753 	if (old->speculative && !cur->speculative)
16754 		return false;
16755 
16756 	if (old->active_lock.ptr != cur->active_lock.ptr)
16757 		return false;
16758 
16759 	/* Old and cur active_lock's have to be either both present
16760 	 * or both absent.
16761 	 */
16762 	if (!!old->active_lock.id != !!cur->active_lock.id)
16763 		return false;
16764 
16765 	if (old->active_lock.id &&
16766 	    !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch))
16767 		return false;
16768 
16769 	if (old->active_rcu_lock != cur->active_rcu_lock)
16770 		return false;
16771 
16772 	/* for states to be equal callsites have to be the same
16773 	 * and all frame states need to be equivalent
16774 	 */
16775 	for (i = 0; i <= old->curframe; i++) {
16776 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
16777 			return false;
16778 		if (!func_states_equal(env, old->frame[i], cur->frame[i], exact))
16779 			return false;
16780 	}
16781 	return true;
16782 }
16783 
16784 /* Return 0 if no propagation happened. Return a negative error code if an
16785  * error occurred. Otherwise, return the propagated bit.
16786  */
16787 static int propagate_liveness_reg(struct bpf_verifier_env *env,
16788 				  struct bpf_reg_state *reg,
16789 				  struct bpf_reg_state *parent_reg)
16790 {
16791 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
16792 	u8 flag = reg->live & REG_LIVE_READ;
16793 	int err;
16794 
16795 	/* By the time we get here, the read flags of PARENT_REG or REG could be
16796 	 * any of REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no
16797 	 * need for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
16798 	 */
16799 	if (parent_flag == REG_LIVE_READ64 ||
16800 	    /* Or if there is no read flag from REG. */
16801 	    !flag ||
16802 	    /* Or if the read flag from REG is the same as PARENT_REG. */
16803 	    parent_flag == flag)
16804 		return 0;
16805 
16806 	err = mark_reg_read(env, reg, parent_reg, flag);
16807 	if (err)
16808 		return err;
16809 
16810 	return flag;
16811 }
16812 
16813 /* A write screens off any subsequent reads; but write marks come from the
16814  * straight-line code between a state and its parent.  When we arrive at an
16815  * equivalent state (jump target or such) we didn't arrive by the straight-line
16816  * code, so read marks in the state must propagate to the parent regardless
16817  * of the state's write marks. That's what 'parent == state->parent' comparison
16818  * in mark_reg_read() is for.
16819  */
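/* A sketch of the screening effect (hypothetical program): suppose a
 * checkpoint exists at insn 5 and the straight-line code from its parent
 * wrote r6. If another path jumps directly to insn 5 and is found
 * equivalent, a read of r6 recorded in the checkpoint must still reach
 * the checkpoint's parent: the jumping path never executed the r6 write,
 * so that write mark must not screen the read off.
 */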
16820 static int propagate_liveness(struct bpf_verifier_env *env,
16821 			      const struct bpf_verifier_state *vstate,
16822 			      struct bpf_verifier_state *vparent)
16823 {
16824 	struct bpf_reg_state *state_reg, *parent_reg;
16825 	struct bpf_func_state *state, *parent;
16826 	int i, frame, err = 0;
16827 
16828 	if (vparent->curframe != vstate->curframe) {
16829 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
16830 		     vparent->curframe, vstate->curframe);
16831 		return -EFAULT;
16832 	}
16833 	/* Propagate read liveness of registers... */
16834 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
16835 	for (frame = 0; frame <= vstate->curframe; frame++) {
16836 		parent = vparent->frame[frame];
16837 		state = vstate->frame[frame];
16838 		parent_reg = parent->regs;
16839 		state_reg = state->regs;
16840 		/* We don't need to worry about FP liveness, it's read-only */
16841 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
16842 			err = propagate_liveness_reg(env, &state_reg[i],
16843 						     &parent_reg[i]);
16844 			if (err < 0)
16845 				return err;
16846 			if (err == REG_LIVE_READ64)
16847 				mark_insn_zext(env, &parent_reg[i]);
16848 		}
16849 
16850 		/* Propagate stack slots. */
16851 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
16852 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
16853 			parent_reg = &parent->stack[i].spilled_ptr;
16854 			state_reg = &state->stack[i].spilled_ptr;
16855 			err = propagate_liveness_reg(env, state_reg,
16856 						     parent_reg);
16857 			if (err < 0)
16858 				return err;
16859 		}
16860 	}
16861 	return 0;
16862 }
16863 
16864 /* find precise scalars in the previous equivalent state and
16865  * propagate them into the current state
16866  */
16867 static int propagate_precision(struct bpf_verifier_env *env,
16868 			       const struct bpf_verifier_state *old)
16869 {
16870 	struct bpf_reg_state *state_reg;
16871 	struct bpf_func_state *state;
16872 	int i, err = 0, fr;
16873 	bool first;
16874 
16875 	for (fr = old->curframe; fr >= 0; fr--) {
16876 		state = old->frame[fr];
16877 		state_reg = state->regs;
16878 		first = true;
16879 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
16880 			if (state_reg->type != SCALAR_VALUE ||
16881 			    !state_reg->precise ||
16882 			    !(state_reg->live & REG_LIVE_READ))
16883 				continue;
16884 			if (env->log.level & BPF_LOG_LEVEL2) {
16885 				if (first)
16886 					verbose(env, "frame %d: propagating r%d", fr, i);
16887 				else
16888 					verbose(env, ",r%d", i);
16889 			}
16890 			bt_set_frame_reg(&env->bt, fr, i);
16891 			first = false;
16892 		}
16893 
16894 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
16895 			if (!is_spilled_reg(&state->stack[i]))
16896 				continue;
16897 			state_reg = &state->stack[i].spilled_ptr;
16898 			if (state_reg->type != SCALAR_VALUE ||
16899 			    !state_reg->precise ||
16900 			    !(state_reg->live & REG_LIVE_READ))
16901 				continue;
16902 			if (env->log.level & BPF_LOG_LEVEL2) {
16903 				if (first)
16904 					verbose(env, "frame %d: propagating fp%d",
16905 						fr, (-i - 1) * BPF_REG_SIZE);
16906 				else
16907 					verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE);
16908 			}
16909 			bt_set_frame_slot(&env->bt, fr, i);
16910 			first = false;
16911 		}
16912 		if (!first)
16913 			verbose(env, "\n");
16914 	}
16915 
16916 	err = mark_chain_precision_batch(env);
16917 	if (err < 0)
16918 		return err;
16919 
16920 	return 0;
16921 }
16922 
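/* Conservative pre-filter for the infinite loop check: byte-compare all
 * registers of the current frame up to the liveness/parentage bookkeeping
 * fields. A match only suggests looping; the more expensive states_equal()
 * check in is_state_visited() makes the final call.
 */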
16923 static bool states_maybe_looping(struct bpf_verifier_state *old,
16924 				 struct bpf_verifier_state *cur)
16925 {
16926 	struct bpf_func_state *fold, *fcur;
16927 	int i, fr = cur->curframe;
16928 
16929 	if (old->curframe != fr)
16930 		return false;
16931 
16932 	fold = old->frame[fr];
16933 	fcur = cur->frame[fr];
16934 	for (i = 0; i < MAX_BPF_REG; i++)
16935 		if (memcmp(&fold->regs[i], &fcur->regs[i],
16936 			   offsetof(struct bpf_reg_state, parent)))
16937 			return false;
16938 	return true;
16939 }
16940 
16941 static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
16942 {
16943 	return env->insn_aux_data[insn_idx].is_iter_next;
16944 }
16945 
16946 /* is_state_visited() handles iter_next() (see process_iter_next_call() for
16947  * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
16948  * states to match, which otherwise would look like an infinite loop. So while
16949  * iter_next() calls are taken care of, we still need to be careful and
16950  * prevent an erroneous and overly eager declaration of "infinite loop", when
16951  * iterators are involved.
16952  *
16953  * Here's a situation in pseudo-BPF assembly form:
16954  *
16955  *   0: again:                          ; set up iter_next() call args
16956  *   1:   r1 = &it                      ; <CHECKPOINT HERE>
16957  *   2:   call bpf_iter_num_next        ; this is iter_next() call
16958  *   3:   if r0 == 0 goto done
16959  *   4:   ... something useful here ...
16960  *   5:   goto again                    ; another iteration
16961  *   6: done:
16962  *   7:   r1 = &it
16963  *   8:   call bpf_iter_num_destroy     ; clean up iter state
16964  *   9:   exit
16965  *
16966  * This is a typical loop. Let's assume that we have a prune point at 1:,
16967  * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
16968  * again`, assuming other heuristics don't get in the way).
16969  *
16970  * When we first come to 1:, let's say we have some state X. We proceed
16971  * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
16972  * Now we come back to validate that forked ACTIVE state. We proceed through
16973  * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
16974  * are converging. But the problem is that we don't know that yet, as this
16975  * convergence has to happen at iter_next() call site only. So if nothing is
16976  * done, at 1: verifier will use bounded loop logic and declare infinite
16977  * looping (and would be *technically* correct, if not for iterator's
16978  * "eventual sticky NULL" contract, see process_iter_next_call()). But we
16979  * don't want that. So what we do in process_iter_next_call(), when we go on
16980  * another ACTIVE iteration, is bump slot->iter.depth to mark that it's
16981  * a different iteration. So when we suspect an infinite loop, we additionally
16982  * check if any of the *ACTIVE* iterator states' depths differ. If yes, we
16983  * pretend we are not looping and wait for next iter_next() call.
16984  *
16985  * This only applies to ACTIVE state. In DRAINED state we don't expect to
16986  * loop, because that would actually mean infinite loop, as DRAINED state is
16987  * "sticky", and so we'll keep returning into the same instruction with the
16988  * same state (at least in one of possible code paths).
16989  *
16990  * This approach allows us to keep the infinite loop heuristic even in the
16991  * face of an active iterator. E.g., the C snippet below is and will be
16992  * detected as infinitely looping:
16993  *
16994  *   struct bpf_iter_num it;
16995  *   int *p, x;
16996  *
16997  *   bpf_iter_num_new(&it, 0, 10);
16998  *   while ((p = bpf_iter_num_next(&it))) {
16999  *       x = *p;
17000  *       while (x--) {} // <<-- infinite loop here
17001  *   }
17002  *
17003  */
17004 static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
17005 {
17006 	struct bpf_reg_state *slot, *cur_slot;
17007 	struct bpf_func_state *state;
17008 	int i, fr;
17009 
17010 	for (fr = old->curframe; fr >= 0; fr--) {
17011 		state = old->frame[fr];
17012 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
17013 			if (state->stack[i].slot_type[0] != STACK_ITER)
17014 				continue;
17015 
17016 			slot = &state->stack[i].spilled_ptr;
17017 			if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
17018 				continue;
17019 
17020 			cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
17021 			if (cur_slot->iter.depth != slot->iter.depth)
17022 				return true;
17023 		}
17024 	}
17025 	return false;
17026 }
17027 
17028 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
17029 {
17030 	struct bpf_verifier_state_list *new_sl;
17031 	struct bpf_verifier_state_list *sl, **pprev;
17032 	struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry;
17033 	int i, j, n, err, states_cnt = 0;
17034 	bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx);
17035 	bool add_new_state = force_new_state;
17036 	bool force_exact;
17037 
17038 	/* bpf progs typically have a pruning point every 4 instructions
17039 	 * http://vger.kernel.org/bpfconf2019.html#session-1
17040 	 * Do not add new state for future pruning if the verifier hasn't seen
17041 	 * at least 2 jumps and at least 8 instructions.
17042 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
17043 	 * In tests that amounts to up to a 50% reduction in total verifier
17044 	 * memory consumption and a 20% verifier time speedup.
17045 	 */
17046 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
17047 	    env->insn_processed - env->prev_insn_processed >= 8)
17048 		add_new_state = true;
17049 
17050 	pprev = explored_state(env, insn_idx);
17051 	sl = *pprev;
17052 
17053 	clean_live_states(env, insn_idx, cur);
17054 
17055 	while (sl) {
17056 		states_cnt++;
17057 		if (sl->state.insn_idx != insn_idx)
17058 			goto next;
17059 
17060 		if (sl->state.branches) {
17061 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
17062 
17063 			if (frame->in_async_callback_fn &&
17064 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
17065 				/* Different async_entry_cnt means that the verifier is
17066 				 * processing another entry into async callback.
17067 				 * Seeing the same state is not an indication of infinite
17068 				 * loop or infinite recursion.
17069 				 * But finding the same state doesn't mean that it's safe
17070 				 * to stop processing the current state. The previous state
17071 				 * hasn't yet reached bpf_exit, since state.branches > 0.
17072 				 * Checking in_async_callback_fn alone is not enough either,
17073 				 * since the verifier still needs to catch infinite loops
17074 				 * inside async callbacks.
17075 				 */
17076 				goto skip_inf_loop_check;
17077 			}
17078 			/* BPF open-coded iterators loop detection is special.
17079 			 * states_maybe_looping() logic is too simplistic in detecting
17080 			 * states that *might* be equivalent, because it doesn't know
17081 			 * about ID remapping, so don't even perform it.
17082 			 * See process_iter_next_call() and iter_active_depths_differ()
17083 			 * for overview of the logic. When current and one of parent
17084 			 * states are detected as equivalent, it's a good thing: we prove
17085 			 * convergence and can stop simulating further iterations.
17086 			 * It's safe to assume that the iterator loop will finish, taking into
17087 			 * account iter_next() contract of eventually returning
17088 			 * sticky NULL result.
17089 			 *
17090 			 * Note that states have to be compared exactly in this case because
17091 			 * read and precision marks might not be finalized inside the loop.
17092 			 * E.g. as in the program below:
17093 			 *
17094 			 *     1. r7 = -16
17095 			 *     2. r6 = bpf_get_prandom_u32()
17096 			 *     3. while (bpf_iter_num_next(&fp[-8])) {
17097 			 *     4.   if (r6 != 42) {
17098 			 *     5.     r7 = -32
17099 			 *     6.     r6 = bpf_get_prandom_u32()
17100 			 *     7.     continue
17101 			 *     8.   }
17102 			 *     9.   r0 = r10
17103 			 *    10.   r0 += r7
17104 			 *    11.   r8 = *(u64 *)(r0 + 0)
17105 			 *    12.   r6 = bpf_get_prandom_u32()
17106 			 *    13. }
17107 			 *
17108 			 * Here verifier would first visit path 1-3, create a checkpoint at 3
17109 			 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does
17110 			 * not have read or precision marks for r7 yet, thus an inexact states
17111 			 * comparison would discard the current state with r7=-32
17112 			 * => unsafe memory access at 11 would not be caught.
17113 			 */
17114 			if (is_iter_next_insn(env, insn_idx)) {
17115 				if (states_equal(env, &sl->state, cur, true)) {
17116 					struct bpf_func_state *cur_frame;
17117 					struct bpf_reg_state *iter_state, *iter_reg;
17118 					int spi;
17119 
17120 					cur_frame = cur->frame[cur->curframe];
17121 					/* btf_check_iter_kfuncs() enforces that
17122 					 * iter state pointer is always the first arg
17123 					 */
17124 					iter_reg = &cur_frame->regs[BPF_REG_1];
17125 					/* current state is valid due to states_equal(),
17126 					 * so we can assume valid iter and reg state,
17127 					 * no need for extra (re-)validations
17128 					 */
17129 					spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
17130 					iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
17131 					if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
17132 						update_loop_entry(cur, &sl->state);
17133 						goto hit;
17134 					}
17135 				}
17136 				goto skip_inf_loop_check;
17137 			}
17138 			if (calls_callback(env, insn_idx)) {
17139 				if (states_equal(env, &sl->state, cur, true))
17140 					goto hit;
17141 				goto skip_inf_loop_check;
17142 			}
17143 			/* attempt to detect infinite loop to avoid unnecessary doomed work */
17144 			if (states_maybe_looping(&sl->state, cur) &&
17145 			    states_equal(env, &sl->state, cur, true) &&
17146 			    !iter_active_depths_differ(&sl->state, cur) &&
17147 			    sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
17148 				verbose_linfo(env, insn_idx, "; ");
17149 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
17150 				verbose(env, "cur state:");
17151 				print_verifier_state(env, cur->frame[cur->curframe], true);
17152 				verbose(env, "old state:");
17153 				print_verifier_state(env, sl->state.frame[cur->curframe], true);
17154 				return -EINVAL;
17155 			}
17156 			/* if the verifier is processing a loop, avoid adding new state
17157 			 * too often, since different loop iterations have distinct
17158 			 * states and may not help future pruning.
17159 			 * This threshold shouldn't be too low, to make sure that
17160 			 * a loop with a large bound will be rejected quickly.
17161 			 * The most abusive loop will be:
17162 			 * r1 += 1
17163 			 * if r1 < 1000000 goto pc-2
17164 			 * 1M insn_processed limit / 100 == 10k peak states.
17165 			 * This threshold shouldn't be too high either, since states
17166 			 * at the end of the loop are likely to be useful in pruning.
17167 			 */
17168 skip_inf_loop_check:
17169 			if (!force_new_state &&
17170 			    env->jmps_processed - env->prev_jmps_processed < 20 &&
17171 			    env->insn_processed - env->prev_insn_processed < 100)
17172 				add_new_state = false;
17173 			goto miss;
17174 		}
17175 		/* If sl->state is a part of a loop and this loop's entry is a part of
17176 		 * current verification path then states have to be compared exactly.
17177 		 * 'force_exact' is needed to catch the following case:
17178 		 *
17179 		 *                initial     Here state 'succ' was processed first,
17180 		 *                  |         it was eventually tracked to produce a
17181 		 *                  V         state identical to 'hdr'.
17182 		 *     .---------> hdr        All branches from 'succ' had been explored
17183 		 *     |            |         and thus 'succ' has its .branches == 0.
17184 		 *     |            V
17185 		 *     |    .------...        Suppose states 'cur' and 'succ' correspond
17186 		 *     |    |       |         to the same instruction + callsites.
17187 		 *     |    V       V         In such case it is necessary to check
17188 		 *     |   ...     ...        if 'succ' and 'cur' are states_equal().
17189 		 *     |    |       |         If 'succ' and 'cur' are a part of the
17190 		 *     |    V       V         same loop exact flag has to be set.
17191 		 *     |   succ <- cur        To check if that is the case, verify
17192 		 *     |    |                 if loop entry of 'succ' is in current
17193 		 *     |    V                 DFS path.
17194 		 *     |   ...
17195 		 *     |    |
17196 		 *     '----'
17197 		 *
17198 		 * Additional details are in the comment before get_loop_entry().
17199 		 */
17200 		loop_entry = get_loop_entry(&sl->state);
17201 		force_exact = loop_entry && loop_entry->branches > 0;
17202 		if (states_equal(env, &sl->state, cur, force_exact)) {
17203 			if (force_exact)
17204 				update_loop_entry(cur, loop_entry);
17205 hit:
17206 			sl->hit_cnt++;
17207 			/* reached equivalent register/stack state,
17208 			 * prune the search.
17209 			 * Registers read by the continuation are read by us.
17210 			 * If we have any write marks in env->cur_state, they
17211 			 * will prevent corresponding reads in the continuation
17212 			 * from reaching our parent (an explored_state).  Our
17213 			 * own state will get the read marks recorded, but
17214 			 * they'll be immediately forgotten as we're pruning
17215 			 * this state and will pop a new one.
17216 			 */
17217 			err = propagate_liveness(env, &sl->state, cur);
17218 
17219 			/* if the previous state reached the exit with precision and the
17220 			 * current state is equivalent to it (except for precision marks),
17221 			 * the precision needs to be propagated back into
17222 			 * the current state.
17223 			 */
17224 			if (is_jmp_point(env, env->insn_idx))
17225 				err = err ? : push_jmp_history(env, cur, 0);
17226 			err = err ? : propagate_precision(env, &sl->state);
17227 			if (err)
17228 				return err;
17229 			return 1;
17230 		}
17231 miss:
17232 		/* when a new state is not going to be added, do not increase the miss count.
17233 		 * Otherwise several loop iterations will remove the state
17234 		 * recorded earlier. The goal of these heuristics is to have
17235 		 * states from some iterations of the loop (some in the beginning
17236 		 * and some at the end) to help pruning.
17237 		 */
17238 		if (add_new_state)
17239 			sl->miss_cnt++;
17240 		/* heuristic to determine whether this state is beneficial
17241 		 * to keep checking from state equivalence point of view.
17242 		 * Higher numbers increase max_states_per_insn and verification time,
17243 		 * but do not meaningfully decrease insn_processed.
17244 		 * 'n' controls how many times a state may miss before eviction.
17245 		 * Use bigger 'n' for checkpoints because evicting checkpoint states
17246 		 * too early would hinder iterator convergence.
17247 		 */
17248 		n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3;
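		/* e.g. with n == 3 a state with zero hits is evicted after
		 * its 4th miss, and each hit earns the state three more
		 * allowed misses.
		 */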
17249 		if (sl->miss_cnt > sl->hit_cnt * n + n) {
17250 			/* the state is unlikely to be useful. Remove it to
17251 			 * speed up verification
17252 			 */
17253 			*pprev = sl->next;
17254 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE &&
17255 			    !sl->state.used_as_loop_entry) {
17256 				u32 br = sl->state.branches;
17257 
17258 				WARN_ONCE(br,
17259 					  "BUG live_done but branches_to_explore %d\n",
17260 					  br);
17261 				free_verifier_state(&sl->state, false);
17262 				kfree(sl);
17263 				env->peak_states--;
17264 			} else {
17265 				/* cannot free this state, since parentage chain may
17266 				 * walk it later. Add it to the free_list instead to
17267 				 * be freed at the end of verification
17268 				 */
17269 				sl->next = env->free_list;
17270 				env->free_list = sl;
17271 			}
17272 			sl = *pprev;
17273 			continue;
17274 		}
17275 next:
17276 		pprev = &sl->next;
17277 		sl = *pprev;
17278 	}
17279 
17280 	if (env->max_states_per_insn < states_cnt)
17281 		env->max_states_per_insn = states_cnt;
17282 
17283 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
17284 		return 0;
17285 
17286 	if (!add_new_state)
17287 		return 0;
17288 
17289 	/* There were no equivalent states, remember the current one.
17290 	 * Technically the current state is not proven to be safe yet,
17291 	 * but it will either reach the outermost bpf_exit (which means it's safe)
17292 	 * or it will be rejected. When there are no loops the verifier won't be
17293 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
17294 	 * again on the way to bpf_exit.
17295 	 * When looping the sl->state.branches will be > 0 and this state
17296 	 * will not be considered for equivalence until branches == 0.
17297 	 */
17298 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
17299 	if (!new_sl)
17300 		return -ENOMEM;
17301 	env->total_states++;
17302 	env->peak_states++;
17303 	env->prev_jmps_processed = env->jmps_processed;
17304 	env->prev_insn_processed = env->insn_processed;
17305 
17306 	/* forget precise markings we inherited, see __mark_chain_precision */
17307 	if (env->bpf_capable)
17308 		mark_all_scalars_imprecise(env, cur);
17309 
17310 	/* add new state to the head of linked list */
17311 	new = &new_sl->state;
17312 	err = copy_verifier_state(new, cur);
17313 	if (err) {
17314 		free_verifier_state(new, false);
17315 		kfree(new_sl);
17316 		return err;
17317 	}
17318 	new->insn_idx = insn_idx;
17319 	WARN_ONCE(new->branches != 1,
17320 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
17321 
17322 	cur->parent = new;
17323 	cur->first_insn_idx = insn_idx;
17324 	cur->dfs_depth = new->dfs_depth + 1;
17325 	clear_jmp_history(cur);
17326 	new_sl->next = *explored_state(env, insn_idx);
17327 	*explored_state(env, insn_idx) = new_sl;
17328 	/* connect new state to parentage chain. Current frame needs all
17329 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
17330 	 * to the stack implicitly by JITs) so in callers' frames connect just
17331 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
17332 	 * the state of the call instruction (with WRITTEN set), and r0 comes
17333 	 * from callee with its full parentage chain, anyway.
17334 	 */
17335 	/* clear write marks in current state: the writes we did are not writes
17336 	 * our child did, so they don't screen off its reads from us.
17337 	 * (There are no read marks in current state, because reads always mark
17338 	 * their parent and current state never has children yet.  Only
17339 	 * explored_states can get read marks.)
17340 	 */
17341 	for (j = 0; j <= cur->curframe; j++) {
17342 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
17343 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
17344 		for (i = 0; i < BPF_REG_FP; i++)
17345 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
17346 	}
17347 
17348 	/* all stack frames are accessible from callee, clear them all */
17349 	for (j = 0; j <= cur->curframe; j++) {
17350 		struct bpf_func_state *frame = cur->frame[j];
17351 		struct bpf_func_state *newframe = new->frame[j];
17352 
17353 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
17354 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
17355 			frame->stack[i].spilled_ptr.parent =
17356 						&newframe->stack[i].spilled_ptr;
17357 		}
17358 	}
17359 	return 0;
17360 }
17361 
17362 /* Return true if it's OK to have the same insn return a different type. */
17363 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
17364 {
17365 	switch (base_type(type)) {
17366 	case PTR_TO_CTX:
17367 	case PTR_TO_SOCKET:
17368 	case PTR_TO_SOCK_COMMON:
17369 	case PTR_TO_TCP_SOCK:
17370 	case PTR_TO_XDP_SOCK:
17371 	case PTR_TO_BTF_ID:
17372 		return false;
17373 	default:
17374 		return true;
17375 	}
17376 }
17377 
17378 /* If an instruction was previously used with particular pointer types, then we
17379  * need to be careful to avoid cases such as the one below, where it may be ok
17380  * for one branch to access the pointer, but not ok for the other branch:
17381  *
17382  * R1 = sock_ptr
17383  * goto X;
17384  * ...
17385  * R1 = some_other_valid_ptr;
17386  * goto X;
17387  * ...
17388  * R2 = *(u32 *)(R1 + 0);
17389  */
17390 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
17391 {
17392 	return src != prev && (!reg_type_mismatch_ok(src) ||
17393 			       !reg_type_mismatch_ok(prev));
17394 }
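
/* Illustrative sketch (not part of the original source): a program where the
 * same load insn is reached with different pointer types. Assuming r2 was
 * initialized earlier, something like
 *
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 2),       // if r2 == 0 goto <load>
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),        // r1 = fp (PTR_TO_STACK)
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
 *    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), // r1 is PTR_TO_STACK on one
 *                                                 // path, PTR_TO_CTX on the other
 *    BPF_EXIT_INSN(),
 *
 * reaches the BPF_LDX with PTR_TO_STACK on the fall-through path and
 * PTR_TO_CTX on the jump path; save_aux_ptr_type() below records the first
 * type it sees and rejects the second path with
 * "same insn cannot be used with different pointers".
 */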
17395 
17396 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
17397 			     bool allow_trust_mismatch)
17398 {
17399 	enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type;
17400 
17401 	if (*prev_type == NOT_INIT) {
17402 		/* Saw a valid insn
17403 		 * dst_reg = *(u32 *)(src_reg + off)
17404 		 * save type to validate intersecting paths
17405 		 */
17406 		*prev_type = type;
17407 	} else if (reg_type_mismatch(type, *prev_type)) {
17408 		/* An abusive program is trying to use the same insn
17409 		 * dst_reg = *(u32*) (src_reg + off)
17410 		 * with different pointer types:
17411 		 * src_reg == ctx in one branch and
17412 		 * src_reg == stack|map in some other branch.
17413 		 * Reject it.
17414 		 */
17415 		if (allow_trust_mismatch &&
17416 		    base_type(type) == PTR_TO_BTF_ID &&
17417 		    base_type(*prev_type) == PTR_TO_BTF_ID) {
17418 			/*
17419 			 * Have to support a use case when one path through
17420 			 * the program yields a TRUSTED pointer while another
17421 			 * is UNTRUSTED. Fall back to UNTRUSTED to generate
17422 			 * BPF_PROBE_MEM/BPF_PROBE_MEMSX.
17423 			 */
17424 			*prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
17425 		} else {
17426 			verbose(env, "same insn cannot be used with different pointers\n");
17427 			return -EINVAL;
17428 		}
17429 	}
17430 
17431 	return 0;
17432 }
17433 
17434 static int do_check(struct bpf_verifier_env *env)
17435 {
17436 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
17437 	struct bpf_verifier_state *state = env->cur_state;
17438 	struct bpf_insn *insns = env->prog->insnsi;
17439 	struct bpf_reg_state *regs;
17440 	int insn_cnt = env->prog->len;
17441 	bool do_print_state = false;
17442 	int prev_insn_idx = -1;
17443 
17444 	for (;;) {
17445 		bool exception_exit = false;
17446 		struct bpf_insn *insn;
17447 		u8 class;
17448 		int err;
17449 
17450 		/* reset current history entry on each new instruction */
17451 		env->cur_hist_ent = NULL;
17452 
17453 		env->prev_insn_idx = prev_insn_idx;
17454 		if (env->insn_idx >= insn_cnt) {
17455 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
17456 				env->insn_idx, insn_cnt);
17457 			return -EFAULT;
17458 		}
17459 
17460 		insn = &insns[env->insn_idx];
17461 		class = BPF_CLASS(insn->code);
17462 
17463 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
17464 			verbose(env,
17465 				"BPF program is too large. Processed %d insn\n",
17466 				env->insn_processed);
17467 			return -E2BIG;
17468 		}
17469 
17470 		state->last_insn_idx = env->prev_insn_idx;
17471 
17472 		if (is_prune_point(env, env->insn_idx)) {
17473 			err = is_state_visited(env, env->insn_idx);
17474 			if (err < 0)
17475 				return err;
17476 			if (err == 1) {
17477 				/* found equivalent state, can prune the search */
17478 				if (env->log.level & BPF_LOG_LEVEL) {
17479 					if (do_print_state)
17480 						verbose(env, "\nfrom %d to %d%s: safe\n",
17481 							env->prev_insn_idx, env->insn_idx,
17482 							env->cur_state->speculative ?
17483 							" (speculative execution)" : "");
17484 					else
17485 						verbose(env, "%d: safe\n", env->insn_idx);
17486 				}
17487 				goto process_bpf_exit;
17488 			}
17489 		}
17490 
17491 		if (is_jmp_point(env, env->insn_idx)) {
17492 			err = push_jmp_history(env, state, 0);
17493 			if (err)
17494 				return err;
17495 		}
17496 
17497 		if (signal_pending(current))
17498 			return -EAGAIN;
17499 
17500 		if (need_resched())
17501 			cond_resched();
17502 
17503 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
17504 			verbose(env, "\nfrom %d to %d%s:",
17505 				env->prev_insn_idx, env->insn_idx,
17506 				env->cur_state->speculative ?
17507 				" (speculative execution)" : "");
17508 			print_verifier_state(env, state->frame[state->curframe], true);
17509 			do_print_state = false;
17510 		}
17511 
17512 		if (env->log.level & BPF_LOG_LEVEL) {
17513 			const struct bpf_insn_cbs cbs = {
17514 				.cb_call	= disasm_kfunc_name,
17515 				.cb_print	= verbose,
17516 				.private_data	= env,
17517 			};
17518 
17519 			if (verifier_state_scratched(env))
17520 				print_insn_state(env, state->frame[state->curframe]);
17521 
17522 			verbose_linfo(env, env->insn_idx, "; ");
17523 			env->prev_log_pos = env->log.end_pos;
17524 			verbose(env, "%d: ", env->insn_idx);
17525 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
17526 			env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
17527 			env->prev_log_pos = env->log.end_pos;
17528 		}
17529 
17530 		if (bpf_prog_is_offloaded(env->prog->aux)) {
17531 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
17532 							   env->prev_insn_idx);
17533 			if (err)
17534 				return err;
17535 		}
17536 
17537 		regs = cur_regs(env);
17538 		sanitize_mark_insn_seen(env);
17539 		prev_insn_idx = env->insn_idx;
17540 
17541 		if (class == BPF_ALU || class == BPF_ALU64) {
17542 			err = check_alu_op(env, insn);
17543 			if (err)
17544 				return err;
17545 
17546 		} else if (class == BPF_LDX) {
17547 			enum bpf_reg_type src_reg_type;
17548 
17549 			/* the check for reserved fields was already done */
17550 
17551 			/* check src operand */
17552 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
17553 			if (err)
17554 				return err;
17555 
17556 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17557 			if (err)
17558 				return err;
17559 
17560 			src_reg_type = regs[insn->src_reg].type;
17561 
17562 			/* check that memory (src_reg + off) is readable;
17563 			 * the state of dst_reg will be updated by this func
17564 			 */
17565 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
17566 					       insn->off, BPF_SIZE(insn->code),
17567 					       BPF_READ, insn->dst_reg, false,
17568 					       BPF_MODE(insn->code) == BPF_MEMSX);
17569 			err = err ?: save_aux_ptr_type(env, src_reg_type, true);
17570 			err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx");
17571 			if (err)
17572 				return err;
17573 		} else if (class == BPF_STX) {
17574 			enum bpf_reg_type dst_reg_type;
17575 
17576 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
17577 				err = check_atomic(env, env->insn_idx, insn);
17578 				if (err)
17579 					return err;
17580 				env->insn_idx++;
17581 				continue;
17582 			}
17583 
17584 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
17585 				verbose(env, "BPF_STX uses reserved fields\n");
17586 				return -EINVAL;
17587 			}
17588 
17589 			/* check src1 operand */
17590 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
17591 			if (err)
17592 				return err;
17593 			/* check src2 operand */
17594 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17595 			if (err)
17596 				return err;
17597 
17598 			dst_reg_type = regs[insn->dst_reg].type;
17599 
17600 			/* check that memory (dst_reg + off) is writeable */
17601 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
17602 					       insn->off, BPF_SIZE(insn->code),
17603 					       BPF_WRITE, insn->src_reg, false, false);
17604 			if (err)
17605 				return err;
17606 
17607 			err = save_aux_ptr_type(env, dst_reg_type, false);
17608 			if (err)
17609 				return err;
17610 		} else if (class == BPF_ST) {
17611 			enum bpf_reg_type dst_reg_type;
17612 
17613 			if (BPF_MODE(insn->code) != BPF_MEM ||
17614 			    insn->src_reg != BPF_REG_0) {
17615 				verbose(env, "BPF_ST uses reserved fields\n");
17616 				return -EINVAL;
17617 			}
17618 			/* check src operand */
17619 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17620 			if (err)
17621 				return err;
17622 
17623 			dst_reg_type = regs[insn->dst_reg].type;
17624 
17625 			/* check that memory (dst_reg + off) is writeable */
17626 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
17627 					       insn->off, BPF_SIZE(insn->code),
17628 					       BPF_WRITE, -1, false, false);
17629 			if (err)
17630 				return err;
17631 
17632 			err = save_aux_ptr_type(env, dst_reg_type, false);
17633 			if (err)
17634 				return err;
17635 		} else if (class == BPF_JMP || class == BPF_JMP32) {
17636 			u8 opcode = BPF_OP(insn->code);
17637 
17638 			env->jmps_processed++;
17639 			if (opcode == BPF_CALL) {
17640 				if (BPF_SRC(insn->code) != BPF_K ||
17641 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
17642 				     && insn->off != 0) ||
17643 				    (insn->src_reg != BPF_REG_0 &&
17644 				     insn->src_reg != BPF_PSEUDO_CALL &&
17645 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
17646 				    insn->dst_reg != BPF_REG_0 ||
17647 				    class == BPF_JMP32) {
17648 					verbose(env, "BPF_CALL uses reserved fields\n");
17649 					return -EINVAL;
17650 				}
17651 
17652 				if (env->cur_state->active_lock.ptr) {
17653 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
17654 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
17655 					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
17656 						verbose(env, "function calls are not allowed while holding a lock\n");
17657 						return -EINVAL;
17658 					}
17659 				}
17660 				if (insn->src_reg == BPF_PSEUDO_CALL) {
17661 					err = check_func_call(env, insn, &env->insn_idx);
17662 				} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
17663 					err = check_kfunc_call(env, insn, &env->insn_idx);
17664 					if (!err && is_bpf_throw_kfunc(insn)) {
17665 						exception_exit = true;
17666 						goto process_bpf_exit_full;
17667 					}
17668 				} else {
17669 					err = check_helper_call(env, insn, &env->insn_idx);
17670 				}
17671 				if (err)
17672 					return err;
17673 
17674 				mark_reg_scratched(env, BPF_REG_0);
17675 			} else if (opcode == BPF_JA) {
17676 				if (BPF_SRC(insn->code) != BPF_K ||
17677 				    insn->src_reg != BPF_REG_0 ||
17678 				    insn->dst_reg != BPF_REG_0 ||
17679 				    (class == BPF_JMP && insn->imm != 0) ||
17680 				    (class == BPF_JMP32 && insn->off != 0)) {
17681 					verbose(env, "BPF_JA uses reserved fields\n");
17682 					return -EINVAL;
17683 				}
17684 
17685 				if (class == BPF_JMP)
17686 					env->insn_idx += insn->off + 1;
17687 				else
17688 					env->insn_idx += insn->imm + 1;
17689 				continue;
17690 
17691 			} else if (opcode == BPF_EXIT) {
17692 				if (BPF_SRC(insn->code) != BPF_K ||
17693 				    insn->imm != 0 ||
17694 				    insn->src_reg != BPF_REG_0 ||
17695 				    insn->dst_reg != BPF_REG_0 ||
17696 				    class == BPF_JMP32) {
17697 					verbose(env, "BPF_EXIT uses reserved fields\n");
17698 					return -EINVAL;
17699 				}
17700 process_bpf_exit_full:
17701 				if (env->cur_state->active_lock.ptr && !env->cur_state->curframe) {
17702 					verbose(env, "bpf_spin_unlock is missing\n");
17703 					return -EINVAL;
17704 				}
17705 
17706 				if (env->cur_state->active_rcu_lock && !env->cur_state->curframe) {
17707 					verbose(env, "bpf_rcu_read_unlock is missing\n");
17708 					return -EINVAL;
17709 				}
17710 
17711 				/* We must do check_reference_leak here before
17712 				 * prepare_func_exit to handle the case when
17713 				 * state->curframe > 0: it may be a callback
17714 				 * function, whose reference_state must match
17715 				 * the caller's reference state when it exits.
17716 				 */
17717 				err = check_reference_leak(env, exception_exit);
17718 				if (err)
17719 					return err;
17720 
17721 				/* The side effect of the skipped
17722 				 * prepare_func_exit is that it frees
17723 				 * bpf_func_state. Typically, process_bpf_exit
17724 				 * will only be hit with the outermost exit.
17725 				 * copy_verifier_state in pop_stack will handle
17726 				 * freeing of any extra bpf_func_state left over
17727 				 * from not processing all nested function
17728 				 * exits. We also skip return code checks as
17729 				 * they are not needed for exceptional exits.
17730 				 */
17731 				if (exception_exit)
17732 					goto process_bpf_exit;
17733 
17734 				if (state->curframe) {
17735 					/* exit from nested function */
17736 					err = prepare_func_exit(env, &env->insn_idx);
17737 					if (err)
17738 						return err;
17739 					do_print_state = true;
17740 					continue;
17741 				}
17742 
17743 				err = check_return_code(env, BPF_REG_0, "R0");
17744 				if (err)
17745 					return err;
17746 process_bpf_exit:
17747 				mark_verifier_state_scratched(env);
17748 				update_branch_counts(env, env->cur_state);
17749 				err = pop_stack(env, &prev_insn_idx,
17750 						&env->insn_idx, pop_log);
17751 				if (err < 0) {
17752 					if (err != -ENOENT)
17753 						return err;
17754 					break;
17755 				} else {
17756 					do_print_state = true;
17757 					continue;
17758 				}
17759 			} else {
17760 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
17761 				if (err)
17762 					return err;
17763 			}
17764 		} else if (class == BPF_LD) {
17765 			u8 mode = BPF_MODE(insn->code);
17766 
17767 			if (mode == BPF_ABS || mode == BPF_IND) {
17768 				err = check_ld_abs(env, insn);
17769 				if (err)
17770 					return err;
17771 
17772 			} else if (mode == BPF_IMM) {
17773 				err = check_ld_imm(env, insn);
17774 				if (err)
17775 					return err;
17776 
17777 				env->insn_idx++;
17778 				sanitize_mark_insn_seen(env);
17779 			} else {
17780 				verbose(env, "invalid BPF_LD mode\n");
17781 				return -EINVAL;
17782 			}
17783 		} else {
17784 			verbose(env, "unknown insn class %d\n", class);
17785 			return -EINVAL;
17786 		}
17787 
17788 		env->insn_idx++;
17789 	}
17790 
17791 	return 0;
17792 }
17793 
17794 static int find_btf_percpu_datasec(struct btf *btf)
17795 {
17796 	const struct btf_type *t;
17797 	const char *tname;
17798 	int i, n;
17799 
17800 	/*
17801 	 * vmlinux and each module have their own ".data..percpu"
17802 	 * DATASECs in BTF. So in the module case, we need to skip the
17803 	 * vmlinux BTF types to look only at the module's own BTF types.
17804 	 */
17805 	n = btf_nr_types(btf);
17806 	if (btf_is_module(btf))
17807 		i = btf_nr_types(btf_vmlinux);
17808 	else
17809 		i = 1;
17810 
17811 	for (; i < n; i++) {
17812 		t = btf_type_by_id(btf, i);
17813 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
17814 			continue;
17815 
17816 		tname = btf_name_by_offset(btf, t->name_off);
17817 		if (!strcmp(tname, ".data..percpu"))
17818 			return i;
17819 	}
17820 
17821 	return -ENOENT;
17822 }
17823 
17824 /* replace pseudo btf_id with kernel symbol address */
17825 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
17826 			       struct bpf_insn *insn,
17827 			       struct bpf_insn_aux_data *aux)
17828 {
17829 	const struct btf_var_secinfo *vsi;
17830 	const struct btf_type *datasec;
17831 	struct btf_mod_pair *btf_mod;
17832 	const struct btf_type *t;
17833 	const char *sym_name;
17834 	bool percpu = false;
17835 	u32 type, id = insn->imm;
17836 	struct btf *btf;
17837 	s32 datasec_id;
17838 	u64 addr;
17839 	int i, btf_fd, err;
17840 
17841 	btf_fd = insn[1].imm;
17842 	if (btf_fd) {
17843 		btf = btf_get_by_fd(btf_fd);
17844 		if (IS_ERR(btf)) {
17845 			verbose(env, "invalid module BTF object FD specified.\n");
17846 			return -EINVAL;
17847 		}
17848 	} else {
17849 		if (!btf_vmlinux) {
17850 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
17851 			return -EINVAL;
17852 		}
17853 		btf = btf_vmlinux;
17854 		btf_get(btf);
17855 	}
17856 
17857 	t = btf_type_by_id(btf, id);
17858 	if (!t) {
17859 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
17860 		err = -ENOENT;
17861 		goto err_put;
17862 	}
17863 
17864 	if (!btf_type_is_var(t) && !btf_type_is_func(t)) {
17865 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id);
17866 		err = -EINVAL;
17867 		goto err_put;
17868 	}
17869 
17870 	sym_name = btf_name_by_offset(btf, t->name_off);
17871 	addr = kallsyms_lookup_name(sym_name);
17872 	if (!addr) {
17873 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
17874 			sym_name);
17875 		err = -ENOENT;
17876 		goto err_put;
17877 	}
17878 	insn[0].imm = (u32)addr;
17879 	insn[1].imm = addr >> 32;
17880 
17881 	if (btf_type_is_func(t)) {
17882 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
17883 		aux->btf_var.mem_size = 0;
17884 		goto check_btf;
17885 	}
17886 
17887 	datasec_id = find_btf_percpu_datasec(btf);
17888 	if (datasec_id > 0) {
17889 		datasec = btf_type_by_id(btf, datasec_id);
17890 		for_each_vsi(i, datasec, vsi) {
17891 			if (vsi->type == id) {
17892 				percpu = true;
17893 				break;
17894 			}
17895 		}
17896 	}
17897 
17898 	type = t->type;
17899 	t = btf_type_skip_modifiers(btf, type, NULL);
17900 	if (percpu) {
17901 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
17902 		aux->btf_var.btf = btf;
17903 		aux->btf_var.btf_id = type;
17904 	} else if (!btf_type_is_struct(t)) {
17905 		const struct btf_type *ret;
17906 		const char *tname;
17907 		u32 tsize;
17908 
17909 		/* resolve the type size of ksym. */
17910 		ret = btf_resolve_size(btf, t, &tsize);
17911 		if (IS_ERR(ret)) {
17912 			tname = btf_name_by_offset(btf, t->name_off);
17913 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
17914 				tname, PTR_ERR(ret));
17915 			err = -EINVAL;
17916 			goto err_put;
17917 		}
17918 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
17919 		aux->btf_var.mem_size = tsize;
17920 	} else {
17921 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
17922 		aux->btf_var.btf = btf;
17923 		aux->btf_var.btf_id = type;
17924 	}
17925 check_btf:
17926 	/* check whether we recorded this BTF (and maybe module) already */
17927 	for (i = 0; i < env->used_btf_cnt; i++) {
17928 		if (env->used_btfs[i].btf == btf) {
17929 			btf_put(btf);
17930 			return 0;
17931 		}
17932 	}
17933 
17934 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
17935 		err = -E2BIG;
17936 		goto err_put;
17937 	}
17938 
17939 	btf_mod = &env->used_btfs[env->used_btf_cnt];
17940 	btf_mod->btf = btf;
17941 	btf_mod->module = NULL;
17942 
17943 	/* if we reference variables from a kernel module, bump its refcount */
17944 	if (btf_is_module(btf)) {
17945 		btf_mod->module = btf_try_get_module(btf);
17946 		if (!btf_mod->module) {
17947 			err = -ENXIO;
17948 			goto err_put;
17949 		}
17950 	}
17951 
17952 	env->used_btf_cnt++;
17953 
17954 	return 0;
17955 err_put:
17956 	btf_put(btf);
17957 	return err;
17958 }
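
/* Illustrative encoding sketch (not part of the original source): a
 * BPF_PSEUDO_BTF_ID ldimm64 arrives as an insn pair with insn[0].imm holding
 * the btf_id of the ksym and insn[1].imm holding the module BTF object FD
 * (0 for vmlinux). After check_pseudo_btf_id() the pair instead carries the
 * 64-bit kallsyms address of the symbol: insn[0].imm holds the low 32 bits
 * and insn[1].imm the high 32 bits.
 */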
17959 
17960 static bool is_tracing_prog_type(enum bpf_prog_type type)
17961 {
17962 	switch (type) {
17963 	case BPF_PROG_TYPE_KPROBE:
17964 	case BPF_PROG_TYPE_TRACEPOINT:
17965 	case BPF_PROG_TYPE_PERF_EVENT:
17966 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
17967 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
17968 		return true;
17969 	default:
17970 		return false;
17971 	}
17972 }
17973 
17974 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
17975 					struct bpf_map *map,
17976 					struct bpf_prog *prog)
17977 
17978 {
17979 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
17980 
17981 	if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
17982 	    btf_record_has_field(map->record, BPF_RB_ROOT)) {
17983 		if (is_tracing_prog_type(prog_type)) {
17984 			verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
17985 			return -EINVAL;
17986 		}
17987 	}
17988 
17989 	if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
17990 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
17991 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
17992 			return -EINVAL;
17993 		}
17994 
17995 		if (is_tracing_prog_type(prog_type)) {
17996 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
17997 			return -EINVAL;
17998 		}
17999 	}
18000 
18001 	if (btf_record_has_field(map->record, BPF_TIMER)) {
18002 		if (is_tracing_prog_type(prog_type)) {
18003 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
18004 			return -EINVAL;
18005 		}
18006 	}
18007 
18008 	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
18009 	    !bpf_offload_prog_map_match(prog, map)) {
18010 		verbose(env, "offload device mismatch between prog and map\n");
18011 		return -EINVAL;
18012 	}
18013 
18014 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
18015 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
18016 		return -EINVAL;
18017 	}
18018 
18019 	if (prog->aux->sleepable)
18020 		switch (map->map_type) {
18021 		case BPF_MAP_TYPE_HASH:
18022 		case BPF_MAP_TYPE_LRU_HASH:
18023 		case BPF_MAP_TYPE_ARRAY:
18024 		case BPF_MAP_TYPE_PERCPU_HASH:
18025 		case BPF_MAP_TYPE_PERCPU_ARRAY:
18026 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
18027 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
18028 		case BPF_MAP_TYPE_HASH_OF_MAPS:
18029 		case BPF_MAP_TYPE_RINGBUF:
18030 		case BPF_MAP_TYPE_USER_RINGBUF:
18031 		case BPF_MAP_TYPE_INODE_STORAGE:
18032 		case BPF_MAP_TYPE_SK_STORAGE:
18033 		case BPF_MAP_TYPE_TASK_STORAGE:
18034 		case BPF_MAP_TYPE_CGRP_STORAGE:
18035 		case BPF_MAP_TYPE_QUEUE:
18036 		case BPF_MAP_TYPE_STACK:
18037 			break;
18038 		default:
18039 			verbose(env,
18040 				"Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
18041 			return -EINVAL;
18042 		}
18043 
18044 	return 0;
18045 }
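
/* Example of a rejected combination (illustrative, not from the original
 * source): a BPF_PROG_TYPE_KPROBE program using a hash map whose value
 * contains a BTF-described bpf_spin_lock trips the is_tracing_prog_type()
 * check above and fails with "tracing progs cannot use bpf_spin_lock yet".
 */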
18046 
18047 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
18048 {
18049 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
18050 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
18051 }
18052 
18053 /* find and rewrite pseudo imm in ld_imm64 instructions:
18054  *
18055  * 1. if it accesses map FD, replace it with actual map pointer.
18056  * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
18057  *
18058  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
18059  */
18060 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
18061 {
18062 	struct bpf_insn *insn = env->prog->insnsi;
18063 	int insn_cnt = env->prog->len;
18064 	int i, j, err;
18065 
18066 	err = bpf_prog_calc_tag(env->prog);
18067 	if (err)
18068 		return err;
18069 
18070 	for (i = 0; i < insn_cnt; i++, insn++) {
18071 		if (BPF_CLASS(insn->code) == BPF_LDX &&
18072 		    ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) ||
18073 		    insn->imm != 0)) {
18074 			verbose(env, "BPF_LDX uses reserved fields\n");
18075 			return -EINVAL;
18076 		}
18077 
18078 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
18079 			struct bpf_insn_aux_data *aux;
18080 			struct bpf_map *map;
18081 			struct fd f;
18082 			u64 addr;
18083 			u32 fd;
18084 
18085 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
18086 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
18087 			    insn[1].off != 0) {
18088 				verbose(env, "invalid bpf_ld_imm64 insn\n");
18089 				return -EINVAL;
18090 			}
18091 
18092 			if (insn[0].src_reg == 0)
18093 				/* valid generic load 64-bit imm */
18094 				goto next_insn;
18095 
18096 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
18097 				aux = &env->insn_aux_data[i];
18098 				err = check_pseudo_btf_id(env, insn, aux);
18099 				if (err)
18100 					return err;
18101 				goto next_insn;
18102 			}
18103 
18104 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
18105 				aux = &env->insn_aux_data[i];
18106 				aux->ptr_type = PTR_TO_FUNC;
18107 				goto next_insn;
18108 			}
18109 
18110 			/* In the final convert_pseudo_ld_imm64() step, this is
18111 			 * converted into a regular 64-bit imm load insn.
18112 			 */
18113 			switch (insn[0].src_reg) {
18114 			case BPF_PSEUDO_MAP_VALUE:
18115 			case BPF_PSEUDO_MAP_IDX_VALUE:
18116 				break;
18117 			case BPF_PSEUDO_MAP_FD:
18118 			case BPF_PSEUDO_MAP_IDX:
18119 				if (insn[1].imm == 0)
18120 					break;
18121 				fallthrough;
18122 			default:
18123 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
18124 				return -EINVAL;
18125 			}
18126 
18127 			switch (insn[0].src_reg) {
18128 			case BPF_PSEUDO_MAP_IDX_VALUE:
18129 			case BPF_PSEUDO_MAP_IDX:
18130 				if (bpfptr_is_null(env->fd_array)) {
18131 					verbose(env, "fd_idx without fd_array is invalid\n");
18132 					return -EPROTO;
18133 				}
18134 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
18135 							    insn[0].imm * sizeof(fd),
18136 							    sizeof(fd)))
18137 					return -EFAULT;
18138 				break;
18139 			default:
18140 				fd = insn[0].imm;
18141 				break;
18142 			}
18143 
18144 			f = fdget(fd);
18145 			map = __bpf_map_get(f);
18146 			if (IS_ERR(map)) {
18147 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
18148 					insn[0].imm);
18149 				return PTR_ERR(map);
18150 			}
18151 
18152 			err = check_map_prog_compatibility(env, map, env->prog);
18153 			if (err) {
18154 				fdput(f);
18155 				return err;
18156 			}
18157 
18158 			aux = &env->insn_aux_data[i];
18159 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
18160 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
18161 				addr = (unsigned long)map;
18162 			} else {
18163 				u32 off = insn[1].imm;
18164 
18165 				if (off >= BPF_MAX_VAR_OFF) {
18166 					verbose(env, "direct value offset of %u is not allowed\n", off);
18167 					fdput(f);
18168 					return -EINVAL;
18169 				}
18170 
18171 				if (!map->ops->map_direct_value_addr) {
18172 					verbose(env, "no direct value access support for this map type\n");
18173 					fdput(f);
18174 					return -EINVAL;
18175 				}
18176 
18177 				err = map->ops->map_direct_value_addr(map, &addr, off);
18178 				if (err) {
18179 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
18180 						map->value_size, off);
18181 					fdput(f);
18182 					return err;
18183 				}
18184 
18185 				aux->map_off = off;
18186 				addr += off;
18187 			}
18188 
18189 			insn[0].imm = (u32)addr;
18190 			insn[1].imm = addr >> 32;
18191 
18192 			/* check whether we recorded this map already */
18193 			for (j = 0; j < env->used_map_cnt; j++) {
18194 				if (env->used_maps[j] == map) {
18195 					aux->map_index = j;
18196 					fdput(f);
18197 					goto next_insn;
18198 				}
18199 			}
18200 
18201 			if (env->used_map_cnt >= MAX_USED_MAPS) {
18202 				fdput(f);
18203 				return -E2BIG;
18204 			}
18205 
18206 			if (env->prog->aux->sleepable)
18207 				atomic64_inc(&map->sleepable_refcnt);
18208 			/* hold the map. If the program is rejected by the verifier,
18209 			 * the map will be released by release_maps() or it
18210 			 * will be used by the valid program until it's unloaded
18211 			 * and all maps are released in bpf_free_used_maps()
18212 			 */
18213 			bpf_map_inc(map);
18214 
18215 			aux->map_index = env->used_map_cnt;
18216 			env->used_maps[env->used_map_cnt++] = map;
18217 
18218 			if (bpf_map_is_cgroup_storage(map) &&
18219 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
18220 				verbose(env, "only one cgroup storage of each type is allowed\n");
18221 				fdput(f);
18222 				return -EBUSY;
18223 			}
18224 
18225 			fdput(f);
18226 next_insn:
18227 			insn++;
18228 			i++;
18229 			continue;
18230 		}
18231 
18232 		/* Basic sanity check before we invest more work here. */
18233 		if (!bpf_opcode_in_insntable(insn->code)) {
18234 			verbose(env, "unknown opcode %02x\n", insn->code);
18235 			return -EINVAL;
18236 		}
18237 	}
18238 
18239 	/* now all pseudo BPF_LD_IMM64 instructions load a valid
18240 	 * 'struct bpf_map *' into a register instead of a user map_fd.
18241 	 * These pointers will be used later by the verifier to validate map access.
18242 	 */
18243 	return 0;
18244 }
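
/* Illustrative sketch (not part of the original source): user space typically
 * emits the pseudo insn via the BPF_LD_MAP_FD() helper macro,
 *
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),   // src_reg == BPF_PSEUDO_MAP_FD
 *
 * and resolve_pseudo_ldimm64() above replaces the fd with the kernel
 * 'struct bpf_map *': insn[0].imm holds the low 32 bits of the pointer and
 * insn[1].imm the high 32 bits.
 */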
18245 
18246 /* drop refcnt of maps used by the rejected program */
18247 static void release_maps(struct bpf_verifier_env *env)
18248 {
18249 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
18250 			     env->used_map_cnt);
18251 }
18252 
18253 /* drop refcnt of maps used by the rejected program */
18254 static void release_btfs(struct bpf_verifier_env *env)
18255 {
18256 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
18257 			     env->used_btf_cnt);
18258 }
18259 
18260 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
18261 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
18262 {
18263 	struct bpf_insn *insn = env->prog->insnsi;
18264 	int insn_cnt = env->prog->len;
18265 	int i;
18266 
18267 	for (i = 0; i < insn_cnt; i++, insn++) {
18268 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
18269 			continue;
18270 		if (insn->src_reg == BPF_PSEUDO_FUNC)
18271 			continue;
18272 		insn->src_reg = 0;
18273 	}
18274 }
18275 
18276 /* single env->prog->insnsi[off] instruction was replaced with the range
18277  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
18278  * [0, off) and [off, end) to new locations, so the patched range stays zero
18279  */
18280 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
18281 				 struct bpf_insn_aux_data *new_data,
18282 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
18283 {
18284 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
18285 	struct bpf_insn *insn = new_prog->insnsi;
18286 	u32 old_seen = old_data[off].seen;
18287 	u32 prog_len;
18288 	int i;
18289 
18290 	/* aux info at OFF always needs adjustment, no matter whether the fast
18291 	 * path (cnt == 1) is taken or not. There is no guarantee the insn at
18292 	 * OFF is the original insn in the old prog.
18293 	 */
18294 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
18295 
18296 	if (cnt == 1)
18297 		return;
18298 	prog_len = new_prog->len;
18299 
18300 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
18301 	memcpy(new_data + off + cnt - 1, old_data + off,
18302 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
18303 	for (i = off; i < off + cnt - 1; i++) {
18304 		/* Expand insni[off]'s seen count to the patched range. */
18305 		new_data[i].seen = old_seen;
18306 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
18307 	}
18308 	env->insn_aux_data = new_data;
18309 	vfree(old_data);
18310 }
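
/* Worked example (illustrative, not from the original source): patching one
 * insn at off == 5 with cnt == 3 insns copies old aux [0, 5) to new [0, 5),
 * copies old aux [5, old_len) to new [7, new_len), and gives the two freshly
 * inserted slots 5 and 6 the old insn's 'seen' mark with recomputed zext_dst.
 */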
18311 
18312 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
18313 {
18314 	int i;
18315 
18316 	if (len == 1)
18317 		return;
18318 	/* NOTE: fake 'exit' subprog should be updated as well. */
18319 	for (i = 0; i <= env->subprog_cnt; i++) {
18320 		if (env->subprog_info[i].start <= off)
18321 			continue;
18322 		env->subprog_info[i].start += len - 1;
18323 	}
18324 }
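
/* Worked example (illustrative, not from the original source): with subprog
 * starts {0, 10, 20} and a patch of len == 4 at off == 12, only starts
 * strictly after off move, yielding {0, 10, 23} (20 + len - 1). The fake
 * 'exit' subprog start moves the same way.
 */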
18325 
18326 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
18327 {
18328 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
18329 	int i, sz = prog->aux->size_poke_tab;
18330 	struct bpf_jit_poke_descriptor *desc;
18331 
18332 	for (i = 0; i < sz; i++) {
18333 		desc = &tab[i];
18334 		if (desc->insn_idx <= off)
18335 			continue;
18336 		desc->insn_idx += len - 1;
18337 	}
18338 }
18339 
18340 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
18341 					    const struct bpf_insn *patch, u32 len)
18342 {
18343 	struct bpf_prog *new_prog;
18344 	struct bpf_insn_aux_data *new_data = NULL;
18345 
18346 	if (len > 1) {
18347 		new_data = vzalloc(array_size(env->prog->len + len - 1,
18348 					      sizeof(struct bpf_insn_aux_data)));
18349 		if (!new_data)
18350 			return NULL;
18351 	}
18352 
18353 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
18354 	if (IS_ERR(new_prog)) {
18355 		if (PTR_ERR(new_prog) == -ERANGE)
18356 			verbose(env,
18357 				"insn %d cannot be patched due to 16-bit range\n",
18358 				env->insn_aux_data[off].orig_idx);
18359 		vfree(new_data);
18360 		return NULL;
18361 	}
18362 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
18363 	adjust_subprog_starts(env, off, len);
18364 	adjust_poke_descs(new_prog, off, len);
18365 	return new_prog;
18366 }
18367 
18368 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
18369 					      u32 off, u32 cnt)
18370 {
18371 	int i, j;
18372 
18373 	/* find first prog starting at or after off (first to remove) */
18374 	for (i = 0; i < env->subprog_cnt; i++)
18375 		if (env->subprog_info[i].start >= off)
18376 			break;
18377 	/* find first prog starting at or after off + cnt (first to stay) */
18378 	for (j = i; j < env->subprog_cnt; j++)
18379 		if (env->subprog_info[j].start >= off + cnt)
18380 			break;
18381 	/* if j doesn't start exactly at off + cnt, we are just removing
18382 	 * the front of the previous prog
18383 	 */
18384 	if (env->subprog_info[j].start != off + cnt)
18385 		j--;
18386 
18387 	if (j > i) {
18388 		struct bpf_prog_aux *aux = env->prog->aux;
18389 		int move;
18390 
18391 		/* move fake 'exit' subprog as well */
18392 		move = env->subprog_cnt + 1 - j;
18393 
18394 		memmove(env->subprog_info + i,
18395 			env->subprog_info + j,
18396 			sizeof(*env->subprog_info) * move);
18397 		env->subprog_cnt -= j - i;
18398 
18399 		/* remove func_info */
18400 		if (aux->func_info) {
18401 			move = aux->func_info_cnt - j;
18402 
18403 			memmove(aux->func_info + i,
18404 				aux->func_info + j,
18405 				sizeof(*aux->func_info) * move);
18406 			aux->func_info_cnt -= j - i;
18407 			/* func_info->insn_off is set after all code rewrites,
18408 			 * in adjust_btf_func() - no need to adjust
18409 			 */
18410 		}
18411 	} else {
18412 		/* convert i from "first prog to remove" to "first to adjust" */
18413 		if (env->subprog_info[i].start == off)
18414 			i++;
18415 	}
18416 
18417 	/* update fake 'exit' subprog as well */
18418 	for (; i <= env->subprog_cnt; i++)
18419 		env->subprog_info[i].start -= cnt;
18420 
18421 	return 0;
18422 }
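
/* Worked example (illustrative, not from the original source): with subprog
 * starts {0, 5, 10} and insns [5, 10) removed, i == 1 (first start >= 5) and
 * j == 2 (first start >= 10). Subprog 1 is removed entirely, so entry 1 is
 * dropped and the remaining starts are pulled in by cnt, leaving {0, 5}.
 */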
18423 
18424 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
18425 				      u32 cnt)
18426 {
18427 	struct bpf_prog *prog = env->prog;
18428 	u32 i, l_off, l_cnt, nr_linfo;
18429 	struct bpf_line_info *linfo;
18430 
18431 	nr_linfo = prog->aux->nr_linfo;
18432 	if (!nr_linfo)
18433 		return 0;
18434 
18435 	linfo = prog->aux->linfo;
18436 
18437 	/* find first line info to remove, count lines to be removed */
18438 	for (i = 0; i < nr_linfo; i++)
18439 		if (linfo[i].insn_off >= off)
18440 			break;
18441 
18442 	l_off = i;
18443 	l_cnt = 0;
18444 	for (; i < nr_linfo; i++)
18445 		if (linfo[i].insn_off < off + cnt)
18446 			l_cnt++;
18447 		else
18448 			break;
18449 
18450 	/* If the first live insn doesn't match the first live linfo, it needs to
18451 	 * "inherit" the last removed linfo.  prog is already modified, so prog->len
18452 	 * == off means no live instructions after it (the tail was removed).
18453 	 */
18454 	if (prog->len != off && l_cnt &&
18455 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
18456 		l_cnt--;
18457 		linfo[--i].insn_off = off + cnt;
18458 	}
18459 
18460 	/* remove the line info entries which refer to the removed instructions */
18461 	if (l_cnt) {
18462 		memmove(linfo + l_off, linfo + i,
18463 			sizeof(*linfo) * (nr_linfo - i));
18464 
18465 		prog->aux->nr_linfo -= l_cnt;
18466 		nr_linfo = prog->aux->nr_linfo;
18467 	}
18468 
18469 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
18470 	for (i = l_off; i < nr_linfo; i++)
18471 		linfo[i].insn_off -= cnt;
18472 
18473 	/* fix up all subprogs (incl. 'exit') which start >= off */
18474 	for (i = 0; i <= env->subprog_cnt; i++)
18475 		if (env->subprog_info[i].linfo_idx > l_off) {
18476 			/* program may have started in the removed region but
18477 			 * may not be fully removed
18478 			 */
18479 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
18480 				env->subprog_info[i].linfo_idx -= l_cnt;
18481 			else
18482 				env->subprog_info[i].linfo_idx = l_off;
18483 		}
18484 
18485 	return 0;
18486 }
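
/* Worked example (illustrative, not from the original source): with line
 * info at insn_offs {0, 4, 8} and insns [4, 8) removed, only the entry at 4
 * falls inside the removed range (l_off == 1, l_cnt == 1). The next entry
 * sits exactly at off + cnt == 8, so nothing is inherited; the entry is
 * dropped and the remaining offsets become {0, 4}.
 */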
18487 
18488 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
18489 {
18490 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
18491 	unsigned int orig_prog_len = env->prog->len;
18492 	int err;
18493 
18494 	if (bpf_prog_is_offloaded(env->prog->aux))
18495 		bpf_prog_offload_remove_insns(env, off, cnt);
18496 
18497 	err = bpf_remove_insns(env->prog, off, cnt);
18498 	if (err)
18499 		return err;
18500 
18501 	err = adjust_subprog_starts_after_remove(env, off, cnt);
18502 	if (err)
18503 		return err;
18504 
18505 	err = bpf_adj_linfo_after_remove(env, off, cnt);
18506 	if (err)
18507 		return err;
18508 
18509 	memmove(aux_data + off,	aux_data + off + cnt,
18510 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
18511 
18512 	return 0;
18513 }
18514 
18515 /* The verifier does more data flow analysis than llvm and will not
18516  * explore branches that are dead at run time. Malicious programs can
18517  * have dead code too. Therefore replace all dead at-run-time code
18518  * with 'ja -1', an unconditional jump back to itself (a self-loop
18519  * trap).
18520  *
18521  * Plain nops are not optimal: if they sat at the end of the program
18522  * and, through another bug, we managed to jump there, we'd execute
18523  * beyond program memory. Returning an exception code also wouldn't
18524  * work, since the dead code could be located in subprogs.
18525  */
18526 static void sanitize_dead_code(struct bpf_verifier_env *env)
18527 {
18528 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
18529 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
18530 	struct bpf_insn *insn = env->prog->insnsi;
18531 	const int insn_cnt = env->prog->len;
18532 	int i;
18533 
18534 	for (i = 0; i < insn_cnt; i++) {
18535 		if (aux_data[i].seen)
18536 			continue;
18537 		memcpy(insn + i, &trap, sizeof(trap));
18538 		aux_data[i].zext_dst = false;
18539 	}
18540 }
18541 
18542 static bool insn_is_cond_jump(u8 code)
18543 {
18544 	u8 op;
18545 
18546 	op = BPF_OP(code);
18547 	if (BPF_CLASS(code) == BPF_JMP32)
18548 		return op != BPF_JA;
18549 
18550 	if (BPF_CLASS(code) != BPF_JMP)
18551 		return false;
18552 
18553 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
18554 }
18555 
18556 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
18557 {
18558 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
18559 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
18560 	struct bpf_insn *insn = env->prog->insnsi;
18561 	const int insn_cnt = env->prog->len;
18562 	int i;
18563 
18564 	for (i = 0; i < insn_cnt; i++, insn++) {
18565 		if (!insn_is_cond_jump(insn->code))
18566 			continue;
18567 
18568 		if (!aux_data[i + 1].seen)
18569 			ja.off = insn->off;
18570 		else if (!aux_data[i + 1 + insn->off].seen)
18571 			ja.off = 0;
18572 		else
18573 			continue;
18574 
18575 		if (bpf_prog_is_offloaded(env->prog->aux))
18576 			bpf_prog_offload_replace_insn(env, i, &ja);
18577 
18578 		memcpy(insn, &ja, sizeof(ja));
18579 	}
18580 }
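
/* Illustrative sketch (not part of the original source): given
 *
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
 *
 * if the fall-through insn was never seen, the insn is rewritten to an
 * unconditional 'ja +3'; if instead the jump target was never seen, it
 * becomes 'ja +0', i.e. a plain fall-through.
 */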
18581 
18582 static int opt_remove_dead_code(struct bpf_verifier_env *env)
18583 {
18584 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
18585 	int insn_cnt = env->prog->len;
18586 	int i, err;
18587 
18588 	for (i = 0; i < insn_cnt; i++) {
18589 		int j;
18590 
18591 		j = 0;
18592 		while (i + j < insn_cnt && !aux_data[i + j].seen)
18593 			j++;
18594 		if (!j)
18595 			continue;
18596 
18597 		err = verifier_remove_insns(env, i, j);
18598 		if (err)
18599 			return err;
18600 		insn_cnt = env->prog->len;
18601 	}
18602 
18603 	return 0;
18604 }
18605 
18606 static int opt_remove_nops(struct bpf_verifier_env *env)
18607 {
18608 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
18609 	struct bpf_insn *insn = env->prog->insnsi;
18610 	int insn_cnt = env->prog->len;
18611 	int i, err;
18612 
18613 	for (i = 0; i < insn_cnt; i++) {
18614 		if (memcmp(&insn[i], &ja, sizeof(ja)))
18615 			continue;
18616 
18617 		err = verifier_remove_insns(env, i, 1);
18618 		if (err)
18619 			return err;
18620 		insn_cnt--;
18621 		i--;
18622 	}
18623 
18624 	return 0;
18625 }
18626 
18627 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
18628 					 const union bpf_attr *attr)
18629 {
18630 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
18631 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
18632 	int i, patch_len, delta = 0, len = env->prog->len;
18633 	struct bpf_insn *insns = env->prog->insnsi;
18634 	struct bpf_prog *new_prog;
18635 	bool rnd_hi32;
18636 
18637 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
18638 	zext_patch[1] = BPF_ZEXT_REG(0);
18639 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
18640 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
18641 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
18642 	for (i = 0; i < len; i++) {
18643 		int adj_idx = i + delta;
18644 		struct bpf_insn insn;
18645 		int load_reg;
18646 
18647 		insn = insns[adj_idx];
18648 		load_reg = insn_def_regno(&insn);
18649 		if (!aux[adj_idx].zext_dst) {
18650 			u8 code, class;
18651 			u32 imm_rnd;
18652 
18653 			if (!rnd_hi32)
18654 				continue;
18655 
18656 			code = insn.code;
18657 			class = BPF_CLASS(code);
18658 			if (load_reg == -1)
18659 				continue;
18660 
18661 			/* NOTE: arg "reg" (the fourth one) is only used for
18662 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
18663 			 *       here.
18664 			 */
18665 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
18666 				if (class == BPF_LD &&
18667 				    BPF_MODE(code) == BPF_IMM)
18668 					i++;
18669 				continue;
18670 			}
18671 
18672 			/* a ctx load could be transformed into a wider load. */
18673 			if (class == BPF_LDX &&
18674 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
18675 				continue;
18676 
18677 			imm_rnd = get_random_u32();
18678 			rnd_hi32_patch[0] = insn;
18679 			rnd_hi32_patch[1].imm = imm_rnd;
18680 			rnd_hi32_patch[3].dst_reg = load_reg;
18681 			patch = rnd_hi32_patch;
18682 			patch_len = 4;
18683 			goto apply_patch_buffer;
18684 		}
18685 
18686 		/* Add in a zero-extend instruction if a) the JIT has requested
18687 		 * it or b) it's a CMPXCHG.
18688 		 *
18689 		 * The latter is because: BPF_CMPXCHG always loads a value into
18690 		 * R0, therefore always zero-extends. However some archs'
18691 		 * equivalent instruction only does this load when the
18692 		 * comparison is successful. This detail of CMPXCHG is
18693 		 * orthogonal to the general zero-extension behaviour of the
18694 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
18695 		 */
18696 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
18697 			continue;
18698 
18699 		/* Zero-extension is done by the caller. */
18700 		if (bpf_pseudo_kfunc_call(&insn))
18701 			continue;
18702 
18703 		if (WARN_ON(load_reg == -1)) {
18704 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
18705 			return -EFAULT;
18706 		}
18707 
18708 		zext_patch[0] = insn;
18709 		zext_patch[1].dst_reg = load_reg;
18710 		zext_patch[1].src_reg = load_reg;
18711 		patch = zext_patch;
18712 		patch_len = 2;
18713 apply_patch_buffer:
18714 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
18715 		if (!new_prog)
18716 			return -ENOMEM;
18717 		env->prog = new_prog;
18718 		insns = new_prog->insnsi;
18719 		aux = env->insn_aux_data;
18720 		delta += patch_len - 1;
18721 	}
18722 
18723 	return 0;
18724 }
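
/* Illustrative sketch (not part of the original source): when the JIT
 * requests zero extension, a 32-bit def such as
 *
 *    BPF_MOV32_REG(BPF_REG_1, BPF_REG_2),
 *
 * is patched into the two-insn sequence
 *
 *    BPF_MOV32_REG(BPF_REG_1, BPF_REG_2),
 *    BPF_ZEXT_REG(BPF_REG_1),
 *
 * so the upper 32 bits of r1 are architecturally zero afterwards.
 */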
18725 
18726 /* convert load instructions that access fields of a context type into a
18727  * sequence of instructions that access fields of the underlying structure:
18728  *     struct __sk_buff    -> struct sk_buff
18729  *     struct bpf_sock_ops -> struct sock
18730  */
18731 static int convert_ctx_accesses(struct bpf_verifier_env *env)
18732 {
18733 	const struct bpf_verifier_ops *ops = env->ops;
18734 	int i, cnt, size, ctx_field_size, delta = 0;
18735 	const int insn_cnt = env->prog->len;
18736 	struct bpf_insn insn_buf[16], *insn;
18737 	u32 target_size, size_default, off;
18738 	struct bpf_prog *new_prog;
18739 	enum bpf_access_type type;
18740 	bool is_narrower_load;
18741 
18742 	if (ops->gen_prologue || env->seen_direct_write) {
18743 		if (!ops->gen_prologue) {
18744 			verbose(env, "bpf verifier is misconfigured\n");
18745 			return -EINVAL;
18746 		}
18747 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
18748 					env->prog);
18749 		if (cnt >= ARRAY_SIZE(insn_buf)) {
18750 			verbose(env, "bpf verifier is misconfigured\n");
18751 			return -EINVAL;
18752 		} else if (cnt) {
18753 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
18754 			if (!new_prog)
18755 				return -ENOMEM;
18756 
18757 			env->prog = new_prog;
18758 			delta += cnt - 1;
18759 		}
18760 	}
18761 
18762 	if (bpf_prog_is_offloaded(env->prog->aux))
18763 		return 0;
18764 
18765 	insn = env->prog->insnsi + delta;
18766 
18767 	for (i = 0; i < insn_cnt; i++, insn++) {
18768 		bpf_convert_ctx_access_t convert_ctx_access;
18769 		u8 mode;
18770 
18771 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
18772 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
18773 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
18774 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW) ||
18775 		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) ||
18776 		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) ||
18777 		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) {
18778 			type = BPF_READ;
18779 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
18780 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
18781 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
18782 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
18783 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
18784 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
18785 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
18786 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
18787 			type = BPF_WRITE;
18788 		} else {
18789 			continue;
18790 		}
18791 
18792 		if (type == BPF_WRITE &&
18793 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
18794 			struct bpf_insn patch[] = {
18795 				*insn,
18796 				BPF_ST_NOSPEC(),
18797 			};
18798 
18799 			cnt = ARRAY_SIZE(patch);
18800 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
18801 			if (!new_prog)
18802 				return -ENOMEM;
18803 
18804 			delta    += cnt - 1;
18805 			env->prog = new_prog;
18806 			insn      = new_prog->insnsi + i + delta;
18807 			continue;
18808 		}
18809 
18810 		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
18811 		case PTR_TO_CTX:
18812 			if (!ops->convert_ctx_access)
18813 				continue;
18814 			convert_ctx_access = ops->convert_ctx_access;
18815 			break;
18816 		case PTR_TO_SOCKET:
18817 		case PTR_TO_SOCK_COMMON:
18818 			convert_ctx_access = bpf_sock_convert_ctx_access;
18819 			break;
18820 		case PTR_TO_TCP_SOCK:
18821 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
18822 			break;
18823 		case PTR_TO_XDP_SOCK:
18824 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
18825 			break;
18826 		case PTR_TO_BTF_ID:
18827 		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
18828 		/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
18829 		 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
18830 		 * be said once it is marked PTR_UNTRUSTED, hence we must handle
18831 		 * any faults for loads into such types. BPF_WRITE is disallowed
18832 		 * for this case.
18833 		 */
18834 		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
18835 			if (type == BPF_READ) {
18836 				if (BPF_MODE(insn->code) == BPF_MEM)
18837 					insn->code = BPF_LDX | BPF_PROBE_MEM |
18838 						     BPF_SIZE((insn)->code);
18839 				else
18840 					insn->code = BPF_LDX | BPF_PROBE_MEMSX |
18841 						     BPF_SIZE((insn)->code);
18842 				env->prog->aux->num_exentries++;
18843 			}
18844 			continue;
18845 		default:
18846 			continue;
18847 		}
18848 
18849 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
18850 		size = BPF_LDST_BYTES(insn);
18851 		mode = BPF_MODE(insn->code);
18852 
18853 		/* If the read access is a narrower load of the field,
18854 		 * convert to a 4/8-byte load, to minimize program type specific
18855 		 * convert_ctx_access changes. If conversion is successful,
18856 		 * we will apply the proper mask to the result.
18857 		 */
18858 		is_narrower_load = size < ctx_field_size;
18859 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
18860 		off = insn->off;
18861 		if (is_narrower_load) {
18862 			u8 size_code;
18863 
18864 			if (type == BPF_WRITE) {
18865 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
18866 				return -EINVAL;
18867 			}
18868 
18869 			size_code = BPF_H;
18870 			if (ctx_field_size == 4)
18871 				size_code = BPF_W;
18872 			else if (ctx_field_size == 8)
18873 				size_code = BPF_DW;
18874 
18875 			insn->off = off & ~(size_default - 1);
18876 			insn->code = BPF_LDX | BPF_MEM | size_code;
18877 		}
18878 
18879 		target_size = 0;
18880 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
18881 					 &target_size);
18882 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
18883 		    (ctx_field_size && !target_size)) {
18884 			verbose(env, "bpf verifier is misconfigured\n");
18885 			return -EINVAL;
18886 		}
18887 
18888 		if (is_narrower_load && size < target_size) {
18889 			u8 shift = bpf_ctx_narrow_access_offset(
18890 				off, size, size_default) * 8;
18891 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
18892 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
18893 				return -EINVAL;
18894 			}
18895 			if (ctx_field_size <= 4) {
18896 				if (shift)
18897 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
18898 									insn->dst_reg,
18899 									shift);
18900 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
18901 								(1 << size * 8) - 1);
18902 			} else {
18903 				if (shift)
18904 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
18905 									insn->dst_reg,
18906 									shift);
18907 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
18908 								(1ULL << size * 8) - 1);
18909 			}
18910 		}
18911 		if (mode == BPF_MEMSX)
18912 			insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X,
18913 						       insn->dst_reg, insn->dst_reg,
18914 						       size * 8, 0);
18915 
18916 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
18917 		if (!new_prog)
18918 			return -ENOMEM;
18919 
18920 		delta += cnt - 1;
18921 
18922 		/* keep walking new program and skip insns we just inserted */
18923 		env->prog = new_prog;
18924 		insn      = new_prog->insnsi + i + delta;
18925 	}
18926 
18927 	return 0;
18928 }
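
/* Illustrative sketch (not part of the original source, assuming a
 * little-endian host): a 1-byte read at byte offset 1 of a 4-byte ctx field
 * is widened to a 4-byte load at the aligned offset, and the result is then
 * fixed up with
 *
 *    BPF_ALU32_IMM(BPF_RSH, dst_reg, 8),
 *    BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff),
 *
 * i.e. the wanted byte is shifted down and the rest masked off.
 */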
18929 
18930 static int jit_subprogs(struct bpf_verifier_env *env)
18931 {
18932 	struct bpf_prog *prog = env->prog, **func, *tmp;
18933 	int i, j, subprog_start, subprog_end = 0, len, subprog;
18934 	struct bpf_map *map_ptr;
18935 	struct bpf_insn *insn;
18936 	void *old_bpf_func;
18937 	int err, num_exentries;
18938 
18939 	if (env->subprog_cnt <= 1)
18940 		return 0;
18941 
18942 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
18943 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
18944 			continue;
18945 
18946 		/* Upon error here we cannot fall back to interpreter but
18947 		 * need a hard reject of the program. Thus -EFAULT is
18948 		 * propagated in any case.
18949 		 */
18950 		subprog = find_subprog(env, i + insn->imm + 1);
18951 		if (subprog < 0) {
18952 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
18953 				  i + insn->imm + 1);
18954 			return -EFAULT;
18955 		}
18956 		/* temporarily remember subprog id inside insn instead of
18957 		 * aux_data, since next loop will split up all insns into funcs
18958 		 */
18959 		insn->off = subprog;
18960 		/* remember original imm in case JIT fails and fallback
18961 		 * to interpreter will be needed
18962 		 */
18963 		env->insn_aux_data[i].call_imm = insn->imm;
18964 		/* point imm to __bpf_call_base+1 from JIT's point of view */
18965 		insn->imm = 1;
18966 		if (bpf_pseudo_func(insn))
18967 			/* jit (e.g. x86_64) may emit fewer instructions
18968 			 * if it learns a u32 imm is the same as a u64 imm.
18969 			 * Force a non-zero value here.
18970 			 */
18971 			insn[1].imm = 1;
18972 	}
18973 
18974 	err = bpf_prog_alloc_jited_linfo(prog);
18975 	if (err)
18976 		goto out_undo_insn;
18977 
18978 	err = -ENOMEM;
18979 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
18980 	if (!func)
18981 		goto out_undo_insn;
18982 
18983 	for (i = 0; i < env->subprog_cnt; i++) {
18984 		subprog_start = subprog_end;
18985 		subprog_end = env->subprog_info[i + 1].start;
18986 
18987 		len = subprog_end - subprog_start;
18988 		/* bpf_prog_run() doesn't call subprogs directly,
18989 		 * hence main prog stats include the runtime of subprogs.
18990 		 * subprogs don't have IDs and are not reachable via prog_get_next_id;
18991 		 * func[i]->stats will never be accessed and stays NULL
18992 		 */
18993 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
18994 		if (!func[i])
18995 			goto out_free;
18996 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
18997 		       len * sizeof(struct bpf_insn));
18998 		func[i]->type = prog->type;
18999 		func[i]->len = len;
19000 		if (bpf_prog_calc_tag(func[i]))
19001 			goto out_free;
19002 		func[i]->is_func = 1;
19003 		func[i]->aux->func_idx = i;
19004 		/* Below members will be freed only at prog->aux */
19005 		func[i]->aux->btf = prog->aux->btf;
19006 		func[i]->aux->func_info = prog->aux->func_info;
19007 		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
19008 		func[i]->aux->poke_tab = prog->aux->poke_tab;
19009 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
19010 
19011 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
19012 			struct bpf_jit_poke_descriptor *poke;
19013 
19014 			poke = &prog->aux->poke_tab[j];
19015 			if (poke->insn_idx < subprog_end &&
19016 			    poke->insn_idx >= subprog_start)
19017 				poke->aux = func[i]->aux;
19018 		}
19019 
19020 		func[i]->aux->name[0] = 'F';
19021 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
19022 		func[i]->jit_requested = 1;
19023 		func[i]->blinding_requested = prog->blinding_requested;
19024 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
19025 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
19026 		func[i]->aux->linfo = prog->aux->linfo;
19027 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
19028 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
19029 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
19030 		num_exentries = 0;
19031 		insn = func[i]->insnsi;
19032 		for (j = 0; j < func[i]->len; j++, insn++) {
19033 			if (BPF_CLASS(insn->code) == BPF_LDX &&
19034 			    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
19035 			     BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
19036 				num_exentries++;
19037 		}
19038 		func[i]->aux->num_exentries = num_exentries;
19039 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
19040 		func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb;
19041 		if (!i)
19042 			func[i]->aux->exception_boundary = env->seen_exception;
19043 		func[i] = bpf_int_jit_compile(func[i]);
19044 		if (!func[i]->jited) {
19045 			err = -ENOTSUPP;
19046 			goto out_free;
19047 		}
19048 		cond_resched();
19049 	}
19050 
19051 	/* at this point all bpf functions were successfully JITed;
19052 	 * now populate all bpf_calls with correct addresses and
19053 	 * run the last pass of the JIT
19054 	 */
19055 	for (i = 0; i < env->subprog_cnt; i++) {
19056 		insn = func[i]->insnsi;
19057 		for (j = 0; j < func[i]->len; j++, insn++) {
19058 			if (bpf_pseudo_func(insn)) {
19059 				subprog = insn->off;
19060 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
19061 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
19062 				continue;
19063 			}
19064 			if (!bpf_pseudo_call(insn))
19065 				continue;
19066 			subprog = insn->off;
19067 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
19068 		}
19069 
19070 		/* we use the aux data to keep a list of the start addresses
19071 		 * of the JITed images for each function in the program
19072 		 *
19073 		 * for some architectures, such as powerpc64, the imm field
19074 		 * might not be large enough to hold the offset of the start
19075 		 * address of the callee's JITed image from __bpf_call_base
19076 		 *
19077 		 * in such cases, we can look up the start address of a callee
19078 		 * by using its subprog id, available from the off field of
19079 		 * the call instruction, as an index for this list
19080 		 */
19081 		func[i]->aux->func = func;
19082 		func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
19083 		func[i]->aux->real_func_cnt = env->subprog_cnt;
19084 	}
19085 	for (i = 0; i < env->subprog_cnt; i++) {
19086 		old_bpf_func = func[i]->bpf_func;
19087 		tmp = bpf_int_jit_compile(func[i]);
19088 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
19089 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
19090 			err = -ENOTSUPP;
19091 			goto out_free;
19092 		}
19093 		cond_resched();
19094 	}
19095 
19096 	/* finally lock prog and jit images for all functions and
19097 	 * populate kallsyms. Begin at the first subprogram, since
19098 	 * bpf_prog_load will add the kallsyms for the main program.
19099 	 */
19100 	for (i = 1; i < env->subprog_cnt; i++) {
19101 		bpf_prog_lock_ro(func[i]);
19102 		bpf_prog_kallsyms_add(func[i]);
19103 	}
19104 
19105 	/* Last step: make the now unused interpreter insns from the main
19106 	 * prog consistent for later dump requests, so that they look the
19107 	 * same as if the prog had only ever been interpreted.
19108 	 */
19109 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
19110 		if (bpf_pseudo_func(insn)) {
19111 			insn[0].imm = env->insn_aux_data[i].call_imm;
19112 			insn[1].imm = insn->off;
19113 			insn->off = 0;
19114 			continue;
19115 		}
19116 		if (!bpf_pseudo_call(insn))
19117 			continue;
19118 		insn->off = env->insn_aux_data[i].call_imm;
19119 		subprog = find_subprog(env, i + insn->off + 1);
19120 		insn->imm = subprog;
19121 	}
19122 
19123 	prog->jited = 1;
19124 	prog->bpf_func = func[0]->bpf_func;
19125 	prog->jited_len = func[0]->jited_len;
19126 	prog->aux->extable = func[0]->aux->extable;
19127 	prog->aux->num_exentries = func[0]->aux->num_exentries;
19128 	prog->aux->func = func;
19129 	prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt;
19130 	prog->aux->real_func_cnt = env->subprog_cnt;
19131 	prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func;
19132 	prog->aux->exception_boundary = func[0]->aux->exception_boundary;
19133 	bpf_prog_jit_attempt_done(prog);
19134 	return 0;
19135 out_free:
19136 	/* We failed JIT'ing, so at this point we need to unregister poke
19137 	 * descriptors from subprogs, so that the kernel does not attempt to
19138 	 * patch them anymore while we free the subprog JIT memory.
19139 	 */
19140 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
19141 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
19142 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
19143 	}
19144 	/* At this point we're guaranteed that poke descriptors are not
19145 	 * live anymore. We can just unlink each subprog's descriptor table
19146 	 * as it is released together with the main prog.
19147 	 */
19148 	for (i = 0; i < env->subprog_cnt; i++) {
19149 		if (!func[i])
19150 			continue;
19151 		func[i]->aux->poke_tab = NULL;
19152 		bpf_jit_free(func[i]);
19153 	}
19154 	kfree(func);
19155 out_undo_insn:
19156 	/* cleanup main prog to be interpreted */
19157 	prog->jit_requested = 0;
19158 	prog->blinding_requested = 0;
19159 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
19160 		if (!bpf_pseudo_call(insn))
19161 			continue;
19162 		insn->off = 0;
19163 		insn->imm = env->insn_aux_data[i].call_imm;
19164 	}
19165 	bpf_prog_jit_attempt_done(prog);
19166 	return err;
19167 }
19168 
19169 static int fixup_call_args(struct bpf_verifier_env *env)
19170 {
19171 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
19172 	struct bpf_prog *prog = env->prog;
19173 	struct bpf_insn *insn = prog->insnsi;
19174 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
19175 	int i, depth;
19176 #endif
19177 	int err = 0;
19178 
19179 	if (env->prog->jit_requested &&
19180 	    !bpf_prog_is_offloaded(env->prog->aux)) {
19181 		err = jit_subprogs(env);
19182 		if (err == 0)
19183 			return 0;
19184 		if (err == -EFAULT)
19185 			return err;
19186 	}
19187 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
19188 	if (has_kfunc_call) {
19189 		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
19190 		return -EINVAL;
19191 	}
19192 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
19193 		/* When the JIT fails, progs with bpf2bpf calls and tail_calls
19194 		 * have to be rejected, since the interpreter doesn't support them yet.
19195 		 */
19196 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
19197 		return -EINVAL;
19198 	}
19199 	for (i = 0; i < prog->len; i++, insn++) {
19200 		if (bpf_pseudo_func(insn)) {
19201 			/* When the JIT fails, progs with callback calls
19202 			 * have to be rejected, since the interpreter doesn't support them yet.
19203 			 */
19204 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
19205 			return -EINVAL;
19206 		}
19207 
19208 		if (!bpf_pseudo_call(insn))
19209 			continue;
19210 		depth = get_callee_stack_depth(env, insn, i);
19211 		if (depth < 0)
19212 			return depth;
19213 		bpf_patch_call_args(insn, depth);
19214 	}
19215 	err = 0;
19216 #endif
19217 	return err;
19218 }
19219 
19220 /* replace a generic kfunc with a specialized version if necessary */
19221 static void specialize_kfunc(struct bpf_verifier_env *env,
19222 			     u32 func_id, u16 offset, unsigned long *addr)
19223 {
19224 	struct bpf_prog *prog = env->prog;
19225 	bool seen_direct_write;
19226 	void *xdp_kfunc;
19227 	bool is_rdonly;
19228 
19229 	if (bpf_dev_bound_kfunc_id(func_id)) {
19230 		xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
19231 		if (xdp_kfunc) {
19232 			*addr = (unsigned long)xdp_kfunc;
19233 			return;
19234 		}
19235 		/* fall back to the default kfunc when not supported by the netdev */
19236 	}
19237 
19238 	if (offset)
19239 		return;
19240 
19241 	if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
19242 		seen_direct_write = env->seen_direct_write;
19243 		is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);
19244 
19245 		if (is_rdonly)
19246 			*addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
19247 
19248 		/* restore env->seen_direct_write to its original value, since
19249 		 * may_access_direct_pkt_data mutates it
19250 		 */
19251 		env->seen_direct_write = seen_direct_write;
19252 	}
19253 }
19254 
19255 static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
19256 					    u16 struct_meta_reg,
19257 					    u16 node_offset_reg,
19258 					    struct bpf_insn *insn,
19259 					    struct bpf_insn *insn_buf,
19260 					    int *cnt)
19261 {
19262 	struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta;
19263 	struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) };
19264 
19265 	insn_buf[0] = addr[0];
19266 	insn_buf[1] = addr[1];
19267 	insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off);
19268 	insn_buf[3] = *insn;
19269 	*cnt = 4;
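	/* Resulting patch, as a sketch (the register numbers are chosen
	 * by the caller): struct_meta_reg = &kptr_struct_meta (ld_imm64,
	 * two insns); node_offset_reg = insert_off; then the original
	 * kfunc call.
	 */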
19270 }
19271 
19272 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
19273 			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
19274 {
19275 	const struct bpf_kfunc_desc *desc;
19276 
19277 	if (!insn->imm) {
19278 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
19279 		return -EINVAL;
19280 	}
19281 
19282 	*cnt = 0;
19283 
19284 	/* insn->imm has the btf func_id. Replace it with an offset relative to
19285 	 * __bpf_call_base, unless the JIT needs to call functions that are
19286 	 * further than 32 bits away (bpf_jit_supports_far_kfunc_call()).
19287 	 */
19288 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
19289 	if (!desc) {
19290 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
19291 			insn->imm);
19292 		return -EFAULT;
19293 	}
19294 
19295 	if (!bpf_jit_supports_far_kfunc_call())
19296 		insn->imm = BPF_CALL_IMM(desc->addr);
19297 	if (insn->off)
19298 		return 0;
19299 	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
19300 	    desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
19301 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
19302 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
19303 		u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
19304 
19305 		if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) {
19306 			verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n",
19307 				insn_idx);
19308 			return -EFAULT;
19309 		}
19310 
19311 		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
19312 		insn_buf[1] = addr[0];
19313 		insn_buf[2] = addr[1];
19314 		insn_buf[3] = *insn;
19315 		*cnt = 4;
19316 	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
19317 		   desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] ||
19318 		   desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
19319 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
19320 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
19321 
19322 		if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) {
19323 			verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n",
19324 				insn_idx);
19325 			return -EFAULT;
19326 		}
19327 
19328 		if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
19329 		    !kptr_struct_meta) {
19330 			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
19331 				insn_idx);
19332 			return -EFAULT;
19333 		}
19334 
19335 		insn_buf[0] = addr[0];
19336 		insn_buf[1] = addr[1];
19337 		insn_buf[2] = *insn;
19338 		*cnt = 3;
19339 	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
19340 		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
19341 		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
19342 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
19343 		int struct_meta_reg = BPF_REG_3;
19344 		int node_offset_reg = BPF_REG_4;
19345 
19346 		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
19347 		if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
19348 			struct_meta_reg = BPF_REG_4;
19349 			node_offset_reg = BPF_REG_5;
19350 		}
19351 
19352 		if (!kptr_struct_meta) {
19353 			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
19354 				insn_idx);
19355 			return -EFAULT;
19356 		}
19357 
19358 		__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
19359 						node_offset_reg, insn, insn_buf, cnt);
19360 	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
19361 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
19362 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
19363 		*cnt = 1;
19364 	}
19365 	return 0;
19366 }
19367 
19368 /* The function requires that the first instruction in 'patch' is insnsi[prog->len - 1] */
19369 static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len)
19370 {
19371 	struct bpf_subprog_info *info = env->subprog_info;
19372 	int cnt = env->subprog_cnt;
19373 	struct bpf_prog *prog;
19374 
19375 	/* We only reserve one slot for hidden subprogs in subprog_info. */
19376 	if (env->hidden_subprog_cnt) {
19377 		verbose(env, "verifier internal error: only one hidden subprog supported\n");
19378 		return -EFAULT;
19379 	}
19380 	/* We're not patching any existing instruction, just appending the new
19381 	 * ones for the hidden subprog. Hence all of the adjustment operations
19382 	 * in bpf_patch_insn_data are no-ops.
19383 	 */
19384 	prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len);
19385 	if (!prog)
19386 		return -ENOMEM;
19387 	env->prog = prog;
19388 	info[cnt + 1].start = info[cnt].start;
19389 	info[cnt].start = prog->len - len + 1;
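	/* Illustrative numbers: with an old prog->len of 100 and a 3-insn
	 * patch (whose first insn re-emits old insn 99), the new len is
	 * 102 and the hidden subprog starts at 102 - 3 + 1 = 100, right
	 * after the original program.
	 */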
19390 	env->subprog_cnt++;
19391 	env->hidden_subprog_cnt++;
19392 	return 0;
19393 }
19394 
19395 /* Do various post-verification rewrites in a single program pass.
19396  * These rewrites simplify JIT and interpreter implementations.
19397  */
19398 static int do_misc_fixups(struct bpf_verifier_env *env)
19399 {
19400 	struct bpf_prog *prog = env->prog;
19401 	enum bpf_attach_type eatype = prog->expected_attach_type;
19402 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
19403 	struct bpf_insn *insn = prog->insnsi;
19404 	const struct bpf_func_proto *fn;
19405 	const int insn_cnt = prog->len;
19406 	const struct bpf_map_ops *ops;
19407 	struct bpf_insn_aux_data *aux;
19408 	struct bpf_insn insn_buf[16];
19409 	struct bpf_prog *new_prog;
19410 	struct bpf_map *map_ptr;
19411 	int i, ret, cnt, delta = 0;
19412 
19413 	if (env->seen_exception && !env->exception_callback_subprog) {
19414 		struct bpf_insn patch[] = {
19415 			env->prog->insnsi[insn_cnt - 1],
19416 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
19417 			BPF_EXIT_INSN(),
19418 		};
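		/* The appended insns form the default exception callback:
		 * r0 = r1; exit; i.e. it simply returns the single scalar
		 * value passed to it. Descriptive sketch of the intent.
		 */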
19419 
19420 		ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch));
19421 		if (ret < 0)
19422 			return ret;
19423 		prog = env->prog;
19424 		insn = prog->insnsi;
19425 
19426 		env->exception_callback_subprog = env->subprog_cnt - 1;
19427 		/* Don't update insn_cnt, as add_hidden_subprog always appends insns */
19428 		mark_subprog_exc_cb(env, env->exception_callback_subprog);
19429 	}
19430 
19431 	for (i = 0; i < insn_cnt; i++, insn++) {
19432 		/* Make divide-by-zero exceptions impossible. */
19433 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
19434 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
19435 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
19436 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
19437 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
19438 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
19439 			struct bpf_insn *patchlet;
19440 			struct bpf_insn chk_and_div[] = {
19441 				/* [R,W]x div 0 -> 0 */
19442 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
19443 					     BPF_JNE | BPF_K, insn->src_reg,
19444 					     0, 2, 0),
19445 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
19446 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
19447 				*insn,
19448 			};
19449 			struct bpf_insn chk_and_mod[] = {
19450 				/* [R,W]x mod 0 -> [R,W]x */
19451 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
19452 					     BPF_JEQ | BPF_K, insn->src_reg,
19453 					     0, 1 + (is64 ? 0 : 1), 0),
19454 				*insn,
19455 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
19456 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
19457 			};
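			/* Illustrative runtime behaviour of the two patchlets,
			 * with Rx = dst_reg and Ry = src_reg (sketch only):
			 *   div: if (Ry == 0) { Rx = 0; skip the div; }
			 *   mod: if (Ry == 0) skip the mod (Rx unchanged,
			 *        zero-extended in the 32-bit case);
			 * so a zero divisor can no longer trap at runtime.
			 */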
19458 
19459 			patchlet = isdiv ? chk_and_div : chk_and_mod;
19460 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
19461 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
19462 
19463 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
19464 			if (!new_prog)
19465 				return -ENOMEM;
19466 
19467 			delta    += cnt - 1;
19468 			env->prog = prog = new_prog;
19469 			insn      = new_prog->insnsi + i + delta;
19470 			continue;
19471 		}
19472 
19473 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
19474 		if (BPF_CLASS(insn->code) == BPF_LD &&
19475 		    (BPF_MODE(insn->code) == BPF_ABS ||
19476 		     BPF_MODE(insn->code) == BPF_IND)) {
19477 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
19478 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
19479 				verbose(env, "bpf verifier is misconfigured\n");
19480 				return -EINVAL;
19481 			}
19482 
19483 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19484 			if (!new_prog)
19485 				return -ENOMEM;
19486 
19487 			delta    += cnt - 1;
19488 			env->prog = prog = new_prog;
19489 			insn      = new_prog->insnsi + i + delta;
19490 			continue;
19491 		}
19492 
19493 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
19494 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
19495 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
19496 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
19497 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
19498 			struct bpf_insn *patch = &insn_buf[0];
19499 			bool issrc, isneg, isimm;
19500 			u32 off_reg;
19501 
19502 			aux = &env->insn_aux_data[i + delta];
19503 			if (!aux->alu_state ||
19504 			    aux->alu_state == BPF_ALU_NON_POINTER)
19505 				continue;
19506 
19507 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
19508 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
19509 				BPF_ALU_SANITIZE_SRC;
19510 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
19511 
19512 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
19513 			if (isimm) {
19514 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
19515 			} else {
19516 				if (isneg)
19517 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
19518 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
19519 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
19520 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
19521 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
19522 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
19523 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
19524 			}
19525 			if (!issrc)
19526 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
19527 			insn->src_reg = BPF_REG_AX;
19528 			if (isneg)
19529 				insn->code = insn->code == code_add ?
19530 					     code_sub : code_add;
19531 			*patch++ = *insn;
19532 			if (issrc && isneg && !isimm)
19533 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
19534 			cnt = patch - insn_buf;
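			/* The masking sequence above computes, in effect
			 * (sketch for the non-immediate case):
			 *   AX = alu_limit - off;
			 *   AX |= off;     // sign bit set iff off is out of range
			 *   AX = -AX;
			 *   AX s>>= 63;    // all-ones or zero mask
			 *   AX &= off;     // off, or 0 when out of range
			 * so even a misspeculated offset stays within the
			 * verified bounds.
			 */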
19535 
19536 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19537 			if (!new_prog)
19538 				return -ENOMEM;
19539 
19540 			delta    += cnt - 1;
19541 			env->prog = prog = new_prog;
19542 			insn      = new_prog->insnsi + i + delta;
19543 			continue;
19544 		}
19545 
19546 		if (insn->code != (BPF_JMP | BPF_CALL))
19547 			continue;
19548 		if (insn->src_reg == BPF_PSEUDO_CALL)
19549 			continue;
19550 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
19551 			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
19552 			if (ret)
19553 				return ret;
19554 			if (cnt == 0)
19555 				continue;
19556 
19557 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19558 			if (!new_prog)
19559 				return -ENOMEM;
19560 
19561 			delta	 += cnt - 1;
19562 			env->prog = prog = new_prog;
19563 			insn	  = new_prog->insnsi + i + delta;
19564 			continue;
19565 		}
19566 
19567 		if (insn->imm == BPF_FUNC_get_route_realm)
19568 			prog->dst_needed = 1;
19569 		if (insn->imm == BPF_FUNC_get_prandom_u32)
19570 			bpf_user_rnd_init_once();
19571 		if (insn->imm == BPF_FUNC_override_return)
19572 			prog->kprobe_override = 1;
19573 		if (insn->imm == BPF_FUNC_tail_call) {
19574 			/* If we tail call into other programs, we
19575 			 * cannot make any assumptions since they can
19576 			 * be replaced dynamically during runtime in
19577 			 * the program array.
19578 			 */
19579 			prog->cb_access = 1;
19580 			if (!allow_tail_call_in_subprogs(env))
19581 				prog->aux->stack_depth = MAX_BPF_STACK;
19582 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
19583 
19584 			/* mark bpf_tail_call as a different opcode to avoid a
19585 			 * conditional branch in the interpreter for every normal
19586 			 * call and to prevent accidental JITing by a JIT compiler
19587 			 * that doesn't support bpf_tail_call yet
19588 			 */
19589 			insn->imm = 0;
19590 			insn->code = BPF_JMP | BPF_TAIL_CALL;
19591 
19592 			aux = &env->insn_aux_data[i + delta];
19593 			if (env->bpf_capable && !prog->blinding_requested &&
19594 			    prog->jit_requested &&
19595 			    !bpf_map_key_poisoned(aux) &&
19596 			    !bpf_map_ptr_poisoned(aux) &&
19597 			    !bpf_map_ptr_unpriv(aux)) {
19598 				struct bpf_jit_poke_descriptor desc = {
19599 					.reason = BPF_POKE_REASON_TAIL_CALL,
19600 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
19601 					.tail_call.key = bpf_map_key_immediate(aux),
19602 					.insn_idx = i + delta,
19603 				};
19604 
19605 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
19606 				if (ret < 0) {
19607 					verbose(env, "adding tail call poke descriptor failed\n");
19608 					return ret;
19609 				}
19610 
19611 				insn->imm = ret + 1;
19612 				continue;
19613 			}
19614 
19615 			if (!bpf_map_ptr_unpriv(aux))
19616 				continue;
19617 
19618 			/* instead of changing every JIT that deals with tail_call,
19619 			 * emit two extra insns:
19620 			 * if (index >= max_entries) goto out;
19621 			 * index &= array->index_mask;
19622 			 * to avoid out-of-bounds CPU speculation
19623 			 */
19624 			if (bpf_map_ptr_poisoned(aux)) {
19625 				verbose(env, "tail_call abusing map_ptr\n");
19626 				return -EINVAL;
19627 			}
19628 
19629 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
19630 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
19631 						  map_ptr->max_entries, 2);
19632 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
19633 						    container_of(map_ptr,
19634 								 struct bpf_array,
19635 								 map)->index_mask);
19636 			insn_buf[2] = *insn;
19637 			cnt = 3;
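			/* Illustrative result, with hypothetical max_entries = 4
			 * (index_mask = 3):
			 *   if (r3 >= 4) goto out;   // skip the tail call
			 *   r3 &= 3;
			 *   tail_call(...);
			 */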
19638 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19639 			if (!new_prog)
19640 				return -ENOMEM;
19641 
19642 			delta    += cnt - 1;
19643 			env->prog = prog = new_prog;
19644 			insn      = new_prog->insnsi + i + delta;
19645 			continue;
19646 		}
19647 
19648 		if (insn->imm == BPF_FUNC_timer_set_callback) {
19649 			/* The verifier will process callback_fn as many times as necessary
19650 			 * with different maps, and the register states prepared by
19651 			 * set_timer_callback_state will be accurate.
19652 			 *
19653 			 * The following use case is valid:
19654 			 *   map1 is shared by prog1, prog2, prog3.
19655 			 *   prog1 calls bpf_timer_init for some map1 elements
19656 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
19657 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
19658 			 *   prog3 calls bpf_timer_start for some map1 elements.
19659 			 *     Those that were not both bpf_timer_init-ed and
19660 			 *     bpf_timer_set_callback-ed will return -EINVAL.
19661 			 */
19662 			struct bpf_insn ld_addrs[2] = {
19663 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
19664 			};
19665 
19666 			insn_buf[0] = ld_addrs[0];
19667 			insn_buf[1] = ld_addrs[1];
19668 			insn_buf[2] = *insn;
19669 			cnt = 3;
19670 
19671 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19672 			if (!new_prog)
19673 				return -ENOMEM;
19674 
19675 			delta    += cnt - 1;
19676 			env->prog = prog = new_prog;
19677 			insn      = new_prog->insnsi + i + delta;
19678 			goto patch_call_imm;
19679 		}
19680 
19681 		if (is_storage_get_function(insn->imm)) {
19682 			if (!in_sleepable(env) ||
19683 			    env->insn_aux_data[i + delta].storage_get_func_atomic)
19684 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
19685 			else
19686 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
19687 			insn_buf[1] = *insn;
19688 			cnt = 2;
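			/* Descriptive note: r5 here is the hidden gfp_flags
			 * argument of the in-kernel storage get implementations;
			 * sleepable progs may block, hence GFP_KERNEL, otherwise
			 * GFP_ATOMIC is forced.
			 */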
19689 
19690 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19691 			if (!new_prog)
19692 				return -ENOMEM;
19693 
19694 			delta += cnt - 1;
19695 			env->prog = prog = new_prog;
19696 			insn = new_prog->insnsi + i + delta;
19697 			goto patch_call_imm;
19698 		}
19699 
19700 		/* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */
19701 		if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) {
19702 			/* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data,
19703 			 * bpf_mem_alloc() returns a ptr to the percpu data ptr.
19704 			 */
19705 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
19706 			insn_buf[1] = *insn;
19707 			cnt = 2;
19708 
19709 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19710 			if (!new_prog)
19711 				return -ENOMEM;
19712 
19713 			delta += cnt - 1;
19714 			env->prog = prog = new_prog;
19715 			insn = new_prog->insnsi + i + delta;
19716 			goto patch_call_imm;
19717 		}
19718 
19719 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
19720 		 * and other inlining handlers are currently limited to 64-bit
19721 		 * only.
19722 		 */
19723 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
19724 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
19725 		     insn->imm == BPF_FUNC_map_update_elem ||
19726 		     insn->imm == BPF_FUNC_map_delete_elem ||
19727 		     insn->imm == BPF_FUNC_map_push_elem   ||
19728 		     insn->imm == BPF_FUNC_map_pop_elem    ||
19729 		     insn->imm == BPF_FUNC_map_peek_elem   ||
19730 		     insn->imm == BPF_FUNC_redirect_map    ||
19731 		     insn->imm == BPF_FUNC_for_each_map_elem ||
19732 		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
19733 			aux = &env->insn_aux_data[i + delta];
19734 			if (bpf_map_ptr_poisoned(aux))
19735 				goto patch_call_imm;
19736 
19737 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
19738 			ops = map_ptr->ops;
19739 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
19740 			    ops->map_gen_lookup) {
19741 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
19742 				if (cnt == -EOPNOTSUPP)
19743 					goto patch_map_ops_generic;
19744 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
19745 					verbose(env, "bpf verifier is misconfigured\n");
19746 					return -EINVAL;
19747 				}
19748 
19749 				new_prog = bpf_patch_insn_data(env, i + delta,
19750 							       insn_buf, cnt);
19751 				if (!new_prog)
19752 					return -ENOMEM;
19753 
19754 				delta    += cnt - 1;
19755 				env->prog = prog = new_prog;
19756 				insn      = new_prog->insnsi + i + delta;
19757 				continue;
19758 			}
19759 
19760 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
19761 				     (void *(*)(struct bpf_map *map, void *key))NULL));
19762 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
19763 				     (long (*)(struct bpf_map *map, void *key))NULL));
19764 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
19765 				     (long (*)(struct bpf_map *map, void *key, void *value,
19766 					      u64 flags))NULL));
19767 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
19768 				     (long (*)(struct bpf_map *map, void *value,
19769 					      u64 flags))NULL));
19770 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
19771 				     (long (*)(struct bpf_map *map, void *value))NULL));
19772 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
19773 				     (long (*)(struct bpf_map *map, void *value))NULL));
19774 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
19775 				     (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
19776 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
19777 				     (long (*)(struct bpf_map *map,
19778 					      bpf_callback_t callback_fn,
19779 					      void *callback_ctx,
19780 					      u64 flags))NULL));
19781 			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
19782 				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
19783 
19784 patch_map_ops_generic:
19785 			switch (insn->imm) {
19786 			case BPF_FUNC_map_lookup_elem:
19787 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
19788 				continue;
19789 			case BPF_FUNC_map_update_elem:
19790 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
19791 				continue;
19792 			case BPF_FUNC_map_delete_elem:
19793 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
19794 				continue;
19795 			case BPF_FUNC_map_push_elem:
19796 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
19797 				continue;
19798 			case BPF_FUNC_map_pop_elem:
19799 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
19800 				continue;
19801 			case BPF_FUNC_map_peek_elem:
19802 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
19803 				continue;
19804 			case BPF_FUNC_redirect_map:
19805 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
19806 				continue;
19807 			case BPF_FUNC_for_each_map_elem:
19808 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
19809 				continue;
19810 			case BPF_FUNC_map_lookup_percpu_elem:
19811 				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
19812 				continue;
19813 			}
19814 
19815 			goto patch_call_imm;
19816 		}
19817 
19818 		/* Implement bpf_jiffies64 inline. */
19819 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
19820 		    insn->imm == BPF_FUNC_jiffies64) {
19821 			struct bpf_insn ld_jiffies_addr[2] = {
19822 				BPF_LD_IMM64(BPF_REG_0,
19823 					     (unsigned long)&jiffies),
19824 			};
19825 
19826 			insn_buf[0] = ld_jiffies_addr[0];
19827 			insn_buf[1] = ld_jiffies_addr[1];
19828 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
19829 						  BPF_REG_0, 0);
19830 			cnt = 3;
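			/* Resulting sequence, roughly (sketch only):
			 *   r0 = &jiffies;          // ld_imm64, two insns
			 *   r0 = *(u64 *)(r0 + 0);
			 * avoiding a helper call on 64-bit JITs.
			 */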
19831 
19832 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
19833 						       cnt);
19834 			if (!new_prog)
19835 				return -ENOMEM;
19836 
19837 			delta    += cnt - 1;
19838 			env->prog = prog = new_prog;
19839 			insn      = new_prog->insnsi + i + delta;
19840 			continue;
19841 		}
19842 
19843 		/* Implement bpf_get_func_arg inline. */
19844 		if (prog_type == BPF_PROG_TYPE_TRACING &&
19845 		    insn->imm == BPF_FUNC_get_func_arg) {
19846 			/* Load nr_args from ctx - 8 */
19847 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
19848 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
19849 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
19850 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
19851 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
19852 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
19853 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
19854 			insn_buf[7] = BPF_JMP_A(1);
19855 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
19856 			cnt = 9;
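			/* Rough C equivalent of the 9-insn patch above (sketch,
			 * with arg_n in r2 and the value pointer in r3):
			 *   nr_args = *(u64 *)(ctx - 8);
			 *   if (arg_n < nr_args) {
			 *       *value = ((u64 *)ctx)[arg_n];
			 *       return 0;
			 *   }
			 *   return -EINVAL;
			 */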
19857 
19858 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19859 			if (!new_prog)
19860 				return -ENOMEM;
19861 
19862 			delta    += cnt - 1;
19863 			env->prog = prog = new_prog;
19864 			insn      = new_prog->insnsi + i + delta;
19865 			continue;
19866 		}
19867 
19868 		/* Implement bpf_get_func_ret inline. */
19869 		if (prog_type == BPF_PROG_TYPE_TRACING &&
19870 		    insn->imm == BPF_FUNC_get_func_ret) {
19871 			if (eatype == BPF_TRACE_FEXIT ||
19872 			    eatype == BPF_MODIFY_RETURN) {
19873 				/* Load nr_args from ctx - 8 */
19874 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
19875 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
19876 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
19877 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
19878 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
19879 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
19880 				cnt = 6;
19881 			} else {
19882 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
19883 				cnt = 1;
19884 			}
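			/* Descriptive note: for fexit/fmod_ret the traced
			 * function's return value sits at ctx[nr_args], which the
			 * 6-insn patch copies to the pointer passed in r2; other
			 * attach types simply get -EOPNOTSUPP.
			 */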
19885 
19886 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19887 			if (!new_prog)
19888 				return -ENOMEM;
19889 
19890 			delta    += cnt - 1;
19891 			env->prog = prog = new_prog;
19892 			insn      = new_prog->insnsi + i + delta;
19893 			continue;
19894 		}
19895 
19896 		/* Implement get_func_arg_cnt inline. */
19897 		if (prog_type == BPF_PROG_TYPE_TRACING &&
19898 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
19899 			/* Load nr_args from ctx - 8 */
19900 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
19901 
19902 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
19903 			if (!new_prog)
19904 				return -ENOMEM;
19905 
19906 			env->prog = prog = new_prog;
19907 			insn      = new_prog->insnsi + i + delta;
19908 			continue;
19909 		}
19910 
19911 		/* Implement bpf_get_func_ip inline. */
19912 		if (prog_type == BPF_PROG_TYPE_TRACING &&
19913 		    insn->imm == BPF_FUNC_get_func_ip) {
19914 			/* Load IP address from ctx - 16 */
19915 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
19916 
19917 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
19918 			if (!new_prog)
19919 				return -ENOMEM;
19920 
19921 			env->prog = prog = new_prog;
19922 			insn      = new_prog->insnsi + i + delta;
19923 			continue;
19924 		}
19925 
19926 		/* Implement bpf_kptr_xchg inline */
19927 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
19928 		    insn->imm == BPF_FUNC_kptr_xchg &&
19929 		    bpf_jit_supports_ptr_xchg()) {
19930 			insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
19931 			insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
19932 			cnt = 2;
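			/* Inline form, roughly (sketch only):
			 *   r0 = r2;
			 *   r0 = xchg(*(u64 *)(r1 + 0), r0);
			 * i.e. the helper call is replaced by a mov plus one
			 * atomic insn.
			 */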
19933 
19934 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
19935 			if (!new_prog)
19936 				return -ENOMEM;
19937 
19938 			delta    += cnt - 1;
19939 			env->prog = prog = new_prog;
19940 			insn      = new_prog->insnsi + i + delta;
19941 			continue;
19942 		}
19943 patch_call_imm:
19944 		fn = env->ops->get_func_proto(insn->imm, env->prog);
19945 		/* all functions that have a prototype and that the verifier
19946 		 * allowed programs to call must be real in-kernel functions
19947 		 */
19948 		if (!fn->func) {
19949 			verbose(env,
19950 				"kernel subsystem misconfigured func %s#%d\n",
19951 				func_id_name(insn->imm), insn->imm);
19952 			return -EFAULT;
19953 		}
19954 		insn->imm = fn->func - __bpf_call_base;
19955 	}
19956 
19957 	/* Since poke tab is now finalized, publish aux to tracker. */
19958 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
19959 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
19960 		if (!map_ptr->ops->map_poke_track ||
19961 		    !map_ptr->ops->map_poke_untrack ||
19962 		    !map_ptr->ops->map_poke_run) {
19963 			verbose(env, "bpf verifier is misconfigured\n");
19964 			return -EINVAL;
19965 		}
19966 
19967 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
19968 		if (ret < 0) {
19969 			verbose(env, "tracking tail call prog failed\n");
19970 			return ret;
19971 		}
19972 	}
19973 
19974 	sort_kfunc_descs_by_imm_off(env->prog);
19975 
19976 	return 0;
19977 }
19978 
19979 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
19980 					int position,
19981 					s32 stack_base,
19982 					u32 callback_subprogno,
19983 					u32 *cnt)
19984 {
19985 	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
19986 	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
19987 	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
19988 	int reg_loop_max = BPF_REG_6;
19989 	int reg_loop_cnt = BPF_REG_7;
19990 	int reg_loop_ctx = BPF_REG_8;
19991 
19992 	struct bpf_prog *new_prog;
19993 	u32 callback_start;
19994 	u32 call_insn_offset;
19995 	s32 callback_offset;
19996 
19997 	/* This represents an inlined version of bpf_iter.c:bpf_loop,
19998 	 * take care to keep this code in sync with it.
19999 	 */
20000 	struct bpf_insn insn_buf[] = {
20001 		/* Return error and jump to the end of the patch if
20002 		 * the expected number of iterations is too big.
20003 		 */
20004 		BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
20005 		BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
20006 		BPF_JMP_IMM(BPF_JA, 0, 0, 16),
20007 		/* spill R6, R7, R8 to use these as loop vars */
20008 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
20009 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
20010 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
20011 		/* initialize loop vars */
20012 		BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
20013 		BPF_MOV32_IMM(reg_loop_cnt, 0),
20014 		BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
20015 		/* loop header,
20016 		 * if reg_loop_cnt >= reg_loop_max skip the loop body
20017 		 */
20018 		BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
20019 		/* callback call,
20020 		 * the correct callback offset will be set after patching
20021 		 */
20022 		BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
20023 		BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
20024 		BPF_CALL_REL(0),
20025 		/* increment loop counter */
20026 		BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
20027 		/* jump to loop header if callback returned 0 */
20028 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
20029 		/* return value of bpf_loop,
20030 		 * set R0 to the number of iterations
20031 		 */
20032 		BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
20033 		/* restore original values of R6, R7, R8 */
20034 		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
20035 		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
20036 		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
20037 	};
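	/* Rough C equivalent of the patch above, with nr_loops in R1 and
	 * the callback context in R3 (sketch only):
	 *   if (nr_loops > BPF_MAX_LOOPS)
	 *       return -E2BIG;
	 *   for (i = 0; i < nr_loops;) {
	 *       ret = callback_fn(i, ctx);
	 *       i++;
	 *       if (ret)
	 *           break;
	 *   }
	 *   return i;
	 * The real patch additionally spills and restores R6-R8.
	 */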
20038 
20039 	*cnt = ARRAY_SIZE(insn_buf);
20040 	new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
20041 	if (!new_prog)
20042 		return new_prog;
20043 
20044 	/* callback start is known only after patching */
20045 	callback_start = env->subprog_info[callback_subprogno].start;
20046 	/* Note: insn_buf[12] is the offset of the BPF_CALL_REL instruction */
20047 	call_insn_offset = position + 12;
20048 	callback_offset = callback_start - call_insn_offset - 1;
20049 	new_prog->insnsi[call_insn_offset].imm = callback_offset;
20050 
20051 	return new_prog;
20052 }
20053 
20054 static bool is_bpf_loop_call(struct bpf_insn *insn)
20055 {
20056 	return insn->code == (BPF_JMP | BPF_CALL) &&
20057 		insn->src_reg == 0 &&
20058 		insn->imm == BPF_FUNC_loop;
20059 }
20060 
20061 /* For all sub-programs in the program (including main) check
20062  * insn_aux_data to see if there are bpf_loop calls that require
20063  * inlining. If such calls are found, they are replaced with the
20064  * sequence of instructions produced by the `inline_bpf_loop` function
20065  * and the subprog stack_depth is increased by the size of 3 registers.
20066  * This stack space is used to spill the values of R6, R7 and R8. These
20067  * registers are used to store the loop bound, counter and context
20068  * variables.
20069  */
20070 static int optimize_bpf_loop(struct bpf_verifier_env *env)
20071 {
20072 	struct bpf_subprog_info *subprogs = env->subprog_info;
20073 	int i, cur_subprog = 0, cnt, delta = 0;
20074 	struct bpf_insn *insn = env->prog->insnsi;
20075 	int insn_cnt = env->prog->len;
20076 	u16 stack_depth = subprogs[cur_subprog].stack_depth;
20077 	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
20078 	u16 stack_depth_extra = 0;
20079 
20080 	for (i = 0; i < insn_cnt; i++, insn++) {
20081 		struct bpf_loop_inline_state *inline_state =
20082 			&env->insn_aux_data[i + delta].loop_inline_state;
20083 
20084 		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
20085 			struct bpf_prog *new_prog;
20086 
20087 			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
20088 			new_prog = inline_bpf_loop(env,
20089 						   i + delta,
20090 						   -(stack_depth + stack_depth_extra),
20091 						   inline_state->callback_subprogno,
20092 						   &cnt);
20093 			if (!new_prog)
20094 				return -ENOMEM;
20095 
20096 			delta     += cnt - 1;
20097 			env->prog  = new_prog;
20098 			insn       = new_prog->insnsi + i + delta;
20099 		}
20100 
20101 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
20102 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
20103 			cur_subprog++;
20104 			stack_depth = subprogs[cur_subprog].stack_depth;
20105 			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
20106 			stack_depth_extra = 0;
20107 		}
20108 	}
20109 
20110 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
20111 
20112 	return 0;
20113 }
20114 
20115 static void free_states(struct bpf_verifier_env *env)
20116 {
20117 	struct bpf_verifier_state_list *sl, *sln;
20118 	int i;
20119 
20120 	sl = env->free_list;
20121 	while (sl) {
20122 		sln = sl->next;
20123 		free_verifier_state(&sl->state, false);
20124 		kfree(sl);
20125 		sl = sln;
20126 	}
20127 	env->free_list = NULL;
20128 
20129 	if (!env->explored_states)
20130 		return;
20131 
20132 	for (i = 0; i < state_htab_size(env); i++) {
20133 		sl = env->explored_states[i];
20134 
20135 		while (sl) {
20136 			sln = sl->next;
20137 			free_verifier_state(&sl->state, false);
20138 			kfree(sl);
20139 			sl = sln;
20140 		}
20141 		env->explored_states[i] = NULL;
20142 	}
20143 }
20144 
20145 static int do_check_common(struct bpf_verifier_env *env, int subprog)
20146 {
20147 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
20148 	struct bpf_subprog_info *sub = subprog_info(env, subprog);
20149 	struct bpf_verifier_state *state;
20150 	struct bpf_reg_state *regs;
20151 	int ret, i;
20152 
20153 	env->prev_linfo = NULL;
20154 	env->pass_cnt++;
20155 
20156 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
20157 	if (!state)
20158 		return -ENOMEM;
20159 	state->curframe = 0;
20160 	state->speculative = false;
20161 	state->branches = 1;
20162 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
20163 	if (!state->frame[0]) {
20164 		kfree(state);
20165 		return -ENOMEM;
20166 	}
20167 	env->cur_state = state;
20168 	init_func_state(env, state->frame[0],
20169 			BPF_MAIN_FUNC /* callsite */,
20170 			0 /* frameno */,
20171 			subprog);
20172 	state->first_insn_idx = env->subprog_info[subprog].start;
20173 	state->last_insn_idx = -1;
20174 
20175 	regs = state->frame[state->curframe]->regs;
20176 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
20177 		const char *sub_name = subprog_name(env, subprog);
20178 		struct bpf_subprog_arg_info *arg;
20179 		struct bpf_reg_state *reg;
20180 
20181 		verbose(env, "Validating %s() func#%d...\n", sub_name, subprog);
20182 		ret = btf_prepare_func_args(env, subprog);
20183 		if (ret)
20184 			goto out;
20185 
20186 		if (subprog_is_exc_cb(env, subprog)) {
20187 			state->frame[0]->in_exception_callback_fn = true;
20188 			/* We have already ensured that the callback returns an integer, just
20189 			 * like all global subprogs. We still need to ensure that it takes
20190 			 * only a single scalar argument.
20191 			 */
20192 			if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) {
20193 				verbose(env, "exception cb only supports single integer argument\n");
20194 				ret = -EINVAL;
20195 				goto out;
20196 			}
20197 		}
20198 		for (i = BPF_REG_1; i <= sub->arg_cnt; i++) {
20199 			arg = &sub->args[i - BPF_REG_1];
20200 			reg = &regs[i];
20201 
20202 			if (arg->arg_type == ARG_PTR_TO_CTX) {
20203 				reg->type = PTR_TO_CTX;
20204 				mark_reg_known_zero(env, regs, i);
20205 			} else if (arg->arg_type == ARG_ANYTHING) {
20206 				reg->type = SCALAR_VALUE;
20207 				mark_reg_unknown(env, regs, i);
20208 			} else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
20209 				/* assume a plain LOCAL dynptr type (no specialization) */
20210 				__mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen);
20211 			} else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) {
20212 				reg->type = PTR_TO_MEM;
20213 				if (arg->arg_type & PTR_MAYBE_NULL)
20214 					reg->type |= PTR_MAYBE_NULL;
20215 				mark_reg_known_zero(env, regs, i);
20216 				reg->mem_size = arg->mem_size;
20217 				reg->id = ++env->id_gen;
20218 			} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
20219 				reg->type = PTR_TO_BTF_ID;
20220 				if (arg->arg_type & PTR_MAYBE_NULL)
20221 					reg->type |= PTR_MAYBE_NULL;
20222 				if (arg->arg_type & PTR_UNTRUSTED)
20223 					reg->type |= PTR_UNTRUSTED;
20224 				if (arg->arg_type & PTR_TRUSTED)
20225 					reg->type |= PTR_TRUSTED;
20226 				mark_reg_known_zero(env, regs, i);
20227 				reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */
20228 				reg->btf_id = arg->btf_id;
20229 				reg->id = ++env->id_gen;
20230 			} else {
20231 				WARN_ONCE(1, "BUG: unhandled arg#%d type %d\n",
20232 					  i - BPF_REG_1, arg->arg_type);
20233 				ret = -EFAULT;
20234 				goto out;
20235 			}
20236 		}
20237 	} else {
20238 		/* if the main BPF program has associated BTF info, validate that
20239 		 * it matches the expected signature, and otherwise mark the BTF
20240 		 * info for the main program as unreliable
20241 		 */
20242 		if (env->prog->aux->func_info_aux) {
20243 			ret = btf_prepare_func_args(env, 0);
20244 			if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX)
20245 				env->prog->aux->func_info_aux[0].unreliable = true;
20246 		}
20247 
20248 		/* 1st arg to a function */
20249 		regs[BPF_REG_1].type = PTR_TO_CTX;
20250 		mark_reg_known_zero(env, regs, BPF_REG_1);
20251 	}
20252 
20253 	ret = do_check(env);
20254 out:
20255 	/* the check for NULL is necessary, since cur_state can be freed inside
20256 	 * do_check() under memory pressure.
20257 	 */
20258 	if (env->cur_state) {
20259 		free_verifier_state(env->cur_state, true);
20260 		env->cur_state = NULL;
20261 	}
20262 	while (!pop_stack(env, NULL, NULL, false));
20263 	if (!ret && pop_log)
20264 		bpf_vlog_reset(&env->log, 0);
20265 	free_states(env);
20266 	return ret;
20267 }
20268 
20269 /* Lazily verify all global functions based on their BTF, if they are called
20270  * from the main BPF program or any of its subprograms, transitively.
20271  * BPF global subprogs called from dead code are not validated.
20272  * All callable global functions must pass verification.
20273  * Otherwise the whole program is rejected.
20274  * Consider:
20275  * int bar(int);
20276  * int foo(int f)
20277  * {
20278  *    return bar(f);
20279  * }
20280  * int bar(int b)
20281  * {
20282  *    ...
20283  * }
20284  * foo() will be verified first for R1=any_scalar_value. During verification it
20285  * will be assumed that bar() already verified successfully and call to bar()
20286  * from foo() will be checked for type match only. Later bar() will be verified
20287  * independently to check that it's safe for R1=any_scalar_value.
20288  */
20289 static int do_check_subprogs(struct bpf_verifier_env *env)
20290 {
20291 	struct bpf_prog_aux *aux = env->prog->aux;
20292 	struct bpf_func_info_aux *sub_aux;
20293 	int i, ret, new_cnt;
20294 
20295 	if (!aux->func_info)
20296 		return 0;
20297 
20298 	/* exception callback is presumed to be always called */
20299 	if (env->exception_callback_subprog)
20300 		subprog_aux(env, env->exception_callback_subprog)->called = true;
20301 
20302 again:
20303 	new_cnt = 0;
20304 	for (i = 1; i < env->subprog_cnt; i++) {
20305 		if (!subprog_is_global(env, i))
20306 			continue;
20307 
20308 		sub_aux = subprog_aux(env, i);
20309 		if (!sub_aux->called || sub_aux->verified)
20310 			continue;
20311 
20312 		env->insn_idx = env->subprog_info[i].start;
20313 		WARN_ON_ONCE(env->insn_idx == 0);
20314 		ret = do_check_common(env, i);
20315 		if (ret) {
20316 			return ret;
20317 		} else if (env->log.level & BPF_LOG_LEVEL) {
20318 			verbose(env, "Func#%d ('%s') is safe for any args that match its prototype\n",
20319 				i, subprog_name(env, i));
20320 		}
20321 
20322 		/* We verified a new global subprog; it might have called some
20323 		 * more global subprogs that we haven't verified yet, so we
20324 		 * need to do another pass over subprogs to verify those.
20325 		 */
20326 		sub_aux->verified = true;
20327 		new_cnt++;
20328 	}
20329 
20330 	/* We can't loop forever as we verify at least one global subprog on
20331 	 * each pass.
20332 	 */
20333 	if (new_cnt)
20334 		goto again;
20335 
20336 	return 0;
20337 }
20338 
20339 static int do_check_main(struct bpf_verifier_env *env)
20340 {
20341 	int ret;
20342 
20343 	env->insn_idx = 0;
20344 	ret = do_check_common(env, 0);
20345 	if (!ret)
20346 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
20347 	return ret;
20348 }
20349 
20350 
20351 static void print_verification_stats(struct bpf_verifier_env *env)
20352 {
20353 	int i;
20354 
20355 	if (env->log.level & BPF_LOG_STATS) {
20356 		verbose(env, "verification time %lld usec\n",
20357 			div_u64(env->verification_time, 1000));
20358 		verbose(env, "stack depth ");
20359 		for (i = 0; i < env->subprog_cnt; i++) {
20360 			u32 depth = env->subprog_info[i].stack_depth;
20361 
20362 			verbose(env, "%d", depth);
20363 			if (i + 1 < env->subprog_cnt)
20364 				verbose(env, "+");
20365 		}
20366 		verbose(env, "\n");
20367 	}
20368 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
20369 		"total_states %d peak_states %d mark_read %d\n",
20370 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
20371 		env->max_states_per_insn, env->total_states,
20372 		env->peak_states, env->longest_mark_read_walk);
20373 }
20374 
20375 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
20376 {
20377 	const struct btf_type *t, *func_proto;
20378 	const struct bpf_struct_ops_desc *st_ops_desc;
20379 	const struct bpf_struct_ops *st_ops;
20380 	const struct btf_member *member;
20381 	struct bpf_prog *prog = env->prog;
20382 	u32 btf_id, member_idx;
20383 	struct btf *btf;
20384 	const char *mname;
20385 
20386 	if (!prog->gpl_compatible) {
20387 		verbose(env, "struct ops programs must have a GPL compatible license\n");
20388 		return -EINVAL;
20389 	}
20390 
20391 	if (!prog->aux->attach_btf_id)
20392 		return -ENOTSUPP;
20393 
20394 	btf = prog->aux->attach_btf;
20395 	if (btf_is_module(btf)) {
20396 		/* Make sure st_ops is valid through the lifetime of env */
20397 		env->attach_btf_mod = btf_try_get_module(btf);
20398 		if (!env->attach_btf_mod) {
20399 			verbose(env, "struct_ops module %s is not found\n",
20400 				btf_get_name(btf));
20401 			return -ENOTSUPP;
20402 		}
20403 	}
20404 
20405 	btf_id = prog->aux->attach_btf_id;
20406 	st_ops_desc = bpf_struct_ops_find(btf, btf_id);
20407 	if (!st_ops_desc) {
20408 		verbose(env, "attach_btf_id %u is not a supported struct\n",
20409 			btf_id);
20410 		return -ENOTSUPP;
20411 	}
20412 	st_ops = st_ops_desc->st_ops;
20413 
20414 	t = st_ops_desc->type;
20415 	member_idx = prog->expected_attach_type;
20416 	if (member_idx >= btf_type_vlen(t)) {
20417 		verbose(env, "attach to invalid member idx %u of struct %s\n",
20418 			member_idx, st_ops->name);
20419 		return -EINVAL;
20420 	}
20421 
20422 	member = &btf_type_member(t)[member_idx];
20423 	mname = btf_name_by_offset(btf, member->name_off);
20424 	func_proto = btf_type_resolve_func_ptr(btf, member->type,
20425 					       NULL);
20426 	if (!func_proto) {
20427 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
20428 			mname, member_idx, st_ops->name);
20429 		return -EINVAL;
20430 	}
20431 
20432 	if (st_ops->check_member) {
20433 		int err = st_ops->check_member(t, member, prog);
20434 
20435 		if (err) {
20436 			verbose(env, "attach to unsupported member %s of struct %s\n",
20437 				mname, st_ops->name);
20438 			return err;
20439 		}
20440 	}
20441 
20442 	/* btf_ctx_access() uses this to provide argument type info */
20443 	prog->aux->ctx_arg_info =
20444 		st_ops_desc->arg_info[member_idx].info;
20445 	prog->aux->ctx_arg_info_size =
20446 		st_ops_desc->arg_info[member_idx].cnt;
20447 
20448 	prog->aux->attach_func_proto = func_proto;
20449 	prog->aux->attach_func_name = mname;
20450 	env->ops = st_ops->verifier_ops;
20451 
20452 	return 0;
20453 }
20454 #define SECURITY_PREFIX "security_"
20455 
20456 static int check_attach_modify_return(unsigned long addr, const char *func_name)
20457 {
20458 	if (within_error_injection_list(addr) ||
20459 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
20460 		return 0;
20461 
20462 	return -EINVAL;
20463 }
20464 
20465 /* list of non-sleepable functions that are otherwise on
20466  * the ALLOW_ERROR_INJECTION list
20467  */
20468 BTF_SET_START(btf_non_sleepable_error_inject)
20469 /* The three functions below can be called from both sleepable and non-sleepable context.
20470  * Assume non-sleepable from the bpf safety point of view.
20471  */
20472 BTF_ID(func, __filemap_add_folio)
20473 BTF_ID(func, should_fail_alloc_page)
20474 BTF_ID(func, should_failslab)
20475 BTF_SET_END(btf_non_sleepable_error_inject)
20476 
20477 static int check_non_sleepable_error_inject(u32 btf_id)
20478 {
20479 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
20480 }
20481 
20482 int bpf_check_attach_target(struct bpf_verifier_log *log,
20483 			    const struct bpf_prog *prog,
20484 			    const struct bpf_prog *tgt_prog,
20485 			    u32 btf_id,
20486 			    struct bpf_attach_target_info *tgt_info)
20487 {
20488 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
20489 	bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING;
20490 	const char prefix[] = "btf_trace_";
20491 	int ret = 0, subprog = -1, i;
20492 	const struct btf_type *t;
20493 	bool conservative = true;
20494 	const char *tname;
20495 	struct btf *btf;
20496 	long addr = 0;
20497 	struct module *mod = NULL;
20498 
20499 	if (!btf_id) {
20500 		bpf_log(log, "Tracing programs must provide btf_id\n");
20501 		return -EINVAL;
20502 	}
20503 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
20504 	if (!btf) {
20505 		bpf_log(log,
20506 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
20507 		return -EINVAL;
20508 	}
20509 	t = btf_type_by_id(btf, btf_id);
20510 	if (!t) {
20511 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
20512 		return -EINVAL;
20513 	}
20514 	tname = btf_name_by_offset(btf, t->name_off);
20515 	if (!tname) {
20516 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
20517 		return -EINVAL;
20518 	}
20519 	if (tgt_prog) {
20520 		struct bpf_prog_aux *aux = tgt_prog->aux;
20521 
20522 		if (bpf_prog_is_dev_bound(prog->aux) &&
20523 		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
20524 			bpf_log(log, "Target program bound device mismatch\n");
20525 			return -EINVAL;
20526 		}
20527 
20528 		for (i = 0; i < aux->func_info_cnt; i++)
20529 			if (aux->func_info[i].type_id == btf_id) {
20530 				subprog = i;
20531 				break;
20532 			}
20533 		if (subprog == -1) {
20534 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
20535 			return -EINVAL;
20536 		}
20537 		if (aux->func && aux->func[subprog]->aux->exception_cb) {
20538 			bpf_log(log,
20539 				"%s programs cannot attach to exception callback\n",
20540 				prog_extension ? "Extension" : "FENTRY/FEXIT");
20541 			return -EINVAL;
20542 		}
20543 		conservative = aux->func_info_aux[subprog].unreliable;
20544 		if (prog_extension) {
20545 			if (conservative) {
20546 				bpf_log(log,
20547 					"Cannot replace static functions\n");
20548 				return -EINVAL;
20549 			}
20550 			if (!prog->jit_requested) {
20551 				bpf_log(log,
20552 					"Extension programs should be JITed\n");
20553 				return -EINVAL;
20554 			}
20555 		}
20556 		if (!tgt_prog->jited) {
20557 			bpf_log(log, "Can only attach to JITed progs\n");
20558 			return -EINVAL;
20559 		}
20560 		if (prog_tracing) {
20561 			if (aux->attach_tracing_prog) {
20562 				/*
20563 				 * Target program is an fentry/fexit which is already attached
20564 				 * to another tracing program. More levels of nesting
20565 				 * attachment are not allowed.
20566 				 */
20567 				bpf_log(log, "Cannot nest tracing program attach more than once\n");
20568 				return -EINVAL;
20569 			}
20570 		} else if (tgt_prog->type == prog->type) {
20571 			/*
20572 			 * To avoid potential call chain cycles, prevent attaching a
20573 			 * program extension to another extension. It's OK to attach
20574 			 * fentry/fexit to an extension program.
20575 			 */
20576 			bpf_log(log, "Cannot recursively attach\n");
20577 			return -EINVAL;
20578 		}
20579 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
20580 		    prog_extension &&
20581 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
20582 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
20583 			/* Program extensions can extend all program types except
20584 			 * fentry/fexit, for the following reason: fentry/fexit
20585 			 * programs are used for performance analysis and stats
20586 			 * and can be attached to any program type. When an
20587 			 * extension program replaces, say, an XDP function, it
20588 			 * must remain possible to analyze the performance of all
20589 			 * functions, i.e. both the original XDP program and its
20590 			 * extension. Hence attaching fentry/fexit to
20591 			 * BPF_PROG_TYPE_EXT is allowed. If extending fentry/fexit
20592 			 * were allowed, it would be possible to create a long
20593 			 * call chain fentry->extension->fentry->extension beyond
20594 			 * any reasonable stack size. Hence extending fentry/fexit
20595 			 * is not allowed.
20596 			 */
20597 			bpf_log(log, "Cannot extend fentry/fexit\n");
20598 			return -EINVAL;
20599 		}
20600 	} else {
20601 		if (prog_extension) {
20602 			bpf_log(log, "Cannot replace kernel functions\n");
20603 			return -EINVAL;
20604 		}
20605 	}
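
	/* Editorial summary (sketch, not part of the original source) of the
	 * attach-chain rules enforced above:
	 *
	 *	fentry/fexit attached to an extension		allowed
	 *	fentry/fexit attached to a tracing prog that is
	 *	itself attached to another prog			rejected (nesting)
	 *	extension attached to an extension		rejected (recursion)
	 *	extension attached to fentry/fexit		rejected (chain growth)
	 */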
20606 
20607 	switch (prog->expected_attach_type) {
20608 	case BPF_TRACE_RAW_TP:
20609 		if (tgt_prog) {
20610 			bpf_log(log,
20611 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
20612 			return -EINVAL;
20613 		}
20614 		if (!btf_type_is_typedef(t)) {
20615 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
20616 				btf_id);
20617 			return -EINVAL;
20618 		}
20619 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
20620 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
20621 				btf_id, tname);
20622 			return -EINVAL;
20623 		}
20624 		tname += sizeof(prefix) - 1;
20625 		t = btf_type_by_id(btf, t->type);
20626 		if (!btf_type_is_ptr(t))
20627 			/* should never happen in a valid vmlinux build */
20628 			return -EINVAL;
20629 		t = btf_type_by_id(btf, t->type);
20630 		if (!btf_type_is_func_proto(t))
20631 			/* should never happen in a valid vmlinux build */
20632 			return -EINVAL;
20633 
20634 		break;
20635 	case BPF_TRACE_ITER:
20636 		if (!btf_type_is_func(t)) {
20637 			bpf_log(log, "attach_btf_id %u is not a function\n",
20638 				btf_id);
20639 			return -EINVAL;
20640 		}
20641 		t = btf_type_by_id(btf, t->type);
20642 		if (!btf_type_is_func_proto(t))
20643 			return -EINVAL;
20644 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
20645 		if (ret)
20646 			return ret;
20647 		break;
20648 	default:
20649 		if (!prog_extension)
20650 			return -EINVAL;
20651 		fallthrough;
20652 	case BPF_MODIFY_RETURN:
20653 	case BPF_LSM_MAC:
20654 	case BPF_LSM_CGROUP:
20655 	case BPF_TRACE_FENTRY:
20656 	case BPF_TRACE_FEXIT:
20657 		if (!btf_type_is_func(t)) {
20658 			bpf_log(log, "attach_btf_id %u is not a function\n",
20659 				btf_id);
20660 			return -EINVAL;
20661 		}
20662 		if (prog_extension &&
20663 		    btf_check_type_match(log, prog, btf, t))
20664 			return -EINVAL;
20665 		t = btf_type_by_id(btf, t->type);
20666 		if (!btf_type_is_func_proto(t))
20667 			return -EINVAL;
20668 
20669 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
20670 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
20671 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
20672 			return -EINVAL;
20673 
20674 		if (tgt_prog && conservative)
20675 			t = NULL;
20676 
20677 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
20678 		if (ret < 0)
20679 			return ret;
20680 
20681 		if (tgt_prog) {
20682 			if (subprog == 0)
20683 				addr = (long) tgt_prog->bpf_func;
20684 			else
20685 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
20686 		} else {
20687 			if (btf_is_module(btf)) {
20688 				mod = btf_try_get_module(btf);
20689 				if (mod)
20690 					addr = find_kallsyms_symbol_value(mod, tname);
20691 				else
20692 					addr = 0;
20693 			} else {
20694 				addr = kallsyms_lookup_name(tname);
20695 			}
20696 			if (!addr) {
20697 				module_put(mod);
20698 				bpf_log(log,
20699 					"The address of function %s cannot be found\n",
20700 					tname);
20701 				return -ENOENT;
20702 			}
20703 		}
20704 
20705 		if (prog->aux->sleepable) {
20706 			ret = -EINVAL;
20707 			switch (prog->type) {
20708 			case BPF_PROG_TYPE_TRACING:
20709 
20710 				/* fentry/fexit/fmod_ret progs can be sleepable if attached to a
20711 				 * function on the ALLOW_ERROR_INJECTION list and not in the denylist.
20712 				 */
20713 				if (!check_non_sleepable_error_inject(btf_id) &&
20714 				    within_error_injection_list(addr))
20715 					ret = 0;
20716 				/* fentry/fexit/fmod_ret progs can also be sleepable if they are
20717 				 * in the fmodret id set with the KF_SLEEPABLE flag.
20718 				 */
20719 				else {
20720 					u32 *flags = btf_kfunc_is_modify_return(btf, btf_id,
20721 										prog);
20722 
20723 					if (flags && (*flags & KF_SLEEPABLE))
20724 						ret = 0;
20725 				}
20726 				break;
20727 			case BPF_PROG_TYPE_LSM:
20728 				/* LSM progs are verified to attach only to bpf_lsm_*() funcs,
20729 				 * and only some of those hooks are sleepable.
20730 				 */
20731 				if (bpf_lsm_is_sleepable_hook(btf_id))
20732 					ret = 0;
20733 				break;
20734 			default:
20735 				break;
20736 			}
20737 			if (ret) {
20738 				module_put(mod);
20739 				bpf_log(log, "%s is not sleepable\n", tname);
20740 				return ret;
20741 			}
20742 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
20743 			if (tgt_prog) {
20744 				module_put(mod);
20745 				bpf_log(log, "can't modify return codes of BPF programs\n");
20746 				return -EINVAL;
20747 			}
20748 			ret = -EINVAL;
20749 			if (btf_kfunc_is_modify_return(btf, btf_id, prog) ||
20750 			    !check_attach_modify_return(addr, tname))
20751 				ret = 0;
20752 			if (ret) {
20753 				module_put(mod);
20754 				bpf_log(log, "%s() is not modifiable\n", tname);
20755 				return ret;
20756 			}
20757 		}
20758 
20759 		break;
20760 	}
20761 	tgt_info->tgt_addr = addr;
20762 	tgt_info->tgt_name = tname;
20763 	tgt_info->tgt_type = t;
20764 	tgt_info->tgt_mod = mod;
20765 	return 0;
20766 }
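
/* Usage sketch (illustrative only; names mirror the caller below, but the
 * snippet itself is not part of the original source):
 *
 *	struct bpf_attach_target_info tgt_info = {};
 *	int err;
 *
 *	err = bpf_check_attach_target(&env->log, prog, tgt_prog,
 *				      prog->aux->attach_btf_id, &tgt_info);
 *	if (!err) {
 *		// tgt_info.tgt_addr/tgt_name/tgt_type describe the resolved
 *		// target; tgt_info.tgt_mod, if set, holds a module reference
 *		// that must eventually be dropped with module_put().
 *	}
 */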
20767 
20768 BTF_SET_START(btf_id_deny)
20769 BTF_ID_UNUSED
20770 #ifdef CONFIG_SMP
20771 BTF_ID(func, migrate_disable)
20772 BTF_ID(func, migrate_enable)
20773 #endif
20774 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
20775 BTF_ID(func, rcu_read_unlock_strict)
20776 #endif
20777 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
20778 BTF_ID(func, preempt_count_add)
20779 BTF_ID(func, preempt_count_sub)
20780 #endif
20781 #ifdef CONFIG_PREEMPT_RCU
20782 BTF_ID(func, __rcu_read_lock)
20783 BTF_ID(func, __rcu_read_unlock)
20784 #endif
20785 BTF_SET_END(btf_id_deny)
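
/* Editorial note: the functions above are assumed to be denied because they
 * run on the BPF program invocation path itself (e.g. the trampoline brackets
 * calls with migrate_disable()/migrate_enable()), so tracing them would risk
 * recursion.
 */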
20786 
20787 static bool can_be_sleepable(struct bpf_prog *prog)
20788 {
20789 	if (prog->type == BPF_PROG_TYPE_TRACING) {
20790 		switch (prog->expected_attach_type) {
20791 		case BPF_TRACE_FENTRY:
20792 		case BPF_TRACE_FEXIT:
20793 		case BPF_MODIFY_RETURN:
20794 		case BPF_TRACE_ITER:
20795 			return true;
20796 		default:
20797 			return false;
20798 		}
20799 	}
20800 	return prog->type == BPF_PROG_TYPE_LSM ||
20801 	       prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
20802 	       prog->type == BPF_PROG_TYPE_STRUCT_OPS;
20803 }
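
/* Example (illustrative, not from the original source): a
 * BPF_PROG_TYPE_TRACING prog loaded with BPF_F_SLEEPABLE and
 * expected_attach_type == BPF_TRACE_FENTRY passes this gate, while
 * e.g. BPF_TRACE_RAW_TP falls into the default: branch and is
 * rejected by the caller with -EINVAL.
 */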
20804 
20805 static int check_attach_btf_id(struct bpf_verifier_env *env)
20806 {
20807 	struct bpf_prog *prog = env->prog;
20808 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
20809 	struct bpf_attach_target_info tgt_info = {};
20810 	u32 btf_id = prog->aux->attach_btf_id;
20811 	struct bpf_trampoline *tr;
20812 	int ret;
20813 	u64 key;
20814 
20815 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
20816 		if (prog->aux->sleepable)
20817 			/* attach_btf_id has already been checked to be zero */
20818 			return 0;
20819 		verbose(env, "Syscall programs must be sleepable\n");
20820 		return -EINVAL;
20821 	}
20822 
20823 	if (prog->aux->sleepable && !can_be_sleepable(prog)) {
20824 		verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
20825 		return -EINVAL;
20826 	}
20827 
20828 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
20829 		return check_struct_ops_btf_id(env);
20830 
20831 	if (prog->type != BPF_PROG_TYPE_TRACING &&
20832 	    prog->type != BPF_PROG_TYPE_LSM &&
20833 	    prog->type != BPF_PROG_TYPE_EXT)
20834 		return 0;
20835 
20836 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
20837 	if (ret)
20838 		return ret;
20839 
20840 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
20841 		/* To make freplace programs equivalent to their targets, they
20842 		 * need to inherit env->ops and expected_attach_type for the
20843 		 * rest of verification.
20844 		 */
20845 		env->ops = bpf_verifier_ops[tgt_prog->type];
20846 		prog->expected_attach_type = tgt_prog->expected_attach_type;
20847 	}
20848 
20849 	/* store info about the attachment target that will be used later */
20850 	prog->aux->attach_func_proto = tgt_info.tgt_type;
20851 	prog->aux->attach_func_name = tgt_info.tgt_name;
20852 	prog->aux->mod = tgt_info.tgt_mod;
20853 
20854 	if (tgt_prog) {
20855 		prog->aux->saved_dst_prog_type = tgt_prog->type;
20856 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
20857 	}
20858 
20859 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
20860 		prog->aux->attach_btf_trace = true;
20861 		return 0;
20862 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
20863 		if (!bpf_iter_prog_supported(prog))
20864 			return -EINVAL;
20865 		return 0;
20866 	}
20867 
20868 	if (prog->type == BPF_PROG_TYPE_LSM) {
20869 		ret = bpf_lsm_verify_prog(&env->log, prog);
20870 		if (ret < 0)
20871 			return ret;
20872 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
20873 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
20874 		return -EINVAL;
20875 	}
20876 
20877 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
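	/* Editorial note: the key is assumed to pack the target identity
	 * (target prog id, or BTF object id when attaching to the kernel)
	 * into the upper 32 bits and btf_id into the lower 32 bits, so each
	 * (target, function) pair maps to a single trampoline.
	 */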
20878 	tr = bpf_trampoline_get(key, &tgt_info);
20879 	if (!tr)
20880 		return -ENOMEM;
20881 
20882 	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
20883 		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
20884 
20885 	prog->aux->dst_trampoline = tr;
20886 	return 0;
20887 }
20888 
20889 struct btf *bpf_get_btf_vmlinux(void)
20890 {
20891 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
20892 		mutex_lock(&bpf_verifier_lock);
20893 		if (!btf_vmlinux)
20894 			btf_vmlinux = btf_parse_vmlinux();
20895 		mutex_unlock(&bpf_verifier_lock);
20896 	}
20897 	return btf_vmlinux;
20898 }
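
/* Editorial note: the check-lock-recheck sequence above is the classic
 * double-checked locking pattern; re-testing btf_vmlinux while holding
 * bpf_verifier_lock ensures two racing callers cannot both parse the
 * vmlinux BTF.
 */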
20899 
20900 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
20901 {
20902 	u64 start_time = ktime_get_ns();
20903 	struct bpf_verifier_env *env;
20904 	int i, len, ret = -EINVAL, err;
20905 	u32 log_true_size;
20906 	bool is_priv;
20907 
20908 	/* if no prog types are registered, no program can be valid */
20909 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
20910 		return -EINVAL;
20911 
20912 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
20913 	 * allocate/free it every time bpf_check() is called
20914 	 */
20915 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
20916 	if (!env)
20917 		return -ENOMEM;
20918 
20919 	env->bt.env = env;
20920 
20921 	len = (*prog)->len;
20922 	env->insn_aux_data =
20923 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
20924 	ret = -ENOMEM;
20925 	if (!env->insn_aux_data)
20926 		goto err_free_env;
20927 	for (i = 0; i < len; i++)
20928 		env->insn_aux_data[i].orig_idx = i;
20929 	env->prog = *prog;
20930 	env->ops = bpf_verifier_ops[env->prog->type];
20931 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
20932 
20933 	env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
20934 	env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
20935 	env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
20936 	env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
20937 	env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);
20938 
20939 	bpf_get_btf_vmlinux();
20940 
20941 	/* grab the mutex to protect a few globals used by the verifier */
20942 	if (!is_priv)
20943 		mutex_lock(&bpf_verifier_lock);
20944 
20945 	/* user could have requested verbose verifier output
20946 	 * and supplied a buffer to store the verification trace
20947 	 */
20948 	ret = bpf_vlog_init(&env->log, attr->log_level,
20949 			    (char __user *) (unsigned long) attr->log_buf,
20950 			    attr->log_size);
20951 	if (ret)
20952 		goto err_unlock;
20953 
20954 	mark_verifier_state_clean(env);
20955 
20956 	if (IS_ERR(btf_vmlinux)) {
20957 		/* Either gcc, pahole, or the kernel is broken. */
20958 		verbose(env, "in-kernel BTF is malformed\n");
20959 		ret = PTR_ERR(btf_vmlinux);
20960 		goto skip_full_check;
20961 	}
20962 
20963 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
20964 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
20965 		env->strict_alignment = true;
20966 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
20967 		env->strict_alignment = false;
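
	/* Net effect of the alignment flags above (editorial summary):
	 *	BPF_F_ANY_ALIGNMENT			-> strict off (checked last)
	 *	BPF_F_STRICT_ALIGNMENT			-> strict on
	 *	neither, no efficient unaligned access	-> strict on
	 *	neither, efficient unaligned access	-> strict off
	 */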
20968 
20969 	if (is_priv)
20970 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
20971 	env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
20972 
20973 	env->explored_states = kvcalloc(state_htab_size(env),
20974 				       sizeof(struct bpf_verifier_state_list *),
20975 				       GFP_USER);
20976 	ret = -ENOMEM;
20977 	if (!env->explored_states)
20978 		goto skip_full_check;
20979 
20980 	ret = check_btf_info_early(env, attr, uattr);
20981 	if (ret < 0)
20982 		goto skip_full_check;
20983 
20984 	ret = add_subprog_and_kfunc(env);
20985 	if (ret < 0)
20986 		goto skip_full_check;
20987 
20988 	ret = check_subprogs(env);
20989 	if (ret < 0)
20990 		goto skip_full_check;
20991 
20992 	ret = check_btf_info(env, attr, uattr);
20993 	if (ret < 0)
20994 		goto skip_full_check;
20995 
20996 	ret = check_attach_btf_id(env);
20997 	if (ret)
20998 		goto skip_full_check;
20999 
21000 	ret = resolve_pseudo_ldimm64(env);
21001 	if (ret < 0)
21002 		goto skip_full_check;
21003 
21004 	if (bpf_prog_is_offloaded(env->prog->aux)) {
21005 		ret = bpf_prog_offload_verifier_prep(env->prog);
21006 		if (ret)
21007 			goto skip_full_check;
21008 	}
21009 
21010 	ret = check_cfg(env);
21011 	if (ret < 0)
21012 		goto skip_full_check;
21013 
21014 	ret = do_check_main(env);
21015 	ret = ret ?: do_check_subprogs(env);
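	/* Editorial note: the "ret = ret ?: ..." idiom runs do_check_subprogs()
	 * only when do_check_main() succeeded; subprograms here means global
	 * functions, which are verified independently of their callers.
	 */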
21016 
21017 	if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
21018 		ret = bpf_prog_offload_finalize(env);
21019 
21020 skip_full_check:
21021 	kvfree(env->explored_states);
21022 
21023 	if (ret == 0)
21024 		ret = check_max_stack_depth(env);
21025 
21026 	/* instruction rewrites happen after this point */
21027 	if (ret == 0)
21028 		ret = optimize_bpf_loop(env);
21029 
21030 	if (is_priv) {
21031 		if (ret == 0)
21032 			opt_hard_wire_dead_code_branches(env);
21033 		if (ret == 0)
21034 			ret = opt_remove_dead_code(env);
21035 		if (ret == 0)
21036 			ret = opt_remove_nops(env);
21037 	} else {
21038 		if (ret == 0)
21039 			sanitize_dead_code(env);
21040 	}
21041 
21042 	if (ret == 0)
21043 		/* program is valid, convert *(u32*)(ctx + off) accesses */
21044 		ret = convert_ctx_accesses(env);
21045 
21046 	if (ret == 0)
21047 		ret = do_misc_fixups(env);
21048 
21049 	/* do the 32-bit optimization after insn patching is done, so the
21050 	 * patched insns can be handled correctly.
21051 	 */
21052 	if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
21053 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
21054 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
21055 								     : false;
21056 	}
21057 
21058 	if (ret == 0)
21059 		ret = fixup_call_args(env);
21060 
21061 	env->verification_time = ktime_get_ns() - start_time;
21062 	print_verification_stats(env);
21063 	env->prog->aux->verified_insns = env->insn_processed;
21064 
21065 	/* preserve original error even if log finalization is successful */
21066 	err = bpf_vlog_finalize(&env->log, &log_true_size);
21067 	if (err)
21068 		ret = err;
21069 
21070 	if (uattr_size >= offsetofend(union bpf_attr, log_true_size) &&
21071 	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size),
21072 				  &log_true_size, sizeof(log_true_size))) {
21073 		ret = -EFAULT;
21074 		goto err_release_maps;
21075 	}
21076 
21077 	if (ret)
21078 		goto err_release_maps;
21079 
21080 	if (env->used_map_cnt) {
21081 		/* if program passed verifier, update used_maps in bpf_prog_aux */
21082 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
21083 							  sizeof(env->used_maps[0]),
21084 							  GFP_KERNEL);
21085 
21086 		if (!env->prog->aux->used_maps) {
21087 			ret = -ENOMEM;
21088 			goto err_release_maps;
21089 		}
21090 
21091 		memcpy(env->prog->aux->used_maps, env->used_maps,
21092 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
21093 		env->prog->aux->used_map_cnt = env->used_map_cnt;
21094 	}
21095 	if (env->used_btf_cnt) {
21096 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
21097 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
21098 							  sizeof(env->used_btfs[0]),
21099 							  GFP_KERNEL);
21100 		if (!env->prog->aux->used_btfs) {
21101 			ret = -ENOMEM;
21102 			goto err_release_maps;
21103 		}
21104 
21105 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
21106 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
21107 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
21108 	}
21109 	if (env->used_map_cnt || env->used_btf_cnt) {
21110 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
21111 		 * bpf_ld_imm64 instructions
21112 		 */
21113 		convert_pseudo_ld_imm64(env);
21114 	}
21115 
21116 	adjust_btf_func(env);
21117 
21118 err_release_maps:
21119 	if (!env->prog->aux->used_maps)
21120 		/* if we didn't copy map pointers into bpf_prog_aux, release
21121 		 * them now. Otherwise free_used_maps() will release them.
21122 		 */
21123 		release_maps(env);
21124 	if (!env->prog->aux->used_btfs)
21125 		release_btfs(env);
21126 
21127 	/* extension progs temporarily inherit the attach_type of their targets
21128 	 * for verification purposes, so set it back to zero before returning
21129 	 */
21130 	if (env->prog->type == BPF_PROG_TYPE_EXT)
21131 		env->prog->expected_attach_type = 0;
21132 
21133 	*prog = env->prog;
21134 
21135 	module_put(env->attach_btf_mod);
21136 err_unlock:
21137 	if (!is_priv)
21138 		mutex_unlock(&bpf_verifier_lock);
21139 	vfree(env->insn_aux_data);
21140 err_free_env:
21141 	kfree(env);
21142 	return ret;
21143 }
21144