xref: /linux/kernel/bpf/verifier.c (revision 2dcb8e8782d8e4c38903bf37b1a24d3ffd193da7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 
27 #include "disasm.h"
28 
29 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
30 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
31 	[_id] = & _name ## _verifier_ops,
32 #define BPF_MAP_TYPE(_id, _ops)
33 #define BPF_LINK_TYPE(_id, _name)
34 #include <linux/bpf_types.h>
35 #undef BPF_PROG_TYPE
36 #undef BPF_MAP_TYPE
37 #undef BPF_LINK_TYPE
38 };
39 
40 /* bpf_check() is a static code analyzer that walks eBPF program
41  * instruction by instruction and updates register/stack state.
42  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
43  *
44  * The first pass is depth-first-search to check that the program is a DAG.
45  * It rejects the following programs:
46  * - larger than BPF_MAXINSNS insns
47  * - if loop is present (detected via back-edge)
48  * - unreachable insns exist (shouldn't be a forest. program = one function)
49  * - out of bounds or malformed jumps
50  * The second pass is all possible path descent from the 1st insn.
51  * Since it's analyzing all paths through the program, the length of the
52  * analysis is limited to 64k insn, which may be hit even if total number of
53  * insns is less than 4K, but there are too many branches that change stack/regs.
54  * Number of 'branches to be analyzed' is limited to 1k
55  *
56  * On entry to each instruction, each register has a type, and the instruction
57  * changes the types of the registers depending on instruction semantics.
58  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
59  * copied to R1.
60  *
61  * All registers are 64-bit.
62  * R0 - return register
63  * R1-R5 argument passing registers
64  * R6-R9 callee saved registers
65  * R10 - frame pointer read-only
66  *
67  * At the start of BPF program the register R1 contains a pointer to bpf_context
68  * and has type PTR_TO_CTX.
69  *
70  * Verifier tracks arithmetic operations on pointers in case:
71  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
72  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
73  * 1st insn copies R10 (which has type FRAME_PTR) into R1
74  * and 2nd arithmetic instruction is pattern matched to recognize
75  * that it wants to construct a pointer to some element within stack.
76  * So after 2nd insn, the register R1 has type PTR_TO_STACK
77  * (and -20 constant is saved for further stack bounds checking).
78  * Meaning that this reg is a pointer to stack plus known immediate constant.
79  *
80  * Most of the time the registers have SCALAR_VALUE type, which
81  * means the register has some value, but it's not a valid pointer.
82  * (like pointer plus pointer becomes SCALAR_VALUE type)
83  *
84  * When verifier sees load or store instructions the type of base register
85  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
86  * four pointer types recognized by the check_mem_access() function.
87  *
88  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
89  * and the range of [ptr, ptr + map's value_size) is accessible.
90  *
91  * registers used to pass values to function calls are checked against
92  * function argument constraints.
93  *
94  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
95  * It means that the register type passed to this function must be
96  * PTR_TO_STACK and it will be used inside the function as
97  * 'pointer to map element key'
98  *
99  * For example the argument constraints for bpf_map_lookup_elem():
100  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
101  *   .arg1_type = ARG_CONST_MAP_PTR,
102  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
103  *
104  * ret_type says that this function returns 'pointer to map elem value or null'
105  * function expects 1st argument to be a const pointer to 'struct bpf_map' and
106  * 2nd argument should be a pointer to stack, which will be used inside
107  * the helper function as a pointer to map element key.
108  *
109  * On the kernel side the helper function looks like:
110  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
111  * {
112  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
113  *    void *key = (void *) (unsigned long) r2;
114  *    void *value;
115  *
116  *    here kernel can access 'key' and 'map' pointers safely, knowing that
117  *    [key, key + map->key_size) bytes are valid and were initialized on
118  *    the stack of eBPF program.
119  * }
120  *
121  * Corresponding eBPF program may look like:
122  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
123  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
124  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
125  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
126  * here verifier looks at prototype of map_lookup_elem() and sees:
127  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
128  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
129  *
130  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
131  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
132  * and were initialized prior to this call.
133  * If it's ok, then verifier allows this BPF_CALL insn and looks at
134  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
135  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
136  * returns either pointer to map value or NULL.
137  *
138  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
139  * insn, the register holding that pointer in the true branch changes state to
140  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
141  * branch. See check_cond_jmp_op().
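 *
 * As an illustrative continuation of the example above (not taken from a real
 * program; the jump offset and load size are arbitrary and assume the map's
 * value_size is at least 4 bytes), such a NULL check could look like:
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),       // skip the load if R0 == NULL
 *    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), // R0 is PTR_TO_MAP_VALUE here
 *    BPF_EXIT_INSN(),
 * Dereferencing R0 before such a check is rejected, since R0 may still be NULL.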
142  *
143  * After the call R0 is set to return type of the function and registers R1-R5
144  * are set to NOT_INIT to indicate that they are no longer readable.
145  *
146  * The following reference types represent a potential reference to a kernel
147  * resource which, after first being allocated, must be checked and freed by
148  * the BPF program:
149  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
150  *
151  * When the verifier sees a helper call return a reference type, it allocates a
152  * pointer id for the reference and stores it in the current function state.
153  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
154  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
155  * passes through a NULL-check conditional. For the branch wherein the state is
156  * changed to CONST_IMM, the verifier releases the reference.
157  *
158  * For each helper function that allocates a reference, such as
159  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
160  * bpf_sk_release(). When a reference type passes into the release function,
161  * the verifier also releases the reference. If any unchecked or unreleased
162  * reference remains at the end of the program, the verifier rejects it.
163  */
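
/* A minimal illustrative sketch (BPF program C, not part of this file; it
 * assumes a populated 'struct bpf_sock_tuple tuple' and a valid 'ctx') of the
 * acquire/release contract described above:
 *
 *    struct bpf_sock *sk;
 *
 *    sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 *    if (!sk)
 *        return 0;          // NULL branch: the verifier drops the reference
 *    ...                    // sk has type PTR_TO_SOCKET here
 *    bpf_sk_release(sk);    // the reference must be released on this path
 *    return 0;
 *
 * A program that can reach bpf_exit while still holding the reference is
 * rejected.
 */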
164 
165 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
166 struct bpf_verifier_stack_elem {
167 	/* verifier state is 'st'
168 	 * before processing instruction 'insn_idx'
169 	 * and after processing instruction 'prev_insn_idx'
170 	 */
171 	struct bpf_verifier_state st;
172 	int insn_idx;
173 	int prev_insn_idx;
174 	struct bpf_verifier_stack_elem *next;
175 	/* length of verifier log at the time this state was pushed on stack */
176 	u32 log_pos;
177 };
178 
179 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
180 #define BPF_COMPLEXITY_LIMIT_STATES	64
181 
182 #define BPF_MAP_KEY_POISON	(1ULL << 63)
183 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
184 
185 #define BPF_MAP_PTR_UNPRIV	1UL
186 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
187 					  POISON_POINTER_DELTA))
188 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
189 
190 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
191 {
192 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
193 }
194 
195 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
196 {
197 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
198 }
199 
200 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
201 			      const struct bpf_map *map, bool unpriv)
202 {
203 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
204 	unpriv |= bpf_map_ptr_unpriv(aux);
205 	aux->map_ptr_state = (unsigned long)map |
206 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
207 }
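
/* Illustrative example of the encoding above (no new semantics): the low bit
 * of aux->map_ptr_state carries BPF_MAP_PTR_UNPRIV and the remaining bits
 * carry the map pointer (or BPF_MAP_PTR_POISON), so after
 *
 *    bpf_map_ptr_store(aux, map, true);
 *
 * both BPF_MAP_PTR(aux->map_ptr_state) == map and bpf_map_ptr_unpriv(aux)
 * hold. The BUILD_BUG_ON above guarantees the poison value never collides
 * with the unpriv bit.
 */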
208 
209 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
210 {
211 	return aux->map_key_state & BPF_MAP_KEY_POISON;
212 }
213 
214 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
215 {
216 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
217 }
218 
219 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
220 {
221 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
222 }
223 
224 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
225 {
226 	bool poisoned = bpf_map_key_poisoned(aux);
227 
228 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
229 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
230 }
231 
232 static bool bpf_pseudo_call(const struct bpf_insn *insn)
233 {
234 	return insn->code == (BPF_JMP | BPF_CALL) &&
235 	       insn->src_reg == BPF_PSEUDO_CALL;
236 }
237 
238 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
239 {
240 	return insn->code == (BPF_JMP | BPF_CALL) &&
241 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
242 }
243 
244 struct bpf_call_arg_meta {
245 	struct bpf_map *map_ptr;
246 	bool raw_mode;
247 	bool pkt_access;
248 	int regno;
249 	int access_size;
250 	int mem_size;
251 	u64 msize_max_value;
252 	int ref_obj_id;
253 	int map_uid;
254 	int func_id;
255 	struct btf *btf;
256 	u32 btf_id;
257 	struct btf *ret_btf;
258 	u32 ret_btf_id;
259 	u32 subprogno;
260 };
261 
262 struct btf *btf_vmlinux;
263 
264 static DEFINE_MUTEX(bpf_verifier_lock);
265 
266 static const struct bpf_line_info *
267 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
268 {
269 	const struct bpf_line_info *linfo;
270 	const struct bpf_prog *prog;
271 	u32 i, nr_linfo;
272 
273 	prog = env->prog;
274 	nr_linfo = prog->aux->nr_linfo;
275 
276 	if (!nr_linfo || insn_off >= prog->len)
277 		return NULL;
278 
279 	linfo = prog->aux->linfo;
280 	for (i = 1; i < nr_linfo; i++)
281 		if (insn_off < linfo[i].insn_off)
282 			break;
283 
284 	return &linfo[i - 1];
285 }
286 
287 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
288 		       va_list args)
289 {
290 	unsigned int n;
291 
292 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
293 
294 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
295 		  "verifier log line truncated - local buffer too short\n");
296 
297 	if (log->level == BPF_LOG_KERNEL) {
298 		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
299 
300 		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
301 		return;
302 	}
303 
304 	n = min(log->len_total - log->len_used - 1, n);
305 	log->kbuf[n] = '\0';
306 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
307 		log->len_used += n;
308 	else
309 		log->ubuf = NULL;
310 }
311 
312 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
313 {
314 	char zero = 0;
315 
316 	if (!bpf_verifier_log_needed(log))
317 		return;
318 
319 	log->len_used = new_pos;
320 	if (put_user(zero, log->ubuf + new_pos))
321 		log->ubuf = NULL;
322 }
323 
324 /* log_level controls verbosity level of eBPF verifier.
325  * bpf_verifier_log_write() is used to dump the verification trace to the log,
326  * so the user can figure out what's wrong with the program
327  */
328 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
329 					   const char *fmt, ...)
330 {
331 	va_list args;
332 
333 	if (!bpf_verifier_log_needed(&env->log))
334 		return;
335 
336 	va_start(args, fmt);
337 	bpf_verifier_vlog(&env->log, fmt, args);
338 	va_end(args);
339 }
340 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
341 
342 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
343 {
344 	struct bpf_verifier_env *env = private_data;
345 	va_list args;
346 
347 	if (!bpf_verifier_log_needed(&env->log))
348 		return;
349 
350 	va_start(args, fmt);
351 	bpf_verifier_vlog(&env->log, fmt, args);
352 	va_end(args);
353 }
354 
355 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
356 			    const char *fmt, ...)
357 {
358 	va_list args;
359 
360 	if (!bpf_verifier_log_needed(log))
361 		return;
362 
363 	va_start(args, fmt);
364 	bpf_verifier_vlog(log, fmt, args);
365 	va_end(args);
366 }
367 
368 static const char *ltrim(const char *s)
369 {
370 	while (isspace(*s))
371 		s++;
372 
373 	return s;
374 }
375 
376 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
377 					 u32 insn_off,
378 					 const char *prefix_fmt, ...)
379 {
380 	const struct bpf_line_info *linfo;
381 
382 	if (!bpf_verifier_log_needed(&env->log))
383 		return;
384 
385 	linfo = find_linfo(env, insn_off);
386 	if (!linfo || linfo == env->prev_linfo)
387 		return;
388 
389 	if (prefix_fmt) {
390 		va_list args;
391 
392 		va_start(args, prefix_fmt);
393 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
394 		va_end(args);
395 	}
396 
397 	verbose(env, "%s\n",
398 		ltrim(btf_name_by_offset(env->prog->aux->btf,
399 					 linfo->line_off)));
400 
401 	env->prev_linfo = linfo;
402 }
403 
404 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
405 				   struct bpf_reg_state *reg,
406 				   struct tnum *range, const char *ctx,
407 				   const char *reg_name)
408 {
409 	char tn_buf[48];
410 
411 	verbose(env, "At %s the register %s ", ctx, reg_name);
412 	if (!tnum_is_unknown(reg->var_off)) {
413 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
414 		verbose(env, "has value %s", tn_buf);
415 	} else {
416 		verbose(env, "has unknown scalar value");
417 	}
418 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
419 	verbose(env, " should have been in %s\n", tn_buf);
420 }
421 
422 static bool type_is_pkt_pointer(enum bpf_reg_type type)
423 {
424 	return type == PTR_TO_PACKET ||
425 	       type == PTR_TO_PACKET_META;
426 }
427 
428 static bool type_is_sk_pointer(enum bpf_reg_type type)
429 {
430 	return type == PTR_TO_SOCKET ||
431 		type == PTR_TO_SOCK_COMMON ||
432 		type == PTR_TO_TCP_SOCK ||
433 		type == PTR_TO_XDP_SOCK;
434 }
435 
436 static bool reg_type_not_null(enum bpf_reg_type type)
437 {
438 	return type == PTR_TO_SOCKET ||
439 		type == PTR_TO_TCP_SOCK ||
440 		type == PTR_TO_MAP_VALUE ||
441 		type == PTR_TO_MAP_KEY ||
442 		type == PTR_TO_SOCK_COMMON;
443 }
444 
445 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
446 {
447 	return reg->type == PTR_TO_MAP_VALUE &&
448 		map_value_has_spin_lock(reg->map_ptr);
449 }
450 
451 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
452 {
453 	return base_type(type) == PTR_TO_SOCKET ||
454 		base_type(type) == PTR_TO_TCP_SOCK ||
455 		base_type(type) == PTR_TO_MEM ||
456 		base_type(type) == PTR_TO_BTF_ID;
457 }
458 
459 static bool type_is_rdonly_mem(u32 type)
460 {
461 	return type & MEM_RDONLY;
462 }
463 
464 static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
465 {
466 	return type == ARG_PTR_TO_SOCK_COMMON;
467 }
468 
469 static bool type_may_be_null(u32 type)
470 {
471 	return type & PTR_MAYBE_NULL;
472 }
473 
474 /* Determine whether the function releases some resources allocated by another
475  * function call. The first reference type argument will be assumed to be
476  * released by release_reference().
477  */
478 static bool is_release_function(enum bpf_func_id func_id)
479 {
480 	return func_id == BPF_FUNC_sk_release ||
481 	       func_id == BPF_FUNC_ringbuf_submit ||
482 	       func_id == BPF_FUNC_ringbuf_discard;
483 }
484 
485 static bool may_be_acquire_function(enum bpf_func_id func_id)
486 {
487 	return func_id == BPF_FUNC_sk_lookup_tcp ||
488 		func_id == BPF_FUNC_sk_lookup_udp ||
489 		func_id == BPF_FUNC_skc_lookup_tcp ||
490 		func_id == BPF_FUNC_map_lookup_elem ||
491 		func_id == BPF_FUNC_ringbuf_reserve;
492 }
493 
494 static bool is_acquire_function(enum bpf_func_id func_id,
495 				const struct bpf_map *map)
496 {
497 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
498 
499 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
500 	    func_id == BPF_FUNC_sk_lookup_udp ||
501 	    func_id == BPF_FUNC_skc_lookup_tcp ||
502 	    func_id == BPF_FUNC_ringbuf_reserve)
503 		return true;
504 
505 	if (func_id == BPF_FUNC_map_lookup_elem &&
506 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
507 	     map_type == BPF_MAP_TYPE_SOCKHASH))
508 		return true;
509 
510 	return false;
511 }
512 
513 static bool is_ptr_cast_function(enum bpf_func_id func_id)
514 {
515 	return func_id == BPF_FUNC_tcp_sock ||
516 		func_id == BPF_FUNC_sk_fullsock ||
517 		func_id == BPF_FUNC_skc_to_tcp_sock ||
518 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
519 		func_id == BPF_FUNC_skc_to_udp6_sock ||
520 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
521 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
522 }
523 
524 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
525 {
526 	return BPF_CLASS(insn->code) == BPF_STX &&
527 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
528 	       insn->imm == BPF_CMPXCHG;
529 }
530 
531 /* string representation of 'enum bpf_reg_type'
532  *
533  * Note that reg_type_str() cannot appear more than once in a single verbose()
534  * statement.
535  */
536 static const char *reg_type_str(struct bpf_verifier_env *env,
537 				enum bpf_reg_type type)
538 {
539 	char postfix[16] = {0}, prefix[32] = {0};
540 	static const char * const str[] = {
541 		[NOT_INIT]		= "?",
542 		[SCALAR_VALUE]		= "scalar",
543 		[PTR_TO_CTX]		= "ctx",
544 		[CONST_PTR_TO_MAP]	= "map_ptr",
545 		[PTR_TO_MAP_VALUE]	= "map_value",
546 		[PTR_TO_STACK]		= "fp",
547 		[PTR_TO_PACKET]		= "pkt",
548 		[PTR_TO_PACKET_META]	= "pkt_meta",
549 		[PTR_TO_PACKET_END]	= "pkt_end",
550 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
551 		[PTR_TO_SOCKET]		= "sock",
552 		[PTR_TO_SOCK_COMMON]	= "sock_common",
553 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
554 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
555 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
556 		[PTR_TO_BTF_ID]		= "ptr_",
557 		[PTR_TO_MEM]		= "mem",
558 		[PTR_TO_BUF]		= "buf",
559 		[PTR_TO_FUNC]		= "func",
560 		[PTR_TO_MAP_KEY]	= "map_key",
561 	};
562 
563 	if (type & PTR_MAYBE_NULL) {
564 		if (base_type(type) == PTR_TO_BTF_ID)
565 			strncpy(postfix, "or_null_", 16);
566 		else
567 			strncpy(postfix, "_or_null", 16);
568 	}
569 
570 	if (type & MEM_RDONLY)
571 		strncpy(prefix, "rdonly_", 32);
572 	if (type & MEM_ALLOC)
573 		strncpy(prefix, "alloc_", 32);
574 	if (type & MEM_USER)
575 		strncpy(prefix, "user_", 32);
576 	if (type & MEM_PERCPU)
577 		strncpy(prefix, "percpu_", 32);
578 
579 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
580 		 prefix, str[base_type(type)], postfix);
581 	return env->type_str_buf;
582 }
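
/* A few illustrative outputs of reg_type_str() (derived from the table and
 * modifiers above, not an exhaustive list):
 *   PTR_TO_MAP_VALUE | PTR_MAYBE_NULL  -> "map_value_or_null"
 *   PTR_TO_MEM | MEM_RDONLY            -> "rdonly_mem"
 *   PTR_TO_BTF_ID | PTR_MAYBE_NULL     -> "ptr_or_null_" (the BTF type name is
 *                                         appended by the caller, see
 *                                         print_verifier_state())
 */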
583 
584 static char slot_type_char[] = {
585 	[STACK_INVALID]	= '?',
586 	[STACK_SPILL]	= 'r',
587 	[STACK_MISC]	= 'm',
588 	[STACK_ZERO]	= '0',
589 };
590 
591 static void print_liveness(struct bpf_verifier_env *env,
592 			   enum bpf_reg_liveness live)
593 {
594 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
595 		verbose(env, "_");
596 	if (live & REG_LIVE_READ)
597 		verbose(env, "r");
598 	if (live & REG_LIVE_WRITTEN)
599 		verbose(env, "w");
600 	if (live & REG_LIVE_DONE)
601 		verbose(env, "D");
602 }
603 
604 static struct bpf_func_state *func(struct bpf_verifier_env *env,
605 				   const struct bpf_reg_state *reg)
606 {
607 	struct bpf_verifier_state *cur = env->cur_state;
608 
609 	return cur->frame[reg->frameno];
610 }
611 
612 static const char *kernel_type_name(const struct btf* btf, u32 id)
613 {
614 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
615 }
616 
617 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
618 {
619 	env->scratched_regs |= 1U << regno;
620 }
621 
622 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
623 {
624 	env->scratched_stack_slots |= 1ULL << spi;
625 }
626 
627 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
628 {
629 	return (env->scratched_regs >> regno) & 1;
630 }
631 
632 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
633 {
634 	return (env->scratched_stack_slots >> regno) & 1;
635 }
636 
637 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
638 {
639 	return env->scratched_regs || env->scratched_stack_slots;
640 }
641 
642 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
643 {
644 	env->scratched_regs = 0U;
645 	env->scratched_stack_slots = 0ULL;
646 }
647 
648 /* Used for printing the entire verifier state. */
649 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
650 {
651 	env->scratched_regs = ~0U;
652 	env->scratched_stack_slots = ~0ULL;
653 }
654 
655 /* The reg state of a pointer or a bounded scalar was saved when
656  * it was spilled to the stack.
657  */
658 static bool is_spilled_reg(const struct bpf_stack_state *stack)
659 {
660 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
661 }
662 
663 static void scrub_spilled_slot(u8 *stype)
664 {
665 	if (*stype != STACK_INVALID)
666 		*stype = STACK_MISC;
667 }
668 
669 static void print_verifier_state(struct bpf_verifier_env *env,
670 				 const struct bpf_func_state *state,
671 				 bool print_all)
672 {
673 	const struct bpf_reg_state *reg;
674 	enum bpf_reg_type t;
675 	int i;
676 
677 	if (state->frameno)
678 		verbose(env, " frame%d:", state->frameno);
679 	for (i = 0; i < MAX_BPF_REG; i++) {
680 		reg = &state->regs[i];
681 		t = reg->type;
682 		if (t == NOT_INIT)
683 			continue;
684 		if (!print_all && !reg_scratched(env, i))
685 			continue;
686 		verbose(env, " R%d", i);
687 		print_liveness(env, reg->live);
688 		verbose(env, "=");
689 		if (t == SCALAR_VALUE && reg->precise)
690 			verbose(env, "P");
691 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
692 		    tnum_is_const(reg->var_off)) {
693 			/* reg->off should be 0 for SCALAR_VALUE */
694 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
695 			verbose(env, "%lld", reg->var_off.value + reg->off);
696 		} else {
697 			const char *sep = "";
698 
699 			verbose(env, "%s", reg_type_str(env, t));
700 			if (base_type(t) == PTR_TO_BTF_ID)
701 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
702 			verbose(env, "(");
703 /*
704  * _a stands for append; it was shortened to avoid multiline statements below.
705  * This macro is used to output a comma-separated list of attributes.
706  */
707 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
708 
709 			if (reg->id)
710 				verbose_a("id=%d", reg->id);
711 			if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
712 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
713 			if (t != SCALAR_VALUE)
714 				verbose_a("off=%d", reg->off);
715 			if (type_is_pkt_pointer(t))
716 				verbose_a("r=%d", reg->range);
717 			else if (base_type(t) == CONST_PTR_TO_MAP ||
718 				 base_type(t) == PTR_TO_MAP_KEY ||
719 				 base_type(t) == PTR_TO_MAP_VALUE)
720 				verbose_a("ks=%d,vs=%d",
721 					  reg->map_ptr->key_size,
722 					  reg->map_ptr->value_size);
723 			if (tnum_is_const(reg->var_off)) {
724 				/* Typically an immediate SCALAR_VALUE, but
725 				 * could be a pointer whose offset is too big
726 				 * for reg->off
727 				 */
728 				verbose_a("imm=%llx", reg->var_off.value);
729 			} else {
730 				if (reg->smin_value != reg->umin_value &&
731 				    reg->smin_value != S64_MIN)
732 					verbose_a("smin=%lld", (long long)reg->smin_value);
733 				if (reg->smax_value != reg->umax_value &&
734 				    reg->smax_value != S64_MAX)
735 					verbose_a("smax=%lld", (long long)reg->smax_value);
736 				if (reg->umin_value != 0)
737 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
738 				if (reg->umax_value != U64_MAX)
739 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
740 				if (!tnum_is_unknown(reg->var_off)) {
741 					char tn_buf[48];
742 
743 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
744 					verbose_a("var_off=%s", tn_buf);
745 				}
746 				if (reg->s32_min_value != reg->smin_value &&
747 				    reg->s32_min_value != S32_MIN)
748 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
749 				if (reg->s32_max_value != reg->smax_value &&
750 				    reg->s32_max_value != S32_MAX)
751 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
752 				if (reg->u32_min_value != reg->umin_value &&
753 				    reg->u32_min_value != U32_MIN)
754 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
755 				if (reg->u32_max_value != reg->umax_value &&
756 				    reg->u32_max_value != U32_MAX)
757 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
758 			}
759 #undef verbose_a
760 
761 			verbose(env, ")");
762 		}
763 	}
764 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
765 		char types_buf[BPF_REG_SIZE + 1];
766 		bool valid = false;
767 		int j;
768 
769 		for (j = 0; j < BPF_REG_SIZE; j++) {
770 			if (state->stack[i].slot_type[j] != STACK_INVALID)
771 				valid = true;
772 			types_buf[j] = slot_type_char[
773 					state->stack[i].slot_type[j]];
774 		}
775 		types_buf[BPF_REG_SIZE] = 0;
776 		if (!valid)
777 			continue;
778 		if (!print_all && !stack_slot_scratched(env, i))
779 			continue;
780 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
781 		print_liveness(env, state->stack[i].spilled_ptr.live);
782 		if (is_spilled_reg(&state->stack[i])) {
783 			reg = &state->stack[i].spilled_ptr;
784 			t = reg->type;
785 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
786 			if (t == SCALAR_VALUE && reg->precise)
787 				verbose(env, "P");
788 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
789 				verbose(env, "%lld", reg->var_off.value + reg->off);
790 		} else {
791 			verbose(env, "=%s", types_buf);
792 		}
793 	}
794 	if (state->acquired_refs && state->refs[0].id) {
795 		verbose(env, " refs=%d", state->refs[0].id);
796 		for (i = 1; i < state->acquired_refs; i++)
797 			if (state->refs[i].id)
798 				verbose(env, ",%d", state->refs[i].id);
799 	}
800 	if (state->in_callback_fn)
801 		verbose(env, " cb");
802 	if (state->in_async_callback_fn)
803 		verbose(env, " async_cb");
804 	verbose(env, "\n");
805 	mark_verifier_state_clean(env);
806 }
807 
808 static inline u32 vlog_alignment(u32 pos)
809 {
810 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
811 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
812 }
813 
814 static void print_insn_state(struct bpf_verifier_env *env,
815 			     const struct bpf_func_state *state)
816 {
817 	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
818 		/* remove new line character */
819 		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
820 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
821 	} else {
822 		verbose(env, "%d:", env->insn_idx);
823 	}
824 	print_verifier_state(env, state, false);
825 }
826 
827 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
828  * small to hold src. This is different from krealloc since we don't want to preserve
829  * the contents of dst.
830  *
831  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
832  * not be allocated.
833  */
834 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
835 {
836 	size_t bytes;
837 
838 	if (ZERO_OR_NULL_PTR(src))
839 		goto out;
840 
841 	if (unlikely(check_mul_overflow(n, size, &bytes)))
842 		return NULL;
843 
844 	if (ksize(dst) < bytes) {
845 		kfree(dst);
846 		dst = kmalloc_track_caller(bytes, flags);
847 		if (!dst)
848 			return NULL;
849 	}
850 
851 	memcpy(dst, src, bytes);
852 out:
853 	return dst ? dst : ZERO_SIZE_PTR;
854 }
855 
856 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
857  * small to hold new_n items. new items are zeroed out if the array grows.
858  *
859  * Contrary to krealloc_array, does not free arr if new_n is zero.
860  */
861 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
862 {
863 	if (!new_n || old_n == new_n)
864 		goto out;
865 
866 	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
867 	if (!arr)
868 		return NULL;
869 
870 	if (new_n > old_n)
871 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
872 
873 out:
874 	return arr ? arr : ZERO_SIZE_PTR;
875 }
876 
877 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
878 {
879 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
880 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
881 	if (!dst->refs)
882 		return -ENOMEM;
883 
884 	dst->acquired_refs = src->acquired_refs;
885 	return 0;
886 }
887 
888 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
889 {
890 	size_t n = src->allocated_stack / BPF_REG_SIZE;
891 
892 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
893 				GFP_KERNEL);
894 	if (!dst->stack)
895 		return -ENOMEM;
896 
897 	dst->allocated_stack = src->allocated_stack;
898 	return 0;
899 }
900 
901 static int resize_reference_state(struct bpf_func_state *state, size_t n)
902 {
903 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
904 				    sizeof(struct bpf_reference_state));
905 	if (!state->refs)
906 		return -ENOMEM;
907 
908 	state->acquired_refs = n;
909 	return 0;
910 }
911 
912 static int grow_stack_state(struct bpf_func_state *state, int size)
913 {
914 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
915 
916 	if (old_n >= n)
917 		return 0;
918 
919 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
920 	if (!state->stack)
921 		return -ENOMEM;
922 
923 	state->allocated_stack = size;
924 	return 0;
925 }
926 
927 /* Acquire a pointer id from the env and update the state->refs to include
928  * this new pointer reference.
929  * On success, returns a valid pointer id to associate with the register.
930  * On failure, returns a negative errno.
931  */
932 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
933 {
934 	struct bpf_func_state *state = cur_func(env);
935 	int new_ofs = state->acquired_refs;
936 	int id, err;
937 
938 	err = resize_reference_state(state, state->acquired_refs + 1);
939 	if (err)
940 		return err;
941 	id = ++env->id_gen;
942 	state->refs[new_ofs].id = id;
943 	state->refs[new_ofs].insn_idx = insn_idx;
944 
945 	return id;
946 }
947 
948 /* release function corresponding to acquire_reference_state(). Idempotent. */
949 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
950 {
951 	int i, last_idx;
952 
953 	last_idx = state->acquired_refs - 1;
954 	for (i = 0; i < state->acquired_refs; i++) {
955 		if (state->refs[i].id == ptr_id) {
956 			if (last_idx && i != last_idx)
957 				memcpy(&state->refs[i], &state->refs[last_idx],
958 				       sizeof(*state->refs));
959 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
960 			state->acquired_refs--;
961 			return 0;
962 		}
963 	}
964 	return -EINVAL;
965 }
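
/* Illustrative pairing of the two helpers above (a sketch, not a verbatim
 * caller): when a helper call acquires a reference, roughly
 *
 *    id = acquire_reference_state(env, insn_idx);
 *    regs[BPF_REG_0].ref_obj_id = id;
 *
 * records it, and verifying the matching release helper later ends in
 *
 *    release_reference_state(cur_func(env), id);
 *
 * so that no acquired id is left behind in state->refs at program exit.
 */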
966 
967 static void free_func_state(struct bpf_func_state *state)
968 {
969 	if (!state)
970 		return;
971 	kfree(state->refs);
972 	kfree(state->stack);
973 	kfree(state);
974 }
975 
976 static void clear_jmp_history(struct bpf_verifier_state *state)
977 {
978 	kfree(state->jmp_history);
979 	state->jmp_history = NULL;
980 	state->jmp_history_cnt = 0;
981 }
982 
983 static void free_verifier_state(struct bpf_verifier_state *state,
984 				bool free_self)
985 {
986 	int i;
987 
988 	for (i = 0; i <= state->curframe; i++) {
989 		free_func_state(state->frame[i]);
990 		state->frame[i] = NULL;
991 	}
992 	clear_jmp_history(state);
993 	if (free_self)
994 		kfree(state);
995 }
996 
997 /* copy verifier state from src to dst growing dst stack space
998  * when necessary to accommodate larger src stack
999  */
1000 static int copy_func_state(struct bpf_func_state *dst,
1001 			   const struct bpf_func_state *src)
1002 {
1003 	int err;
1004 
1005 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1006 	err = copy_reference_state(dst, src);
1007 	if (err)
1008 		return err;
1009 	return copy_stack_state(dst, src);
1010 }
1011 
1012 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1013 			       const struct bpf_verifier_state *src)
1014 {
1015 	struct bpf_func_state *dst;
1016 	int i, err;
1017 
1018 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1019 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1020 					    GFP_USER);
1021 	if (!dst_state->jmp_history)
1022 		return -ENOMEM;
1023 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1024 
1025 	/* if dst has more stack frames than src, free them */
1026 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1027 		free_func_state(dst_state->frame[i]);
1028 		dst_state->frame[i] = NULL;
1029 	}
1030 	dst_state->speculative = src->speculative;
1031 	dst_state->curframe = src->curframe;
1032 	dst_state->active_spin_lock = src->active_spin_lock;
1033 	dst_state->branches = src->branches;
1034 	dst_state->parent = src->parent;
1035 	dst_state->first_insn_idx = src->first_insn_idx;
1036 	dst_state->last_insn_idx = src->last_insn_idx;
1037 	for (i = 0; i <= src->curframe; i++) {
1038 		dst = dst_state->frame[i];
1039 		if (!dst) {
1040 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1041 			if (!dst)
1042 				return -ENOMEM;
1043 			dst_state->frame[i] = dst;
1044 		}
1045 		err = copy_func_state(dst, src->frame[i]);
1046 		if (err)
1047 			return err;
1048 	}
1049 	return 0;
1050 }
1051 
1052 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1053 {
1054 	while (st) {
1055 		u32 br = --st->branches;
1056 
1057 		/* WARN_ON(br > 1) technically makes sense here,
1058 		 * but see comment in push_stack(), hence:
1059 		 */
1060 		WARN_ONCE((int)br < 0,
1061 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1062 			  br);
1063 		if (br)
1064 			break;
1065 		st = st->parent;
1066 	}
1067 }
1068 
1069 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1070 		     int *insn_idx, bool pop_log)
1071 {
1072 	struct bpf_verifier_state *cur = env->cur_state;
1073 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1074 	int err;
1075 
1076 	if (env->head == NULL)
1077 		return -ENOENT;
1078 
1079 	if (cur) {
1080 		err = copy_verifier_state(cur, &head->st);
1081 		if (err)
1082 			return err;
1083 	}
1084 	if (pop_log)
1085 		bpf_vlog_reset(&env->log, head->log_pos);
1086 	if (insn_idx)
1087 		*insn_idx = head->insn_idx;
1088 	if (prev_insn_idx)
1089 		*prev_insn_idx = head->prev_insn_idx;
1090 	elem = head->next;
1091 	free_verifier_state(&head->st, false);
1092 	kfree(head);
1093 	env->head = elem;
1094 	env->stack_size--;
1095 	return 0;
1096 }
1097 
1098 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1099 					     int insn_idx, int prev_insn_idx,
1100 					     bool speculative)
1101 {
1102 	struct bpf_verifier_state *cur = env->cur_state;
1103 	struct bpf_verifier_stack_elem *elem;
1104 	int err;
1105 
1106 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1107 	if (!elem)
1108 		goto err;
1109 
1110 	elem->insn_idx = insn_idx;
1111 	elem->prev_insn_idx = prev_insn_idx;
1112 	elem->next = env->head;
1113 	elem->log_pos = env->log.len_used;
1114 	env->head = elem;
1115 	env->stack_size++;
1116 	err = copy_verifier_state(&elem->st, cur);
1117 	if (err)
1118 		goto err;
1119 	elem->st.speculative |= speculative;
1120 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1121 		verbose(env, "The sequence of %d jumps is too complex.\n",
1122 			env->stack_size);
1123 		goto err;
1124 	}
1125 	if (elem->st.parent) {
1126 		++elem->st.parent->branches;
1127 		/* WARN_ON(branches > 2) technically makes sense here,
1128 		 * but
1129 		 * 1. speculative states will bump 'branches' for non-branch
1130 		 * instructions
1131 		 * 2. is_state_visited() heuristics may decide not to create
1132 		 * a new state for a sequence of branches and all such current
1133 		 * and cloned states will be pointing to a single parent state
1134 		 * which might have large 'branches' count.
1135 		 */
1136 	}
1137 	return &elem->st;
1138 err:
1139 	free_verifier_state(env->cur_state, true);
1140 	env->cur_state = NULL;
1141 	/* pop all elements and return */
1142 	while (!pop_stack(env, NULL, NULL, false));
1143 	return NULL;
1144 }
1145 
1146 #define CALLER_SAVED_REGS 6
1147 static const int caller_saved[CALLER_SAVED_REGS] = {
1148 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1149 };
1150 
1151 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1152 				struct bpf_reg_state *reg);
1153 
1154 /* This helper doesn't clear reg->id */
1155 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1156 {
1157 	reg->var_off = tnum_const(imm);
1158 	reg->smin_value = (s64)imm;
1159 	reg->smax_value = (s64)imm;
1160 	reg->umin_value = imm;
1161 	reg->umax_value = imm;
1162 
1163 	reg->s32_min_value = (s32)imm;
1164 	reg->s32_max_value = (s32)imm;
1165 	reg->u32_min_value = (u32)imm;
1166 	reg->u32_max_value = (u32)imm;
1167 }
1168 
1169 /* Mark the unknown part of a register (variable offset or scalar value) as
1170  * known to have the value @imm.
1171  */
1172 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1173 {
1174 	/* Clear id, off, and union(map_ptr, range) */
1175 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1176 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1177 	___mark_reg_known(reg, imm);
1178 }
1179 
1180 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1181 {
1182 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1183 	reg->s32_min_value = (s32)imm;
1184 	reg->s32_max_value = (s32)imm;
1185 	reg->u32_min_value = (u32)imm;
1186 	reg->u32_max_value = (u32)imm;
1187 }
1188 
1189 /* Mark the 'variable offset' part of a register as zero.  This should be
1190  * used only on registers holding a pointer type.
1191  */
1192 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1193 {
1194 	__mark_reg_known(reg, 0);
1195 }
1196 
1197 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1198 {
1199 	__mark_reg_known(reg, 0);
1200 	reg->type = SCALAR_VALUE;
1201 }
1202 
1203 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1204 				struct bpf_reg_state *regs, u32 regno)
1205 {
1206 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1207 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1208 		/* Something bad happened, let's kill all regs */
1209 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1210 			__mark_reg_not_init(env, regs + regno);
1211 		return;
1212 	}
1213 	__mark_reg_known_zero(regs + regno);
1214 }
1215 
1216 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1217 {
1218 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1219 		const struct bpf_map *map = reg->map_ptr;
1220 
1221 		if (map->inner_map_meta) {
1222 			reg->type = CONST_PTR_TO_MAP;
1223 			reg->map_ptr = map->inner_map_meta;
1224 			/* transfer reg's id which is unique for every map_lookup_elem
1225 			 * as UID of the inner map.
1226 			 */
1227 			if (map_value_has_timer(map->inner_map_meta))
1228 				reg->map_uid = reg->id;
1229 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1230 			reg->type = PTR_TO_XDP_SOCK;
1231 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1232 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1233 			reg->type = PTR_TO_SOCKET;
1234 		} else {
1235 			reg->type = PTR_TO_MAP_VALUE;
1236 		}
1237 		return;
1238 	}
1239 
1240 	reg->type &= ~PTR_MAYBE_NULL;
1241 }
1242 
1243 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1244 {
1245 	return type_is_pkt_pointer(reg->type);
1246 }
1247 
1248 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1249 {
1250 	return reg_is_pkt_pointer(reg) ||
1251 	       reg->type == PTR_TO_PACKET_END;
1252 }
1253 
1254 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1255 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1256 				    enum bpf_reg_type which)
1257 {
1258 	/* The register can already have a range from prior markings.
1259 	 * This is fine as long as it hasn't been advanced from its
1260 	 * origin.
1261 	 */
1262 	return reg->type == which &&
1263 	       reg->id == 0 &&
1264 	       reg->off == 0 &&
1265 	       tnum_equals_const(reg->var_off, 0);
1266 }
1267 
1268 /* Reset the min/max bounds of a register */
1269 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1270 {
1271 	reg->smin_value = S64_MIN;
1272 	reg->smax_value = S64_MAX;
1273 	reg->umin_value = 0;
1274 	reg->umax_value = U64_MAX;
1275 
1276 	reg->s32_min_value = S32_MIN;
1277 	reg->s32_max_value = S32_MAX;
1278 	reg->u32_min_value = 0;
1279 	reg->u32_max_value = U32_MAX;
1280 }
1281 
1282 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1283 {
1284 	reg->smin_value = S64_MIN;
1285 	reg->smax_value = S64_MAX;
1286 	reg->umin_value = 0;
1287 	reg->umax_value = U64_MAX;
1288 }
1289 
1290 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1291 {
1292 	reg->s32_min_value = S32_MIN;
1293 	reg->s32_max_value = S32_MAX;
1294 	reg->u32_min_value = 0;
1295 	reg->u32_max_value = U32_MAX;
1296 }
1297 
1298 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1299 {
1300 	struct tnum var32_off = tnum_subreg(reg->var_off);
1301 
1302 	/* min signed is max(sign bit) | min(other bits) */
1303 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1304 			var32_off.value | (var32_off.mask & S32_MIN));
1305 	/* max signed is min(sign bit) | max(other bits) */
1306 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1307 			var32_off.value | (var32_off.mask & S32_MAX));
1308 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1309 	reg->u32_max_value = min(reg->u32_max_value,
1310 				 (u32)(var32_off.value | var32_off.mask));
1311 }
1312 
1313 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1314 {
1315 	/* min signed is max(sign bit) | min(other bits) */
1316 	reg->smin_value = max_t(s64, reg->smin_value,
1317 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1318 	/* max signed is min(sign bit) | max(other bits) */
1319 	reg->smax_value = min_t(s64, reg->smax_value,
1320 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1321 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1322 	reg->umax_value = min(reg->umax_value,
1323 			      reg->var_off.value | reg->var_off.mask);
1324 }
1325 
1326 static void __update_reg_bounds(struct bpf_reg_state *reg)
1327 {
1328 	__update_reg32_bounds(reg);
1329 	__update_reg64_bounds(reg);
1330 }
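
/* Worked example (illustrative): with var_off = (value = 0x4; mask = 0x3) the
 * register can only hold 4, 5, 6 or 7, so the helpers above raise umin_value
 * to at least 4 (the known bits) and lower umax_value to at most
 * 0x4 | 0x3 = 7 (the known bits with every unknown bit set).
 */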
1331 
1332 /* Uses signed min/max values to inform unsigned, and vice-versa */
1333 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1334 {
1335 	/* Learn sign from signed bounds.
1336 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1337 	 * are the same, so combine.  This works even in the negative case, e.g.
1338 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1339 	 */
1340 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1341 		reg->s32_min_value = reg->u32_min_value =
1342 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1343 		reg->s32_max_value = reg->u32_max_value =
1344 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1345 		return;
1346 	}
1347 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1348 	 * boundary, so we must be careful.
1349 	 */
1350 	if ((s32)reg->u32_max_value >= 0) {
1351 		/* Positive.  We can't learn anything from the smin, but smax
1352 		 * is positive, hence safe.
1353 		 */
1354 		reg->s32_min_value = reg->u32_min_value;
1355 		reg->s32_max_value = reg->u32_max_value =
1356 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1357 	} else if ((s32)reg->u32_min_value < 0) {
1358 		/* Negative.  We can't learn anything from the smax, but smin
1359 		 * is negative, hence safe.
1360 		 */
1361 		reg->s32_min_value = reg->u32_min_value =
1362 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1363 		reg->s32_max_value = reg->u32_max_value;
1364 	}
1365 }
1366 
1367 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
1368 {
1369 	/* Learn sign from signed bounds.
1370 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1371 	 * are the same, so combine.  This works even in the negative case, e.g.
1372 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1373 	 */
1374 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
1375 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1376 							  reg->umin_value);
1377 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1378 							  reg->umax_value);
1379 		return;
1380 	}
1381 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1382 	 * boundary, so we must be careful.
1383 	 */
1384 	if ((s64)reg->umax_value >= 0) {
1385 		/* Positive.  We can't learn anything from the smin, but smax
1386 		 * is positive, hence safe.
1387 		 */
1388 		reg->smin_value = reg->umin_value;
1389 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1390 							  reg->umax_value);
1391 	} else if ((s64)reg->umin_value < 0) {
1392 		/* Negative.  We can't learn anything from the smax, but smin
1393 		 * is negative, hence safe.
1394 		 */
1395 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1396 							  reg->umin_value);
1397 		reg->smax_value = reg->umax_value;
1398 	}
1399 }
1400 
1401 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1402 {
1403 	__reg32_deduce_bounds(reg);
1404 	__reg64_deduce_bounds(reg);
1405 }
1406 
1407 /* Attempts to improve var_off based on unsigned min/max information */
1408 static void __reg_bound_offset(struct bpf_reg_state *reg)
1409 {
1410 	struct tnum var64_off = tnum_intersect(reg->var_off,
1411 					       tnum_range(reg->umin_value,
1412 							  reg->umax_value));
1413 	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1414 						tnum_range(reg->u32_min_value,
1415 							   reg->u32_max_value));
1416 
1417 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1418 }
1419 
1420 static bool __reg32_bound_s64(s32 a)
1421 {
1422 	return a >= 0 && a <= S32_MAX;
1423 }
1424 
1425 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
1426 {
1427 	reg->umin_value = reg->u32_min_value;
1428 	reg->umax_value = reg->u32_max_value;
1429 
1430 	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds, but
1431 	 * they must be positive; otherwise set worst-case bounds and refine them
1432 	 * later from the tnum.
1433 	 */
1434 	if (__reg32_bound_s64(reg->s32_min_value) &&
1435 	    __reg32_bound_s64(reg->s32_max_value)) {
1436 		reg->smin_value = reg->s32_min_value;
1437 		reg->smax_value = reg->s32_max_value;
1438 	} else {
1439 		reg->smin_value = 0;
1440 		reg->smax_value = U32_MAX;
1441 	}
1442 }
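
/* For example (illustrative): 32-bit signed bounds [2, 10] carry over to
 * smin_value/smax_value = [2, 10], while bounds [-5, 10] do not, because a
 * negative s32 in a zero-extended 64-bit register becomes a large positive
 * value; in that case the conservative [0, U32_MAX] is used and later
 * refined from the tnum.
 */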
1443 
1444 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1445 {
1446 	/* special case when the 64-bit register has its upper 32 bits zeroed.
1447 	 * Typically happens after a zext or <<32, >>32 sequence, allowing us
1448 	 * to use the 32-bit bounds directly.
1449 	 */
1450 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1451 		__reg_assign_32_into_64(reg);
1452 	} else {
1453 		/* Otherwise the best we can do is push the lower 32-bit known and
1454 		 * unknown bits into the register (var_off was set from the jmp
1455 		 * logic) and then learn as much as possible from the 64-bit tnum's
1456 		 * known and unknown bits. The previous smin/smax bounds are invalid
1457 		 * here because of the jmp32 compare, so mark them unknown so they
1458 		 * do not impact the tnum bounds calculation.
1459 		 */
1460 		__mark_reg64_unbounded(reg);
1461 		__update_reg_bounds(reg);
1462 	}
1463 
1464 	/* Intersecting with the old var_off might have improved our bounds
1465 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1466 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1467 	 */
1468 	__reg_deduce_bounds(reg);
1469 	__reg_bound_offset(reg);
1470 	__update_reg_bounds(reg);
1471 }
1472 
1473 static bool __reg64_bound_s32(s64 a)
1474 {
1475 	return a >= S32_MIN && a <= S32_MAX;
1476 }
1477 
1478 static bool __reg64_bound_u32(u64 a)
1479 {
1480 	return a >= U32_MIN && a <= U32_MAX;
1481 }
1482 
1483 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1484 {
1485 	__mark_reg32_unbounded(reg);
1486 
1487 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
1488 		reg->s32_min_value = (s32)reg->smin_value;
1489 		reg->s32_max_value = (s32)reg->smax_value;
1490 	}
1491 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
1492 		reg->u32_min_value = (u32)reg->umin_value;
1493 		reg->u32_max_value = (u32)reg->umax_value;
1494 	}
1495 
1496 	/* Intersecting with the old var_off might have improved our bounds
1497 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1498 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1499 	 */
1500 	__reg_deduce_bounds(reg);
1501 	__reg_bound_offset(reg);
1502 	__update_reg_bounds(reg);
1503 }
1504 
1505 /* Mark a register as having a completely unknown (scalar) value. */
1506 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1507 			       struct bpf_reg_state *reg)
1508 {
1509 	/*
1510 	 * Clear type, id, off, and union(map_ptr, range) and
1511 	 * padding between 'type' and union
1512 	 */
1513 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1514 	reg->type = SCALAR_VALUE;
1515 	reg->var_off = tnum_unknown;
1516 	reg->frameno = 0;
1517 	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
1518 	__mark_reg_unbounded(reg);
1519 }
1520 
1521 static void mark_reg_unknown(struct bpf_verifier_env *env,
1522 			     struct bpf_reg_state *regs, u32 regno)
1523 {
1524 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1525 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1526 		/* Something bad happened, let's kill all regs except FP */
1527 		for (regno = 0; regno < BPF_REG_FP; regno++)
1528 			__mark_reg_not_init(env, regs + regno);
1529 		return;
1530 	}
1531 	__mark_reg_unknown(env, regs + regno);
1532 }
1533 
1534 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1535 				struct bpf_reg_state *reg)
1536 {
1537 	__mark_reg_unknown(env, reg);
1538 	reg->type = NOT_INIT;
1539 }
1540 
1541 static void mark_reg_not_init(struct bpf_verifier_env *env,
1542 			      struct bpf_reg_state *regs, u32 regno)
1543 {
1544 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1545 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1546 		/* Something bad happened, let's kill all regs except FP */
1547 		for (regno = 0; regno < BPF_REG_FP; regno++)
1548 			__mark_reg_not_init(env, regs + regno);
1549 		return;
1550 	}
1551 	__mark_reg_not_init(env, regs + regno);
1552 }
1553 
1554 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1555 			    struct bpf_reg_state *regs, u32 regno,
1556 			    enum bpf_reg_type reg_type,
1557 			    struct btf *btf, u32 btf_id,
1558 			    enum bpf_type_flag flag)
1559 {
1560 	if (reg_type == SCALAR_VALUE) {
1561 		mark_reg_unknown(env, regs, regno);
1562 		return;
1563 	}
1564 	mark_reg_known_zero(env, regs, regno);
1565 	regs[regno].type = PTR_TO_BTF_ID | flag;
1566 	regs[regno].btf = btf;
1567 	regs[regno].btf_id = btf_id;
1568 }
1569 
1570 #define DEF_NOT_SUBREG	(0)
1571 static void init_reg_state(struct bpf_verifier_env *env,
1572 			   struct bpf_func_state *state)
1573 {
1574 	struct bpf_reg_state *regs = state->regs;
1575 	int i;
1576 
1577 	for (i = 0; i < MAX_BPF_REG; i++) {
1578 		mark_reg_not_init(env, regs, i);
1579 		regs[i].live = REG_LIVE_NONE;
1580 		regs[i].parent = NULL;
1581 		regs[i].subreg_def = DEF_NOT_SUBREG;
1582 	}
1583 
1584 	/* frame pointer */
1585 	regs[BPF_REG_FP].type = PTR_TO_STACK;
1586 	mark_reg_known_zero(env, regs, BPF_REG_FP);
1587 	regs[BPF_REG_FP].frameno = state->frameno;
1588 }
1589 
1590 #define BPF_MAIN_FUNC (-1)
1591 static void init_func_state(struct bpf_verifier_env *env,
1592 			    struct bpf_func_state *state,
1593 			    int callsite, int frameno, int subprogno)
1594 {
1595 	state->callsite = callsite;
1596 	state->frameno = frameno;
1597 	state->subprogno = subprogno;
1598 	init_reg_state(env, state);
1599 	mark_verifier_state_scratched(env);
1600 }
1601 
1602 /* Similar to push_stack(), but for async callbacks */
1603 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1604 						int insn_idx, int prev_insn_idx,
1605 						int subprog)
1606 {
1607 	struct bpf_verifier_stack_elem *elem;
1608 	struct bpf_func_state *frame;
1609 
1610 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1611 	if (!elem)
1612 		goto err;
1613 
1614 	elem->insn_idx = insn_idx;
1615 	elem->prev_insn_idx = prev_insn_idx;
1616 	elem->next = env->head;
1617 	elem->log_pos = env->log.len_used;
1618 	env->head = elem;
1619 	env->stack_size++;
1620 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1621 		verbose(env,
1622 			"The sequence of %d jumps is too complex for async cb.\n",
1623 			env->stack_size);
1624 		goto err;
1625 	}
1626 	/* Unlike push_stack(), do not copy_verifier_state().
1627 	 * The caller state doesn't matter.
1628 	 * This is an async callback; it starts with a fresh stack.
1629 	 * Initialize it similarly to do_check_common().
1630 	 */
1631 	elem->st.branches = 1;
1632 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1633 	if (!frame)
1634 		goto err;
1635 	init_func_state(env, frame,
1636 			BPF_MAIN_FUNC /* callsite */,
1637 			0 /* frameno within this callchain */,
1638 			subprog /* subprog number within this prog */);
1639 	elem->st.frame[0] = frame;
1640 	return &elem->st;
1641 err:
1642 	free_verifier_state(env->cur_state, true);
1643 	env->cur_state = NULL;
1644 	/* pop all elements and return */
1645 	while (!pop_stack(env, NULL, NULL, false));
1646 	return NULL;
1647 }
1648 
1649 
1650 enum reg_arg_type {
1651 	SRC_OP,		/* register is used as source operand */
1652 	DST_OP,		/* register is used as destination operand */
1653 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1654 };
1655 
1656 static int cmp_subprogs(const void *a, const void *b)
1657 {
1658 	return ((struct bpf_subprog_info *)a)->start -
1659 	       ((struct bpf_subprog_info *)b)->start;
1660 }
1661 
1662 static int find_subprog(struct bpf_verifier_env *env, int off)
1663 {
1664 	struct bpf_subprog_info *p;
1665 
1666 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1667 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1668 	if (!p)
1669 		return -ENOENT;
1670 	return p - env->subprog_info;
1671 
1672 }
1673 
1674 static int add_subprog(struct bpf_verifier_env *env, int off)
1675 {
1676 	int insn_cnt = env->prog->len;
1677 	int ret;
1678 
1679 	if (off >= insn_cnt || off < 0) {
1680 		verbose(env, "call to invalid destination\n");
1681 		return -EINVAL;
1682 	}
1683 	ret = find_subprog(env, off);
1684 	if (ret >= 0)
1685 		return ret;
1686 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1687 		verbose(env, "too many subprograms\n");
1688 		return -E2BIG;
1689 	}
1690 	/* determine subprog starts. The end is one before the next starts */
1691 	env->subprog_info[env->subprog_cnt++].start = off;
1692 	sort(env->subprog_info, env->subprog_cnt,
1693 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1694 	return env->subprog_cnt - 1;
1695 }
1696 
1697 #define MAX_KFUNC_DESCS 256
1698 #define MAX_KFUNC_BTFS	256
1699 
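/* Per-program tables describing kernel function (kfunc) call targets:
 * kfunc_tab holds one descriptor per distinct (BTF func_id, offset) pair,
 * while kfunc_btf_tab caches the module BTFs that non-zero offsets refer to.
 */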
1700 struct bpf_kfunc_desc {
1701 	struct btf_func_model func_model;
1702 	u32 func_id;
1703 	s32 imm;
1704 	u16 offset;
1705 };
1706 
1707 struct bpf_kfunc_btf {
1708 	struct btf *btf;
1709 	struct module *module;
1710 	u16 offset;
1711 };
1712 
1713 struct bpf_kfunc_desc_tab {
1714 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
1715 	u32 nr_descs;
1716 };
1717 
1718 struct bpf_kfunc_btf_tab {
1719 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
1720 	u32 nr_descs;
1721 };
1722 
1723 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
1724 {
1725 	const struct bpf_kfunc_desc *d0 = a;
1726 	const struct bpf_kfunc_desc *d1 = b;
1727 
1728 	/* func_id is not greater than BTF_MAX_TYPE */
1729 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
1730 }
1731 
1732 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
1733 {
1734 	const struct bpf_kfunc_btf *d0 = a;
1735 	const struct bpf_kfunc_btf *d1 = b;
1736 
1737 	return d0->offset - d1->offset;
1738 }
1739 
1740 static const struct bpf_kfunc_desc *
1741 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
1742 {
1743 	struct bpf_kfunc_desc desc = {
1744 		.func_id = func_id,
1745 		.offset = offset,
1746 	};
1747 	struct bpf_kfunc_desc_tab *tab;
1748 
1749 	tab = prog->aux->kfunc_tab;
1750 	return bsearch(&desc, tab->descs, tab->nr_descs,
1751 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
1752 }
1753 
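/* Look up (or create) the cached module BTF entry for a kfunc call whose
 * insn->off indexes into the fd_array supplied at program load time.  New
 * entries take a reference on both the BTF and its module, dropped later in
 * bpf_free_kfunc_btf_tab().
 */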
1754 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
1755 					 s16 offset)
1756 {
1757 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
1758 	struct bpf_kfunc_btf_tab *tab;
1759 	struct bpf_kfunc_btf *b;
1760 	struct module *mod;
1761 	struct btf *btf;
1762 	int btf_fd;
1763 
1764 	tab = env->prog->aux->kfunc_btf_tab;
1765 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
1766 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
1767 	if (!b) {
1768 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
1769 			verbose(env, "too many different module BTFs\n");
1770 			return ERR_PTR(-E2BIG);
1771 		}
1772 
1773 		if (bpfptr_is_null(env->fd_array)) {
1774 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
1775 			return ERR_PTR(-EPROTO);
1776 		}
1777 
1778 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
1779 					    offset * sizeof(btf_fd),
1780 					    sizeof(btf_fd)))
1781 			return ERR_PTR(-EFAULT);
1782 
1783 		btf = btf_get_by_fd(btf_fd);
1784 		if (IS_ERR(btf)) {
1785 			verbose(env, "invalid module BTF fd specified\n");
1786 			return btf;
1787 		}
1788 
1789 		if (!btf_is_module(btf)) {
1790 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
1791 			btf_put(btf);
1792 			return ERR_PTR(-EINVAL);
1793 		}
1794 
1795 		mod = btf_try_get_module(btf);
1796 		if (!mod) {
1797 			btf_put(btf);
1798 			return ERR_PTR(-ENXIO);
1799 		}
1800 
1801 		b = &tab->descs[tab->nr_descs++];
1802 		b->btf = btf;
1803 		b->module = mod;
1804 		b->offset = offset;
1805 
1806 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1807 		     kfunc_btf_cmp_by_off, NULL);
1808 	}
1809 	return b->btf;
1810 }
1811 
1812 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
1813 {
1814 	if (!tab)
1815 		return;
1816 
1817 	while (tab->nr_descs--) {
1818 		module_put(tab->descs[tab->nr_descs].module);
1819 		btf_put(tab->descs[tab->nr_descs].btf);
1820 	}
1821 	kfree(tab);
1822 }
1823 
1824 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env,
1825 				       u32 func_id, s16 offset)
1826 {
1827 	if (offset) {
1828 		if (offset < 0) {
1829 			/* In the future, negative offsets could be allowed to increase
1830 			 * the limit of the fd index into fd_array by interpreting it as a u16.
1831 			 */
1832 			verbose(env, "negative offset disallowed for kernel module function call\n");
1833 			return ERR_PTR(-EINVAL);
1834 		}
1835 
1836 		return __find_kfunc_desc_btf(env, offset);
1837 	}
1838 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
1839 }
1840 
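/* Register a kernel function call in prog->aux->kfunc_tab: resolve the BTF
 * that (func_id, offset) refers to, check the target is a function with a
 * valid prototype, derive the call immediate from its kallsyms address and
 * distill the function model used by the JIT.
 */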
1841 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
1842 {
1843 	const struct btf_type *func, *func_proto;
1844 	struct bpf_kfunc_btf_tab *btf_tab;
1845 	struct bpf_kfunc_desc_tab *tab;
1846 	struct bpf_prog_aux *prog_aux;
1847 	struct bpf_kfunc_desc *desc;
1848 	const char *func_name;
1849 	struct btf *desc_btf;
1850 	unsigned long call_imm;
1851 	unsigned long addr;
1852 	int err;
1853 
1854 	prog_aux = env->prog->aux;
1855 	tab = prog_aux->kfunc_tab;
1856 	btf_tab = prog_aux->kfunc_btf_tab;
1857 	if (!tab) {
1858 		if (!btf_vmlinux) {
1859 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
1860 			return -ENOTSUPP;
1861 		}
1862 
1863 		if (!env->prog->jit_requested) {
1864 			verbose(env, "JIT is required for calling kernel function\n");
1865 			return -ENOTSUPP;
1866 		}
1867 
1868 		if (!bpf_jit_supports_kfunc_call()) {
1869 			verbose(env, "JIT does not support calling kernel function\n");
1870 			return -ENOTSUPP;
1871 		}
1872 
1873 		if (!env->prog->gpl_compatible) {
1874 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
1875 			return -EINVAL;
1876 		}
1877 
1878 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1879 		if (!tab)
1880 			return -ENOMEM;
1881 		prog_aux->kfunc_tab = tab;
1882 	}
1883 
1884 	/* func_id == 0 is always invalid, but instead of returning an error, be
1885 	 * conservative and wait until the code elimination pass before returning
1886 	 * an error, so that BPF programs loaded from userspace may contain such
1887 	 * invalid calls as long as they get pruned out.  The offset must also be
1888 	 * left untouched (zero) for such calls.
1889 	 */
1890 	if (!func_id && !offset)
1891 		return 0;
1892 
1893 	if (!btf_tab && offset) {
1894 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
1895 		if (!btf_tab)
1896 			return -ENOMEM;
1897 		prog_aux->kfunc_btf_tab = btf_tab;
1898 	}
1899 
1900 	desc_btf = find_kfunc_desc_btf(env, func_id, offset);
1901 	if (IS_ERR(desc_btf)) {
1902 		verbose(env, "failed to find BTF for kernel function\n");
1903 		return PTR_ERR(desc_btf);
1904 	}
1905 
1906 	if (find_kfunc_desc(env->prog, func_id, offset))
1907 		return 0;
1908 
1909 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
1910 		verbose(env, "too many different kernel function calls\n");
1911 		return -E2BIG;
1912 	}
1913 
1914 	func = btf_type_by_id(desc_btf, func_id);
1915 	if (!func || !btf_type_is_func(func)) {
1916 		verbose(env, "kernel btf_id %u is not a function\n",
1917 			func_id);
1918 		return -EINVAL;
1919 	}
1920 	func_proto = btf_type_by_id(desc_btf, func->type);
1921 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
1922 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
1923 			func_id);
1924 		return -EINVAL;
1925 	}
1926 
1927 	func_name = btf_name_by_offset(desc_btf, func->name_off);
1928 	addr = kallsyms_lookup_name(func_name);
1929 	if (!addr) {
1930 		verbose(env, "cannot find address for kernel function %s\n",
1931 			func_name);
1932 		return -EINVAL;
1933 	}
1934 
1935 	call_imm = BPF_CALL_IMM(addr);
1936 	/* Check whether or not the relative offset overflows desc->imm */
1937 	if ((unsigned long)(s32)call_imm != call_imm) {
1938 		verbose(env, "address of kernel function %s is out of range\n",
1939 			func_name);
1940 		return -EINVAL;
1941 	}
1942 
1943 	desc = &tab->descs[tab->nr_descs++];
1944 	desc->func_id = func_id;
1945 	desc->imm = call_imm;
1946 	desc->offset = offset;
1947 	err = btf_distill_func_proto(&env->log, desc_btf,
1948 				     func_proto, func_name,
1949 				     &desc->func_model);
1950 	if (!err)
1951 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1952 		     kfunc_desc_cmp_by_id_off, NULL);
1953 	return err;
1954 }
1955 
1956 static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
1957 {
1958 	const struct bpf_kfunc_desc *d0 = a;
1959 	const struct bpf_kfunc_desc *d1 = b;
1960 
1961 	if (d0->imm > d1->imm)
1962 		return 1;
1963 	else if (d0->imm < d1->imm)
1964 		return -1;
1965 	return 0;
1966 }
1967 
1968 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
1969 {
1970 	struct bpf_kfunc_desc_tab *tab;
1971 
1972 	tab = prog->aux->kfunc_tab;
1973 	if (!tab)
1974 		return;
1975 
1976 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1977 	     kfunc_desc_cmp_by_imm, NULL);
1978 }
1979 
1980 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
1981 {
1982 	return !!prog->aux->kfunc_tab;
1983 }
1984 
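/* Used by the JIT to look up the btf_func_model for a kfunc call insn by
 * its imm.  Assumes the desc table has been re-sorted by imm via
 * sort_kfunc_descs_by_imm().
 */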
1985 const struct btf_func_model *
1986 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1987 			 const struct bpf_insn *insn)
1988 {
1989 	const struct bpf_kfunc_desc desc = {
1990 		.imm = insn->imm,
1991 	};
1992 	const struct bpf_kfunc_desc *res;
1993 	struct bpf_kfunc_desc_tab *tab;
1994 
1995 	tab = prog->aux->kfunc_tab;
1996 	res = bsearch(&desc, tab->descs, tab->nr_descs,
1997 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
1998 
1999 	return res ? &res->func_model : NULL;
2000 }
2001 
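/* First pass over the instructions: record the target of every bpf-to-bpf
 * call (and BPF_PSEUDO_FUNC ld_imm64) as a subprog start and register every
 * kfunc call, then append a fake 'exit' subprog to simplify iteration.
 */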
2002 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2003 {
2004 	struct bpf_subprog_info *subprog = env->subprog_info;
2005 	struct bpf_insn *insn = env->prog->insnsi;
2006 	int i, ret, insn_cnt = env->prog->len;
2007 
2008 	/* Add entry function. */
2009 	ret = add_subprog(env, 0);
2010 	if (ret)
2011 		return ret;
2012 
2013 	for (i = 0; i < insn_cnt; i++, insn++) {
2014 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2015 		    !bpf_pseudo_kfunc_call(insn))
2016 			continue;
2017 
2018 		if (!env->bpf_capable) {
2019 			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
2020 			return -EPERM;
2021 		}
2022 
2023 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2024 			ret = add_subprog(env, i + insn->imm + 1);
2025 		else
2026 			ret = add_kfunc_call(env, insn->imm, insn->off);
2027 
2028 		if (ret < 0)
2029 			return ret;
2030 	}
2031 
2032 	/* Add a fake 'exit' subprog which could simplify subprog iteration
2033 	 * logic. 'subprog_cnt' should not be increased.
2034 	 */
2035 	subprog[env->subprog_cnt].start = insn_cnt;
2036 
2037 	if (env->log.level & BPF_LOG_LEVEL2)
2038 		for (i = 0; i < env->subprog_cnt; i++)
2039 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2040 
2041 	return 0;
2042 }
2043 
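/* Verify subprog boundaries: every jump must land within its own subprog and
 * the last insn of each subprog must be an exit or an unconditional jump, so
 * one subprog cannot fall through into the next.  Also record per-subprog use
 * of tail calls and LD_ABS/LD_IND.
 */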
2044 static int check_subprogs(struct bpf_verifier_env *env)
2045 {
2046 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2047 	struct bpf_subprog_info *subprog = env->subprog_info;
2048 	struct bpf_insn *insn = env->prog->insnsi;
2049 	int insn_cnt = env->prog->len;
2050 
2051 	/* now check that all jumps are within the same subprog */
2052 	subprog_start = subprog[cur_subprog].start;
2053 	subprog_end = subprog[cur_subprog + 1].start;
2054 	for (i = 0; i < insn_cnt; i++) {
2055 		u8 code = insn[i].code;
2056 
2057 		if (code == (BPF_JMP | BPF_CALL) &&
2058 		    insn[i].imm == BPF_FUNC_tail_call &&
2059 		    insn[i].src_reg != BPF_PSEUDO_CALL)
2060 			subprog[cur_subprog].has_tail_call = true;
2061 		if (BPF_CLASS(code) == BPF_LD &&
2062 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2063 			subprog[cur_subprog].has_ld_abs = true;
2064 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2065 			goto next;
2066 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2067 			goto next;
2068 		off = i + insn[i].off + 1;
2069 		if (off < subprog_start || off >= subprog_end) {
2070 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2071 			return -EINVAL;
2072 		}
2073 next:
2074 		if (i == subprog_end - 1) {
2075 			/* to avoid fall-through from one subprog into another
2076 			 * the last insn of the subprog should be either exit
2077 			 * or unconditional jump back
2078 			 */
2079 			if (code != (BPF_JMP | BPF_EXIT) &&
2080 			    code != (BPF_JMP | BPF_JA)) {
2081 				verbose(env, "last insn is not an exit or jmp\n");
2082 				return -EINVAL;
2083 			}
2084 			subprog_start = subprog_end;
2085 			cur_subprog++;
2086 			if (cur_subprog < env->subprog_cnt)
2087 				subprog_end = subprog[cur_subprog + 1].start;
2088 		}
2089 	}
2090 	return 0;
2091 }
2092 
2093 /* Parentage chain of this register (or stack slot) should take care of all
2094  * issues like callee-saved registers, stack slot allocation time, etc.
2095  */
2096 static int mark_reg_read(struct bpf_verifier_env *env,
2097 			 const struct bpf_reg_state *state,
2098 			 struct bpf_reg_state *parent, u8 flag)
2099 {
2100 	bool writes = parent == state->parent; /* Observe write marks */
2101 	int cnt = 0;
2102 
2103 	while (parent) {
2104 		/* if read wasn't screened by an earlier write ... */
2105 		if (writes && state->live & REG_LIVE_WRITTEN)
2106 			break;
2107 		if (parent->live & REG_LIVE_DONE) {
2108 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2109 				reg_type_str(env, parent->type),
2110 				parent->var_off.value, parent->off);
2111 			return -EFAULT;
2112 		}
2113 		/* The first condition is more likely to be true than the
2114 		 * second, so check it first.
2115 		 */
2116 		if ((parent->live & REG_LIVE_READ) == flag ||
2117 		    parent->live & REG_LIVE_READ64)
2118 			/* The parentage chain never changes and
2119 			 * this parent was already marked as LIVE_READ.
2120 			 * There is no need to keep walking the chain again and
2121 			 * keep re-marking all parents as LIVE_READ.
2122 			 * This case happens when the same register is read
2123 			 * multiple times without writes into it in-between.
2124 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2125 			 * then no need to set the weak REG_LIVE_READ32.
2126 			 */
2127 			break;
2128 		/* ... then we depend on parent's value */
2129 		parent->live |= flag;
2130 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2131 		if (flag == REG_LIVE_READ64)
2132 			parent->live &= ~REG_LIVE_READ32;
2133 		state = parent;
2134 		parent = state->parent;
2135 		writes = true;
2136 		cnt++;
2137 	}
2138 
2139 	if (env->longest_mark_read_walk < cnt)
2140 		env->longest_mark_read_walk = cnt;
2141 	return 0;
2142 }
2143 
2144 /* This function is supposed to be used by the following 32-bit optimization
2145  * code only. It returns TRUE if the source or destination register operates
2146  * on 64 bits; otherwise it returns FALSE.
2147  */
2148 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2149 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2150 {
2151 	u8 code, class, op;
2152 
2153 	code = insn->code;
2154 	class = BPF_CLASS(code);
2155 	op = BPF_OP(code);
2156 	if (class == BPF_JMP) {
2157 		/* BPF_EXIT for "main" will reach here. Return TRUE
2158 		 * conservatively.
2159 		 */
2160 		if (op == BPF_EXIT)
2161 			return true;
2162 		if (op == BPF_CALL) {
2163 			/* A BPF to BPF call will reach here because caller-saved
2164 			 * clobbers are marked with DST_OP_NO_MARK, for which we
2165 			 * don't care about the register def because they are
2166 			 * already marked as NOT_INIT anyway.
2167 			 */
2168 			if (insn->src_reg == BPF_PSEUDO_CALL)
2169 				return false;
2170 			/* Helper call will reach here because of arg type
2171 			 * check, conservatively return TRUE.
2172 			 */
2173 			if (t == SRC_OP)
2174 				return true;
2175 
2176 			return false;
2177 		}
2178 	}
2179 
2180 	if (class == BPF_ALU64 || class == BPF_JMP ||
2181 	    /* BPF_END always uses BPF_ALU class. */
2182 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2183 		return true;
2184 
2185 	if (class == BPF_ALU || class == BPF_JMP32)
2186 		return false;
2187 
2188 	if (class == BPF_LDX) {
2189 		if (t != SRC_OP)
2190 			return BPF_SIZE(code) == BPF_DW;
2191 		/* LDX source must be ptr. */
2192 		return true;
2193 	}
2194 
2195 	if (class == BPF_STX) {
2196 		/* BPF_STX (including atomic variants) has multiple source
2197 		 * operands, one of which is a ptr. Check whether the caller is
2198 		 * asking about it.
2199 		 */
2200 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2201 			return true;
2202 		return BPF_SIZE(code) == BPF_DW;
2203 	}
2204 
2205 	if (class == BPF_LD) {
2206 		u8 mode = BPF_MODE(code);
2207 
2208 		/* LD_IMM64 */
2209 		if (mode == BPF_IMM)
2210 			return true;
2211 
2212 		/* Both LD_IND and LD_ABS return 32-bit data. */
2213 		if (t != SRC_OP)
2214 			return  false;
2215 
2216 		/* Implicit ctx ptr. */
2217 		if (regno == BPF_REG_6)
2218 			return true;
2219 
2220 		/* Explicit source could be any width. */
2221 		return true;
2222 	}
2223 
2224 	if (class == BPF_ST)
2225 		/* The only source register for BPF_ST is a ptr. */
2226 		return true;
2227 
2228 	/* Conservatively return true by default. */
2229 	return true;
2230 }
2231 
2232 /* Return the regno defined by the insn, or -1. */
2233 static int insn_def_regno(const struct bpf_insn *insn)
2234 {
2235 	switch (BPF_CLASS(insn->code)) {
2236 	case BPF_JMP:
2237 	case BPF_JMP32:
2238 	case BPF_ST:
2239 		return -1;
2240 	case BPF_STX:
2241 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2242 		    (insn->imm & BPF_FETCH)) {
2243 			if (insn->imm == BPF_CMPXCHG)
2244 				return BPF_REG_0;
2245 			else
2246 				return insn->src_reg;
2247 		} else {
2248 			return -1;
2249 		}
2250 	default:
2251 		return insn->dst_reg;
2252 	}
2253 }
2254 
2255 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2256 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2257 {
2258 	int dst_reg = insn_def_regno(insn);
2259 
2260 	if (dst_reg == -1)
2261 		return false;
2262 
2263 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2264 }
2265 
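/* Record that the insn which last defined this sub-register needs its 32-bit
 * destination zero extended; once noted, the register is no longer tracked
 * as a sub-register definition.
 */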
2266 static void mark_insn_zext(struct bpf_verifier_env *env,
2267 			   struct bpf_reg_state *reg)
2268 {
2269 	s32 def_idx = reg->subreg_def;
2270 
2271 	if (def_idx == DEF_NOT_SUBREG)
2272 		return;
2273 
2274 	env->insn_aux_data[def_idx - 1].zext_dst = true;
2275 	/* The dst will be zero extended, so won't be sub-register anymore. */
2276 	reg->subreg_def = DEF_NOT_SUBREG;
2277 }
2278 
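/* Check that register 'regno' can be used as a source or destination operand
 * and update liveness accordingly: source reads must not be NOT_INIT and are
 * propagated to parent states via mark_reg_read(); destination writes set
 * REG_LIVE_WRITTEN, record the subreg definition for the zero-extension
 * optimization and, for DST_OP, mark the register value as unknown.
 */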
2279 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2280 			 enum reg_arg_type t)
2281 {
2282 	struct bpf_verifier_state *vstate = env->cur_state;
2283 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2284 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2285 	struct bpf_reg_state *reg, *regs = state->regs;
2286 	bool rw64;
2287 
2288 	if (regno >= MAX_BPF_REG) {
2289 		verbose(env, "R%d is invalid\n", regno);
2290 		return -EINVAL;
2291 	}
2292 
2293 	mark_reg_scratched(env, regno);
2294 
2295 	reg = &regs[regno];
2296 	rw64 = is_reg64(env, insn, regno, reg, t);
2297 	if (t == SRC_OP) {
2298 		/* check whether register used as source operand can be read */
2299 		if (reg->type == NOT_INIT) {
2300 			verbose(env, "R%d !read_ok\n", regno);
2301 			return -EACCES;
2302 		}
2303 		/* We don't need to worry about FP liveness because it's read-only */
2304 		if (regno == BPF_REG_FP)
2305 			return 0;
2306 
2307 		if (rw64)
2308 			mark_insn_zext(env, reg);
2309 
2310 		return mark_reg_read(env, reg, reg->parent,
2311 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2312 	} else {
2313 		/* check whether register used as dest operand can be written to */
2314 		if (regno == BPF_REG_FP) {
2315 			verbose(env, "frame pointer is read only\n");
2316 			return -EACCES;
2317 		}
2318 		reg->live |= REG_LIVE_WRITTEN;
2319 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2320 		if (t == DST_OP)
2321 			mark_reg_unknown(env, regs, regno);
2322 	}
2323 	return 0;
2324 }
2325 
2326 /* for any branch, call, exit record the history of jmps in the given state */
2327 static int push_jmp_history(struct bpf_verifier_env *env,
2328 			    struct bpf_verifier_state *cur)
2329 {
2330 	u32 cnt = cur->jmp_history_cnt;
2331 	struct bpf_idx_pair *p;
2332 
2333 	cnt++;
2334 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
2335 	if (!p)
2336 		return -ENOMEM;
2337 	p[cnt - 1].idx = env->insn_idx;
2338 	p[cnt - 1].prev_idx = env->prev_insn_idx;
2339 	cur->jmp_history = p;
2340 	cur->jmp_history_cnt = cnt;
2341 	return 0;
2342 }
2343 
2344 /* Backtrack one insn at a time. If idx is not at the top of recorded
2345  * history then previous instruction came from straight line execution.
2346  */
2347 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2348 			     u32 *history)
2349 {
2350 	u32 cnt = *history;
2351 
2352 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
2353 		i = st->jmp_history[cnt - 1].prev_idx;
2354 		(*history)--;
2355 	} else {
2356 		i--;
2357 	}
2358 	return i;
2359 }
2360 
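/* cb_call callback for print_bpf_insn(): resolve the BTF name of the callee
 * of a BPF_PSEUDO_KFUNC_CALL insn for verifier log output.
 */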
2361 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2362 {
2363 	const struct btf_type *func;
2364 	struct btf *desc_btf;
2365 
2366 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2367 		return NULL;
2368 
2369 	desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off);
2370 	if (IS_ERR(desc_btf))
2371 		return "<error>";
2372 
2373 	func = btf_type_by_id(desc_btf, insn->imm);
2374 	return btf_name_by_offset(desc_btf, func->name_off);
2375 }
2376 
2377 /* For given verifier state backtrack_insn() is called from the last insn to
2378  * the first insn. Its purpose is to compute a bitmask of registers and
2379  * stack slots that need precision in the parent verifier state.
2380  */
2381 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2382 			  u32 *reg_mask, u64 *stack_mask)
2383 {
2384 	const struct bpf_insn_cbs cbs = {
2385 		.cb_call	= disasm_kfunc_name,
2386 		.cb_print	= verbose,
2387 		.private_data	= env,
2388 	};
2389 	struct bpf_insn *insn = env->prog->insnsi + idx;
2390 	u8 class = BPF_CLASS(insn->code);
2391 	u8 opcode = BPF_OP(insn->code);
2392 	u8 mode = BPF_MODE(insn->code);
2393 	u32 dreg = 1u << insn->dst_reg;
2394 	u32 sreg = 1u << insn->src_reg;
2395 	u32 spi;
2396 
2397 	if (insn->code == 0)
2398 		return 0;
2399 	if (env->log.level & BPF_LOG_LEVEL2) {
2400 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2401 		verbose(env, "%d: ", idx);
2402 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2403 	}
2404 
2405 	if (class == BPF_ALU || class == BPF_ALU64) {
2406 		if (!(*reg_mask & dreg))
2407 			return 0;
2408 		if (opcode == BPF_MOV) {
2409 			if (BPF_SRC(insn->code) == BPF_X) {
2410 				/* dreg = sreg
2411 				 * dreg needs precision after this insn
2412 				 * sreg needs precision before this insn
2413 				 */
2414 				*reg_mask &= ~dreg;
2415 				*reg_mask |= sreg;
2416 			} else {
2417 				/* dreg = K
2418 				 * dreg needs precision after this insn.
2419 				 * Corresponding register is already marked
2420 				 * as precise=true in this verifier state.
2421 				 * No further markings in parent are necessary
2422 				 */
2423 				*reg_mask &= ~dreg;
2424 			}
2425 		} else {
2426 			if (BPF_SRC(insn->code) == BPF_X) {
2427 				/* dreg += sreg
2428 				 * both dreg and sreg need precision
2429 				 * before this insn
2430 				 */
2431 				*reg_mask |= sreg;
2432 			} /* else dreg += K
2433 			   * dreg still needs precision before this insn
2434 			   */
2435 		}
2436 	} else if (class == BPF_LDX) {
2437 		if (!(*reg_mask & dreg))
2438 			return 0;
2439 		*reg_mask &= ~dreg;
2440 
2441 		/* scalars can only be spilled into stack w/o losing precision.
2442 		 * Load from any other memory can be zero extended.
2443 		 * The desire to keep that precision is already indicated
2444 		 * by 'precise' mark in corresponding register of this state.
2445 		 * No further tracking necessary.
2446 		 */
2447 		if (insn->src_reg != BPF_REG_FP)
2448 			return 0;
2449 
2450 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
2451 		 * that [fp - off] slot contains scalar that needs to be
2452 		 * tracked with precision
2453 		 */
2454 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2455 		if (spi >= 64) {
2456 			verbose(env, "BUG spi %d\n", spi);
2457 			WARN_ONCE(1, "verifier backtracking bug");
2458 			return -EFAULT;
2459 		}
2460 		*stack_mask |= 1ull << spi;
2461 	} else if (class == BPF_STX || class == BPF_ST) {
2462 		if (*reg_mask & dreg)
2463 			/* stx & st shouldn't be using _scalar_ dst_reg
2464 			 * to access memory. It means backtracking
2465 			 * encountered a case of pointer subtraction.
2466 			 */
2467 			return -ENOTSUPP;
2468 		/* scalars can only be spilled into stack */
2469 		if (insn->dst_reg != BPF_REG_FP)
2470 			return 0;
2471 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2472 		if (spi >= 64) {
2473 			verbose(env, "BUG spi %d\n", spi);
2474 			WARN_ONCE(1, "verifier backtracking bug");
2475 			return -EFAULT;
2476 		}
2477 		if (!(*stack_mask & (1ull << spi)))
2478 			return 0;
2479 		*stack_mask &= ~(1ull << spi);
2480 		if (class == BPF_STX)
2481 			*reg_mask |= sreg;
2482 	} else if (class == BPF_JMP || class == BPF_JMP32) {
2483 		if (opcode == BPF_CALL) {
2484 			if (insn->src_reg == BPF_PSEUDO_CALL)
2485 				return -ENOTSUPP;
2486 			/* regular helper call sets R0 */
2487 			*reg_mask &= ~1;
2488 			if (*reg_mask & 0x3f) {
2489 				/* if backtracing was looking for registers R1-R5
2490 				 * they should have been found already.
2491 				 */
2492 				verbose(env, "BUG regs %x\n", *reg_mask);
2493 				WARN_ONCE(1, "verifier backtracking bug");
2494 				return -EFAULT;
2495 			}
2496 		} else if (opcode == BPF_EXIT) {
2497 			return -ENOTSUPP;
2498 		}
2499 	} else if (class == BPF_LD) {
2500 		if (!(*reg_mask & dreg))
2501 			return 0;
2502 		*reg_mask &= ~dreg;
2503 		/* It's ld_imm64 or ld_abs or ld_ind.
2504 		 * For ld_imm64 no further tracking of precision
2505 		 * into parent is necessary
2506 		 */
2507 		if (mode == BPF_IND || mode == BPF_ABS)
2508 			/* to be analyzed */
2509 			return -ENOTSUPP;
2510 	}
2511 	return 0;
2512 }
2513 
2514 /* the scalar precision tracking algorithm:
2515  * . at the start all registers have precise=false.
2516  * . scalar ranges are tracked as normal through alu and jmp insns.
2517  * . once precise value of the scalar register is used in:
2518  *   .  ptr + scalar alu
2519  *   . if (scalar cond K|scalar)
2520  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
2521  *   backtrack through the verifier states and mark as precise all
2522  *   registers and stack slots with spilled constants that these
2523  *   scalar registers were derived from.
2524  * . during state pruning two registers (or spilled stack slots)
2525  *   are equivalent if both are not precise.
2526  *
2527  * Note the verifier cannot simply walk register parentage chain,
2528  * since many different registers and stack slots could have been
2529  * used to compute single precise scalar.
2530  *
2531  * The approach of starting with precise=true for all registers and then
2532  * backtracking to mark a register as not precise when the verifier detects
2533  * that the program doesn't care about the specific value (e.g., when a helper
2534  * takes the register as an ARG_ANYTHING parameter) is not safe.
2535  *
2536  * It's ok to walk a single parentage chain of the verifier states.
2537  * It's possible that this backtracking will go all the way back to the 1st insn.
2538  * All other branches will be explored for needing precision later.
2539  *
2540  * The backtracking needs to deal with cases like:
2541  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2542  * r9 -= r8
2543  * r5 = r9
2544  * if r5 > 0x79f goto pc+7
2545  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2546  * r5 += 1
2547  * ...
2548  * call bpf_perf_event_output#25
2549  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
2550  *
2551  * and this case:
2552  * r6 = 1
2553  * call foo // uses callee's r6 inside to compute r0
2554  * r0 += r6
2555  * if r0 == 0 goto
2556  *
2557  * to track above reg_mask/stack_mask needs to be independent for each frame.
2558  *
2559  * Also if parent's curframe > frame where backtracking started,
2560  * the verifier needs to mark registers in both frames, otherwise callees
2561  * may incorrectly prune callers. This is similar to
2562  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
2563  *
2564  * For now backtracking falls back into conservative marking.
2565  */
2566 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
2567 				     struct bpf_verifier_state *st)
2568 {
2569 	struct bpf_func_state *func;
2570 	struct bpf_reg_state *reg;
2571 	int i, j;
2572 
2573 	/* big hammer: mark all scalars precise in this path.
2574 	 * pop_stack may still get !precise scalars.
2575 	 */
2576 	for (; st; st = st->parent)
2577 		for (i = 0; i <= st->curframe; i++) {
2578 			func = st->frame[i];
2579 			for (j = 0; j < BPF_REG_FP; j++) {
2580 				reg = &func->regs[j];
2581 				if (reg->type != SCALAR_VALUE)
2582 					continue;
2583 				reg->precise = true;
2584 			}
2585 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2586 				if (!is_spilled_reg(&func->stack[j]))
2587 					continue;
2588 				reg = &func->stack[j].spilled_ptr;
2589 				if (reg->type != SCALAR_VALUE)
2590 					continue;
2591 				reg->precise = true;
2592 			}
2593 		}
2594 }
2595 
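/* Propagate precision backwards for register 'regno' and/or stack slot 'spi'
 * in the current frame: walk the jump history of this and parent states via
 * backtrack_insn() and mark every scalar that contributed to the value as
 * precise.  Falls back to mark_all_scalars_precise() when backtracking hits
 * an instruction it cannot handle (-ENOTSUPP).
 */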
2596 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2597 				  int spi)
2598 {
2599 	struct bpf_verifier_state *st = env->cur_state;
2600 	int first_idx = st->first_insn_idx;
2601 	int last_idx = env->insn_idx;
2602 	struct bpf_func_state *func;
2603 	struct bpf_reg_state *reg;
2604 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2605 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
2606 	bool skip_first = true;
2607 	bool new_marks = false;
2608 	int i, err;
2609 
2610 	if (!env->bpf_capable)
2611 		return 0;
2612 
2613 	func = st->frame[st->curframe];
2614 	if (regno >= 0) {
2615 		reg = &func->regs[regno];
2616 		if (reg->type != SCALAR_VALUE) {
2617 			WARN_ONCE(1, "backtracing misuse");
2618 			return -EFAULT;
2619 		}
2620 		if (!reg->precise)
2621 			new_marks = true;
2622 		else
2623 			reg_mask = 0;
2624 		reg->precise = true;
2625 	}
2626 
2627 	while (spi >= 0) {
2628 		if (!is_spilled_reg(&func->stack[spi])) {
2629 			stack_mask = 0;
2630 			break;
2631 		}
2632 		reg = &func->stack[spi].spilled_ptr;
2633 		if (reg->type != SCALAR_VALUE) {
2634 			stack_mask = 0;
2635 			break;
2636 		}
2637 		if (!reg->precise)
2638 			new_marks = true;
2639 		else
2640 			stack_mask = 0;
2641 		reg->precise = true;
2642 		break;
2643 	}
2644 
2645 	if (!new_marks)
2646 		return 0;
2647 	if (!reg_mask && !stack_mask)
2648 		return 0;
2649 	for (;;) {
2650 		DECLARE_BITMAP(mask, 64);
2651 		u32 history = st->jmp_history_cnt;
2652 
2653 		if (env->log.level & BPF_LOG_LEVEL2)
2654 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2655 		for (i = last_idx;;) {
2656 			if (skip_first) {
2657 				err = 0;
2658 				skip_first = false;
2659 			} else {
2660 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2661 			}
2662 			if (err == -ENOTSUPP) {
2663 				mark_all_scalars_precise(env, st);
2664 				return 0;
2665 			} else if (err) {
2666 				return err;
2667 			}
2668 			if (!reg_mask && !stack_mask)
2669 				/* Found assignment(s) into tracked register in this state.
2670 				 * Since this state is already marked, just return.
2671 				 * Nothing to be tracked further in the parent state.
2672 				 */
2673 				return 0;
2674 			if (i == first_idx)
2675 				break;
2676 			i = get_prev_insn_idx(st, i, &history);
2677 			if (i >= env->prog->len) {
2678 				/* This can happen if backtracking reached insn 0
2679 				 * and there are still reg_mask or stack_mask
2680 				 * to backtrack.
2681 				 * It means the backtracking missed the spot where
2682 				 * particular register was initialized with a constant.
2683 				 */
2684 				verbose(env, "BUG backtracking idx %d\n", i);
2685 				WARN_ONCE(1, "verifier backtracking bug");
2686 				return -EFAULT;
2687 			}
2688 		}
2689 		st = st->parent;
2690 		if (!st)
2691 			break;
2692 
2693 		new_marks = false;
2694 		func = st->frame[st->curframe];
2695 		bitmap_from_u64(mask, reg_mask);
2696 		for_each_set_bit(i, mask, 32) {
2697 			reg = &func->regs[i];
2698 			if (reg->type != SCALAR_VALUE) {
2699 				reg_mask &= ~(1u << i);
2700 				continue;
2701 			}
2702 			if (!reg->precise)
2703 				new_marks = true;
2704 			reg->precise = true;
2705 		}
2706 
2707 		bitmap_from_u64(mask, stack_mask);
2708 		for_each_set_bit(i, mask, 64) {
2709 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
2710 				/* the sequence of instructions:
2711 				 * 2: (bf) r3 = r10
2712 				 * 3: (7b) *(u64 *)(r3 -8) = r0
2713 				 * 4: (79) r4 = *(u64 *)(r10 -8)
2714 				 * doesn't contain jmps. It's backtracked
2715 				 * as a single block.
2716 				 * During backtracking insn 3 is not recognized as
2717 				 * stack access, so at the end of backtracking
2718 				 * stack slot fp-8 is still marked in stack_mask.
2719 				 * However the parent state may not have accessed
2720 				 * fp-8 and it's "unallocated" stack space.
2721 				 * In such case fallback to conservative.
2722 				 */
2723 				mark_all_scalars_precise(env, st);
2724 				return 0;
2725 			}
2726 
2727 			if (!is_spilled_reg(&func->stack[i])) {
2728 				stack_mask &= ~(1ull << i);
2729 				continue;
2730 			}
2731 			reg = &func->stack[i].spilled_ptr;
2732 			if (reg->type != SCALAR_VALUE) {
2733 				stack_mask &= ~(1ull << i);
2734 				continue;
2735 			}
2736 			if (!reg->precise)
2737 				new_marks = true;
2738 			reg->precise = true;
2739 		}
2740 		if (env->log.level & BPF_LOG_LEVEL2) {
2741 			verbose(env, "parent %s regs=%x stack=%llx marks:",
2742 				new_marks ? "didn't have" : "already had",
2743 				reg_mask, stack_mask);
2744 			print_verifier_state(env, func, true);
2745 		}
2746 
2747 		if (!reg_mask && !stack_mask)
2748 			break;
2749 		if (!new_marks)
2750 			break;
2751 
2752 		last_idx = st->last_insn_idx;
2753 		first_idx = st->first_insn_idx;
2754 	}
2755 	return 0;
2756 }
2757 
2758 static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2759 {
2760 	return __mark_chain_precision(env, regno, -1);
2761 }
2762 
2763 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2764 {
2765 	return __mark_chain_precision(env, -1, spi);
2766 }
2767 
2768 static bool is_spillable_regtype(enum bpf_reg_type type)
2769 {
2770 	switch (base_type(type)) {
2771 	case PTR_TO_MAP_VALUE:
2772 	case PTR_TO_STACK:
2773 	case PTR_TO_CTX:
2774 	case PTR_TO_PACKET:
2775 	case PTR_TO_PACKET_META:
2776 	case PTR_TO_PACKET_END:
2777 	case PTR_TO_FLOW_KEYS:
2778 	case CONST_PTR_TO_MAP:
2779 	case PTR_TO_SOCKET:
2780 	case PTR_TO_SOCK_COMMON:
2781 	case PTR_TO_TCP_SOCK:
2782 	case PTR_TO_XDP_SOCK:
2783 	case PTR_TO_BTF_ID:
2784 	case PTR_TO_BUF:
2785 	case PTR_TO_MEM:
2786 	case PTR_TO_FUNC:
2787 	case PTR_TO_MAP_KEY:
2788 		return true;
2789 	default:
2790 		return false;
2791 	}
2792 }
2793 
2794 /* Does this register contain a constant zero? */
2795 static bool register_is_null(struct bpf_reg_state *reg)
2796 {
2797 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2798 }
2799 
2800 static bool register_is_const(struct bpf_reg_state *reg)
2801 {
2802 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2803 }
2804 
2805 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2806 {
2807 	return tnum_is_unknown(reg->var_off) &&
2808 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2809 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
2810 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
2811 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2812 }
2813 
2814 static bool register_is_bounded(struct bpf_reg_state *reg)
2815 {
2816 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2817 }
2818 
2819 static bool __is_pointer_value(bool allow_ptr_leaks,
2820 			       const struct bpf_reg_state *reg)
2821 {
2822 	if (allow_ptr_leaks)
2823 		return false;
2824 
2825 	return reg->type != SCALAR_VALUE;
2826 }
2827 
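/* Copy 'reg' into stack slot 'spi', marking the 'size' written bytes as
 * STACK_SPILL and scrubbing the remaining bytes of the 8-byte slot for
 * spills smaller than a full register.
 */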
2828 static void save_register_state(struct bpf_func_state *state,
2829 				int spi, struct bpf_reg_state *reg,
2830 				int size)
2831 {
2832 	int i;
2833 
2834 	state->stack[spi].spilled_ptr = *reg;
2835 	if (size == BPF_REG_SIZE)
2836 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2837 
2838 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
2839 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
2840 
2841 	/* size < 8 bytes spill */
2842 	for (; i; i--)
2843 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
2844 }
2845 
2846 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers;
2847  * stack boundary and alignment are checked in check_mem_access()
2848  */
2849 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
2850 				       /* stack frame we're writing to */
2851 				       struct bpf_func_state *state,
2852 				       int off, int size, int value_regno,
2853 				       int insn_idx)
2854 {
2855 	struct bpf_func_state *cur; /* state of the current function */
2856 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
2857 	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
2858 	struct bpf_reg_state *reg = NULL;
2859 
2860 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
2861 	if (err)
2862 		return err;
2863 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
2864 	 * so it's an aligned access and [off, off + size) is within stack limits
2865 	 */
2866 	if (!env->allow_ptr_leaks &&
2867 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
2868 	    size != BPF_REG_SIZE) {
2869 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
2870 		return -EACCES;
2871 	}
2872 
2873 	cur = env->cur_state->frame[env->cur_state->curframe];
2874 	if (value_regno >= 0)
2875 		reg = &cur->regs[value_regno];
2876 	if (!env->bypass_spec_v4) {
2877 		bool sanitize = reg && is_spillable_regtype(reg->type);
2878 
2879 		for (i = 0; i < size; i++) {
2880 			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
2881 				sanitize = true;
2882 				break;
2883 			}
2884 		}
2885 
2886 		if (sanitize)
2887 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
2888 	}
2889 
2890 	mark_stack_slot_scratched(env, spi);
2891 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
2892 	    !register_is_null(reg) && env->bpf_capable) {
2893 		if (dst_reg != BPF_REG_FP) {
2894 			/* The backtracking logic can only recognize explicit
2895 			 * stack slot address like [fp - 8]. Other spill of
2896 			 * scalar via different register has to be conservative.
2897 			 * Backtrack from here and mark all registers as precise
2898 			 * that contributed into 'reg' being a constant.
2899 			 */
2900 			err = mark_chain_precision(env, value_regno);
2901 			if (err)
2902 				return err;
2903 		}
2904 		save_register_state(state, spi, reg, size);
2905 	} else if (reg && is_spillable_regtype(reg->type)) {
2906 		/* register containing pointer is being spilled into stack */
2907 		if (size != BPF_REG_SIZE) {
2908 			verbose_linfo(env, insn_idx, "; ");
2909 			verbose(env, "invalid size of register spill\n");
2910 			return -EACCES;
2911 		}
2912 		if (state != cur && reg->type == PTR_TO_STACK) {
2913 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2914 			return -EINVAL;
2915 		}
2916 		save_register_state(state, spi, reg, size);
2917 	} else {
2918 		u8 type = STACK_MISC;
2919 
2920 		/* regular write of data into stack destroys any spilled ptr */
2921 		state->stack[spi].spilled_ptr.type = NOT_INIT;
2922 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2923 		if (is_spilled_reg(&state->stack[spi]))
2924 			for (i = 0; i < BPF_REG_SIZE; i++)
2925 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
2926 
2927 		/* only mark the slot as written if all 8 bytes were written
2928 		 * otherwise read propagation may incorrectly stop too soon
2929 		 * when stack slots are partially written.
2930 		 * This heuristic means that read propagation will be
2931 		 * conservative, since it will add reg_live_read marks
2932 		 * to stack slots all the way to the first state when a program
2933 		 * writes+reads less than 8 bytes.
2934 		 */
2935 		if (size == BPF_REG_SIZE)
2936 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2937 
2938 		/* when we zero initialize stack slots mark them as such */
2939 		if (reg && register_is_null(reg)) {
2940 			/* backtracking doesn't work for STACK_ZERO yet. */
2941 			err = mark_chain_precision(env, value_regno);
2942 			if (err)
2943 				return err;
2944 			type = STACK_ZERO;
2945 		}
2946 
2947 		/* Mark slots affected by this stack write. */
2948 		for (i = 0; i < size; i++)
2949 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
2950 				type;
2951 	}
2952 	return 0;
2953 }
2954 
2955 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
2956  * known to contain a variable offset.
2957  * This function checks whether the write is permitted and conservatively
2958  * tracks the effects of the write, considering that each stack slot in the
2959  * dynamic range is potentially written to.
2960  *
2961  * 'off' includes 'regno->off'.
2962  * 'value_regno' can be -1, meaning that an unknown value is being written to
2963  * the stack.
2964  *
2965  * Spilled pointers in range are not marked as written because we don't know
2966  * what's going to be actually written. This means that read propagation for
2967  * future reads cannot be terminated by this write.
2968  *
2969  * For privileged programs, uninitialized stack slots are considered
2970  * initialized by this write (even though we don't know exactly what offsets
2971  * are going to be written to). The idea is that we don't want the verifier to
2972  * reject future reads that access slots written to through variable offsets.
2973  */
2974 static int check_stack_write_var_off(struct bpf_verifier_env *env,
2975 				     /* func where register points to */
2976 				     struct bpf_func_state *state,
2977 				     int ptr_regno, int off, int size,
2978 				     int value_regno, int insn_idx)
2979 {
2980 	struct bpf_func_state *cur; /* state of the current function */
2981 	int min_off, max_off;
2982 	int i, err;
2983 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
2984 	bool writing_zero = false;
2985 	/* set if the fact that we're writing a zero is used to let any
2986 	 * stack slots remain STACK_ZERO
2987 	 */
2988 	bool zero_used = false;
2989 
2990 	cur = env->cur_state->frame[env->cur_state->curframe];
2991 	ptr_reg = &cur->regs[ptr_regno];
2992 	min_off = ptr_reg->smin_value + off;
2993 	max_off = ptr_reg->smax_value + off + size;
2994 	if (value_regno >= 0)
2995 		value_reg = &cur->regs[value_regno];
2996 	if (value_reg && register_is_null(value_reg))
2997 		writing_zero = true;
2998 
2999 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3000 	if (err)
3001 		return err;
3002 
3003 
3004 	/* Variable offset writes destroy any spilled pointers in range. */
3005 	for (i = min_off; i < max_off; i++) {
3006 		u8 new_type, *stype;
3007 		int slot, spi;
3008 
3009 		slot = -i - 1;
3010 		spi = slot / BPF_REG_SIZE;
3011 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3012 		mark_stack_slot_scratched(env, spi);
3013 
3014 		if (!env->allow_ptr_leaks
3015 				&& *stype != NOT_INIT
3016 				&& *stype != SCALAR_VALUE) {
3017 			/* Reject the write if there are spilled pointers in
3018 			 * range. If we didn't reject here, the ptr status
3019 			 * would be erased below (even though not all slots are
3020 			 * actually overwritten), possibly opening the door to
3021 			 * leaks.
3022 			 */
3023 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3024 				insn_idx, i);
3025 			return -EINVAL;
3026 		}
3027 
3028 		/* Erase all spilled pointers. */
3029 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3030 
3031 		/* Update the slot type. */
3032 		new_type = STACK_MISC;
3033 		if (writing_zero && *stype == STACK_ZERO) {
3034 			new_type = STACK_ZERO;
3035 			zero_used = true;
3036 		}
3037 		/* If the slot is STACK_INVALID, we check whether it's OK to
3038 		 * pretend that it will be initialized by this write. The slot
3039 		 * might not actually be written to, and so if we mark it as
3040 		 * initialized future reads might leak uninitialized memory.
3041 		 * For privileged programs, we will accept such reads to slots
3042 		 * that may or may not be written because, if we rejected
3043 		 * them, the error would be too confusing.
3044 		 */
3045 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3046 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3047 					insn_idx, i);
3048 			return -EINVAL;
3049 		}
3050 		*stype = new_type;
3051 	}
3052 	if (zero_used) {
3053 		/* backtracking doesn't work for STACK_ZERO yet. */
3054 		err = mark_chain_precision(env, value_regno);
3055 		if (err)
3056 			return err;
3057 	}
3058 	return 0;
3059 }
3060 
3061 /* When register 'dst_regno' is assigned some values from stack[min_off,
3062  * max_off), we set the register's type according to the types of the
3063  * respective stack slots. If all the stack values are known to be zeros, then
3064  * so is the destination reg. Otherwise, the register is considered to be
3065  * SCALAR. This function does not deal with register filling; the caller must
3066  * ensure that all spilled registers in the stack range have been marked as
3067  * read.
3068  */
3069 static void mark_reg_stack_read(struct bpf_verifier_env *env,
3070 				/* func where src register points to */
3071 				struct bpf_func_state *ptr_state,
3072 				int min_off, int max_off, int dst_regno)
3073 {
3074 	struct bpf_verifier_state *vstate = env->cur_state;
3075 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3076 	int i, slot, spi;
3077 	u8 *stype;
3078 	int zeros = 0;
3079 
3080 	for (i = min_off; i < max_off; i++) {
3081 		slot = -i - 1;
3082 		spi = slot / BPF_REG_SIZE;
3083 		stype = ptr_state->stack[spi].slot_type;
3084 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3085 			break;
3086 		zeros++;
3087 	}
3088 	if (zeros == max_off - min_off) {
3089 		/* any access_size read into register is zero extended,
3090 		 * so the whole register == const_zero
3091 		 */
3092 		__mark_reg_const_zero(&state->regs[dst_regno]);
3093 		/* backtracking doesn't support STACK_ZERO yet,
3094 		 * so mark it precise here, so that later
3095 		 * backtracking can stop here.
3096 		 * Backtracking may not need this if this register
3097 		 * doesn't participate in pointer adjustment.
3098 		 * Forward propagation of precise flag is not
3099 		 * necessary either. This mark is only to stop
3100 		 * backtracking. Any register that contributed
3101 		 * to const 0 was marked precise before spill.
3102 		 */
3103 		state->regs[dst_regno].precise = true;
3104 	} else {
3105 		/* have read misc data from the stack */
3106 		mark_reg_unknown(env, state->regs, dst_regno);
3107 	}
3108 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3109 }
3110 
3111 /* Read the stack at 'off' and put the results into the register indicated by
3112  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3113  * spilled reg.
3114  *
3115  * 'dst_regno' can be -1, meaning that the read value is not going to a
3116  * register.
3117  *
3118  * The access is assumed to be within the current stack bounds.
3119  */
3120 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3121 				      /* func where src register points to */
3122 				      struct bpf_func_state *reg_state,
3123 				      int off, int size, int dst_regno)
3124 {
3125 	struct bpf_verifier_state *vstate = env->cur_state;
3126 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3127 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3128 	struct bpf_reg_state *reg;
3129 	u8 *stype, type;
3130 
3131 	stype = reg_state->stack[spi].slot_type;
3132 	reg = &reg_state->stack[spi].spilled_ptr;
3133 
3134 	if (is_spilled_reg(&reg_state->stack[spi])) {
3135 		u8 spill_size = 1;
3136 
3137 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3138 			spill_size++;
3139 
3140 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3141 			if (reg->type != SCALAR_VALUE) {
3142 				verbose_linfo(env, env->insn_idx, "; ");
3143 				verbose(env, "invalid size of register fill\n");
3144 				return -EACCES;
3145 			}
3146 
3147 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3148 			if (dst_regno < 0)
3149 				return 0;
3150 
3151 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
3152 				/* The earlier check_reg_arg() has decided the
3153 				 * subreg_def for this insn.  Save it first.
3154 				 */
3155 				s32 subreg_def = state->regs[dst_regno].subreg_def;
3156 
3157 				state->regs[dst_regno] = *reg;
3158 				state->regs[dst_regno].subreg_def = subreg_def;
3159 			} else {
3160 				for (i = 0; i < size; i++) {
3161 					type = stype[(slot - i) % BPF_REG_SIZE];
3162 					if (type == STACK_SPILL)
3163 						continue;
3164 					if (type == STACK_MISC)
3165 						continue;
3166 					verbose(env, "invalid read from stack off %d+%d size %d\n",
3167 						off, i, size);
3168 					return -EACCES;
3169 				}
3170 				mark_reg_unknown(env, state->regs, dst_regno);
3171 			}
3172 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3173 			return 0;
3174 		}
3175 
3176 		if (dst_regno >= 0) {
3177 			/* restore register state from stack */
3178 			state->regs[dst_regno] = *reg;
3179 			/* mark reg as written since spilled pointer state likely
3180 			 * has its liveness marks cleared by is_state_visited()
3181 			 * which resets stack/reg liveness for state transitions
3182 			 */
3183 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3184 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3185 			/* If dst_regno==-1, the caller is asking us whether
3186 			 * it is acceptable to use this value as a SCALAR_VALUE
3187 			 * (e.g. for XADD).
3188 			 * We must not allow unprivileged callers to do that
3189 			 * with spilled pointers.
3190 			 */
3191 			verbose(env, "leaking pointer from stack off %d\n",
3192 				off);
3193 			return -EACCES;
3194 		}
3195 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3196 	} else {
3197 		for (i = 0; i < size; i++) {
3198 			type = stype[(slot - i) % BPF_REG_SIZE];
3199 			if (type == STACK_MISC)
3200 				continue;
3201 			if (type == STACK_ZERO)
3202 				continue;
3203 			verbose(env, "invalid read from stack off %d+%d size %d\n",
3204 				off, i, size);
3205 			return -EACCES;
3206 		}
3207 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3208 		if (dst_regno >= 0)
3209 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
3210 	}
3211 	return 0;
3212 }
3213 
3214 enum stack_access_src {
3215 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
3216 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
3217 };
3218 
3219 static int check_stack_range_initialized(struct bpf_verifier_env *env,
3220 					 int regno, int off, int access_size,
3221 					 bool zero_size_allowed,
3222 					 enum stack_access_src type,
3223 					 struct bpf_call_arg_meta *meta);
3224 
3225 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3226 {
3227 	return cur_regs(env) + regno;
3228 }
3229 
3230 /* Read the stack at 'ptr_regno + off' and put the result into the register
3231  * 'dst_regno'.
3232  * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
3233  * but not its variable offset.
3234  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3235  *
3236  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3237  * filling registers (i.e. reads of spilled register cannot be detected when
3238  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3239  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3240  * offset; for a fixed offset check_stack_read_fixed_off should be used
3241  * instead.
3242  */
3243 static int check_stack_read_var_off(struct bpf_verifier_env *env,
3244 				    int ptr_regno, int off, int size, int dst_regno)
3245 {
3246 	/* The state of the source register. */
3247 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3248 	struct bpf_func_state *ptr_state = func(env, reg);
3249 	int err;
3250 	int min_off, max_off;
3251 
3252 	/* Note that we pass a NULL meta, so raw access will not be permitted.
3253 	 */
3254 	err = check_stack_range_initialized(env, ptr_regno, off, size,
3255 					    false, ACCESS_DIRECT, NULL);
3256 	if (err)
3257 		return err;
3258 
3259 	min_off = reg->smin_value + off;
3260 	max_off = reg->smax_value + off;
3261 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3262 	return 0;
3263 }
3264 
3265 /* check_stack_read dispatches to check_stack_read_fixed_off or
3266  * check_stack_read_var_off.
3267  *
3268  * The caller must ensure that the offset falls within the allocated stack
3269  * bounds.
3270  *
3271  * 'dst_regno' is a register which will receive the value from the stack. It
3272  * can be -1, meaning that the read value is not going to a register.
3273  */
3274 static int check_stack_read(struct bpf_verifier_env *env,
3275 			    int ptr_regno, int off, int size,
3276 			    int dst_regno)
3277 {
3278 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3279 	struct bpf_func_state *state = func(env, reg);
3280 	int err;
3281 	/* Some accesses are only permitted with a static offset. */
3282 	bool var_off = !tnum_is_const(reg->var_off);
3283 
3284 	/* The offset is required to be static when reads don't go to a
3285 	 * register, in order to not leak pointers (see
3286 	 * check_stack_read_fixed_off).
3287 	 */
3288 	if (dst_regno < 0 && var_off) {
3289 		char tn_buf[48];
3290 
3291 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3292 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3293 			tn_buf, off, size);
3294 		return -EACCES;
3295 	}
3296 	/* Variable offset is prohibited for unprivileged mode for simplicity
3297 	 * since it requires corresponding support in Spectre masking for stack
3298 	 * ALU. See also retrieve_ptr_limit().
3299 	 */
3300 	if (!env->bypass_spec_v1 && var_off) {
3301 		char tn_buf[48];
3302 
3303 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3304 		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3305 				ptr_regno, tn_buf);
3306 		return -EACCES;
3307 	}
3308 
3309 	if (!var_off) {
3310 		off += reg->var_off.value;
3311 		err = check_stack_read_fixed_off(env, state, off, size,
3312 						 dst_regno);
3313 	} else {
3314 		/* Variable offset stack reads need more conservative handling
3315 		 * than fixed offset ones. Note that dst_regno >= 0 on this
3316 		 * branch.
3317 		 */
3318 		err = check_stack_read_var_off(env, ptr_regno, off, size,
3319 					       dst_regno);
3320 	}
3321 	return err;
3322 }
3323 
3324 
3325 /* check_stack_write dispatches to check_stack_write_fixed_off or
3326  * check_stack_write_var_off.
3327  *
3328  * 'ptr_regno' is the register used as a pointer into the stack.
3329  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3330  * 'value_regno' is the register whose value we're writing to the stack. It can
3331  * be -1, meaning that we're not writing from a register.
3332  *
3333  * The caller must ensure that the offset falls within the maximum stack size.
3334  */
3335 static int check_stack_write(struct bpf_verifier_env *env,
3336 			     int ptr_regno, int off, int size,
3337 			     int value_regno, int insn_idx)
3338 {
3339 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3340 	struct bpf_func_state *state = func(env, reg);
3341 	int err;
3342 
3343 	if (tnum_is_const(reg->var_off)) {
3344 		off += reg->var_off.value;
3345 		err = check_stack_write_fixed_off(env, state, off, size,
3346 						  value_regno, insn_idx);
3347 	} else {
3348 		/* Variable offset stack writes need more conservative handling
3349 		 * than fixed offset ones.
3350 		 */
3351 		err = check_stack_write_var_off(env, state,
3352 						ptr_regno, off, size,
3353 						value_regno, insn_idx);
3354 	}
3355 	return err;
3356 }
3357 
3358 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3359 				 int off, int size, enum bpf_access_type type)
3360 {
3361 	struct bpf_reg_state *regs = cur_regs(env);
3362 	struct bpf_map *map = regs[regno].map_ptr;
3363 	u32 cap = bpf_map_flags_to_cap(map);
3364 
3365 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3366 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3367 			map->value_size, off, size);
3368 		return -EACCES;
3369 	}
3370 
3371 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3372 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3373 			map->value_size, off, size);
3374 		return -EACCES;
3375 	}
3376 
3377 	return 0;
3378 }
3379 
3380 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
3381 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
3382 			      int off, int size, u32 mem_size,
3383 			      bool zero_size_allowed)
3384 {
3385 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
3386 	struct bpf_reg_state *reg;
3387 
3388 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
3389 		return 0;
3390 
3391 	reg = &cur_regs(env)[regno];
3392 	switch (reg->type) {
3393 	case PTR_TO_MAP_KEY:
3394 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
3395 			mem_size, off, size);
3396 		break;
3397 	case PTR_TO_MAP_VALUE:
3398 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
3399 			mem_size, off, size);
3400 		break;
3401 	case PTR_TO_PACKET:
3402 	case PTR_TO_PACKET_META:
3403 	case PTR_TO_PACKET_END:
3404 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
3405 			off, size, regno, reg->id, off, mem_size);
3406 		break;
3407 	case PTR_TO_MEM:
3408 	default:
3409 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
3410 			mem_size, off, size);
3411 	}
3412 
3413 	return -EACCES;
3414 }
3415 
3416 /* check read/write into a memory region with possible variable offset */
3417 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
3418 				   int off, int size, u32 mem_size,
3419 				   bool zero_size_allowed)
3420 {
3421 	struct bpf_verifier_state *vstate = env->cur_state;
3422 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3423 	struct bpf_reg_state *reg = &state->regs[regno];
3424 	int err;
3425 
3426 	/* We may have adjusted the register pointing to memory region, so we
3427 	 * need to try adding each of min_value and max_value to off
3428 	 * to make sure our theoretical access will be safe.
3429 	 *
3430 	 * The minimum value is only important with signed
3431 	 * comparisons where we can't assume the floor of a
3432 	 * value is 0.  If we are using signed variables for our
3433 	 * index'es we need to make sure that whatever we use
3434 	 * indexes we need to make sure that whatever we use
3435 	 */
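	/* Hypothetical example: with a 64-byte map value and an index known
	 * to lie in [0, 15], an access through "value + idx * 4" is probed
	 * below at both its minimum offset (0) and its maximum offset (60),
	 * and both land inside the region.
	 */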
3436 	if (reg->smin_value < 0 &&
3437 	    (reg->smin_value == S64_MIN ||
3438 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3439 	      reg->smin_value + off < 0)) {
3440 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3441 			regno);
3442 		return -EACCES;
3443 	}
3444 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
3445 				 mem_size, zero_size_allowed);
3446 	if (err) {
3447 		verbose(env, "R%d min value is outside of the allowed memory range\n",
3448 			regno);
3449 		return err;
3450 	}
3451 
3452 	/* If we haven't set a max value then we need to bail since we can't be
3453 	 * sure we won't do bad things.
3454 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
3455 	 */
3456 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
3457 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
3458 			regno);
3459 		return -EACCES;
3460 	}
3461 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
3462 				 mem_size, zero_size_allowed);
3463 	if (err) {
3464 		verbose(env, "R%d max value is outside of the allowed memory range\n",
3465 			regno);
3466 		return err;
3467 	}
3468 
3469 	return 0;
3470 }
3471 
3472 /* check read/write into a map element with possible variable offset */
3473 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3474 			    int off, int size, bool zero_size_allowed)
3475 {
3476 	struct bpf_verifier_state *vstate = env->cur_state;
3477 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3478 	struct bpf_reg_state *reg = &state->regs[regno];
3479 	struct bpf_map *map = reg->map_ptr;
3480 	int err;
3481 
3482 	err = check_mem_region_access(env, regno, off, size, map->value_size,
3483 				      zero_size_allowed);
3484 	if (err)
3485 		return err;
3486 
3487 	if (map_value_has_spin_lock(map)) {
3488 		u32 lock = map->spin_lock_off;
3489 
3490 		/* if any part of struct bpf_spin_lock can be touched by
3491 		 * load/store reject this program.
3492 		 * To check that [x1, x2) overlaps with [y1, y2)
3493 		 * it is sufficient to check x1 < y2 && y1 < x2.
3494 		 */
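		/* Worked example: a lock at offset 16 occupies [16, 20); an
		 * access covering [12, 20) overlaps because 12 < 20 and
		 * 16 < 20, so it is rejected below.
		 */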
3495 		if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
3496 		     lock < reg->umax_value + off + size) {
3497 			verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
3498 			return -EACCES;
3499 		}
3500 	}
3501 	if (map_value_has_timer(map)) {
3502 		u32 t = map->timer_off;
3503 
3504 		if (reg->smin_value + off < t + sizeof(struct bpf_timer) &&
3505 		     t < reg->umax_value + off + size) {
3506 			verbose(env, "bpf_timer cannot be accessed directly by load/store\n");
3507 			return -EACCES;
3508 		}
3509 	}
3510 	return err;
3511 }
3512 
3513 #define MAX_PACKET_OFF 0xffff
3514 
3515 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3516 				       const struct bpf_call_arg_meta *meta,
3517 				       enum bpf_access_type t)
3518 {
3519 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
3520 
3521 	switch (prog_type) {
3522 	/* Program types with only direct read access go here! */
3523 	case BPF_PROG_TYPE_LWT_IN:
3524 	case BPF_PROG_TYPE_LWT_OUT:
3525 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3526 	case BPF_PROG_TYPE_SK_REUSEPORT:
3527 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3528 	case BPF_PROG_TYPE_CGROUP_SKB:
3529 		if (t == BPF_WRITE)
3530 			return false;
3531 		fallthrough;
3532 
3533 	/* Program types with direct read + write access go here! */
3534 	case BPF_PROG_TYPE_SCHED_CLS:
3535 	case BPF_PROG_TYPE_SCHED_ACT:
3536 	case BPF_PROG_TYPE_XDP:
3537 	case BPF_PROG_TYPE_LWT_XMIT:
3538 	case BPF_PROG_TYPE_SK_SKB:
3539 	case BPF_PROG_TYPE_SK_MSG:
3540 		if (meta)
3541 			return meta->pkt_access;
3542 
3543 		env->seen_direct_write = true;
3544 		return true;
3545 
3546 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3547 		if (t == BPF_WRITE)
3548 			env->seen_direct_write = true;
3549 
3550 		return true;
3551 
3552 	default:
3553 		return false;
3554 	}
3555 }
3556 
3557 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
3558 			       int size, bool zero_size_allowed)
3559 {
3560 	struct bpf_reg_state *regs = cur_regs(env);
3561 	struct bpf_reg_state *reg = &regs[regno];
3562 	int err;
3563 
3564 	/* We may have added a variable offset to the packet pointer; but any
3565 	 * reg->range we have comes after that.  We are only checking the fixed
3566 	 * offset.
3567 	 */
3568 
3569 	/* We don't allow negative numbers, because we aren't tracking enough
3570 	 * detail to prove they're safe.
3571 	 */
3572 	if (reg->smin_value < 0) {
3573 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3574 			regno);
3575 		return -EACCES;
3576 	}
3577 
3578 	err = reg->range < 0 ? -EINVAL :
3579 	      __check_mem_access(env, regno, off, size, reg->range,
3580 				 zero_size_allowed);
3581 	if (err) {
3582 		verbose(env, "R%d offset is outside of the packet\n", regno);
3583 		return err;
3584 	}
3585 
3586 	/* __check_mem_access has made sure "off + size - 1" is within u16.
3587 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
3588 	 * otherwise find_good_pkt_pointers() would have refused to set a range
3589 	 * that __check_mem_access() would then have rejected for this access.
3590 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
3591 	 */
3592 	env->prog->aux->max_pkt_offset =
3593 		max_t(u32, env->prog->aux->max_pkt_offset,
3594 		      off + reg->umax_value + size - 1);
3595 
3596 	return err;
3597 }
3598 
3599 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
3600 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
3601 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
3602 			    struct btf **btf, u32 *btf_id)
3603 {
3604 	struct bpf_insn_access_aux info = {
3605 		.reg_type = *reg_type,
3606 		.log = &env->log,
3607 	};
3608 
3609 	if (env->ops->is_valid_access &&
3610 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
3611 		/* A non-zero info.ctx_field_size indicates that this field is a
3612 		 * candidate for later verifier transformation to load the whole
3613 		 * field and then apply a mask when accessed with a narrower
3614 		 * access than actual ctx access size. A zero info.ctx_field_size
3615 		 * will only allow for whole field access and rejects any other
3616 		 * type of narrower access.
3617 		 */
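		/* E.g. a 1-byte load of a 4-byte ctx field may be accepted
		 * here and later rewritten by convert_ctx_accesses() into a
		 * full-width load plus a shift/mask.
		 */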
3618 		*reg_type = info.reg_type;
3619 
3620 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
3621 			*btf = info.btf;
3622 			*btf_id = info.btf_id;
3623 		} else {
3624 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
3625 		}
3626 		/* remember the offset of last byte accessed in ctx */
3627 		if (env->prog->aux->max_ctx_offset < off + size)
3628 			env->prog->aux->max_ctx_offset = off + size;
3629 		return 0;
3630 	}
3631 
3632 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
3633 	return -EACCES;
3634 }
3635 
3636 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
3637 				  int size)
3638 {
3639 	if (size < 0 || off < 0 ||
3640 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
3641 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
3642 			off, size);
3643 		return -EACCES;
3644 	}
3645 	return 0;
3646 }
3647 
3648 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
3649 			     u32 regno, int off, int size,
3650 			     enum bpf_access_type t)
3651 {
3652 	struct bpf_reg_state *regs = cur_regs(env);
3653 	struct bpf_reg_state *reg = &regs[regno];
3654 	struct bpf_insn_access_aux info = {};
3655 	bool valid;
3656 
3657 	if (reg->smin_value < 0) {
3658 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3659 			regno);
3660 		return -EACCES;
3661 	}
3662 
3663 	switch (reg->type) {
3664 	case PTR_TO_SOCK_COMMON:
3665 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
3666 		break;
3667 	case PTR_TO_SOCKET:
3668 		valid = bpf_sock_is_valid_access(off, size, t, &info);
3669 		break;
3670 	case PTR_TO_TCP_SOCK:
3671 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
3672 		break;
3673 	case PTR_TO_XDP_SOCK:
3674 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
3675 		break;
3676 	default:
3677 		valid = false;
3678 	}
3679 
3680 
3681 	if (valid) {
3682 		env->insn_aux_data[insn_idx].ctx_field_size =
3683 			info.ctx_field_size;
3684 		return 0;
3685 	}
3686 
3687 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
3688 		regno, reg_type_str(env, reg->type), off, size);
3689 
3690 	return -EACCES;
3691 }
3692 
3693 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
3694 {
3695 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
3696 }
3697 
3698 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
3699 {
3700 	const struct bpf_reg_state *reg = reg_state(env, regno);
3701 
3702 	return reg->type == PTR_TO_CTX;
3703 }
3704 
3705 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
3706 {
3707 	const struct bpf_reg_state *reg = reg_state(env, regno);
3708 
3709 	return type_is_sk_pointer(reg->type);
3710 }
3711 
3712 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
3713 {
3714 	const struct bpf_reg_state *reg = reg_state(env, regno);
3715 
3716 	return type_is_pkt_pointer(reg->type);
3717 }
3718 
3719 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
3720 {
3721 	const struct bpf_reg_state *reg = reg_state(env, regno);
3722 
3723 	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
3724 	return reg->type == PTR_TO_FLOW_KEYS;
3725 }
3726 
3727 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
3728 				   const struct bpf_reg_state *reg,
3729 				   int off, int size, bool strict)
3730 {
3731 	struct tnum reg_off;
3732 	int ip_align;
3733 
3734 	/* Byte size accesses are always allowed. */
3735 	if (!strict || size == 1)
3736 		return 0;
3737 
3738 	/* For platforms that do not have a Kconfig enabling
3739 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
3740 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
3741 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
3742 	 * to this code only in strict mode where we want to emulate
3743 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
3744 	 * unconditional IP align value of '2'.
3745 	 */
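	/* Example: a 4-byte load at packet offset 14 (just past the Ethernet
	 * header) gives 2 + 0 + 14 = 16, which is 4-byte aligned, matching
	 * how NET_IP_ALIGN == 2 lines up the IP header on strict platforms.
	 */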
3746 	ip_align = 2;
3747 
3748 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
3749 	if (!tnum_is_aligned(reg_off, size)) {
3750 		char tn_buf[48];
3751 
3752 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3753 		verbose(env,
3754 			"misaligned packet access off %d+%s+%d+%d size %d\n",
3755 			ip_align, tn_buf, reg->off, off, size);
3756 		return -EACCES;
3757 	}
3758 
3759 	return 0;
3760 }
3761 
3762 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
3763 				       const struct bpf_reg_state *reg,
3764 				       const char *pointer_desc,
3765 				       int off, int size, bool strict)
3766 {
3767 	struct tnum reg_off;
3768 
3769 	/* Byte size accesses are always allowed. */
3770 	if (!strict || size == 1)
3771 		return 0;
3772 
3773 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
3774 	if (!tnum_is_aligned(reg_off, size)) {
3775 		char tn_buf[48];
3776 
3777 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3778 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
3779 			pointer_desc, tn_buf, reg->off, off, size);
3780 		return -EACCES;
3781 	}
3782 
3783 	return 0;
3784 }
3785 
3786 static int check_ptr_alignment(struct bpf_verifier_env *env,
3787 			       const struct bpf_reg_state *reg, int off,
3788 			       int size, bool strict_alignment_once)
3789 {
3790 	bool strict = env->strict_alignment || strict_alignment_once;
3791 	const char *pointer_desc = "";
3792 
3793 	switch (reg->type) {
3794 	case PTR_TO_PACKET:
3795 	case PTR_TO_PACKET_META:
3796 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
3797 		 * right in front, treat it the very same way.
3798 		 */
3799 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
3800 	case PTR_TO_FLOW_KEYS:
3801 		pointer_desc = "flow keys ";
3802 		break;
3803 	case PTR_TO_MAP_KEY:
3804 		pointer_desc = "key ";
3805 		break;
3806 	case PTR_TO_MAP_VALUE:
3807 		pointer_desc = "value ";
3808 		break;
3809 	case PTR_TO_CTX:
3810 		pointer_desc = "context ";
3811 		break;
3812 	case PTR_TO_STACK:
3813 		pointer_desc = "stack ";
3814 		/* The stack spill tracking logic in check_stack_write_fixed_off()
3815 		 * and check_stack_read_fixed_off() relies on stack accesses being
3816 		 * aligned.
3817 		 */
3818 		strict = true;
3819 		break;
3820 	case PTR_TO_SOCKET:
3821 		pointer_desc = "sock ";
3822 		break;
3823 	case PTR_TO_SOCK_COMMON:
3824 		pointer_desc = "sock_common ";
3825 		break;
3826 	case PTR_TO_TCP_SOCK:
3827 		pointer_desc = "tcp_sock ";
3828 		break;
3829 	case PTR_TO_XDP_SOCK:
3830 		pointer_desc = "xdp_sock ";
3831 		break;
3832 	default:
3833 		break;
3834 	}
3835 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
3836 					   strict);
3837 }
3838 
3839 static int update_stack_depth(struct bpf_verifier_env *env,
3840 			      const struct bpf_func_state *func,
3841 			      int off)
3842 {
3843 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
3844 
3845 	if (stack >= -off)
3846 		return 0;
3847 
3848 	/* update known max for given subprogram */
3849 	env->subprog_info[func->subprogno].stack_depth = -off;
3850 	return 0;
3851 }
3852 
3853 /* Starting from the main bpf function, walk all instructions of the function
3854  * and recursively walk all callees that a given function can call.
3855  * Ignore jump and exit insns.
3856  * Since recursion is prevented by check_cfg() this algorithm
3857  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
3858  */
3859 static int check_max_stack_depth(struct bpf_verifier_env *env)
3860 {
3861 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
3862 	struct bpf_subprog_info *subprog = env->subprog_info;
3863 	struct bpf_insn *insn = env->prog->insnsi;
3864 	bool tail_call_reachable = false;
3865 	int ret_insn[MAX_CALL_FRAMES];
3866 	int ret_prog[MAX_CALL_FRAMES];
3867 	int j;
3868 
3869 process_func:
3870 	/* protect against potential stack overflow that might happen when
3871 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
3872 	 * depth for such case down to 256 so that the worst case scenario
3873 	 * would result in 8k stack size (32 which is tailcall limit * 256 =
3874 	 * 8k).
3875 	 *
3876 	 * To get the idea what might happen, see an example:
3877 	 * func1 -> sub rsp, 128
3878 	 *  subfunc1 -> sub rsp, 256
3879 	 *  tailcall1 -> add rsp, 256
3880 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
3881 	 *   subfunc2 -> sub rsp, 64
3882 	 *   subfunc22 -> sub rsp, 128
3883 	 *   tailcall2 -> add rsp, 128
3884 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
3885 	 *
3886 	 * tailcall will unwind the current stack frame but it will not get rid
3887 	 * of caller's stack as shown on the example above.
3888 	 */
3889 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
3890 		verbose(env,
3891 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
3892 			depth);
3893 		return -EACCES;
3894 	}
3895 	/* round up to 32 bytes, since this is the granularity
3896 	 * of the interpreter stack size
3897 	 */
3898 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
3899 	if (depth > MAX_BPF_STACK) {
3900 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
3901 			frame + 1, depth);
3902 		return -EACCES;
3903 	}
3904 continue_func:
3905 	subprog_end = subprog[idx + 1].start;
3906 	for (; i < subprog_end; i++) {
3907 		int next_insn;
3908 
3909 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
3910 			continue;
3911 		/* remember insn and function to return to */
3912 		ret_insn[frame] = i + 1;
3913 		ret_prog[frame] = idx;
3914 
3915 		/* find the callee */
3916 		next_insn = i + insn[i].imm + 1;
3917 		idx = find_subprog(env, next_insn);
3918 		if (idx < 0) {
3919 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3920 				  next_insn);
3921 			return -EFAULT;
3922 		}
3923 		if (subprog[idx].is_async_cb) {
3924 			if (subprog[idx].has_tail_call) {
3925 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
3926 				return -EFAULT;
3927 			}
3928 			 /* async callbacks don't increase bpf prog stack size */
3929 			continue;
3930 		}
3931 		i = next_insn;
3932 
3933 		if (subprog[idx].has_tail_call)
3934 			tail_call_reachable = true;
3935 
3936 		frame++;
3937 		if (frame >= MAX_CALL_FRAMES) {
3938 			verbose(env, "the call stack of %d frames is too deep !\n",
3939 				frame);
3940 			return -E2BIG;
3941 		}
3942 		goto process_func;
3943 	}
3944 	/* if tail call got detected across bpf2bpf calls then mark each of the
3945 	 * currently present subprog frames as tail call reachable subprogs;
3946 	 * this info will be utilized by JIT so that we will be preserving the
3947 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
3948 	 */
3949 	if (tail_call_reachable)
3950 		for (j = 0; j < frame; j++)
3951 			subprog[ret_prog[j]].tail_call_reachable = true;
3952 	if (subprog[0].tail_call_reachable)
3953 		env->prog->aux->tail_call_reachable = true;
3954 
3955 	/* end of for() loop means the last insn of the 'subprog'
3956 	 * was reached. Doesn't matter whether it was JA or EXIT
3957 	 */
3958 	if (frame == 0)
3959 		return 0;
3960 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
3961 	frame--;
3962 	i = ret_insn[frame];
3963 	idx = ret_prog[frame];
3964 	goto continue_func;
3965 }
3966 
3967 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
3968 static int get_callee_stack_depth(struct bpf_verifier_env *env,
3969 				  const struct bpf_insn *insn, int idx)
3970 {
3971 	int start = idx + insn->imm + 1, subprog;
3972 
3973 	subprog = find_subprog(env, start);
3974 	if (subprog < 0) {
3975 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
3976 			  start);
3977 		return -EFAULT;
3978 	}
3979 	return env->subprog_info[subprog].stack_depth;
3980 }
3981 #endif
3982 
3983 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
3984 			       const struct bpf_reg_state *reg, int regno,
3985 			       bool fixed_off_ok)
3986 {
3987 	/* Access to this pointer-typed register or passing it to a helper
3988 	 * is only allowed in its original, unmodified form.
3989 	 */
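	/* E.g. doing "r1 += 8" on a PTR_TO_CTX register and then dereferencing
	 * it or passing it to a helper trips the checks below ("modified ctx
	 * ptr ... disallowed"), unless the caller allowed a fixed offset.
	 */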
3990 
3991 	if (reg->off < 0) {
3992 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
3993 			reg_type_str(env, reg->type), regno, reg->off);
3994 		return -EACCES;
3995 	}
3996 
3997 	if (!fixed_off_ok && reg->off) {
3998 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
3999 			reg_type_str(env, reg->type), regno, reg->off);
4000 		return -EACCES;
4001 	}
4002 
4003 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4004 		char tn_buf[48];
4005 
4006 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4007 		verbose(env, "variable %s access var_off=%s disallowed\n",
4008 			reg_type_str(env, reg->type), tn_buf);
4009 		return -EACCES;
4010 	}
4011 
4012 	return 0;
4013 }
4014 
4015 int check_ptr_off_reg(struct bpf_verifier_env *env,
4016 		      const struct bpf_reg_state *reg, int regno)
4017 {
4018 	return __check_ptr_off_reg(env, reg, regno, false);
4019 }
4020 
4021 static int __check_buffer_access(struct bpf_verifier_env *env,
4022 				 const char *buf_info,
4023 				 const struct bpf_reg_state *reg,
4024 				 int regno, int off, int size)
4025 {
4026 	if (off < 0) {
4027 		verbose(env,
4028 			"R%d invalid %s buffer access: off=%d, size=%d\n",
4029 			regno, buf_info, off, size);
4030 		return -EACCES;
4031 	}
4032 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4033 		char tn_buf[48];
4034 
4035 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4036 		verbose(env,
4037 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
4038 			regno, off, tn_buf);
4039 		return -EACCES;
4040 	}
4041 
4042 	return 0;
4043 }
4044 
4045 static int check_tp_buffer_access(struct bpf_verifier_env *env,
4046 				  const struct bpf_reg_state *reg,
4047 				  int regno, int off, int size)
4048 {
4049 	int err;
4050 
4051 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4052 	if (err)
4053 		return err;
4054 
4055 	if (off + size > env->prog->aux->max_tp_access)
4056 		env->prog->aux->max_tp_access = off + size;
4057 
4058 	return 0;
4059 }
4060 
4061 static int check_buffer_access(struct bpf_verifier_env *env,
4062 			       const struct bpf_reg_state *reg,
4063 			       int regno, int off, int size,
4064 			       bool zero_size_allowed,
4065 			       u32 *max_access)
4066 {
4067 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
4068 	int err;
4069 
4070 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4071 	if (err)
4072 		return err;
4073 
4074 	if (off + size > *max_access)
4075 		*max_access = off + size;
4076 
4077 	return 0;
4078 }
4079 
4080 /* BPF architecture zero extends alu32 ops into 64-bit registesr */
4081 /* BPF architecture zero extends alu32 ops into 64-bit registers */
4082 {
4083 	reg->var_off = tnum_subreg(reg->var_off);
4084 	__reg_assign_32_into_64(reg);
4085 }
4086 
4087 /* truncate register to smaller size (in bytes)
4088  * must be called with size < BPF_REG_SIZE
4089  */
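/* Sketch of the effect: truncating a register with bounds [0, 0x1ffff] to
 * size 2 cannot keep precise bounds (the bits above the mask differ between
 * umin and umax), so the bounds collapse to [0, 0xffff].
 */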
4090 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4091 {
4092 	u64 mask;
4093 
4094 	/* clear high bits in bit representation */
4095 	reg->var_off = tnum_cast(reg->var_off, size);
4096 
4097 	/* fix arithmetic bounds */
4098 	mask = ((u64)1 << (size * 8)) - 1;
4099 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4100 		reg->umin_value &= mask;
4101 		reg->umax_value &= mask;
4102 	} else {
4103 		reg->umin_value = 0;
4104 		reg->umax_value = mask;
4105 	}
4106 	reg->smin_value = reg->umin_value;
4107 	reg->smax_value = reg->umax_value;
4108 
4109 	/* If size is smaller than 32bit register the 32bit register
4110 	 * values are also truncated so we push 64-bit bounds into
4111 	 * 32-bit bounds. Above were truncated < 32-bits already.
4112 	 */
4113 	if (size >= 4)
4114 		return;
4115 	__reg_combine_64_into_32(reg);
4116 }
4117 
4118 static bool bpf_map_is_rdonly(const struct bpf_map *map)
4119 {
4120 	/* A map is considered read-only if the following conditions are true:
4121 	 *
4122 	 * 1) BPF program side cannot change any of the map content. The
4123 	 *    BPF_F_RDONLY_PROG flag was set at map creation time and stays
4124 	 *    set throughout the map's lifetime.
4125 	 * 2) The map value(s) have been initialized from user space by a
4126 	 *    loader and then "frozen", such that no new map update/delete
4127 	 *    operations from syscall side are possible for the rest of
4128 	 *    the map's lifetime from that point onwards.
4129 	 * 3) Any parallel/pending map update/delete operations from syscall
4130 	 *    side have been completed. Only after that point, it's safe to
4131 	 *    assume that map value(s) are immutable.
4132 	 */
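	/* Typical flow (an illustration, not required by this code): a loader
	 * such as libbpf populates a .rodata map and issues BPF_MAP_FREEZE
	 * before program load, after which bpf_map_direct_read() lets the
	 * verifier treat the map contents as known scalars.
	 */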
4133 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
4134 	       READ_ONCE(map->frozen) &&
4135 	       !bpf_map_write_active(map);
4136 }
4137 
4138 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4139 {
4140 	void *ptr;
4141 	u64 addr;
4142 	int err;
4143 
4144 	err = map->ops->map_direct_value_addr(map, &addr, off);
4145 	if (err)
4146 		return err;
4147 	ptr = (void *)(long)addr + off;
4148 
4149 	switch (size) {
4150 	case sizeof(u8):
4151 		*val = (u64)*(u8 *)ptr;
4152 		break;
4153 	case sizeof(u16):
4154 		*val = (u64)*(u16 *)ptr;
4155 		break;
4156 	case sizeof(u32):
4157 		*val = (u64)*(u32 *)ptr;
4158 		break;
4159 	case sizeof(u64):
4160 		*val = *(u64 *)ptr;
4161 		break;
4162 	default:
4163 		return -EINVAL;
4164 	}
4165 	return 0;
4166 }
4167 
4168 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4169 				   struct bpf_reg_state *regs,
4170 				   int regno, int off, int size,
4171 				   enum bpf_access_type atype,
4172 				   int value_regno)
4173 {
4174 	struct bpf_reg_state *reg = regs + regno;
4175 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4176 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
4177 	enum bpf_type_flag flag = 0;
4178 	u32 btf_id;
4179 	int ret;
4180 
4181 	if (off < 0) {
4182 		verbose(env,
4183 			"R%d is ptr_%s invalid negative access: off=%d\n",
4184 			regno, tname, off);
4185 		return -EACCES;
4186 	}
4187 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4188 		char tn_buf[48];
4189 
4190 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4191 		verbose(env,
4192 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
4193 			regno, tname, off, tn_buf);
4194 		return -EACCES;
4195 	}
4196 
4197 	if (reg->type & MEM_USER) {
4198 		verbose(env,
4199 			"R%d is ptr_%s access user memory: off=%d\n",
4200 			regno, tname, off);
4201 		return -EACCES;
4202 	}
4203 
4204 	if (reg->type & MEM_PERCPU) {
4205 		verbose(env,
4206 			"R%d is ptr_%s access percpu memory: off=%d\n",
4207 			regno, tname, off);
4208 		return -EACCES;
4209 	}
4210 
4211 	if (env->ops->btf_struct_access) {
4212 		ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
4213 						  off, size, atype, &btf_id, &flag);
4214 	} else {
4215 		if (atype != BPF_READ) {
4216 			verbose(env, "only read is supported\n");
4217 			return -EACCES;
4218 		}
4219 
4220 		ret = btf_struct_access(&env->log, reg->btf, t, off, size,
4221 					atype, &btf_id, &flag);
4222 	}
4223 
4224 	if (ret < 0)
4225 		return ret;
4226 
4227 	if (atype == BPF_READ && value_regno >= 0)
4228 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
4229 
4230 	return 0;
4231 }
4232 
4233 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
4234 				   struct bpf_reg_state *regs,
4235 				   int regno, int off, int size,
4236 				   enum bpf_access_type atype,
4237 				   int value_regno)
4238 {
4239 	struct bpf_reg_state *reg = regs + regno;
4240 	struct bpf_map *map = reg->map_ptr;
4241 	enum bpf_type_flag flag = 0;
4242 	const struct btf_type *t;
4243 	const char *tname;
4244 	u32 btf_id;
4245 	int ret;
4246 
4247 	if (!btf_vmlinux) {
4248 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
4249 		return -ENOTSUPP;
4250 	}
4251 
4252 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
4253 		verbose(env, "map_ptr access not supported for map type %d\n",
4254 			map->map_type);
4255 		return -ENOTSUPP;
4256 	}
4257 
4258 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
4259 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
4260 
4261 	if (!env->allow_ptr_to_map_access) {
4262 		verbose(env,
4263 			"%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
4264 			tname);
4265 		return -EPERM;
4266 	}
4267 
4268 	if (off < 0) {
4269 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
4270 			regno, tname, off);
4271 		return -EACCES;
4272 	}
4273 
4274 	if (atype != BPF_READ) {
4275 		verbose(env, "only read from %s is supported\n", tname);
4276 		return -EACCES;
4277 	}
4278 
4279 	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
4280 	if (ret < 0)
4281 		return ret;
4282 
4283 	if (value_regno >= 0)
4284 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
4285 
4286 	return 0;
4287 }
4288 
4289 /* Check that the stack access at the given offset is within bounds. The
4290  * maximum valid offset is -1.
4291  *
4292  * The minimum valid offset is -MAX_BPF_STACK for writes, and
4293  * -state->allocated_stack for reads.
4294  */
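/* For example, a write to fp-8 (off == -8) passes, while fp+0 (off == 0) is
 * rejected, since the highest valid stack offset is -1.
 */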
4295 static int check_stack_slot_within_bounds(int off,
4296 					  struct bpf_func_state *state,
4297 					  enum bpf_access_type t)
4298 {
4299 	int min_valid_off;
4300 
4301 	if (t == BPF_WRITE)
4302 		min_valid_off = -MAX_BPF_STACK;
4303 	else
4304 		min_valid_off = -state->allocated_stack;
4305 
4306 	if (off < min_valid_off || off > -1)
4307 		return -EACCES;
4308 	return 0;
4309 }
4310 
4311 /* Check that the stack access at 'regno + off' falls within the maximum stack
4312  * bounds.
4313  *
4314  * 'off' includes 'regno->off', but not its dynamic part (if any).
4315  */
4316 static int check_stack_access_within_bounds(
4317 		struct bpf_verifier_env *env,
4318 		int regno, int off, int access_size,
4319 		enum stack_access_src src, enum bpf_access_type type)
4320 {
4321 	struct bpf_reg_state *regs = cur_regs(env);
4322 	struct bpf_reg_state *reg = regs + regno;
4323 	struct bpf_func_state *state = func(env, reg);
4324 	int min_off, max_off;
4325 	int err;
4326 	char *err_extra;
4327 
4328 	if (src == ACCESS_HELPER)
4329 		/* We don't know if helpers are reading or writing (or both). */
4330 		err_extra = " indirect access to";
4331 	else if (type == BPF_READ)
4332 		err_extra = " read from";
4333 	else
4334 		err_extra = " write to";
4335 
4336 	if (tnum_is_const(reg->var_off)) {
4337 		min_off = reg->var_off.value + off;
4338 		if (access_size > 0)
4339 			max_off = min_off + access_size - 1;
4340 		else
4341 			max_off = min_off;
4342 	} else {
4343 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
4344 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
4345 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
4346 				err_extra, regno);
4347 			return -EACCES;
4348 		}
4349 		min_off = reg->smin_value + off;
4350 		if (access_size > 0)
4351 			max_off = reg->smax_value + off + access_size - 1;
4352 		else
4353 			max_off = min_off;
4354 	}
4355 
4356 	err = check_stack_slot_within_bounds(min_off, state, type);
4357 	if (!err)
4358 		err = check_stack_slot_within_bounds(max_off, state, type);
4359 
4360 	if (err) {
4361 		if (tnum_is_const(reg->var_off)) {
4362 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
4363 				err_extra, regno, off, access_size);
4364 		} else {
4365 			char tn_buf[48];
4366 
4367 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4368 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
4369 				err_extra, regno, tn_buf, access_size);
4370 		}
4371 	}
4372 	return err;
4373 }
4374 
4375 /* check whether memory at (regno + off) is accessible for t = (read | write)
4376  * if t==write, value_regno is a register which value is stored into memory
4377  * if t==read, value_regno is a register which will receive the value from memory
4378  * if t==write && value_regno==-1, some unknown value is stored into memory
4379  * if t==read && value_regno==-1, don't care what we read from memory
4380  */
4381 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
4382 			    int off, int bpf_size, enum bpf_access_type t,
4383 			    int value_regno, bool strict_alignment_once)
4384 {
4385 	struct bpf_reg_state *regs = cur_regs(env);
4386 	struct bpf_reg_state *reg = regs + regno;
4387 	struct bpf_func_state *state;
4388 	int size, err = 0;
4389 
4390 	size = bpf_size_to_bytes(bpf_size);
4391 	if (size < 0)
4392 		return size;
4393 
4394 	/* alignment checks will add in reg->off themselves */
4395 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
4396 	if (err)
4397 		return err;
4398 
4399 	/* for access checks, reg->off is just part of off */
4400 	off += reg->off;
4401 
4402 	if (reg->type == PTR_TO_MAP_KEY) {
4403 		if (t == BPF_WRITE) {
4404 			verbose(env, "write to change key R%d not allowed\n", regno);
4405 			return -EACCES;
4406 		}
4407 
4408 		err = check_mem_region_access(env, regno, off, size,
4409 					      reg->map_ptr->key_size, false);
4410 		if (err)
4411 			return err;
4412 		if (value_regno >= 0)
4413 			mark_reg_unknown(env, regs, value_regno);
4414 	} else if (reg->type == PTR_TO_MAP_VALUE) {
4415 		if (t == BPF_WRITE && value_regno >= 0 &&
4416 		    is_pointer_value(env, value_regno)) {
4417 			verbose(env, "R%d leaks addr into map\n", value_regno);
4418 			return -EACCES;
4419 		}
4420 		err = check_map_access_type(env, regno, off, size, t);
4421 		if (err)
4422 			return err;
4423 		err = check_map_access(env, regno, off, size, false);
4424 		if (!err && t == BPF_READ && value_regno >= 0) {
4425 			struct bpf_map *map = reg->map_ptr;
4426 
4427 			/* if map is read-only, track its contents as scalars */
4428 			if (tnum_is_const(reg->var_off) &&
4429 			    bpf_map_is_rdonly(map) &&
4430 			    map->ops->map_direct_value_addr) {
4431 				int map_off = off + reg->var_off.value;
4432 				u64 val = 0;
4433 
4434 				err = bpf_map_direct_read(map, map_off, size,
4435 							  &val);
4436 				if (err)
4437 					return err;
4438 
4439 				regs[value_regno].type = SCALAR_VALUE;
4440 				__mark_reg_known(&regs[value_regno], val);
4441 			} else {
4442 				mark_reg_unknown(env, regs, value_regno);
4443 			}
4444 		}
4445 	} else if (base_type(reg->type) == PTR_TO_MEM) {
4446 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4447 
4448 		if (type_may_be_null(reg->type)) {
4449 			verbose(env, "R%d invalid mem access '%s'\n", regno,
4450 				reg_type_str(env, reg->type));
4451 			return -EACCES;
4452 		}
4453 
4454 		if (t == BPF_WRITE && rdonly_mem) {
4455 			verbose(env, "R%d cannot write into %s\n",
4456 				regno, reg_type_str(env, reg->type));
4457 			return -EACCES;
4458 		}
4459 
4460 		if (t == BPF_WRITE && value_regno >= 0 &&
4461 		    is_pointer_value(env, value_regno)) {
4462 			verbose(env, "R%d leaks addr into mem\n", value_regno);
4463 			return -EACCES;
4464 		}
4465 
4466 		err = check_mem_region_access(env, regno, off, size,
4467 					      reg->mem_size, false);
4468 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
4469 			mark_reg_unknown(env, regs, value_regno);
4470 	} else if (reg->type == PTR_TO_CTX) {
4471 		enum bpf_reg_type reg_type = SCALAR_VALUE;
4472 		struct btf *btf = NULL;
4473 		u32 btf_id = 0;
4474 
4475 		if (t == BPF_WRITE && value_regno >= 0 &&
4476 		    is_pointer_value(env, value_regno)) {
4477 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
4478 			return -EACCES;
4479 		}
4480 
4481 		err = check_ptr_off_reg(env, reg, regno);
4482 		if (err < 0)
4483 			return err;
4484 
4485 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
4486 				       &btf_id);
4487 		if (err)
4488 			verbose_linfo(env, insn_idx, "; ");
4489 		if (!err && t == BPF_READ && value_regno >= 0) {
4490 			/* ctx access returns either a scalar, or a
4491 			 * PTR_TO_PACKET[_META,_END]. In the latter
4492 			 * case, we know the offset is zero.
4493 			 */
4494 			if (reg_type == SCALAR_VALUE) {
4495 				mark_reg_unknown(env, regs, value_regno);
4496 			} else {
4497 				mark_reg_known_zero(env, regs,
4498 						    value_regno);
4499 				if (type_may_be_null(reg_type))
4500 					regs[value_regno].id = ++env->id_gen;
4501 				/* A load of a ctx field could have a different
4502 				 * actual load size from the one encoded in the
4503 				 * insn. When the dst is PTR, it is for sure not
4504 				 * a sub-register.
4505 				 */
4506 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
4507 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
4508 					regs[value_regno].btf = btf;
4509 					regs[value_regno].btf_id = btf_id;
4510 				}
4511 			}
4512 			regs[value_regno].type = reg_type;
4513 		}
4514 
4515 	} else if (reg->type == PTR_TO_STACK) {
4516 		/* Basic bounds checks. */
4517 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
4518 		if (err)
4519 			return err;
4520 
4521 		state = func(env, reg);
4522 		err = update_stack_depth(env, state, off);
4523 		if (err)
4524 			return err;
4525 
4526 		if (t == BPF_READ)
4527 			err = check_stack_read(env, regno, off, size,
4528 					       value_regno);
4529 		else
4530 			err = check_stack_write(env, regno, off, size,
4531 						value_regno, insn_idx);
4532 	} else if (reg_is_pkt_pointer(reg)) {
4533 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
4534 			verbose(env, "cannot write into packet\n");
4535 			return -EACCES;
4536 		}
4537 		if (t == BPF_WRITE && value_regno >= 0 &&
4538 		    is_pointer_value(env, value_regno)) {
4539 			verbose(env, "R%d leaks addr into packet\n",
4540 				value_regno);
4541 			return -EACCES;
4542 		}
4543 		err = check_packet_access(env, regno, off, size, false);
4544 		if (!err && t == BPF_READ && value_regno >= 0)
4545 			mark_reg_unknown(env, regs, value_regno);
4546 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
4547 		if (t == BPF_WRITE && value_regno >= 0 &&
4548 		    is_pointer_value(env, value_regno)) {
4549 			verbose(env, "R%d leaks addr into flow keys\n",
4550 				value_regno);
4551 			return -EACCES;
4552 		}
4553 
4554 		err = check_flow_keys_access(env, off, size);
4555 		if (!err && t == BPF_READ && value_regno >= 0)
4556 			mark_reg_unknown(env, regs, value_regno);
4557 	} else if (type_is_sk_pointer(reg->type)) {
4558 		if (t == BPF_WRITE) {
4559 			verbose(env, "R%d cannot write into %s\n",
4560 				regno, reg_type_str(env, reg->type));
4561 			return -EACCES;
4562 		}
4563 		err = check_sock_access(env, insn_idx, regno, off, size, t);
4564 		if (!err && value_regno >= 0)
4565 			mark_reg_unknown(env, regs, value_regno);
4566 	} else if (reg->type == PTR_TO_TP_BUFFER) {
4567 		err = check_tp_buffer_access(env, reg, regno, off, size);
4568 		if (!err && t == BPF_READ && value_regno >= 0)
4569 			mark_reg_unknown(env, regs, value_regno);
4570 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
4571 		   !type_may_be_null(reg->type)) {
4572 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
4573 					      value_regno);
4574 	} else if (reg->type == CONST_PTR_TO_MAP) {
4575 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
4576 					      value_regno);
4577 	} else if (base_type(reg->type) == PTR_TO_BUF) {
4578 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4579 		u32 *max_access;
4580 
4581 		if (rdonly_mem) {
4582 			if (t == BPF_WRITE) {
4583 				verbose(env, "R%d cannot write into %s\n",
4584 					regno, reg_type_str(env, reg->type));
4585 				return -EACCES;
4586 			}
4587 			max_access = &env->prog->aux->max_rdonly_access;
4588 		} else {
4589 			max_access = &env->prog->aux->max_rdwr_access;
4590 		}
4591 
4592 		err = check_buffer_access(env, reg, regno, off, size, false,
4593 					  max_access);
4594 
4595 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
4596 			mark_reg_unknown(env, regs, value_regno);
4597 	} else {
4598 		verbose(env, "R%d invalid mem access '%s'\n", regno,
4599 			reg_type_str(env, reg->type));
4600 		return -EACCES;
4601 	}
4602 
4603 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
4604 	    regs[value_regno].type == SCALAR_VALUE) {
4605 		/* b/h/w load zero-extends, mark upper bits as known 0 */
4606 		coerce_reg_to_size(&regs[value_regno], size);
4607 	}
4608 	return err;
4609 }
4610 
4611 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
4612 {
4613 	int load_reg;
4614 	int err;
4615 
4616 	switch (insn->imm) {
4617 	case BPF_ADD:
4618 	case BPF_ADD | BPF_FETCH:
4619 	case BPF_AND:
4620 	case BPF_AND | BPF_FETCH:
4621 	case BPF_OR:
4622 	case BPF_OR | BPF_FETCH:
4623 	case BPF_XOR:
4624 	case BPF_XOR | BPF_FETCH:
4625 	case BPF_XCHG:
4626 	case BPF_CMPXCHG:
4627 		break;
4628 	default:
4629 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
4630 		return -EINVAL;
4631 	}
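	/* E.g. BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_1, 0) encodes
	 * "lock *(u64 *)(r2 + 0) += r1"; OR-ing BPF_FETCH into the opcode also
	 * returns the old value in the source register (in R0 for BPF_CMPXCHG).
	 */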
4632 
4633 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
4634 		verbose(env, "invalid atomic operand size\n");
4635 		return -EINVAL;
4636 	}
4637 
4638 	/* check src1 operand */
4639 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
4640 	if (err)
4641 		return err;
4642 
4643 	/* check src2 operand */
4644 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4645 	if (err)
4646 		return err;
4647 
4648 	if (insn->imm == BPF_CMPXCHG) {
4649 		/* Check comparison of R0 with memory location */
4650 		const u32 aux_reg = BPF_REG_0;
4651 
4652 		err = check_reg_arg(env, aux_reg, SRC_OP);
4653 		if (err)
4654 			return err;
4655 
4656 		if (is_pointer_value(env, aux_reg)) {
4657 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
4658 			return -EACCES;
4659 		}
4660 	}
4661 
4662 	if (is_pointer_value(env, insn->src_reg)) {
4663 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
4664 		return -EACCES;
4665 	}
4666 
4667 	if (is_ctx_reg(env, insn->dst_reg) ||
4668 	    is_pkt_reg(env, insn->dst_reg) ||
4669 	    is_flow_key_reg(env, insn->dst_reg) ||
4670 	    is_sk_reg(env, insn->dst_reg)) {
4671 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
4672 			insn->dst_reg,
4673 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
4674 		return -EACCES;
4675 	}
4676 
4677 	if (insn->imm & BPF_FETCH) {
4678 		if (insn->imm == BPF_CMPXCHG)
4679 			load_reg = BPF_REG_0;
4680 		else
4681 			load_reg = insn->src_reg;
4682 
4683 		/* check and record load of old value */
4684 		err = check_reg_arg(env, load_reg, DST_OP);
4685 		if (err)
4686 			return err;
4687 	} else {
4688 		/* This instruction accesses a memory location but doesn't
4689 		 * actually load it into a register.
4690 		 */
4691 		load_reg = -1;
4692 	}
4693 
4694 	/* Check whether we can read the memory, with a second call for the
4695 	 * fetch case to simulate the register fill.
4696 	 */
4697 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4698 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
4699 	if (!err && load_reg >= 0)
4700 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4701 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
4702 				       true);
4703 	if (err)
4704 		return err;
4705 
4706 	/* Check whether we can write into the same memory. */
4707 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4708 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
4709 	if (err)
4710 		return err;
4711 
4712 	return 0;
4713 }
4714 
4715 /* When register 'regno' is used to read the stack (either directly or through
4716  * a helper function) make sure that it's within stack boundary and, depending
4717  * on the access type, that all elements of the stack are initialized.
4718  *
4719  * 'off' includes 'regno->off', but not its dynamic part (if any).
4720  *
4721  * All registers that have been spilled on the stack in the slots within the
4722  * read offsets are marked as read.
4723  */
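/* A familiar case of this check firing (hypothetical program): passing an
 * uninitialized stack buffer as the key to bpf_map_lookup_elem() produces an
 * "invalid indirect read from stack" error, since the helper would read bytes
 * the program never wrote.
 */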
4724 static int check_stack_range_initialized(
4725 		struct bpf_verifier_env *env, int regno, int off,
4726 		int access_size, bool zero_size_allowed,
4727 		enum stack_access_src type, struct bpf_call_arg_meta *meta)
4728 {
4729 	struct bpf_reg_state *reg = reg_state(env, regno);
4730 	struct bpf_func_state *state = func(env, reg);
4731 	int err, min_off, max_off, i, j, slot, spi;
4732 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
4733 	enum bpf_access_type bounds_check_type;
4734 	/* Some accesses can write anything into the stack, others are
4735 	 * read-only.
4736 	 */
4737 	bool clobber = false;
4738 
4739 	if (access_size == 0 && !zero_size_allowed) {
4740 		verbose(env, "invalid zero-sized read\n");
4741 		return -EACCES;
4742 	}
4743 
4744 	if (type == ACCESS_HELPER) {
4745 		/* The bounds checks for writes are more permissive than for
4746 		 * reads. However, if raw_mode is not set, we'll do extra
4747 		 * checks below.
4748 		 */
4749 		bounds_check_type = BPF_WRITE;
4750 		clobber = true;
4751 	} else {
4752 		bounds_check_type = BPF_READ;
4753 	}
4754 	err = check_stack_access_within_bounds(env, regno, off, access_size,
4755 					       type, bounds_check_type);
4756 	if (err)
4757 		return err;
4758 
4759 
4760 	if (tnum_is_const(reg->var_off)) {
4761 		min_off = max_off = reg->var_off.value + off;
4762 	} else {
4763 		/* Variable offset is prohibited for unprivileged mode for
4764 		 * simplicity since it requires corresponding support in
4765 		 * Spectre masking for stack ALU.
4766 		 * See also retrieve_ptr_limit().
4767 		 */
4768 		if (!env->bypass_spec_v1) {
4769 			char tn_buf[48];
4770 
4771 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4772 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
4773 				regno, err_extra, tn_buf);
4774 			return -EACCES;
4775 		}
4776 		/* Only an initialized buffer on the stack may be accessed with a
4777 		 * variable offset. With an uninitialized buffer it's hard to
4778 		 * guarantee that the whole memory is marked as initialized on
4779 		 * helper return, since the specific bounds are unknown, which
4780 		 * could leak uninitialized stack memory.
4781 		 */
4782 		if (meta && meta->raw_mode)
4783 			meta = NULL;
4784 
4785 		min_off = reg->smin_value + off;
4786 		max_off = reg->smax_value + off;
4787 	}
4788 
4789 	if (meta && meta->raw_mode) {
4790 		meta->access_size = access_size;
4791 		meta->regno = regno;
4792 		return 0;
4793 	}
4794 
4795 	for (i = min_off; i < max_off + access_size; i++) {
4796 		u8 *stype;
4797 
4798 		slot = -i - 1;
4799 		spi = slot / BPF_REG_SIZE;
4800 		if (state->allocated_stack <= slot)
4801 			goto err;
4802 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
4803 		if (*stype == STACK_MISC)
4804 			goto mark;
4805 		if (*stype == STACK_ZERO) {
4806 			if (clobber) {
4807 				/* helper can write anything into the stack */
4808 				*stype = STACK_MISC;
4809 			}
4810 			goto mark;
4811 		}
4812 
4813 		if (is_spilled_reg(&state->stack[spi]) &&
4814 		    base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID)
4815 			goto mark;
4816 
4817 		if (is_spilled_reg(&state->stack[spi]) &&
4818 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
4819 		     env->allow_ptr_leaks)) {
4820 			if (clobber) {
4821 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
4822 				for (j = 0; j < BPF_REG_SIZE; j++)
4823 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
4824 			}
4825 			goto mark;
4826 		}
4827 
4828 err:
4829 		if (tnum_is_const(reg->var_off)) {
4830 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
4831 				err_extra, regno, min_off, i - min_off, access_size);
4832 		} else {
4833 			char tn_buf[48];
4834 
4835 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4836 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
4837 				err_extra, regno, tn_buf, i - min_off, access_size);
4838 		}
4839 		return -EACCES;
4840 mark:
4841 		/* reading any byte out of 8-byte 'spill_slot' will cause
4842 		 * the whole slot to be marked as 'read'
4843 		 */
4844 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
4845 			      state->stack[spi].spilled_ptr.parent,
4846 			      REG_LIVE_READ64);
4847 	}
4848 	return update_stack_depth(env, state, min_off);
4849 }
4850 
4851 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
4852 				   int access_size, bool zero_size_allowed,
4853 				   struct bpf_call_arg_meta *meta)
4854 {
4855 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4856 	u32 *max_access;
4857 
4858 	switch (base_type(reg->type)) {
4859 	case PTR_TO_PACKET:
4860 	case PTR_TO_PACKET_META:
4861 		return check_packet_access(env, regno, reg->off, access_size,
4862 					   zero_size_allowed);
4863 	case PTR_TO_MAP_KEY:
4864 		if (meta && meta->raw_mode) {
4865 			verbose(env, "R%d cannot write into %s\n", regno,
4866 				reg_type_str(env, reg->type));
4867 			return -EACCES;
4868 		}
4869 		return check_mem_region_access(env, regno, reg->off, access_size,
4870 					       reg->map_ptr->key_size, false);
4871 	case PTR_TO_MAP_VALUE:
4872 		if (check_map_access_type(env, regno, reg->off, access_size,
4873 					  meta && meta->raw_mode ? BPF_WRITE :
4874 					  BPF_READ))
4875 			return -EACCES;
4876 		return check_map_access(env, regno, reg->off, access_size,
4877 					zero_size_allowed);
4878 	case PTR_TO_MEM:
4879 		if (type_is_rdonly_mem(reg->type)) {
4880 			if (meta && meta->raw_mode) {
4881 				verbose(env, "R%d cannot write into %s\n", regno,
4882 					reg_type_str(env, reg->type));
4883 				return -EACCES;
4884 			}
4885 		}
4886 		return check_mem_region_access(env, regno, reg->off,
4887 					       access_size, reg->mem_size,
4888 					       zero_size_allowed);
4889 	case PTR_TO_BUF:
4890 		if (type_is_rdonly_mem(reg->type)) {
4891 			if (meta && meta->raw_mode) {
4892 				verbose(env, "R%d cannot write into %s\n", regno,
4893 					reg_type_str(env, reg->type));
4894 				return -EACCES;
4895 			}
4896 
4897 			max_access = &env->prog->aux->max_rdonly_access;
4898 		} else {
4899 			max_access = &env->prog->aux->max_rdwr_access;
4900 		}
4901 		return check_buffer_access(env, reg, regno, reg->off,
4902 					   access_size, zero_size_allowed,
4903 					   max_access);
4904 	case PTR_TO_STACK:
4905 		return check_stack_range_initialized(
4906 				env,
4907 				regno, reg->off, access_size,
4908 				zero_size_allowed, ACCESS_HELPER, meta);
4909 	default: /* scalar_value or invalid ptr */
4910 		/* Allow zero-byte read from NULL, regardless of pointer type */
4911 		if (zero_size_allowed && access_size == 0 &&
4912 		    register_is_null(reg))
4913 			return 0;
4914 
4915 		verbose(env, "R%d type=%s ", regno,
4916 			reg_type_str(env, reg->type));
4917 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
4918 		return -EACCES;
4919 	}
4920 }
4921 
4922 static int check_mem_size_reg(struct bpf_verifier_env *env,
4923 			      struct bpf_reg_state *reg, u32 regno,
4924 			      bool zero_size_allowed,
4925 			      struct bpf_call_arg_meta *meta)
4926 {
4927 	int err;
4928 
4929 	/* This is used to refine r0 return value bounds for helpers
4930 	 * that enforce this value as an upper bound on return values.
4931 	 * See do_refine_retval_range() for helpers that can refine
4932 	 * the return value. The C type of the helper is u32, so we pull the
4933 	 * register bound from umax_value; if it is negative, the verifier
4934 	 * errors out. Only upper bounds can be learned because retval is an
4935 	 * int type and negative retvals are allowed.
4936 	 */
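	/* For instance (illustrative only), with bpf_probe_read_user(dst,
	 * size, unsafe_ptr) the size register is checked here as 'regno' and
	 * the dst buffer is regno - 1, which is why the calls below probe
	 * check_helper_mem_access() on regno - 1.
	 */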
4937 	meta->msize_max_value = reg->umax_value;
4938 
4939 	/* The register is SCALAR_VALUE; the access check
4940 	 * happens using its boundaries.
4941 	 */
4942 	if (!tnum_is_const(reg->var_off))
4943 		/* For unprivileged variable accesses, disable raw
4944 		 * mode so that the program is required to
4945 		 * initialize all the memory that the helper could
4946 		 * just partially fill up.
4947 		 */
4948 		meta = NULL;
4949 
4950 	if (reg->smin_value < 0) {
4951 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
4952 			regno);
4953 		return -EACCES;
4954 	}
4955 
4956 	if (reg->umin_value == 0) {
4957 		err = check_helper_mem_access(env, regno - 1, 0,
4958 					      zero_size_allowed,
4959 					      meta);
4960 		if (err)
4961 			return err;
4962 	}
4963 
4964 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
4965 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
4966 			regno);
4967 		return -EACCES;
4968 	}
4969 	err = check_helper_mem_access(env, regno - 1,
4970 				      reg->umax_value,
4971 				      zero_size_allowed, meta);
4972 	if (!err)
4973 		err = mark_chain_precision(env, regno);
4974 	return err;
4975 }
4976 
4977 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
4978 		   u32 regno, u32 mem_size)
4979 {
4980 	bool may_be_null = type_may_be_null(reg->type);
4981 	struct bpf_reg_state saved_reg;
4982 	struct bpf_call_arg_meta meta;
4983 	int err;
4984 
4985 	if (register_is_null(reg))
4986 		return 0;
4987 
4988 	memset(&meta, 0, sizeof(meta));
4989 	/* Assuming that the register contains a value check if the memory
4990 	 * access is safe. Temporarily save and restore the register's state as
4991 	 * the conversion shouldn't be visible to a caller.
4992 	 */
4993 	if (may_be_null) {
4994 		saved_reg = *reg;
4995 		mark_ptr_not_null_reg(reg);
4996 	}
4997 
4998 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
4999 	/* Check access for BPF_WRITE */
5000 	meta.raw_mode = true;
5001 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
5002 
5003 	if (may_be_null)
5004 		*reg = saved_reg;
5005 
5006 	return err;
5007 }
5008 
5009 int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5010 			     u32 regno)
5011 {
5012 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
5013 	bool may_be_null = type_may_be_null(mem_reg->type);
5014 	struct bpf_reg_state saved_reg;
5015 	struct bpf_call_arg_meta meta;
5016 	int err;
5017 
5018 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
5019 
5020 	memset(&meta, 0, sizeof(meta));
5021 
5022 	if (may_be_null) {
5023 		saved_reg = *mem_reg;
5024 		mark_ptr_not_null_reg(mem_reg);
5025 	}
5026 
5027 	err = check_mem_size_reg(env, reg, regno, true, &meta);
5028 	/* Check access for BPF_WRITE */
5029 	meta.raw_mode = true;
5030 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
5031 
5032 	if (may_be_null)
5033 		*mem_reg = saved_reg;
5034 	return err;
5035 }
5036 
5037 /* Implementation details:
5038  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
5039  * Two bpf_map_lookups (even with the same key) will have different reg->id.
5040  * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
5041  * value_or_null->value transition, since the verifier only cares about
5042  * the range of access to valid map value pointer and doesn't care about actual
5043  * address of the map element.
5044  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
5045  * reg->id > 0 after value_or_null->value transition. By doing so
5046  * two bpf_map_lookups will be considered two different pointers that
5047  * point to different bpf_spin_locks.
5048  * The verifier allows taking only one bpf_spin_lock at a time to avoid
5049  * dead-locks.
5050  * Since only one bpf_spin_lock is allowed the checks are simpler than
5051  * reg_is_refcounted() logic. The verifier needs to remember only
5052  * one spin_lock instead of array of acquired_refs.
5053  * cur_state->active_spin_lock remembers which map value element got locked
5054  * and clears it after bpf_spin_unlock.
5055  */
5056 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
5057 			     bool is_lock)
5058 {
5059 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5060 	struct bpf_verifier_state *cur = env->cur_state;
5061 	bool is_const = tnum_is_const(reg->var_off);
5062 	struct bpf_map *map = reg->map_ptr;
5063 	u64 val = reg->var_off.value;
5064 
5065 	if (!is_const) {
5066 		verbose(env,
5067 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
5068 			regno);
5069 		return -EINVAL;
5070 	}
5071 	if (!map->btf) {
5072 		verbose(env,
5073 			"map '%s' has to have BTF in order to use bpf_spin_lock\n",
5074 			map->name);
5075 		return -EINVAL;
5076 	}
5077 	if (!map_value_has_spin_lock(map)) {
5078 		if (map->spin_lock_off == -E2BIG)
5079 			verbose(env,
5080 				"map '%s' has more than one 'struct bpf_spin_lock'\n",
5081 				map->name);
5082 		else if (map->spin_lock_off == -ENOENT)
5083 			verbose(env,
5084 				"map '%s' doesn't have 'struct bpf_spin_lock'\n",
5085 				map->name);
5086 		else
5087 			verbose(env,
5088 				"map '%s' is not a struct type or bpf_spin_lock is mangled\n",
5089 				map->name);
5090 		return -EINVAL;
5091 	}
5092 	if (map->spin_lock_off != val + reg->off) {
5093 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
5094 			val + reg->off);
5095 		return -EINVAL;
5096 	}
5097 	if (is_lock) {
5098 		if (cur->active_spin_lock) {
5099 			verbose(env,
5100 				"Locking two bpf_spin_locks is not allowed\n");
5101 			return -EINVAL;
5102 		}
5103 		cur->active_spin_lock = reg->id;
5104 	} else {
5105 		if (!cur->active_spin_lock) {
5106 			verbose(env, "bpf_spin_unlock without taking a lock\n");
5107 			return -EINVAL;
5108 		}
5109 		if (cur->active_spin_lock != reg->id) {
5110 			verbose(env, "bpf_spin_unlock of different lock\n");
5111 			return -EINVAL;
5112 		}
5113 		cur->active_spin_lock = 0;
5114 	}
5115 	return 0;
5116 }
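
/* Example (editorial sketch, not part of the kernel sources): a map value
 * layout and program snippet that process_spin_lock() accepts. The map and
 * struct names are hypothetical.
 *
 *	struct val {
 *		struct bpf_spin_lock lock;   // offset == map->spin_lock_off
 *		int counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&lock_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);     // constant offset into map value
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);   // must match the lock's reg->id
 *	}
 */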
5117 
5118 static int process_timer_func(struct bpf_verifier_env *env, int regno,
5119 			      struct bpf_call_arg_meta *meta)
5120 {
5121 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5122 	bool is_const = tnum_is_const(reg->var_off);
5123 	struct bpf_map *map = reg->map_ptr;
5124 	u64 val = reg->var_off.value;
5125 
5126 	if (!is_const) {
5127 		verbose(env,
5128 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
5129 			regno);
5130 		return -EINVAL;
5131 	}
5132 	if (!map->btf) {
5133 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
5134 			map->name);
5135 		return -EINVAL;
5136 	}
5137 	if (!map_value_has_timer(map)) {
5138 		if (map->timer_off == -E2BIG)
5139 			verbose(env,
5140 				"map '%s' has more than one 'struct bpf_timer'\n",
5141 				map->name);
5142 		else if (map->timer_off == -ENOENT)
5143 			verbose(env,
5144 				"map '%s' doesn't have 'struct bpf_timer'\n",
5145 				map->name);
5146 		else
5147 			verbose(env,
5148 				"map '%s' is not a struct type or bpf_timer is mangled\n",
5149 				map->name);
5150 		return -EINVAL;
5151 	}
5152 	if (map->timer_off != val + reg->off) {
5153 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
5154 			val + reg->off, map->timer_off);
5155 		return -EINVAL;
5156 	}
5157 	if (meta->map_ptr) {
5158 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
5159 		return -EFAULT;
5160 	}
5161 	meta->map_uid = reg->map_uid;
5162 	meta->map_ptr = map;
5163 	return 0;
5164 }
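
/* Example (editorial sketch, not part of the kernel sources): a map value
 * carrying a 'struct bpf_timer' at the offset recorded in map->timer_off.
 * The map, struct and callback names are hypothetical; the bpf_timer_*
 * helpers are the upstream ones.
 *
 *	struct val {
 *		struct bpf_timer t;          // offset == map->timer_off
 *		__u64 payload;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&timer_map, &key);
 *	if (v) {
 *		bpf_timer_init(&v->t, &timer_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&v->t, timer_cb);
 *		bpf_timer_start(&v->t, 1000000000, 0);   // fire in one second
 *	}
 */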
5165 
5166 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
5167 {
5168 	return base_type(type) == ARG_PTR_TO_MEM ||
5169 	       base_type(type) == ARG_PTR_TO_UNINIT_MEM;
5170 }
5171 
5172 static bool arg_type_is_mem_size(enum bpf_arg_type type)
5173 {
5174 	return type == ARG_CONST_SIZE ||
5175 	       type == ARG_CONST_SIZE_OR_ZERO;
5176 }
5177 
5178 static bool arg_type_is_alloc_size(enum bpf_arg_type type)
5179 {
5180 	return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
5181 }
5182 
5183 static bool arg_type_is_int_ptr(enum bpf_arg_type type)
5184 {
5185 	return type == ARG_PTR_TO_INT ||
5186 	       type == ARG_PTR_TO_LONG;
5187 }
5188 
5189 static int int_ptr_type_to_size(enum bpf_arg_type type)
5190 {
5191 	if (type == ARG_PTR_TO_INT)
5192 		return sizeof(u32);
5193 	else if (type == ARG_PTR_TO_LONG)
5194 		return sizeof(u64);
5195 
5196 	return -EINVAL;
5197 }
5198 
5199 static int resolve_map_arg_type(struct bpf_verifier_env *env,
5200 				 const struct bpf_call_arg_meta *meta,
5201 				 enum bpf_arg_type *arg_type)
5202 {
5203 	if (!meta->map_ptr) {
5204 		/* kernel subsystem misconfigured verifier */
5205 		verbose(env, "invalid map_ptr to access map->type\n");
5206 		return -EACCES;
5207 	}
5208 
5209 	switch (meta->map_ptr->map_type) {
5210 	case BPF_MAP_TYPE_SOCKMAP:
5211 	case BPF_MAP_TYPE_SOCKHASH:
5212 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
5213 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
5214 		} else {
5215 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
5216 			return -EINVAL;
5217 		}
5218 		break;
5219 	case BPF_MAP_TYPE_BLOOM_FILTER:
5220 		if (meta->func_id == BPF_FUNC_map_peek_elem)
5221 			*arg_type = ARG_PTR_TO_MAP_VALUE;
5222 		break;
5223 	default:
5224 		break;
5225 	}
5226 	return 0;
5227 }
5228 
5229 struct bpf_reg_types {
5230 	const enum bpf_reg_type types[10];
5231 	u32 *btf_id;
5232 };
5233 
5234 static const struct bpf_reg_types map_key_value_types = {
5235 	.types = {
5236 		PTR_TO_STACK,
5237 		PTR_TO_PACKET,
5238 		PTR_TO_PACKET_META,
5239 		PTR_TO_MAP_KEY,
5240 		PTR_TO_MAP_VALUE,
5241 	},
5242 };
5243 
5244 static const struct bpf_reg_types sock_types = {
5245 	.types = {
5246 		PTR_TO_SOCK_COMMON,
5247 		PTR_TO_SOCKET,
5248 		PTR_TO_TCP_SOCK,
5249 		PTR_TO_XDP_SOCK,
5250 	},
5251 };
5252 
5253 #ifdef CONFIG_NET
5254 static const struct bpf_reg_types btf_id_sock_common_types = {
5255 	.types = {
5256 		PTR_TO_SOCK_COMMON,
5257 		PTR_TO_SOCKET,
5258 		PTR_TO_TCP_SOCK,
5259 		PTR_TO_XDP_SOCK,
5260 		PTR_TO_BTF_ID,
5261 	},
5262 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5263 };
5264 #endif
5265 
5266 static const struct bpf_reg_types mem_types = {
5267 	.types = {
5268 		PTR_TO_STACK,
5269 		PTR_TO_PACKET,
5270 		PTR_TO_PACKET_META,
5271 		PTR_TO_MAP_KEY,
5272 		PTR_TO_MAP_VALUE,
5273 		PTR_TO_MEM,
5274 		PTR_TO_MEM | MEM_ALLOC,
5275 		PTR_TO_BUF,
5276 	},
5277 };
5278 
5279 static const struct bpf_reg_types int_ptr_types = {
5280 	.types = {
5281 		PTR_TO_STACK,
5282 		PTR_TO_PACKET,
5283 		PTR_TO_PACKET_META,
5284 		PTR_TO_MAP_KEY,
5285 		PTR_TO_MAP_VALUE,
5286 	},
5287 };
5288 
5289 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
5290 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
5291 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
5292 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } };
5293 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
5294 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
5295 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
5296 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } };
5297 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
5298 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
5299 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
5300 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
5301 
5302 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
5303 	[ARG_PTR_TO_MAP_KEY]		= &map_key_value_types,
5304 	[ARG_PTR_TO_MAP_VALUE]		= &map_key_value_types,
5305 	[ARG_PTR_TO_UNINIT_MAP_VALUE]	= &map_key_value_types,
5306 	[ARG_CONST_SIZE]		= &scalar_types,
5307 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
5308 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
5309 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
5310 	[ARG_PTR_TO_CTX]		= &context_types,
5311 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
5312 #ifdef CONFIG_NET
5313 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
5314 #endif
5315 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
5316 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
5317 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
5318 	[ARG_PTR_TO_MEM]		= &mem_types,
5319 	[ARG_PTR_TO_UNINIT_MEM]		= &mem_types,
5320 	[ARG_PTR_TO_ALLOC_MEM]		= &alloc_mem_types,
5321 	[ARG_PTR_TO_INT]		= &int_ptr_types,
5322 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
5323 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
5324 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
5325 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
5326 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
5327 	[ARG_PTR_TO_TIMER]		= &timer_types,
5328 };
5329 
5330 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
5331 			  enum bpf_arg_type arg_type,
5332 			  const u32 *arg_btf_id)
5333 {
5334 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5335 	enum bpf_reg_type expected, type = reg->type;
5336 	const struct bpf_reg_types *compatible;
5337 	int i, j;
5338 
5339 	compatible = compatible_reg_types[base_type(arg_type)];
5340 	if (!compatible) {
5341 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
5342 		return -EFAULT;
5343 	}
5344 
5345 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
5346 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
5347 	 *
5348 	 * Same for MAYBE_NULL:
5349 	 *
5350 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
5351 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + MAYBE_NULL
5352 	 *
5353 	 * Therefore we fold these flags depending on the arg_type before comparison.
5354 	 */
5355 	if (arg_type & MEM_RDONLY)
5356 		type &= ~MEM_RDONLY;
5357 	if (arg_type & PTR_MAYBE_NULL)
5358 		type &= ~PTR_MAYBE_NULL;
5359 
5360 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
5361 		expected = compatible->types[i];
5362 		if (expected == NOT_INIT)
5363 			break;
5364 
5365 		if (type == expected)
5366 			goto found;
5367 	}
5368 
5369 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
5370 	for (j = 0; j + 1 < i; j++)
5371 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
5372 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
5373 	return -EACCES;
5374 
5375 found:
5376 	if (reg->type == PTR_TO_BTF_ID) {
5377 		if (!arg_btf_id) {
5378 			if (!compatible->btf_id) {
5379 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
5380 				return -EFAULT;
5381 			}
5382 			arg_btf_id = compatible->btf_id;
5383 		}
5384 
5385 		if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
5386 					  btf_vmlinux, *arg_btf_id)) {
5387 			verbose(env, "R%d is of type %s but %s is expected\n",
5388 				regno, kernel_type_name(reg->btf, reg->btf_id),
5389 				kernel_type_name(btf_vmlinux, *arg_btf_id));
5390 			return -EACCES;
5391 		}
5392 	}
5393 
5394 	return 0;
5395 }
5396 
5397 int check_func_arg_reg_off(struct bpf_verifier_env *env,
5398 			   const struct bpf_reg_state *reg, int regno,
5399 			   enum bpf_arg_type arg_type,
5400 			   bool is_release_func)
5401 {
5402 	bool fixed_off_ok = false, release_reg;
5403 	enum bpf_reg_type type = reg->type;
5404 
5405 	switch ((u32)type) {
5406 	case SCALAR_VALUE:
5407 	/* Pointer types where reg offset is explicitly allowed: */
5408 	case PTR_TO_PACKET:
5409 	case PTR_TO_PACKET_META:
5410 	case PTR_TO_MAP_KEY:
5411 	case PTR_TO_MAP_VALUE:
5412 	case PTR_TO_MEM:
5413 	case PTR_TO_MEM | MEM_RDONLY:
5414 	case PTR_TO_MEM | MEM_ALLOC:
5415 	case PTR_TO_BUF:
5416 	case PTR_TO_BUF | MEM_RDONLY:
5417 	case PTR_TO_STACK:
5418 		/* Some of the argument types nevertheless require a
5419 		 * zero register offset.
5420 		 */
5421 		if (arg_type != ARG_PTR_TO_ALLOC_MEM)
5422 			return 0;
5423 		break;
5424 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
5425 	 * fixed offset.
5426 	 */
5427 	case PTR_TO_BTF_ID:
5428 		/* When referenced PTR_TO_BTF_ID is passed to release function,
5429 		 * it's fixed offset must be 0. We rely on the property that
5430 		 * only one referenced register can be passed to BPF helpers and
5431 		 * kfuncs. In the other cases, fixed offset can be non-zero.
5432 		 */
5433 		release_reg = is_release_func && reg->ref_obj_id;
5434 		if (release_reg && reg->off) {
5435 			verbose(env, "R%d must have zero offset when passed to release func\n",
5436 				regno);
5437 			return -EINVAL;
5438 		}
5439 		/* For release_reg == true, fixed_off_ok must be false, but we
5440 		 * already checked and rejected reg->off != 0 above, so set to
5441 		 * true to allow fixed offset for all other cases.
5442 		 */
5443 		fixed_off_ok = true;
5444 		break;
5445 	default:
5446 		break;
5447 	}
5448 	return __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
5449 }
5450 
5451 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
5452 			  struct bpf_call_arg_meta *meta,
5453 			  const struct bpf_func_proto *fn)
5454 {
5455 	u32 regno = BPF_REG_1 + arg;
5456 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5457 	enum bpf_arg_type arg_type = fn->arg_type[arg];
5458 	enum bpf_reg_type type = reg->type;
5459 	int err = 0;
5460 
5461 	if (arg_type == ARG_DONTCARE)
5462 		return 0;
5463 
5464 	err = check_reg_arg(env, regno, SRC_OP);
5465 	if (err)
5466 		return err;
5467 
5468 	if (arg_type == ARG_ANYTHING) {
5469 		if (is_pointer_value(env, regno)) {
5470 			verbose(env, "R%d leaks addr into helper function\n",
5471 				regno);
5472 			return -EACCES;
5473 		}
5474 		return 0;
5475 	}
5476 
5477 	if (type_is_pkt_pointer(type) &&
5478 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
5479 		verbose(env, "helper access to the packet is not allowed\n");
5480 		return -EACCES;
5481 	}
5482 
5483 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
5484 	    base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
5485 		err = resolve_map_arg_type(env, meta, &arg_type);
5486 		if (err)
5487 			return err;
5488 	}
5489 
5490 	if (register_is_null(reg) && type_may_be_null(arg_type))
5491 		/* A NULL register has a SCALAR_VALUE type, so skip
5492 		 * type checking.
5493 		 */
5494 		goto skip_type_check;
5495 
5496 	err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
5497 	if (err)
5498 		return err;
5499 
5500 	err = check_func_arg_reg_off(env, reg, regno, arg_type, is_release_function(meta->func_id));
5501 	if (err)
5502 		return err;
5503 
5504 skip_type_check:
5505 	/* check_func_arg_reg_off relies on only one referenced register being
5506 	 * allowed for BPF helpers.
5507 	 */
5508 	if (reg->ref_obj_id) {
5509 		if (meta->ref_obj_id) {
5510 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
5511 				regno, reg->ref_obj_id,
5512 				meta->ref_obj_id);
5513 			return -EFAULT;
5514 		}
5515 		meta->ref_obj_id = reg->ref_obj_id;
5516 	}
5517 
5518 	if (arg_type == ARG_CONST_MAP_PTR) {
5519 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
5520 		if (meta->map_ptr) {
5521 			/* Use map_uid (which is unique id of inner map) to reject:
5522 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
5523 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
5524 			 * if (inner_map1 && inner_map2) {
5525 			 *     timer = bpf_map_lookup_elem(inner_map1);
5526 			 *     if (timer)
5527 			 *         // mismatch would have been allowed
5528 			 *         bpf_timer_init(timer, inner_map2);
5529 			 * }
5530 			 *
5531 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
5532 			 */
5533 			if (meta->map_ptr != reg->map_ptr ||
5534 			    meta->map_uid != reg->map_uid) {
5535 				verbose(env,
5536 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
5537 					meta->map_uid, reg->map_uid);
5538 				return -EINVAL;
5539 			}
5540 		}
5541 		meta->map_ptr = reg->map_ptr;
5542 		meta->map_uid = reg->map_uid;
5543 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
5544 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
5545 		 * check that [key, key + map->key_size) are within
5546 		 * stack limits and initialized
5547 		 */
5548 		if (!meta->map_ptr) {
5549 			/* In the function declaration, map_ptr must come before
5550 			 * map_key, so that the map is verified and known before
5551 			 * we have to check map_key here. Otherwise the kernel
5552 			 * subsystem misconfigured the verifier.
5553 			 */
5554 			verbose(env, "invalid map_ptr to access map->key\n");
5555 			return -EACCES;
5556 		}
5557 		err = check_helper_mem_access(env, regno,
5558 					      meta->map_ptr->key_size, false,
5559 					      NULL);
5560 	} else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE ||
5561 		   base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) {
5562 		if (type_may_be_null(arg_type) && register_is_null(reg))
5563 			return 0;
5564 
5565 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
5566 		 * check [value, value + map->value_size) validity
5567 		 */
5568 		if (!meta->map_ptr) {
5569 			/* kernel subsystem misconfigured verifier */
5570 			verbose(env, "invalid map_ptr to access map->value\n");
5571 			return -EACCES;
5572 		}
5573 		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
5574 		err = check_helper_mem_access(env, regno,
5575 					      meta->map_ptr->value_size, false,
5576 					      meta);
5577 	} else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
5578 		if (!reg->btf_id) {
5579 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
5580 			return -EACCES;
5581 		}
5582 		meta->ret_btf = reg->btf;
5583 		meta->ret_btf_id = reg->btf_id;
5584 	} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
5585 		if (meta->func_id == BPF_FUNC_spin_lock) {
5586 			if (process_spin_lock(env, regno, true))
5587 				return -EACCES;
5588 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
5589 			if (process_spin_lock(env, regno, false))
5590 				return -EACCES;
5591 		} else {
5592 			verbose(env, "verifier internal error\n");
5593 			return -EFAULT;
5594 		}
5595 	} else if (arg_type == ARG_PTR_TO_TIMER) {
5596 		if (process_timer_func(env, regno, meta))
5597 			return -EACCES;
5598 	} else if (arg_type == ARG_PTR_TO_FUNC) {
5599 		meta->subprogno = reg->subprogno;
5600 	} else if (arg_type_is_mem_ptr(arg_type)) {
5601 		/* The access to this pointer is only checked when we hit the
5602 		 * next is_mem_size argument below.
5603 		 */
5604 		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
5605 	} else if (arg_type_is_mem_size(arg_type)) {
5606 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
5607 
5608 		err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
5609 	} else if (arg_type_is_alloc_size(arg_type)) {
5610 		if (!tnum_is_const(reg->var_off)) {
5611 			verbose(env, "R%d is not a known constant\n",
5612 				regno);
5613 			return -EACCES;
5614 		}
5615 		meta->mem_size = reg->var_off.value;
5616 	} else if (arg_type_is_int_ptr(arg_type)) {
5617 		int size = int_ptr_type_to_size(arg_type);
5618 
5619 		err = check_helper_mem_access(env, regno, size, false, meta);
5620 		if (err)
5621 			return err;
5622 		err = check_ptr_alignment(env, reg, 0, size, true);
5623 	} else if (arg_type == ARG_PTR_TO_CONST_STR) {
5624 		struct bpf_map *map = reg->map_ptr;
5625 		int map_off;
5626 		u64 map_addr;
5627 		char *str_ptr;
5628 
5629 		if (!bpf_map_is_rdonly(map)) {
5630 			verbose(env, "R%d does not point to a readonly map\n", regno);
5631 			return -EACCES;
5632 		}
5633 
5634 		if (!tnum_is_const(reg->var_off)) {
5635 			verbose(env, "R%d is not a constant address\n", regno);
5636 			return -EACCES;
5637 		}
5638 
5639 		if (!map->ops->map_direct_value_addr) {
5640 			verbose(env, "no direct value access support for this map type\n");
5641 			return -EACCES;
5642 		}
5643 
5644 		err = check_map_access(env, regno, reg->off,
5645 				       map->value_size - reg->off, false);
5646 		if (err)
5647 			return err;
5648 
5649 		map_off = reg->off + reg->var_off.value;
5650 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
5651 		if (err) {
5652 			verbose(env, "direct value access on string failed\n");
5653 			return err;
5654 		}
5655 
5656 		str_ptr = (char *)(long)(map_addr);
5657 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
5658 			verbose(env, "string is not zero-terminated\n");
5659 			return -EINVAL;
5660 		}
5661 	}
5662 
5663 	return err;
5664 }
5665 
5666 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
5667 {
5668 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
5669 	enum bpf_prog_type type = resolve_prog_type(env->prog);
5670 
5671 	if (func_id != BPF_FUNC_map_update_elem)
5672 		return false;
5673 
5674 	/* It's not possible to get access to a locked struct sock in these
5675 	 * contexts, so updating is safe.
5676 	 */
5677 	switch (type) {
5678 	case BPF_PROG_TYPE_TRACING:
5679 		if (eatype == BPF_TRACE_ITER)
5680 			return true;
5681 		break;
5682 	case BPF_PROG_TYPE_SOCKET_FILTER:
5683 	case BPF_PROG_TYPE_SCHED_CLS:
5684 	case BPF_PROG_TYPE_SCHED_ACT:
5685 	case BPF_PROG_TYPE_XDP:
5686 	case BPF_PROG_TYPE_SK_REUSEPORT:
5687 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
5688 	case BPF_PROG_TYPE_SK_LOOKUP:
5689 		return true;
5690 	default:
5691 		break;
5692 	}
5693 
5694 	verbose(env, "cannot update sockmap in this context\n");
5695 	return false;
5696 }
5697 
5698 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
5699 {
5700 	return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
5701 }
5702 
5703 static int check_map_func_compatibility(struct bpf_verifier_env *env,
5704 					struct bpf_map *map, int func_id)
5705 {
5706 	if (!map)
5707 		return 0;
5708 
5709 	/* We need a two way check, first is from map perspective ... */
5710 	switch (map->map_type) {
5711 	case BPF_MAP_TYPE_PROG_ARRAY:
5712 		if (func_id != BPF_FUNC_tail_call)
5713 			goto error;
5714 		break;
5715 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5716 		if (func_id != BPF_FUNC_perf_event_read &&
5717 		    func_id != BPF_FUNC_perf_event_output &&
5718 		    func_id != BPF_FUNC_skb_output &&
5719 		    func_id != BPF_FUNC_perf_event_read_value &&
5720 		    func_id != BPF_FUNC_xdp_output)
5721 			goto error;
5722 		break;
5723 	case BPF_MAP_TYPE_RINGBUF:
5724 		if (func_id != BPF_FUNC_ringbuf_output &&
5725 		    func_id != BPF_FUNC_ringbuf_reserve &&
5726 		    func_id != BPF_FUNC_ringbuf_query)
5727 			goto error;
5728 		break;
5729 	case BPF_MAP_TYPE_STACK_TRACE:
5730 		if (func_id != BPF_FUNC_get_stackid)
5731 			goto error;
5732 		break;
5733 	case BPF_MAP_TYPE_CGROUP_ARRAY:
5734 		if (func_id != BPF_FUNC_skb_under_cgroup &&
5735 		    func_id != BPF_FUNC_current_task_under_cgroup)
5736 			goto error;
5737 		break;
5738 	case BPF_MAP_TYPE_CGROUP_STORAGE:
5739 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
5740 		if (func_id != BPF_FUNC_get_local_storage)
5741 			goto error;
5742 		break;
5743 	case BPF_MAP_TYPE_DEVMAP:
5744 	case BPF_MAP_TYPE_DEVMAP_HASH:
5745 		if (func_id != BPF_FUNC_redirect_map &&
5746 		    func_id != BPF_FUNC_map_lookup_elem)
5747 			goto error;
5748 		break;
5749 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
5750 	 * appear.
5751 	 */
5752 	case BPF_MAP_TYPE_CPUMAP:
5753 		if (func_id != BPF_FUNC_redirect_map)
5754 			goto error;
5755 		break;
5756 	case BPF_MAP_TYPE_XSKMAP:
5757 		if (func_id != BPF_FUNC_redirect_map &&
5758 		    func_id != BPF_FUNC_map_lookup_elem)
5759 			goto error;
5760 		break;
5761 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5762 	case BPF_MAP_TYPE_HASH_OF_MAPS:
5763 		if (func_id != BPF_FUNC_map_lookup_elem)
5764 			goto error;
5765 		break;
5766 	case BPF_MAP_TYPE_SOCKMAP:
5767 		if (func_id != BPF_FUNC_sk_redirect_map &&
5768 		    func_id != BPF_FUNC_sock_map_update &&
5769 		    func_id != BPF_FUNC_map_delete_elem &&
5770 		    func_id != BPF_FUNC_msg_redirect_map &&
5771 		    func_id != BPF_FUNC_sk_select_reuseport &&
5772 		    func_id != BPF_FUNC_map_lookup_elem &&
5773 		    !may_update_sockmap(env, func_id))
5774 			goto error;
5775 		break;
5776 	case BPF_MAP_TYPE_SOCKHASH:
5777 		if (func_id != BPF_FUNC_sk_redirect_hash &&
5778 		    func_id != BPF_FUNC_sock_hash_update &&
5779 		    func_id != BPF_FUNC_map_delete_elem &&
5780 		    func_id != BPF_FUNC_msg_redirect_hash &&
5781 		    func_id != BPF_FUNC_sk_select_reuseport &&
5782 		    func_id != BPF_FUNC_map_lookup_elem &&
5783 		    !may_update_sockmap(env, func_id))
5784 			goto error;
5785 		break;
5786 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
5787 		if (func_id != BPF_FUNC_sk_select_reuseport)
5788 			goto error;
5789 		break;
5790 	case BPF_MAP_TYPE_QUEUE:
5791 	case BPF_MAP_TYPE_STACK:
5792 		if (func_id != BPF_FUNC_map_peek_elem &&
5793 		    func_id != BPF_FUNC_map_pop_elem &&
5794 		    func_id != BPF_FUNC_map_push_elem)
5795 			goto error;
5796 		break;
5797 	case BPF_MAP_TYPE_SK_STORAGE:
5798 		if (func_id != BPF_FUNC_sk_storage_get &&
5799 		    func_id != BPF_FUNC_sk_storage_delete)
5800 			goto error;
5801 		break;
5802 	case BPF_MAP_TYPE_INODE_STORAGE:
5803 		if (func_id != BPF_FUNC_inode_storage_get &&
5804 		    func_id != BPF_FUNC_inode_storage_delete)
5805 			goto error;
5806 		break;
5807 	case BPF_MAP_TYPE_TASK_STORAGE:
5808 		if (func_id != BPF_FUNC_task_storage_get &&
5809 		    func_id != BPF_FUNC_task_storage_delete)
5810 			goto error;
5811 		break;
5812 	case BPF_MAP_TYPE_BLOOM_FILTER:
5813 		if (func_id != BPF_FUNC_map_peek_elem &&
5814 		    func_id != BPF_FUNC_map_push_elem)
5815 			goto error;
5816 		break;
5817 	default:
5818 		break;
5819 	}
5820 
5821 	/* ... and second from the function itself. */
5822 	switch (func_id) {
5823 	case BPF_FUNC_tail_call:
5824 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
5825 			goto error;
5826 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
5827 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
5828 			return -EINVAL;
5829 		}
5830 		break;
5831 	case BPF_FUNC_perf_event_read:
5832 	case BPF_FUNC_perf_event_output:
5833 	case BPF_FUNC_perf_event_read_value:
5834 	case BPF_FUNC_skb_output:
5835 	case BPF_FUNC_xdp_output:
5836 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
5837 			goto error;
5838 		break;
5839 	case BPF_FUNC_ringbuf_output:
5840 	case BPF_FUNC_ringbuf_reserve:
5841 	case BPF_FUNC_ringbuf_query:
5842 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
5843 			goto error;
5844 		break;
5845 	case BPF_FUNC_get_stackid:
5846 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
5847 			goto error;
5848 		break;
5849 	case BPF_FUNC_current_task_under_cgroup:
5850 	case BPF_FUNC_skb_under_cgroup:
5851 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
5852 			goto error;
5853 		break;
5854 	case BPF_FUNC_redirect_map:
5855 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
5856 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
5857 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
5858 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
5859 			goto error;
5860 		break;
5861 	case BPF_FUNC_sk_redirect_map:
5862 	case BPF_FUNC_msg_redirect_map:
5863 	case BPF_FUNC_sock_map_update:
5864 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
5865 			goto error;
5866 		break;
5867 	case BPF_FUNC_sk_redirect_hash:
5868 	case BPF_FUNC_msg_redirect_hash:
5869 	case BPF_FUNC_sock_hash_update:
5870 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
5871 			goto error;
5872 		break;
5873 	case BPF_FUNC_get_local_storage:
5874 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
5875 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
5876 			goto error;
5877 		break;
5878 	case BPF_FUNC_sk_select_reuseport:
5879 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
5880 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
5881 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
5882 			goto error;
5883 		break;
5884 	case BPF_FUNC_map_pop_elem:
5885 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
5886 		    map->map_type != BPF_MAP_TYPE_STACK)
5887 			goto error;
5888 		break;
5889 	case BPF_FUNC_map_peek_elem:
5890 	case BPF_FUNC_map_push_elem:
5891 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
5892 		    map->map_type != BPF_MAP_TYPE_STACK &&
5893 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
5894 			goto error;
5895 		break;
5896 	case BPF_FUNC_sk_storage_get:
5897 	case BPF_FUNC_sk_storage_delete:
5898 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
5899 			goto error;
5900 		break;
5901 	case BPF_FUNC_inode_storage_get:
5902 	case BPF_FUNC_inode_storage_delete:
5903 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
5904 			goto error;
5905 		break;
5906 	case BPF_FUNC_task_storage_get:
5907 	case BPF_FUNC_task_storage_delete:
5908 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
5909 			goto error;
5910 		break;
5911 	default:
5912 		break;
5913 	}
5914 
5915 	return 0;
5916 error:
5917 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
5918 		map->map_type, func_id_name(func_id), func_id);
5919 	return -EINVAL;
5920 }
5921 
5922 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
5923 {
5924 	int count = 0;
5925 
5926 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
5927 		count++;
5928 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
5929 		count++;
5930 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
5931 		count++;
5932 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
5933 		count++;
5934 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
5935 		count++;
5936 
5937 	/* We only support one arg being in raw mode at the moment,
5938 	 * which is sufficient for the helper functions we have
5939 	 * right now.
5940 	 */
5941 	return count <= 1;
5942 }
5943 
5944 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
5945 				    enum bpf_arg_type arg_next)
5946 {
5947 	return (arg_type_is_mem_ptr(arg_curr) &&
5948 	        !arg_type_is_mem_size(arg_next)) ||
5949 	       (!arg_type_is_mem_ptr(arg_curr) &&
5950 		arg_type_is_mem_size(arg_next));
5951 }
5952 
5953 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
5954 {
5955 	/* bpf_xxx(..., buf, len) call will access 'len'
5956 	 * bytes from memory 'buf'. Both arg types need
5957 	 * to be paired, so make sure there's no buggy
5958 	 * helper function specification.
5959 	 */
5960 	if (arg_type_is_mem_size(fn->arg1_type) ||
5961 	    arg_type_is_mem_ptr(fn->arg5_type)  ||
5962 	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
5963 	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
5964 	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
5965 	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
5966 		return false;
5967 
5968 	return true;
5969 }
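
/* Example (editorial sketch, not part of the kernel sources): a helper proto
 * shaped the way check_arg_pair_ok() expects, loosely modeled on
 * bpf_probe_read_kernel_str_proto. The memory pointer (arg1) is immediately
 * followed by its size (arg2); a size argument that is not preceded by a
 * memory pointer (or vice versa) would make the proto invalid.
 *
 *	static const struct bpf_func_proto example_read_str_proto = {
 *		.func		= example_read_str,
 *		.gpl_only	= true,
 *		.ret_type	= RET_INTEGER,
 *		.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 *		.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 *		.arg3_type	= ARG_ANYTHING,
 *	};
 */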
5970 
5971 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
5972 {
5973 	int count = 0;
5974 
5975 	if (arg_type_may_be_refcounted(fn->arg1_type))
5976 		count++;
5977 	if (arg_type_may_be_refcounted(fn->arg2_type))
5978 		count++;
5979 	if (arg_type_may_be_refcounted(fn->arg3_type))
5980 		count++;
5981 	if (arg_type_may_be_refcounted(fn->arg4_type))
5982 		count++;
5983 	if (arg_type_may_be_refcounted(fn->arg5_type))
5984 		count++;
5985 
5986 	/* A reference acquiring function cannot acquire
5987 	 * another refcounted ptr.
5988 	 */
5989 	if (may_be_acquire_function(func_id) && count)
5990 		return false;
5991 
5992 	/* We only support one arg that may be refcounted at the moment,
5993 	 * which is sufficient for the helper functions we have right now.
5994 	 */
5995 	return count <= 1;
5996 }
5997 
5998 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
5999 {
6000 	int i;
6001 
6002 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
6003 		if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
6004 			return false;
6005 
6006 		if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
6007 			return false;
6008 	}
6009 
6010 	return true;
6011 }
6012 
6013 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
6014 {
6015 	return check_raw_mode_ok(fn) &&
6016 	       check_arg_pair_ok(fn) &&
6017 	       check_btf_id_ok(fn) &&
6018 	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
6019 }
6020 
6021 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
6022  * are now invalid, so turn them into unknown SCALAR_VALUE.
6023  */
6024 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
6025 				     struct bpf_func_state *state)
6026 {
6027 	struct bpf_reg_state *regs = state->regs, *reg;
6028 	int i;
6029 
6030 	for (i = 0; i < MAX_BPF_REG; i++)
6031 		if (reg_is_pkt_pointer_any(&regs[i]))
6032 			mark_reg_unknown(env, regs, i);
6033 
6034 	bpf_for_each_spilled_reg(i, state, reg) {
6035 		if (!reg)
6036 			continue;
6037 		if (reg_is_pkt_pointer_any(reg))
6038 			__mark_reg_unknown(env, reg);
6039 	}
6040 }
6041 
6042 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
6043 {
6044 	struct bpf_verifier_state *vstate = env->cur_state;
6045 	int i;
6046 
6047 	for (i = 0; i <= vstate->curframe; i++)
6048 		__clear_all_pkt_pointers(env, vstate->frame[i]);
6049 }
6050 
6051 enum {
6052 	AT_PKT_END = -1,
6053 	BEYOND_PKT_END = -2,
6054 };
6055 
6056 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
6057 {
6058 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
6059 	struct bpf_reg_state *reg = &state->regs[regn];
6060 
6061 	if (reg->type != PTR_TO_PACKET)
6062 		/* PTR_TO_PACKET_META is not supported yet */
6063 		return;
6064 
6065 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
6066 	 * How far beyond pkt_end it goes is unknown.
6067 	 * if (!range_open) it's the case of pkt >= pkt_end;
6068 	 * if (range_open) it's the case of pkt > pkt_end, and
6069 	 * hence this pointer is at least 1 byte past pkt_end.
6070 	 */
6071 	if (range_open)
6072 		reg->range = BEYOND_PKT_END;
6073 	else
6074 		reg->range = AT_PKT_END;
6075 }
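
/* Example (editorial sketch, not part of the kernel sources): the XDP-style
 * bounds check that produces the AT_PKT_END/BEYOND_PKT_END marking above.
 * 'ctx' is a struct xdp_md pointer; the remaining names are standard uapi.
 *
 *	void *data     = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)  // compared reg marked past pkt_end
 *		return XDP_DROP;
 *	// on the fall-through branch eth->h_proto is provably in bounds
 *	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
 */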
6076 
6077 static void release_reg_references(struct bpf_verifier_env *env,
6078 				   struct bpf_func_state *state,
6079 				   int ref_obj_id)
6080 {
6081 	struct bpf_reg_state *regs = state->regs, *reg;
6082 	int i;
6083 
6084 	for (i = 0; i < MAX_BPF_REG; i++)
6085 		if (regs[i].ref_obj_id == ref_obj_id)
6086 			mark_reg_unknown(env, regs, i);
6087 
6088 	bpf_for_each_spilled_reg(i, state, reg) {
6089 		if (!reg)
6090 			continue;
6091 		if (reg->ref_obj_id == ref_obj_id)
6092 			__mark_reg_unknown(env, reg);
6093 	}
6094 }
6095 
6096 /* The pointer with the specified id has released its reference to kernel
6097  * resources. Identify all copies of the same pointer and clear the reference.
6098  */
6099 static int release_reference(struct bpf_verifier_env *env,
6100 			     int ref_obj_id)
6101 {
6102 	struct bpf_verifier_state *vstate = env->cur_state;
6103 	int err;
6104 	int i;
6105 
6106 	err = release_reference_state(cur_func(env), ref_obj_id);
6107 	if (err)
6108 		return err;
6109 
6110 	for (i = 0; i <= vstate->curframe; i++)
6111 		release_reg_references(env, vstate->frame[i], ref_obj_id);
6112 
6113 	return 0;
6114 }
6115 
6116 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
6117 				    struct bpf_reg_state *regs)
6118 {
6119 	int i;
6120 
6121 	/* after the call registers r0 - r5 were scratched */
6122 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6123 		mark_reg_not_init(env, regs, caller_saved[i]);
6124 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6125 	}
6126 }
6127 
6128 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
6129 				   struct bpf_func_state *caller,
6130 				   struct bpf_func_state *callee,
6131 				   int insn_idx);
6132 
6133 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6134 			     int *insn_idx, int subprog,
6135 			     set_callee_state_fn set_callee_state_cb)
6136 {
6137 	struct bpf_verifier_state *state = env->cur_state;
6138 	struct bpf_func_info_aux *func_info_aux;
6139 	struct bpf_func_state *caller, *callee;
6140 	int err;
6141 	bool is_global = false;
6142 
6143 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
6144 		verbose(env, "the call stack of %d frames is too deep\n",
6145 			state->curframe + 2);
6146 		return -E2BIG;
6147 	}
6148 
6149 	caller = state->frame[state->curframe];
6150 	if (state->frame[state->curframe + 1]) {
6151 		verbose(env, "verifier bug. Frame %d already allocated\n",
6152 			state->curframe + 1);
6153 		return -EFAULT;
6154 	}
6155 
6156 	func_info_aux = env->prog->aux->func_info_aux;
6157 	if (func_info_aux)
6158 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6159 	err = btf_check_subprog_arg_match(env, subprog, caller->regs);
6160 	if (err == -EFAULT)
6161 		return err;
6162 	if (is_global) {
6163 		if (err) {
6164 			verbose(env, "Caller passes invalid args into func#%d\n",
6165 				subprog);
6166 			return err;
6167 		} else {
6168 			if (env->log.level & BPF_LOG_LEVEL)
6169 				verbose(env,
6170 					"Func#%d is global and valid. Skipping.\n",
6171 					subprog);
6172 			clear_caller_saved_regs(env, caller->regs);
6173 
6174 			/* All global functions return a 64-bit SCALAR_VALUE */
6175 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
6176 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6177 
6178 			/* continue with next insn after call */
6179 			return 0;
6180 		}
6181 	}
6182 
6183 	if (insn->code == (BPF_JMP | BPF_CALL) &&
6184 	    insn->src_reg == 0 &&
6185 	    insn->imm == BPF_FUNC_timer_set_callback) {
6186 		struct bpf_verifier_state *async_cb;
6187 
6188 		/* there is no real recursion here. timer callbacks are async */
6189 		env->subprog_info[subprog].is_async_cb = true;
6190 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
6191 					 *insn_idx, subprog);
6192 		if (!async_cb)
6193 			return -EFAULT;
6194 		callee = async_cb->frame[0];
6195 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
6196 
6197 		/* Convert bpf_timer_set_callback() args into timer callback args */
6198 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
6199 		if (err)
6200 			return err;
6201 
6202 		clear_caller_saved_regs(env, caller->regs);
6203 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
6204 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6205 		/* continue with next insn after call */
6206 		return 0;
6207 	}
6208 
6209 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
6210 	if (!callee)
6211 		return -ENOMEM;
6212 	state->frame[state->curframe + 1] = callee;
6213 
6214 	/* callee cannot access r0, r6 - r9 for reading and has to write
6215 	 * into its own stack before reading from it.
6216 	 * callee can read/write into caller's stack
6217 	 */
6218 	init_func_state(env, callee,
6219 			/* remember the callsite, it will be used by bpf_exit */
6220 			*insn_idx /* callsite */,
6221 			state->curframe + 1 /* frameno within this callchain */,
6222 			subprog /* subprog number within this prog */);
6223 
6224 	/* Transfer references to the callee */
6225 	err = copy_reference_state(callee, caller);
6226 	if (err)
6227 		return err;
6228 
6229 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
6230 	if (err)
6231 		return err;
6232 
6233 	clear_caller_saved_regs(env, caller->regs);
6234 
6235 	/* only increment it after check_reg_arg() finished */
6236 	state->curframe++;
6237 
6238 	/* and go analyze first insn of the callee */
6239 	*insn_idx = env->subprog_info[subprog].start - 1;
6240 
6241 	if (env->log.level & BPF_LOG_LEVEL) {
6242 		verbose(env, "caller:\n");
6243 		print_verifier_state(env, caller, true);
6244 		verbose(env, "callee:\n");
6245 		print_verifier_state(env, callee, true);
6246 	}
6247 	return 0;
6248 }
6249 
6250 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
6251 				   struct bpf_func_state *caller,
6252 				   struct bpf_func_state *callee)
6253 {
6254 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
6255 	 *      void *callback_ctx, u64 flags);
6256 	 * callback_fn(struct bpf_map *map, void *key, void *value,
6257 	 *      void *callback_ctx);
6258 	 */
6259 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6260 
6261 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6262 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6263 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6264 
6265 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6266 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6267 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6268 
6269 	/* pointer to stack or null */
6270 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
6271 
6272 	/* unused */
6273 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6274 	return 0;
6275 }
6276 
6277 static int set_callee_state(struct bpf_verifier_env *env,
6278 			    struct bpf_func_state *caller,
6279 			    struct bpf_func_state *callee, int insn_idx)
6280 {
6281 	int i;
6282 
6283 	/* copy r1 - r5 args that callee can access.  The copy includes parent
6284 	 * pointers, which connects us up to the liveness chain
6285 	 */
6286 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
6287 		callee->regs[i] = caller->regs[i];
6288 	return 0;
6289 }
6290 
6291 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6292 			   int *insn_idx)
6293 {
6294 	int subprog, target_insn;
6295 
6296 	target_insn = *insn_idx + insn->imm + 1;
6297 	subprog = find_subprog(env, target_insn);
6298 	if (subprog < 0) {
6299 		verbose(env, "verifier bug. No program starts at insn %d\n",
6300 			target_insn);
6301 		return -EFAULT;
6302 	}
6303 
6304 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
6305 }
6306 
6307 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
6308 				       struct bpf_func_state *caller,
6309 				       struct bpf_func_state *callee,
6310 				       int insn_idx)
6311 {
6312 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
6313 	struct bpf_map *map;
6314 	int err;
6315 
6316 	if (bpf_map_ptr_poisoned(insn_aux)) {
6317 		verbose(env, "tail_call abusing map_ptr\n");
6318 		return -EINVAL;
6319 	}
6320 
6321 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
6322 	if (!map->ops->map_set_for_each_callback_args ||
6323 	    !map->ops->map_for_each_callback) {
6324 		verbose(env, "callback function not allowed for map\n");
6325 		return -ENOTSUPP;
6326 	}
6327 
6328 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
6329 	if (err)
6330 		return err;
6331 
6332 	callee->in_callback_fn = true;
6333 	return 0;
6334 }
6335 
6336 static int set_loop_callback_state(struct bpf_verifier_env *env,
6337 				   struct bpf_func_state *caller,
6338 				   struct bpf_func_state *callee,
6339 				   int insn_idx)
6340 {
6341 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
6342 	 *	    u64 flags);
6343 	 * callback_fn(u32 index, void *callback_ctx);
6344 	 */
6345 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
6346 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
6347 
6348 	/* unused */
6349 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
6350 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6351 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6352 
6353 	callee->in_callback_fn = true;
6354 	return 0;
6355 }
6356 
6357 static int set_timer_callback_state(struct bpf_verifier_env *env,
6358 				    struct bpf_func_state *caller,
6359 				    struct bpf_func_state *callee,
6360 				    int insn_idx)
6361 {
6362 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
6363 
6364 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
6365 	 * callback_fn(struct bpf_map *map, void *key, void *value);
6366 	 */
6367 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
6368 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
6369 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
6370 
6371 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6372 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6373 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
6374 
6375 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6376 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6377 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
6378 
6379 	/* unused */
6380 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6381 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6382 	callee->in_async_callback_fn = true;
6383 	return 0;
6384 }
6385 
6386 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
6387 				       struct bpf_func_state *caller,
6388 				       struct bpf_func_state *callee,
6389 				       int insn_idx)
6390 {
6391 	/* bpf_find_vma(struct task_struct *task, u64 addr,
6392 	 *               void *callback_fn, void *callback_ctx, u64 flags)
6393 	 * (callback_fn)(struct task_struct *task,
6394 	 *               struct vm_area_struct *vma, void *callback_ctx);
6395 	 */
6396 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6397 
6398 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
6399 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6400 	callee->regs[BPF_REG_2].btf = btf_vmlinux;
6401 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
6402 
6403 	/* pointer to stack or null */
6404 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
6405 
6406 	/* unused */
6407 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6408 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6409 	callee->in_callback_fn = true;
6410 	return 0;
6411 }
6412 
6413 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
6414 {
6415 	struct bpf_verifier_state *state = env->cur_state;
6416 	struct bpf_func_state *caller, *callee;
6417 	struct bpf_reg_state *r0;
6418 	int err;
6419 
6420 	callee = state->frame[state->curframe];
6421 	r0 = &callee->regs[BPF_REG_0];
6422 	if (r0->type == PTR_TO_STACK) {
6423 		/* technically it's ok to return caller's stack pointer
6424 		 * (or caller's caller's pointer) back to the caller,
6425 		 * since these pointers are valid. Only current stack
6426 		 * pointer will be invalid as soon as function exits,
6427 		 * but let's be conservative
6428 		 */
6429 		verbose(env, "cannot return stack pointer to the caller\n");
6430 		return -EINVAL;
6431 	}
6432 
6433 	state->curframe--;
6434 	caller = state->frame[state->curframe];
6435 	if (callee->in_callback_fn) {
6436 		/* enforce R0 return value range [0, 1]. */
6437 		struct tnum range = tnum_range(0, 1);
6438 
6439 		if (r0->type != SCALAR_VALUE) {
6440 			verbose(env, "R0 not a scalar value\n");
6441 			return -EACCES;
6442 		}
6443 		if (!tnum_in(range, r0->var_off)) {
6444 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
6445 			return -EINVAL;
6446 		}
6447 	} else {
6448 		/* return to the caller whatever r0 had in the callee */
6449 		caller->regs[BPF_REG_0] = *r0;
6450 	}
6451 
6452 	/* Transfer references to the caller */
6453 	err = copy_reference_state(caller, callee);
6454 	if (err)
6455 		return err;
6456 
6457 	*insn_idx = callee->callsite + 1;
6458 	if (env->log.level & BPF_LOG_LEVEL) {
6459 		verbose(env, "returning from callee:\n");
6460 		print_verifier_state(env, callee, true);
6461 		verbose(env, "to caller at %d:\n", *insn_idx);
6462 		print_verifier_state(env, caller, true);
6463 	}
6464 	/* clear everything in the callee */
6465 	free_func_state(callee);
6466 	state->frame[state->curframe + 1] = NULL;
6467 	return 0;
6468 }
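
/* Example (editorial sketch, not part of the kernel sources): a callback
 * whose return value stays within the [0, 1] range enforced above for
 * in_callback_fn frames, e.g. when used with bpf_loop(). The struct and
 * callback names are hypothetical.
 *
 *	static long sum_cb(u32 index, void *ctx)
 *	{
 *		struct cb_ctx *c = ctx;
 *
 *		c->sum += index;
 *		return c->sum > c->limit ? 1 : 0;   // 1 stops the iteration
 *	}
 */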
6469 
6470 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
6471 				   int func_id,
6472 				   struct bpf_call_arg_meta *meta)
6473 {
6474 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
6475 
6476 	if (ret_type != RET_INTEGER ||
6477 	    (func_id != BPF_FUNC_get_stack &&
6478 	     func_id != BPF_FUNC_get_task_stack &&
6479 	     func_id != BPF_FUNC_probe_read_str &&
6480 	     func_id != BPF_FUNC_probe_read_kernel_str &&
6481 	     func_id != BPF_FUNC_probe_read_user_str))
6482 		return;
6483 
6484 	ret_reg->smax_value = meta->msize_max_value;
6485 	ret_reg->s32_max_value = meta->msize_max_value;
6486 	ret_reg->smin_value = -MAX_ERRNO;
6487 	ret_reg->s32_min_value = -MAX_ERRNO;
6488 	__reg_deduce_bounds(ret_reg);
6489 	__reg_bound_offset(ret_reg);
6490 	__update_reg_bounds(ret_reg);
6491 }
6492 
6493 static int
6494 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
6495 		int func_id, int insn_idx)
6496 {
6497 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
6498 	struct bpf_map *map = meta->map_ptr;
6499 
6500 	if (func_id != BPF_FUNC_tail_call &&
6501 	    func_id != BPF_FUNC_map_lookup_elem &&
6502 	    func_id != BPF_FUNC_map_update_elem &&
6503 	    func_id != BPF_FUNC_map_delete_elem &&
6504 	    func_id != BPF_FUNC_map_push_elem &&
6505 	    func_id != BPF_FUNC_map_pop_elem &&
6506 	    func_id != BPF_FUNC_map_peek_elem &&
6507 	    func_id != BPF_FUNC_for_each_map_elem &&
6508 	    func_id != BPF_FUNC_redirect_map)
6509 		return 0;
6510 
6511 	if (map == NULL) {
6512 		verbose(env, "kernel subsystem misconfigured verifier\n");
6513 		return -EINVAL;
6514 	}
6515 
6516 	/* In case of read-only, some additional restrictions
6517 	 * need to be applied in order to prevent altering the
6518 	 * state of the map from program side.
6519 	 */
6520 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
6521 	    (func_id == BPF_FUNC_map_delete_elem ||
6522 	     func_id == BPF_FUNC_map_update_elem ||
6523 	     func_id == BPF_FUNC_map_push_elem ||
6524 	     func_id == BPF_FUNC_map_pop_elem)) {
6525 		verbose(env, "write into map forbidden\n");
6526 		return -EACCES;
6527 	}
6528 
6529 	if (!BPF_MAP_PTR(aux->map_ptr_state))
6530 		bpf_map_ptr_store(aux, meta->map_ptr,
6531 				  !meta->map_ptr->bypass_spec_v1);
6532 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
6533 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
6534 				  !meta->map_ptr->bypass_spec_v1);
6535 	return 0;
6536 }
6537 
6538 static int
6539 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
6540 		int func_id, int insn_idx)
6541 {
6542 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
6543 	struct bpf_reg_state *regs = cur_regs(env), *reg;
6544 	struct bpf_map *map = meta->map_ptr;
6545 	struct tnum range;
6546 	u64 val;
6547 	int err;
6548 
6549 	if (func_id != BPF_FUNC_tail_call)
6550 		return 0;
6551 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
6552 		verbose(env, "kernel subsystem misconfigured verifier\n");
6553 		return -EINVAL;
6554 	}
6555 
6556 	range = tnum_range(0, map->max_entries - 1);
6557 	reg = &regs[BPF_REG_3];
6558 
6559 	if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
6560 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
6561 		return 0;
6562 	}
6563 
6564 	err = mark_chain_precision(env, BPF_REG_3);
6565 	if (err)
6566 		return err;
6567 
6568 	val = reg->var_off.value;
6569 	if (bpf_map_key_unseen(aux))
6570 		bpf_map_key_store(aux, val);
6571 	else if (!bpf_map_key_poisoned(aux) &&
6572 		  bpf_map_key_immediate(aux) != val)
6573 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
6574 	return 0;
6575 }
6576 
6577 static int check_reference_leak(struct bpf_verifier_env *env)
6578 {
6579 	struct bpf_func_state *state = cur_func(env);
6580 	int i;
6581 
6582 	for (i = 0; i < state->acquired_refs; i++) {
6583 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
6584 			state->refs[i].id, state->refs[i].insn_idx);
6585 	}
6586 	return state->acquired_refs ? -EINVAL : 0;
6587 }
6588 
6589 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
6590 				   struct bpf_reg_state *regs)
6591 {
6592 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
6593 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
6594 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
6595 	int err, fmt_map_off, num_args;
6596 	u64 fmt_addr;
6597 	char *fmt;
6598 
6599 	/* data must be an array of u64 */
6600 	if (data_len_reg->var_off.value % 8)
6601 		return -EINVAL;
6602 	num_args = data_len_reg->var_off.value / 8;
6603 
6604 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
6605 	 * and map_direct_value_addr is set.
6606 	 */
6607 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
6608 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
6609 						  fmt_map_off);
6610 	if (err) {
6611 		verbose(env, "verifier bug\n");
6612 		return -EFAULT;
6613 	}
6614 	fmt = (char *)(long)fmt_addr + fmt_map_off;
6615 
6616 	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
6617 	 * can focus on validating the format specifiers.
6618 	 */
6619 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
6620 	if (err < 0)
6621 		verbose(env, "Invalid format string\n");
6622 
6623 	return err;
6624 }
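
/* Example (editorial sketch, not part of the kernel sources): a bpf_snprintf()
 * call shaped the way the checks above require. 'fmt' lives in a read-only
 * map (ARG_PTR_TO_CONST_STR with a constant offset and a NUL terminator) and
 * 'data' is an array of u64 whose byte size is a multiple of 8.
 *
 *	static const char fmt[] = "pid %d comm %s";
 *	char out[64];
 *	u64 data[2] = { pid, (u64)(long)comm };
 *
 *	bpf_snprintf(out, sizeof(out), fmt, data, sizeof(data));
 */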
6625 
6626 static int check_get_func_ip(struct bpf_verifier_env *env)
6627 {
6628 	enum bpf_prog_type type = resolve_prog_type(env->prog);
6629 	int func_id = BPF_FUNC_get_func_ip;
6630 
6631 	if (type == BPF_PROG_TYPE_TRACING) {
6632 		if (!bpf_prog_has_trampoline(env->prog)) {
6633 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
6634 				func_id_name(func_id), func_id);
6635 			return -ENOTSUPP;
6636 		}
6637 		return 0;
6638 	} else if (type == BPF_PROG_TYPE_KPROBE) {
6639 		return 0;
6640 	}
6641 
6642 	verbose(env, "func %s#%d not supported for program type %d\n",
6643 		func_id_name(func_id), func_id, type);
6644 	return -ENOTSUPP;
6645 }
6646 
6647 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6648 			     int *insn_idx_p)
6649 {
6650 	const struct bpf_func_proto *fn = NULL;
6651 	enum bpf_return_type ret_type;
6652 	enum bpf_type_flag ret_flag;
6653 	struct bpf_reg_state *regs;
6654 	struct bpf_call_arg_meta meta;
6655 	int insn_idx = *insn_idx_p;
6656 	bool changes_data;
6657 	int i, err, func_id;
6658 
6659 	/* find function prototype */
6660 	func_id = insn->imm;
6661 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
6662 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
6663 			func_id);
6664 		return -EINVAL;
6665 	}
6666 
6667 	if (env->ops->get_func_proto)
6668 		fn = env->ops->get_func_proto(func_id, env->prog);
6669 	if (!fn) {
6670 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
6671 			func_id);
6672 		return -EINVAL;
6673 	}
6674 
6675 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
6676 	if (!env->prog->gpl_compatible && fn->gpl_only) {
6677 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
6678 		return -EINVAL;
6679 	}
6680 
6681 	if (fn->allowed && !fn->allowed(env->prog)) {
6682 		verbose(env, "helper call is not allowed in probe\n");
6683 		return -EINVAL;
6684 	}
6685 
6686 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
6687 	changes_data = bpf_helper_changes_pkt_data(fn->func);
6688 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
6689 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
6690 			func_id_name(func_id), func_id);
6691 		return -EINVAL;
6692 	}
6693 
6694 	memset(&meta, 0, sizeof(meta));
6695 	meta.pkt_access = fn->pkt_access;
6696 
6697 	err = check_func_proto(fn, func_id);
6698 	if (err) {
6699 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
6700 			func_id_name(func_id), func_id);
6701 		return err;
6702 	}
6703 
6704 	meta.func_id = func_id;
6705 	/* check args */
6706 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
6707 		err = check_func_arg(env, i, &meta, fn);
6708 		if (err)
6709 			return err;
6710 	}
6711 
6712 	err = record_func_map(env, &meta, func_id, insn_idx);
6713 	if (err)
6714 		return err;
6715 
6716 	err = record_func_key(env, &meta, func_id, insn_idx);
6717 	if (err)
6718 		return err;
6719 
6720 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
6721 	 * is inferred from register state.
6722 	 */
6723 	for (i = 0; i < meta.access_size; i++) {
6724 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
6725 				       BPF_WRITE, -1, false);
6726 		if (err)
6727 			return err;
6728 	}
6729 
6730 	if (is_release_function(func_id)) {
6731 		err = release_reference(env, meta.ref_obj_id);
6732 		if (err) {
6733 			verbose(env, "func %s#%d reference has not been acquired before\n",
6734 				func_id_name(func_id), func_id);
6735 			return err;
6736 		}
6737 	}
6738 
6739 	regs = cur_regs(env);
6740 
6741 	switch (func_id) {
6742 	case BPF_FUNC_tail_call:
6743 		err = check_reference_leak(env);
6744 		if (err) {
6745 			verbose(env, "tail_call would lead to reference leak\n");
6746 			return err;
6747 		}
6748 		break;
6749 	case BPF_FUNC_get_local_storage:
6750 		/* check that flags argument in get_local_storage(map, flags) is 0;
6751 		 * this is required because get_local_storage() can't return an error.
6752 		 */
6753 		if (!register_is_null(&regs[BPF_REG_2])) {
6754 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
6755 			return -EINVAL;
6756 		}
6757 		break;
6758 	case BPF_FUNC_for_each_map_elem:
6759 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6760 					set_map_elem_callback_state);
6761 		break;
6762 	case BPF_FUNC_timer_set_callback:
6763 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6764 					set_timer_callback_state);
6765 		break;
6766 	case BPF_FUNC_find_vma:
6767 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6768 					set_find_vma_callback_state);
6769 		break;
6770 	case BPF_FUNC_snprintf:
6771 		err = check_bpf_snprintf_call(env, regs);
6772 		break;
6773 	case BPF_FUNC_loop:
6774 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
6775 					set_loop_callback_state);
6776 		break;
6777 	}
6778 
6779 	if (err)
6780 		return err;
6781 
6782 	/* reset caller saved regs */
6783 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6784 		mark_reg_not_init(env, regs, caller_saved[i]);
6785 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6786 	}
6787 
6788 	/* helper call returns 64-bit value. */
6789 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6790 
6791 	/* update return register (already marked as written above) */
6792 	ret_type = fn->ret_type;
6793 	ret_flag = type_flag(fn->ret_type);
6794 	if (ret_type == RET_INTEGER) {
6795 		/* sets type to SCALAR_VALUE */
6796 		mark_reg_unknown(env, regs, BPF_REG_0);
6797 	} else if (ret_type == RET_VOID) {
6798 		regs[BPF_REG_0].type = NOT_INIT;
6799 	} else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
6800 		/* There is no offset yet applied, variable or fixed */
6801 		mark_reg_known_zero(env, regs, BPF_REG_0);
6802 		/* remember map_ptr, so that check_map_access()
6803 		 * can check 'value_size' boundary of memory access
6804 		 * to map element returned from bpf_map_lookup_elem()
6805 		 */
6806 		if (meta.map_ptr == NULL) {
6807 			verbose(env,
6808 				"kernel subsystem misconfigured verifier\n");
6809 			return -EINVAL;
6810 		}
6811 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
6812 		regs[BPF_REG_0].map_uid = meta.map_uid;
6813 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
6814 		if (!type_may_be_null(ret_type) &&
6815 		    map_value_has_spin_lock(meta.map_ptr)) {
6816 			regs[BPF_REG_0].id = ++env->id_gen;
6817 		}
6818 	} else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
6819 		mark_reg_known_zero(env, regs, BPF_REG_0);
6820 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
6821 	} else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
6822 		mark_reg_known_zero(env, regs, BPF_REG_0);
6823 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
6824 	} else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
6825 		mark_reg_known_zero(env, regs, BPF_REG_0);
6826 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
6827 	} else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
6828 		mark_reg_known_zero(env, regs, BPF_REG_0);
6829 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
6830 		regs[BPF_REG_0].mem_size = meta.mem_size;
6831 	} else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
6832 		const struct btf_type *t;
6833 
6834 		mark_reg_known_zero(env, regs, BPF_REG_0);
6835 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
6836 		if (!btf_type_is_struct(t)) {
6837 			u32 tsize;
6838 			const struct btf_type *ret;
6839 			const char *tname;
6840 
6841 			/* resolve the type size of ksym. */
6842 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
6843 			if (IS_ERR(ret)) {
6844 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
6845 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
6846 					tname, PTR_ERR(ret));
6847 				return -EINVAL;
6848 			}
6849 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
6850 			regs[BPF_REG_0].mem_size = tsize;
6851 		} else {
6852 			/* MEM_RDONLY may be carried from ret_flag, but it
6853 			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
6854 			 * it will confuse the check of PTR_TO_BTF_ID in
6855 			 * check_mem_access().
6856 			 */
6857 			ret_flag &= ~MEM_RDONLY;
6858 
6859 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
6860 			regs[BPF_REG_0].btf = meta.ret_btf;
6861 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
6862 		}
6863 	} else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
6864 		int ret_btf_id;
6865 
6866 		mark_reg_known_zero(env, regs, BPF_REG_0);
6867 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
6868 		ret_btf_id = *fn->ret_btf_id;
6869 		if (ret_btf_id == 0) {
6870 			verbose(env, "invalid return type %u of func %s#%d\n",
6871 				base_type(ret_type), func_id_name(func_id),
6872 				func_id);
6873 			return -EINVAL;
6874 		}
6875 		/* current BPF helper definitions only come from
6876 		 * built-in code with type IDs from vmlinux BTF
6877 		 */
6878 		regs[BPF_REG_0].btf = btf_vmlinux;
6879 		regs[BPF_REG_0].btf_id = ret_btf_id;
6880 	} else {
6881 		verbose(env, "unknown return type %u of func %s#%d\n",
6882 			base_type(ret_type), func_id_name(func_id), func_id);
6883 		return -EINVAL;
6884 	}
6885 
6886 	if (type_may_be_null(regs[BPF_REG_0].type))
6887 		regs[BPF_REG_0].id = ++env->id_gen;
6888 
6889 	if (is_ptr_cast_function(func_id)) {
6890 		/* For release_reference() */
6891 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
6892 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
6893 		int id = acquire_reference_state(env, insn_idx);
6894 
6895 		if (id < 0)
6896 			return id;
6897 		/* For mark_ptr_or_null_reg() */
6898 		regs[BPF_REG_0].id = id;
6899 		/* For release_reference() */
6900 		regs[BPF_REG_0].ref_obj_id = id;
6901 	}
6902 
6903 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
6904 
6905 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
6906 	if (err)
6907 		return err;
6908 
6909 	if ((func_id == BPF_FUNC_get_stack ||
6910 	     func_id == BPF_FUNC_get_task_stack) &&
6911 	    !env->prog->has_callchain_buf) {
6912 		const char *err_str;
6913 
6914 #ifdef CONFIG_PERF_EVENTS
6915 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
6916 		err_str = "cannot get callchain buffer for func %s#%d\n";
6917 #else
6918 		err = -ENOTSUPP;
6919 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
6920 #endif
6921 		if (err) {
6922 			verbose(env, err_str, func_id_name(func_id), func_id);
6923 			return err;
6924 		}
6925 
6926 		env->prog->has_callchain_buf = true;
6927 	}
6928 
6929 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
6930 		env->prog->call_get_stack = true;
6931 
6932 	if (func_id == BPF_FUNC_get_func_ip) {
6933 		if (check_get_func_ip(env))
6934 			return -ENOTSUPP;
6935 		env->prog->call_get_func_ip = true;
6936 	}
6937 
6938 	if (changes_data)
6939 		clear_all_pkt_pointers(env);
6940 	return 0;
6941 }
6942 
6943 /* mark_btf_func_reg_size() is used when the reg size is determined by
6944  * the BTF func_proto's return value or argument size.
6945  */
6946 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
6947 				   size_t reg_size)
6948 {
6949 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
6950 
6951 	if (regno == BPF_REG_0) {
6952 		/* Function return value */
6953 		reg->live |= REG_LIVE_WRITTEN;
6954 		reg->subreg_def = reg_size == sizeof(u64) ?
6955 			DEF_NOT_SUBREG : env->insn_idx + 1;
6956 	} else {
6957 		/* Function argument */
6958 		if (reg_size == sizeof(u64)) {
6959 			mark_insn_zext(env, reg);
6960 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
6961 		} else {
6962 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
6963 		}
6964 	}
6965 }
6966 
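/* Verify a call to a kernel function (kfunc): resolve the callee in the
 * relevant BTF, check that the kfunc is allowed for this program type, match
 * the arguments via btf_check_kfunc_arg_match(), and model the return value
 * in R0 from the BTF return type (scalar or PTR_TO_BTF_ID, possibly
 * NULL-able and/or reference-acquiring).
 */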
6967 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6968 			    int *insn_idx_p)
6969 {
6970 	const struct btf_type *t, *func, *func_proto, *ptr_type;
6971 	struct bpf_reg_state *regs = cur_regs(env);
6972 	const char *func_name, *ptr_type_name;
6973 	u32 i, nargs, func_id, ptr_type_id;
6974 	int err, insn_idx = *insn_idx_p;
6975 	const struct btf_param *args;
6976 	struct btf *desc_btf;
6977 	bool acq;
6978 
6979 	/* skip for now, but return error when we find this in fixup_kfunc_call */
6980 	if (!insn->imm)
6981 		return 0;
6982 
6983 	desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off);
6984 	if (IS_ERR(desc_btf))
6985 		return PTR_ERR(desc_btf);
6986 
6987 	func_id = insn->imm;
6988 	func = btf_type_by_id(desc_btf, func_id);
6989 	func_name = btf_name_by_offset(desc_btf, func->name_off);
6990 	func_proto = btf_type_by_id(desc_btf, func->type);
6991 
6992 	if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
6993 				      BTF_KFUNC_TYPE_CHECK, func_id)) {
6994 		verbose(env, "calling kernel function %s is not allowed\n",
6995 			func_name);
6996 		return -EACCES;
6997 	}
6998 
6999 	acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7000 					BTF_KFUNC_TYPE_ACQUIRE, func_id);
7001 
7002 	/* Check the arguments */
7003 	err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
7004 	if (err < 0)
7005 		return err;
7006 	/* In case of a release function, btf_check_kfunc_arg_match() returns the
7007 	 * register number of the refcounted PTR_TO_BTF_ID; do the release now.
7008 	 */
7009 	if (err) {
7010 		err = release_reference(env, regs[err].ref_obj_id);
7011 		if (err) {
7012 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
7013 				func_name, func_id);
7014 			return err;
7015 		}
7016 	}
7017 
7018 	for (i = 0; i < CALLER_SAVED_REGS; i++)
7019 		mark_reg_not_init(env, regs, caller_saved[i]);
7020 
7021 	/* Check return type */
7022 	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
7023 
7024 	if (acq && !btf_type_is_ptr(t)) {
7025 		verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
7026 		return -EINVAL;
7027 	}
7028 
7029 	if (btf_type_is_scalar(t)) {
7030 		mark_reg_unknown(env, regs, BPF_REG_0);
7031 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
7032 	} else if (btf_type_is_ptr(t)) {
7033 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
7034 						   &ptr_type_id);
7035 		if (!btf_type_is_struct(ptr_type)) {
7036 			ptr_type_name = btf_name_by_offset(desc_btf,
7037 							   ptr_type->name_off);
7038 			verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
7039 				func_name, btf_type_str(ptr_type),
7040 				ptr_type_name);
7041 			return -EINVAL;
7042 		}
7043 		mark_reg_known_zero(env, regs, BPF_REG_0);
7044 		regs[BPF_REG_0].btf = desc_btf;
7045 		regs[BPF_REG_0].type = PTR_TO_BTF_ID;
7046 		regs[BPF_REG_0].btf_id = ptr_type_id;
7047 		if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7048 					      BTF_KFUNC_TYPE_RET_NULL, func_id)) {
7049 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
7050 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
7051 			regs[BPF_REG_0].id = ++env->id_gen;
7052 		}
7053 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
7054 		if (acq) {
7055 			int id = acquire_reference_state(env, insn_idx);
7056 
7057 			if (id < 0)
7058 				return id;
7059 			regs[BPF_REG_0].id = id;
7060 			regs[BPF_REG_0].ref_obj_id = id;
7061 		}
7062 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
7063 
7064 	nargs = btf_type_vlen(func_proto);
7065 	args = (const struct btf_param *)(func_proto + 1);
7066 	for (i = 0; i < nargs; i++) {
7067 		u32 regno = i + 1;
7068 
7069 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
7070 		if (btf_type_is_ptr(t))
7071 			mark_btf_func_reg_size(env, regno, sizeof(void *));
7072 		else
7073 			/* scalar. ensured by btf_check_kfunc_arg_match() */
7074 			mark_btf_func_reg_size(env, regno, t->size);
7075 	}
7076 
7077 	return 0;
7078 }
7079 
7080 static bool signed_add_overflows(s64 a, s64 b)
7081 {
7082 	/* Do the add in u64, where overflow is well-defined */
7083 	s64 res = (s64)((u64)a + (u64)b);
7084 
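	/* E.g. a == S64_MAX, b == 1: the u64 addition wraps around to S64_MIN,
	 * so b >= 0 while res < a, and the overflow is reported.
	 */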
7085 	if (b < 0)
7086 		return res > a;
7087 	return res < a;
7088 }
7089 
7090 static bool signed_add32_overflows(s32 a, s32 b)
7091 {
7092 	/* Do the add in u32, where overflow is well-defined */
7093 	s32 res = (s32)((u32)a + (u32)b);
7094 
7095 	if (b < 0)
7096 		return res > a;
7097 	return res < a;
7098 }
7099 
7100 static bool signed_sub_overflows(s64 a, s64 b)
7101 {
7102 	/* Do the sub in u64, where overflow is well-defined */
7103 	s64 res = (s64)((u64)a - (u64)b);
7104 
7105 	if (b < 0)
7106 		return res < a;
7107 	return res > a;
7108 }
7109 
7110 static bool signed_sub32_overflows(s32 a, s32 b)
7111 {
7112 	/* Do the sub in u32, where overflow is well-defined */
7113 	s32 res = (s32)((u32)a - (u32)b);
7114 
7115 	if (b < 0)
7116 		return res < a;
7117 	return res > a;
7118 }
7119 
7120 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
7121 				  const struct bpf_reg_state *reg,
7122 				  enum bpf_reg_type type)
7123 {
7124 	bool known = tnum_is_const(reg->var_off);
7125 	s64 val = reg->var_off.value;
7126 	s64 smin = reg->smin_value;
7127 
7128 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
7129 		verbose(env, "math between %s pointer and %lld is not allowed\n",
7130 			reg_type_str(env, type), val);
7131 		return false;
7132 	}
7133 
7134 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
7135 		verbose(env, "%s pointer offset %d is not allowed\n",
7136 			reg_type_str(env, type), reg->off);
7137 		return false;
7138 	}
7139 
7140 	if (smin == S64_MIN) {
7141 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
7142 			reg_type_str(env, type));
7143 		return false;
7144 	}
7145 
7146 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
7147 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
7148 			smin, reg_type_str(env, type));
7149 		return false;
7150 	}
7151 
7152 	return true;
7153 }
7154 
7155 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
7156 {
7157 	return &env->insn_aux_data[env->insn_idx];
7158 }
7159 
7160 enum {
7161 	REASON_BOUNDS	= -1,
7162 	REASON_TYPE	= -2,
7163 	REASON_PATHS	= -3,
7164 	REASON_LIMIT	= -4,
7165 	REASON_STACK	= -5,
7166 };
7167 
7168 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
7169 			      u32 *alu_limit, bool mask_to_left)
7170 {
7171 	u32 max = 0, ptr_limit = 0;
7172 
7173 	switch (ptr_reg->type) {
7174 	case PTR_TO_STACK:
7175 		/* Offset 0 is out-of-bounds, but acceptable start for the
7176 		 * left direction, see BPF_REG_FP. Also, unknown scalar
7177 		 * offset where we would need to deal with min/max bounds is
7178 		 * currently prohibited for unprivileged.
7179 		 */
7180 		max = MAX_BPF_STACK + mask_to_left;
7181 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
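		/* E.g. a constant stack pointer at fp-16 (off == -16,
		 * var_off == 0) yields ptr_limit == 16 here.
		 */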
7182 		break;
7183 	case PTR_TO_MAP_VALUE:
7184 		max = ptr_reg->map_ptr->value_size;
7185 		ptr_limit = (mask_to_left ?
7186 			     ptr_reg->smin_value :
7187 			     ptr_reg->umax_value) + ptr_reg->off;
7188 		break;
7189 	default:
7190 		return REASON_TYPE;
7191 	}
7192 
7193 	if (ptr_limit >= max)
7194 		return REASON_LIMIT;
7195 	*alu_limit = ptr_limit;
7196 	return 0;
7197 }
7198 
7199 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
7200 				    const struct bpf_insn *insn)
7201 {
7202 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
7203 }
7204 
7205 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
7206 				       u32 alu_state, u32 alu_limit)
7207 {
7208 	/* If we arrived here from different branches with different
7209 	 * state or limits to sanitize, then this won't work.
7210 	 */
7211 	if (aux->alu_state &&
7212 	    (aux->alu_state != alu_state ||
7213 	     aux->alu_limit != alu_limit))
7214 		return REASON_PATHS;
7215 
7216 	/* Corresponding fixup done in do_misc_fixups(). */
7217 	aux->alu_state = alu_state;
7218 	aux->alu_limit = alu_limit;
7219 	return 0;
7220 }
7221 
7222 static int sanitize_val_alu(struct bpf_verifier_env *env,
7223 			    struct bpf_insn *insn)
7224 {
7225 	struct bpf_insn_aux_data *aux = cur_aux(env);
7226 
7227 	if (can_skip_alu_sanitation(env, insn))
7228 		return 0;
7229 
7230 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
7231 }
7232 
7233 static bool sanitize_needed(u8 opcode)
7234 {
7235 	return opcode == BPF_ADD || opcode == BPF_SUB;
7236 }
7237 
7238 struct bpf_sanitize_info {
7239 	struct bpf_insn_aux_data aux;
7240 	bool mask_to_left;
7241 };
7242 
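/* Branch off a speculative-execution path starting at next_idx: push a new
 * state onto the verification stack and, if an insn is given, mark its
 * destination (and, for BPF_X, source) register as an unknown scalar on that
 * path, so the speculative path is explored with conservative register state.
 */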
7243 static struct bpf_verifier_state *
7244 sanitize_speculative_path(struct bpf_verifier_env *env,
7245 			  const struct bpf_insn *insn,
7246 			  u32 next_idx, u32 curr_idx)
7247 {
7248 	struct bpf_verifier_state *branch;
7249 	struct bpf_reg_state *regs;
7250 
7251 	branch = push_stack(env, next_idx, curr_idx, true);
7252 	if (branch && insn) {
7253 		regs = branch->frame[branch->curframe]->regs;
7254 		if (BPF_SRC(insn->code) == BPF_K) {
7255 			mark_reg_unknown(env, regs, insn->dst_reg);
7256 		} else if (BPF_SRC(insn->code) == BPF_X) {
7257 			mark_reg_unknown(env, regs, insn->dst_reg);
7258 			mark_reg_unknown(env, regs, insn->src_reg);
7259 		}
7260 	}
7261 	return branch;
7262 }
7263 
7264 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
7265 			    struct bpf_insn *insn,
7266 			    const struct bpf_reg_state *ptr_reg,
7267 			    const struct bpf_reg_state *off_reg,
7268 			    struct bpf_reg_state *dst_reg,
7269 			    struct bpf_sanitize_info *info,
7270 			    const bool commit_window)
7271 {
7272 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
7273 	struct bpf_verifier_state *vstate = env->cur_state;
7274 	bool off_is_imm = tnum_is_const(off_reg->var_off);
7275 	bool off_is_neg = off_reg->smin_value < 0;
7276 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
7277 	u8 opcode = BPF_OP(insn->code);
7278 	u32 alu_state, alu_limit;
7279 	struct bpf_reg_state tmp;
7280 	bool ret;
7281 	int err;
7282 
7283 	if (can_skip_alu_sanitation(env, insn))
7284 		return 0;
7285 
7286 	/* We already marked aux for masking from non-speculative
7287 	 * paths, thus we got here in the first place. We only care
7288 	 * to explore bad access from here.
7289 	 */
7290 	if (vstate->speculative)
7291 		goto do_sim;
7292 
7293 	if (!commit_window) {
7294 		if (!tnum_is_const(off_reg->var_off) &&
7295 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
7296 			return REASON_BOUNDS;
7297 
7298 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
7299 				     (opcode == BPF_SUB && !off_is_neg);
7300 	}
7301 
7302 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
7303 	if (err < 0)
7304 		return err;
7305 
7306 	if (commit_window) {
7307 		/* In commit phase we narrow the masking window based on
7308 		 * the observed pointer move after the simulated operation.
7309 		 */
7310 		alu_state = info->aux.alu_state;
7311 		alu_limit = abs(info->aux.alu_limit - alu_limit);
7312 	} else {
7313 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
7314 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
7315 		alu_state |= ptr_is_dst_reg ?
7316 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
7317 
7318 		/* Limit pruning on unknown scalars to enable deep search for
7319 		 * potential masking differences from other program paths.
7320 		 */
7321 		if (!off_is_imm)
7322 			env->explore_alu_limits = true;
7323 	}
7324 
7325 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
7326 	if (err < 0)
7327 		return err;
7328 do_sim:
7329 	/* If we're in commit phase, we're done here given we already
7330 	 * pushed the truncated dst_reg into the speculative verification
7331 	 * stack.
7332 	 *
7333 	 * Also, when the register is a known constant, we rewrite the register-based
7334 	 * operation to immediate-based, and thus do not need masking (and as
7335 	 * a consequence, do not need to simulate the zero-truncation either).
7336 	 */
7337 	if (commit_window || off_is_imm)
7338 		return 0;
7339 
7340 	/* Simulate and find potential out-of-bounds access under
7341 	 * speculative execution from truncation as a result of
7342 	 * masking when off was not within expected range. If off
7343 	 * sits in dst, then we temporarily need to move ptr there
7344 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
7345 	 * for cases where we use K-based arithmetic in one direction
7346 	 * and truncated reg-based in the other in order to explore
7347 	 * bad access.
7348 	 */
7349 	if (!ptr_is_dst_reg) {
7350 		tmp = *dst_reg;
7351 		*dst_reg = *ptr_reg;
7352 	}
7353 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
7354 					env->insn_idx);
7355 	if (!ptr_is_dst_reg && ret)
7356 		*dst_reg = tmp;
7357 	return !ret ? REASON_STACK : 0;
7358 }
7359 
7360 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
7361 {
7362 	struct bpf_verifier_state *vstate = env->cur_state;
7363 
7364 	/* If we simulate paths under speculation, we don't update the
7365 	 * insn as 'seen' such that when we verify unreachable paths in
7366 	 * the non-speculative domain, sanitize_dead_code() can still
7367 	 * rewrite/sanitize them.
7368 	 */
7369 	if (!vstate->speculative)
7370 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
7371 }
7372 
7373 static int sanitize_err(struct bpf_verifier_env *env,
7374 			const struct bpf_insn *insn, int reason,
7375 			const struct bpf_reg_state *off_reg,
7376 			const struct bpf_reg_state *dst_reg)
7377 {
7378 	static const char *err = "pointer arithmetic with it prohibited for !root";
7379 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
7380 	u32 dst = insn->dst_reg, src = insn->src_reg;
7381 
7382 	switch (reason) {
7383 	case REASON_BOUNDS:
7384 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
7385 			off_reg == dst_reg ? dst : src, err);
7386 		break;
7387 	case REASON_TYPE:
7388 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
7389 			off_reg == dst_reg ? src : dst, err);
7390 		break;
7391 	case REASON_PATHS:
7392 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
7393 			dst, op, err);
7394 		break;
7395 	case REASON_LIMIT:
7396 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
7397 			dst, op, err);
7398 		break;
7399 	case REASON_STACK:
7400 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
7401 			dst, err);
7402 		break;
7403 	default:
7404 		verbose(env, "verifier internal error: unknown reason (%d)\n",
7405 			reason);
7406 		break;
7407 	}
7408 
7409 	return -EACCES;
7410 }
7411 
7412 /* check that stack access falls within stack limits and that 'reg' doesn't
7413  * have a variable offset.
7414  *
7415  * Variable offset is prohibited for unprivileged mode for simplicity since it
7416  * requires corresponding support in Spectre masking for stack ALU.  See also
7417  * retrieve_ptr_limit().
7418  *
7419  *
7420  * 'off' includes 'reg->off'.
7421  */
7422 static int check_stack_access_for_ptr_arithmetic(
7423 				struct bpf_verifier_env *env,
7424 				int regno,
7425 				const struct bpf_reg_state *reg,
7426 				int off)
7427 {
7428 	if (!tnum_is_const(reg->var_off)) {
7429 		char tn_buf[48];
7430 
7431 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7432 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
7433 			regno, tn_buf, off);
7434 		return -EACCES;
7435 	}
7436 
7437 	if (off >= 0 || off < -MAX_BPF_STACK) {
7438 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
7439 			"prohibited for !root; off=%d\n", regno, off);
7440 		return -EACCES;
7441 	}
7442 
7443 	return 0;
7444 }
7445 
7446 static int sanitize_check_bounds(struct bpf_verifier_env *env,
7447 				 const struct bpf_insn *insn,
7448 				 const struct bpf_reg_state *dst_reg)
7449 {
7450 	u32 dst = insn->dst_reg;
7451 
7452 	/* For unprivileged we require that the resulting offset be in bounds,
7453 	 * so that we are able to sanitize the access later on.
7454 	 */
7455 	if (env->bypass_spec_v1)
7456 		return 0;
7457 
7458 	switch (dst_reg->type) {
7459 	case PTR_TO_STACK:
7460 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
7461 					dst_reg->off + dst_reg->var_off.value))
7462 			return -EACCES;
7463 		break;
7464 	case PTR_TO_MAP_VALUE:
7465 		if (check_map_access(env, dst, dst_reg->off, 1, false)) {
7466 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
7467 				"prohibited for !root\n", dst);
7468 			return -EACCES;
7469 		}
7470 		break;
7471 	default:
7472 		break;
7473 	}
7474 
7475 	return 0;
7476 }
7477 
7478 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
7479  * Caller should also handle BPF_MOV case separately.
7480  * If we return -EACCES, caller may want to try again treating pointer as a
7481  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
7482  */
7483 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
7484 				   struct bpf_insn *insn,
7485 				   const struct bpf_reg_state *ptr_reg,
7486 				   const struct bpf_reg_state *off_reg)
7487 {
7488 	struct bpf_verifier_state *vstate = env->cur_state;
7489 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
7490 	struct bpf_reg_state *regs = state->regs, *dst_reg;
7491 	bool known = tnum_is_const(off_reg->var_off);
7492 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
7493 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
7494 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
7495 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
7496 	struct bpf_sanitize_info info = {};
7497 	u8 opcode = BPF_OP(insn->code);
7498 	u32 dst = insn->dst_reg;
7499 	int ret;
7500 
7501 	dst_reg = &regs[dst];
7502 
7503 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
7504 	    smin_val > smax_val || umin_val > umax_val) {
7505 		/* Taint dst register if offset had invalid bounds derived from
7506 		 * e.g. dead branches.
7507 		 */
7508 		__mark_reg_unknown(env, dst_reg);
7509 		return 0;
7510 	}
7511 
7512 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
7513 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
7514 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
7515 			__mark_reg_unknown(env, dst_reg);
7516 			return 0;
7517 		}
7518 
7519 		verbose(env,
7520 			"R%d 32-bit pointer arithmetic prohibited\n",
7521 			dst);
7522 		return -EACCES;
7523 	}
7524 
7525 	if (ptr_reg->type & PTR_MAYBE_NULL) {
7526 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
7527 			dst, reg_type_str(env, ptr_reg->type));
7528 		return -EACCES;
7529 	}
7530 
7531 	switch (base_type(ptr_reg->type)) {
7532 	case CONST_PTR_TO_MAP:
7533 		/* smin_val represents the known value */
7534 		if (known && smin_val == 0 && opcode == BPF_ADD)
7535 			break;
7536 		fallthrough;
7537 	case PTR_TO_PACKET_END:
7538 	case PTR_TO_SOCKET:
7539 	case PTR_TO_SOCK_COMMON:
7540 	case PTR_TO_TCP_SOCK:
7541 	case PTR_TO_XDP_SOCK:
7542 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
7543 			dst, reg_type_str(env, ptr_reg->type));
7544 		return -EACCES;
7545 	default:
7546 		break;
7547 	}
7548 
7549 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
7550 	 * The id may be overwritten later if we create a new variable offset.
7551 	 */
7552 	dst_reg->type = ptr_reg->type;
7553 	dst_reg->id = ptr_reg->id;
7554 
7555 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
7556 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
7557 		return -EINVAL;
7558 
7559 	/* pointer types do not carry 32-bit bounds at the moment. */
7560 	__mark_reg32_unbounded(dst_reg);
7561 
7562 	if (sanitize_needed(opcode)) {
7563 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
7564 				       &info, false);
7565 		if (ret < 0)
7566 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
7567 	}
7568 
7569 	switch (opcode) {
7570 	case BPF_ADD:
7571 		/* We can take a fixed offset as long as it doesn't overflow
7572 		 * the s32 'off' field
7573 		 */
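		/* E.g. a PTR_TO_MAP_VALUE with off == 4 plus a known constant 8
		 * simply becomes the same pointer with off == 12; the bounds and
		 * var_off are copied from ptr_reg unchanged.
		 */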
7574 		if (known && (ptr_reg->off + smin_val ==
7575 			      (s64)(s32)(ptr_reg->off + smin_val))) {
7576 			/* pointer += K.  Accumulate it into fixed offset */
7577 			dst_reg->smin_value = smin_ptr;
7578 			dst_reg->smax_value = smax_ptr;
7579 			dst_reg->umin_value = umin_ptr;
7580 			dst_reg->umax_value = umax_ptr;
7581 			dst_reg->var_off = ptr_reg->var_off;
7582 			dst_reg->off = ptr_reg->off + smin_val;
7583 			dst_reg->raw = ptr_reg->raw;
7584 			break;
7585 		}
7586 		/* A new variable offset is created.  Note that off_reg->off
7587 		 * == 0, since it's a scalar.
7588 		 * dst_reg gets the pointer type and since some positive
7589 		 * integer value was added to the pointer, give it a new 'id'
7590 		 * if it's a PTR_TO_PACKET.
7591 		 * This creates a new 'base' pointer; off_reg (the variable) gets
7592 		 * added into the variable offset, and we copy the fixed offset
7593 		 * from ptr_reg.
7594 		 */
7595 		if (signed_add_overflows(smin_ptr, smin_val) ||
7596 		    signed_add_overflows(smax_ptr, smax_val)) {
7597 			dst_reg->smin_value = S64_MIN;
7598 			dst_reg->smax_value = S64_MAX;
7599 		} else {
7600 			dst_reg->smin_value = smin_ptr + smin_val;
7601 			dst_reg->smax_value = smax_ptr + smax_val;
7602 		}
7603 		if (umin_ptr + umin_val < umin_ptr ||
7604 		    umax_ptr + umax_val < umax_ptr) {
7605 			dst_reg->umin_value = 0;
7606 			dst_reg->umax_value = U64_MAX;
7607 		} else {
7608 			dst_reg->umin_value = umin_ptr + umin_val;
7609 			dst_reg->umax_value = umax_ptr + umax_val;
7610 		}
7611 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
7612 		dst_reg->off = ptr_reg->off;
7613 		dst_reg->raw = ptr_reg->raw;
7614 		if (reg_is_pkt_pointer(ptr_reg)) {
7615 			dst_reg->id = ++env->id_gen;
7616 			/* something was added to pkt_ptr, set range to zero */
7617 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
7618 		}
7619 		break;
7620 	case BPF_SUB:
7621 		if (dst_reg == off_reg) {
7622 			/* scalar -= pointer.  Creates an unknown scalar */
7623 			verbose(env, "R%d tried to subtract pointer from scalar\n",
7624 				dst);
7625 			return -EACCES;
7626 		}
7627 		/* We don't allow subtraction from FP, because (according to
7628 		 * test_verifier.c test "invalid fp arithmetic", JITs might not
7629 		 * be able to deal with it.
7630 		 */
7631 		if (ptr_reg->type == PTR_TO_STACK) {
7632 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
7633 				dst);
7634 			return -EACCES;
7635 		}
7636 		if (known && (ptr_reg->off - smin_val ==
7637 			      (s64)(s32)(ptr_reg->off - smin_val))) {
7638 			/* pointer -= K.  Subtract it from fixed offset */
7639 			dst_reg->smin_value = smin_ptr;
7640 			dst_reg->smax_value = smax_ptr;
7641 			dst_reg->umin_value = umin_ptr;
7642 			dst_reg->umax_value = umax_ptr;
7643 			dst_reg->var_off = ptr_reg->var_off;
7644 			dst_reg->id = ptr_reg->id;
7645 			dst_reg->off = ptr_reg->off - smin_val;
7646 			dst_reg->raw = ptr_reg->raw;
7647 			break;
7648 		}
7649 		/* A new variable offset is created.  If the subtrahend is known
7650 		 * nonnegative, then any reg->range we had before is still good.
7651 		 */
7652 		if (signed_sub_overflows(smin_ptr, smax_val) ||
7653 		    signed_sub_overflows(smax_ptr, smin_val)) {
7654 			/* Overflow possible, we know nothing */
7655 			dst_reg->smin_value = S64_MIN;
7656 			dst_reg->smax_value = S64_MAX;
7657 		} else {
7658 			dst_reg->smin_value = smin_ptr - smax_val;
7659 			dst_reg->smax_value = smax_ptr - smin_val;
7660 		}
7661 		if (umin_ptr < umax_val) {
7662 			/* Overflow possible, we know nothing */
7663 			dst_reg->umin_value = 0;
7664 			dst_reg->umax_value = U64_MAX;
7665 		} else {
7666 			/* Cannot overflow (as long as bounds are consistent) */
7667 			dst_reg->umin_value = umin_ptr - umax_val;
7668 			dst_reg->umax_value = umax_ptr - umin_val;
7669 		}
7670 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
7671 		dst_reg->off = ptr_reg->off;
7672 		dst_reg->raw = ptr_reg->raw;
7673 		if (reg_is_pkt_pointer(ptr_reg)) {
7674 			dst_reg->id = ++env->id_gen;
7675 			/* something was added to pkt_ptr, set range to zero */
7676 			if (smin_val < 0)
7677 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
7678 		}
7679 		break;
7680 	case BPF_AND:
7681 	case BPF_OR:
7682 	case BPF_XOR:
7683 		/* bitwise ops on pointers are troublesome, prohibit. */
7684 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
7685 			dst, bpf_alu_string[opcode >> 4]);
7686 		return -EACCES;
7687 	default:
7688 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
7689 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
7690 			dst, bpf_alu_string[opcode >> 4]);
7691 		return -EACCES;
7692 	}
7693 
7694 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
7695 		return -EINVAL;
7696 
7697 	__update_reg_bounds(dst_reg);
7698 	__reg_deduce_bounds(dst_reg);
7699 	__reg_bound_offset(dst_reg);
7700 
7701 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
7702 		return -EACCES;
7703 	if (sanitize_needed(opcode)) {
7704 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
7705 				       &info, true);
7706 		if (ret < 0)
7707 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
7708 	}
7709 
7710 	return 0;
7711 }
7712 
7713 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
7714 				 struct bpf_reg_state *src_reg)
7715 {
7716 	s32 smin_val = src_reg->s32_min_value;
7717 	s32 smax_val = src_reg->s32_max_value;
7718 	u32 umin_val = src_reg->u32_min_value;
7719 	u32 umax_val = src_reg->u32_max_value;
7720 
7721 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
7722 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
7723 		dst_reg->s32_min_value = S32_MIN;
7724 		dst_reg->s32_max_value = S32_MAX;
7725 	} else {
7726 		dst_reg->s32_min_value += smin_val;
7727 		dst_reg->s32_max_value += smax_val;
7728 	}
7729 	if (dst_reg->u32_min_value + umin_val < umin_val ||
7730 	    dst_reg->u32_max_value + umax_val < umax_val) {
7731 		dst_reg->u32_min_value = 0;
7732 		dst_reg->u32_max_value = U32_MAX;
7733 	} else {
7734 		dst_reg->u32_min_value += umin_val;
7735 		dst_reg->u32_max_value += umax_val;
7736 	}
7737 }
7738 
7739 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
7740 			       struct bpf_reg_state *src_reg)
7741 {
7742 	s64 smin_val = src_reg->smin_value;
7743 	s64 smax_val = src_reg->smax_value;
7744 	u64 umin_val = src_reg->umin_value;
7745 	u64 umax_val = src_reg->umax_value;
7746 
7747 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
7748 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
7749 		dst_reg->smin_value = S64_MIN;
7750 		dst_reg->smax_value = S64_MAX;
7751 	} else {
7752 		dst_reg->smin_value += smin_val;
7753 		dst_reg->smax_value += smax_val;
7754 	}
7755 	if (dst_reg->umin_value + umin_val < umin_val ||
7756 	    dst_reg->umax_value + umax_val < umax_val) {
7757 		dst_reg->umin_value = 0;
7758 		dst_reg->umax_value = U64_MAX;
7759 	} else {
7760 		dst_reg->umin_value += umin_val;
7761 		dst_reg->umax_value += umax_val;
7762 	}
7763 }
7764 
7765 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
7766 				 struct bpf_reg_state *src_reg)
7767 {
7768 	s32 smin_val = src_reg->s32_min_value;
7769 	s32 smax_val = src_reg->s32_max_value;
7770 	u32 umin_val = src_reg->u32_min_value;
7771 	u32 umax_val = src_reg->u32_max_value;
7772 
7773 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
7774 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
7775 		/* Overflow possible, we know nothing */
7776 		dst_reg->s32_min_value = S32_MIN;
7777 		dst_reg->s32_max_value = S32_MAX;
7778 	} else {
7779 		dst_reg->s32_min_value -= smax_val;
7780 		dst_reg->s32_max_value -= smin_val;
7781 	}
7782 	if (dst_reg->u32_min_value < umax_val) {
7783 		/* Overflow possible, we know nothing */
7784 		dst_reg->u32_min_value = 0;
7785 		dst_reg->u32_max_value = U32_MAX;
7786 	} else {
7787 		/* Cannot overflow (as long as bounds are consistent) */
7788 		dst_reg->u32_min_value -= umax_val;
7789 		dst_reg->u32_max_value -= umin_val;
7790 	}
7791 }
7792 
7793 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
7794 			       struct bpf_reg_state *src_reg)
7795 {
7796 	s64 smin_val = src_reg->smin_value;
7797 	s64 smax_val = src_reg->smax_value;
7798 	u64 umin_val = src_reg->umin_value;
7799 	u64 umax_val = src_reg->umax_value;
7800 
7801 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
7802 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
7803 		/* Overflow possible, we know nothing */
7804 		dst_reg->smin_value = S64_MIN;
7805 		dst_reg->smax_value = S64_MAX;
7806 	} else {
7807 		dst_reg->smin_value -= smax_val;
7808 		dst_reg->smax_value -= smin_val;
7809 	}
7810 	if (dst_reg->umin_value < umax_val) {
7811 		/* Overflow possible, we know nothing */
7812 		dst_reg->umin_value = 0;
7813 		dst_reg->umax_value = U64_MAX;
7814 	} else {
7815 		/* Cannot overflow (as long as bounds are consistent) */
7816 		dst_reg->umin_value -= umax_val;
7817 		dst_reg->umax_value -= umin_val;
7818 	}
7819 }
7820 
7821 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
7822 				 struct bpf_reg_state *src_reg)
7823 {
7824 	s32 smin_val = src_reg->s32_min_value;
7825 	u32 umin_val = src_reg->u32_min_value;
7826 	u32 umax_val = src_reg->u32_max_value;
7827 
7828 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
7829 		/* Ain't nobody got time to multiply that sign */
7830 		__mark_reg32_unbounded(dst_reg);
7831 		return;
7832 	}
7833 	/* Both values are positive, so we can work with unsigned and
7834 	 * copy the result to signed (unless it exceeds S32_MAX).
7835 	 */
7836 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
7837 		/* Potential overflow, we know nothing */
7838 		__mark_reg32_unbounded(dst_reg);
7839 		return;
7840 	}
7841 	dst_reg->u32_min_value *= umin_val;
7842 	dst_reg->u32_max_value *= umax_val;
7843 	if (dst_reg->u32_max_value > S32_MAX) {
7844 		/* Overflow possible, we know nothing */
7845 		dst_reg->s32_min_value = S32_MIN;
7846 		dst_reg->s32_max_value = S32_MAX;
7847 	} else {
7848 		dst_reg->s32_min_value = dst_reg->u32_min_value;
7849 		dst_reg->s32_max_value = dst_reg->u32_max_value;
7850 	}
7851 }
7852 
7853 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
7854 			       struct bpf_reg_state *src_reg)
7855 {
7856 	s64 smin_val = src_reg->smin_value;
7857 	u64 umin_val = src_reg->umin_value;
7858 	u64 umax_val = src_reg->umax_value;
7859 
7860 	if (smin_val < 0 || dst_reg->smin_value < 0) {
7861 		/* Ain't nobody got time to multiply that sign */
7862 		__mark_reg64_unbounded(dst_reg);
7863 		return;
7864 	}
7865 	/* Both values are positive, so we can work with unsigned and
7866 	 * copy the result to signed (unless it exceeds S64_MAX).
7867 	 */
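	/* Requiring both operands to be at most U32_MAX below guarantees that
	 * the u64 products cannot wrap: (2^32 - 1) * (2^32 - 1) < 2^64.
	 */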
7868 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
7869 		/* Potential overflow, we know nothing */
7870 		__mark_reg64_unbounded(dst_reg);
7871 		return;
7872 	}
7873 	dst_reg->umin_value *= umin_val;
7874 	dst_reg->umax_value *= umax_val;
7875 	if (dst_reg->umax_value > S64_MAX) {
7876 		/* Overflow possible, we know nothing */
7877 		dst_reg->smin_value = S64_MIN;
7878 		dst_reg->smax_value = S64_MAX;
7879 	} else {
7880 		dst_reg->smin_value = dst_reg->umin_value;
7881 		dst_reg->smax_value = dst_reg->umax_value;
7882 	}
7883 }
7884 
7885 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
7886 				 struct bpf_reg_state *src_reg)
7887 {
7888 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
7889 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
7890 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
7891 	s32 smin_val = src_reg->s32_min_value;
7892 	u32 umax_val = src_reg->u32_max_value;
7893 
7894 	if (src_known && dst_known) {
7895 		__mark_reg32_known(dst_reg, var32_off.value);
7896 		return;
7897 	}
7898 
7899 	/* We get our minimum from the var_off, since that's inherently
7900 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
7901 	 */
7902 	dst_reg->u32_min_value = var32_off.value;
7903 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
7904 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
7905 		/* Lose signed bounds when ANDing negative numbers,
7906 		 * ain't nobody got time for that.
7907 		 */
7908 		dst_reg->s32_min_value = S32_MIN;
7909 		dst_reg->s32_max_value = S32_MAX;
7910 	} else {
7911 		/* ANDing two positives gives a positive, so safe to
7912 		 * cast result into s32.
7913 		 */
7914 		dst_reg->s32_min_value = dst_reg->u32_min_value;
7915 		dst_reg->s32_max_value = dst_reg->u32_max_value;
7916 	}
7917 }
7918 
7919 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
7920 			       struct bpf_reg_state *src_reg)
7921 {
7922 	bool src_known = tnum_is_const(src_reg->var_off);
7923 	bool dst_known = tnum_is_const(dst_reg->var_off);
7924 	s64 smin_val = src_reg->smin_value;
7925 	u64 umax_val = src_reg->umax_value;
7926 
7927 	if (src_known && dst_known) {
7928 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
7929 		return;
7930 	}
7931 
7932 	/* We get our minimum from the var_off, since that's inherently
7933 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
7934 	 */
7935 	dst_reg->umin_value = dst_reg->var_off.value;
7936 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
7937 	if (dst_reg->smin_value < 0 || smin_val < 0) {
7938 		/* Lose signed bounds when ANDing negative numbers,
7939 		 * ain't nobody got time for that.
7940 		 */
7941 		dst_reg->smin_value = S64_MIN;
7942 		dst_reg->smax_value = S64_MAX;
7943 	} else {
7944 		/* ANDing two positives gives a positive, so safe to
7945 		 * cast result into s64.
7946 		 */
7947 		dst_reg->smin_value = dst_reg->umin_value;
7948 		dst_reg->smax_value = dst_reg->umax_value;
7949 	}
7950 	/* We may learn something more from the var_off */
7951 	__update_reg_bounds(dst_reg);
7952 }
7953 
7954 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
7955 				struct bpf_reg_state *src_reg)
7956 {
7957 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
7958 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
7959 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
7960 	s32 smin_val = src_reg->s32_min_value;
7961 	u32 umin_val = src_reg->u32_min_value;
7962 
7963 	if (src_known && dst_known) {
7964 		__mark_reg32_known(dst_reg, var32_off.value);
7965 		return;
7966 	}
7967 
7968 	/* We get our maximum from the var_off, and our minimum is the
7969 	 * maximum of the operands' minima
7970 	 */
7971 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
7972 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
7973 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
7974 		/* Lose signed bounds when ORing negative numbers,
7975 		 * ain't nobody got time for that.
7976 		 */
7977 		dst_reg->s32_min_value = S32_MIN;
7978 		dst_reg->s32_max_value = S32_MAX;
7979 	} else {
7980 		/* ORing two positives gives a positive, so safe to
7981 		 * cast result into s32.
7982 		 */
7983 		dst_reg->s32_min_value = dst_reg->u32_min_value;
7984 		dst_reg->s32_max_value = dst_reg->u32_max_value;
7985 	}
7986 }
7987 
7988 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
7989 			      struct bpf_reg_state *src_reg)
7990 {
7991 	bool src_known = tnum_is_const(src_reg->var_off);
7992 	bool dst_known = tnum_is_const(dst_reg->var_off);
7993 	s64 smin_val = src_reg->smin_value;
7994 	u64 umin_val = src_reg->umin_value;
7995 
7996 	if (src_known && dst_known) {
7997 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
7998 		return;
7999 	}
8000 
8001 	/* We get our maximum from the var_off, and our minimum is the
8002 	 * maximum of the operands' minima
8003 	 */
8004 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
8005 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
8006 	if (dst_reg->smin_value < 0 || smin_val < 0) {
8007 		/* Lose signed bounds when ORing negative numbers,
8008 		 * ain't nobody got time for that.
8009 		 */
8010 		dst_reg->smin_value = S64_MIN;
8011 		dst_reg->smax_value = S64_MAX;
8012 	} else {
8013 		/* ORing two positives gives a positive, so safe to
8014 		 * cast result into s64.
8015 		 */
8016 		dst_reg->smin_value = dst_reg->umin_value;
8017 		dst_reg->smax_value = dst_reg->umax_value;
8018 	}
8019 	/* We may learn something more from the var_off */
8020 	__update_reg_bounds(dst_reg);
8021 }
8022 
8023 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
8024 				 struct bpf_reg_state *src_reg)
8025 {
8026 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
8027 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
8028 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
8029 	s32 smin_val = src_reg->s32_min_value;
8030 
8031 	if (src_known && dst_known) {
8032 		__mark_reg32_known(dst_reg, var32_off.value);
8033 		return;
8034 	}
8035 
8036 	/* We get both minimum and maximum from the var32_off. */
8037 	dst_reg->u32_min_value = var32_off.value;
8038 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
8039 
8040 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
8041 		/* XORing two positive sign numbers gives a positive,
8042 		 * so safe to cast u32 result into s32.
8043 		 */
8044 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8045 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8046 	} else {
8047 		dst_reg->s32_min_value = S32_MIN;
8048 		dst_reg->s32_max_value = S32_MAX;
8049 	}
8050 }
8051 
8052 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
8053 			       struct bpf_reg_state *src_reg)
8054 {
8055 	bool src_known = tnum_is_const(src_reg->var_off);
8056 	bool dst_known = tnum_is_const(dst_reg->var_off);
8057 	s64 smin_val = src_reg->smin_value;
8058 
8059 	if (src_known && dst_known) {
8060 		/* dst_reg->var_off.value has been updated earlier */
8061 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
8062 		return;
8063 	}
8064 
8065 	/* We get both minimum and maximum from the var_off. */
8066 	dst_reg->umin_value = dst_reg->var_off.value;
8067 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
8068 
8069 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
8070 		/* XORing two positive sign numbers gives a positive,
8071 		 * so safe to cast u64 result into s64.
8072 		 */
8073 		dst_reg->smin_value = dst_reg->umin_value;
8074 		dst_reg->smax_value = dst_reg->umax_value;
8075 	} else {
8076 		dst_reg->smin_value = S64_MIN;
8077 		dst_reg->smax_value = S64_MAX;
8078 	}
8079 
8080 	__update_reg_bounds(dst_reg);
8081 }
8082 
8083 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8084 				   u64 umin_val, u64 umax_val)
8085 {
8086 	/* We lose all sign bit information (except what we can pick
8087 	 * up from var_off)
8088 	 */
8089 	dst_reg->s32_min_value = S32_MIN;
8090 	dst_reg->s32_max_value = S32_MAX;
8091 	/* If we might shift our top bit out, then we know nothing */
8092 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
8093 		dst_reg->u32_min_value = 0;
8094 		dst_reg->u32_max_value = U32_MAX;
8095 	} else {
8096 		dst_reg->u32_min_value <<= umin_val;
8097 		dst_reg->u32_max_value <<= umax_val;
8098 	}
8099 }
8100 
8101 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8102 				 struct bpf_reg_state *src_reg)
8103 {
8104 	u32 umax_val = src_reg->u32_max_value;
8105 	u32 umin_val = src_reg->u32_min_value;
8106 	/* u32 alu operation will zext upper bits */
8107 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8108 
8109 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8110 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
8111 	/* Not strictly required, but to be careful, mark the reg64 bounds as
8112 	 * unknown so that we are forced to pick them up from the tnum and zext
8113 	 * later; if some path skips this step we are still safe.
8114 	 */
8115 	__mark_reg64_unbounded(dst_reg);
8116 	__update_reg32_bounds(dst_reg);
8117 }
8118 
8119 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
8120 				   u64 umin_val, u64 umax_val)
8121 {
8122 	/* Special case <<32 because it is a common compiler pattern to sign
8123 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
8124 	 * positive we know this shift will also be positive so we can track
8125 	 * bounds correctly. Otherwise we lose all sign bit information except
8126 	 * what we can pick up from var_off. Perhaps we can generalize this
8127 	 * later to shifts of any length.
8128 	 */
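	/* E.g. with 32-bit signed bounds [0, 100], shifting left by exactly 32
	 * yields 64-bit signed bounds [0, 100 << 32], which a subsequent s>>32
	 * can turn back into [0, 100].
	 */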
8129 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
8130 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
8131 	else
8132 		dst_reg->smax_value = S64_MAX;
8133 
8134 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
8135 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
8136 	else
8137 		dst_reg->smin_value = S64_MIN;
8138 
8139 	/* If we might shift our top bit out, then we know nothing */
8140 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
8141 		dst_reg->umin_value = 0;
8142 		dst_reg->umax_value = U64_MAX;
8143 	} else {
8144 		dst_reg->umin_value <<= umin_val;
8145 		dst_reg->umax_value <<= umax_val;
8146 	}
8147 }
8148 
8149 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
8150 			       struct bpf_reg_state *src_reg)
8151 {
8152 	u64 umax_val = src_reg->umax_value;
8153 	u64 umin_val = src_reg->umin_value;
8154 
8155 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
8156 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
8157 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8158 
8159 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
8160 	/* We may learn something more from the var_off */
8161 	__update_reg_bounds(dst_reg);
8162 }
8163 
8164 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
8165 				 struct bpf_reg_state *src_reg)
8166 {
8167 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8168 	u32 umax_val = src_reg->u32_max_value;
8169 	u32 umin_val = src_reg->u32_min_value;
8170 
8171 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8172 	 * be negative, then either:
8173 	 * 1) src_reg might be zero, so the sign bit of the result is
8174 	 *    unknown, so we lose our signed bounds
8175 	 * 2) it's known negative, thus the unsigned bounds capture the
8176 	 *    signed bounds
8177 	 * 3) the signed bounds cross zero, so they tell us nothing
8178 	 *    about the result
8179 	 * If the value in dst_reg is known nonnegative, then again the
8180 	 * unsigned bounds capture the signed bounds.
8181 	 * Thus, in all cases it suffices to blow away our signed bounds
8182 	 * and rely on inferring new ones from the unsigned bounds and
8183 	 * var_off of the result.
8184 	 */
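	/* E.g. if dst_reg's signed range is [-1, 1], then after an unsigned
	 * >> 1 the value -1 (0xffffffff) becomes 0x7fffffff, so the old
	 * signed bounds say nothing useful about the result.
	 */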
8185 	dst_reg->s32_min_value = S32_MIN;
8186 	dst_reg->s32_max_value = S32_MAX;
8187 
8188 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
8189 	dst_reg->u32_min_value >>= umax_val;
8190 	dst_reg->u32_max_value >>= umin_val;
8191 
8192 	__mark_reg64_unbounded(dst_reg);
8193 	__update_reg32_bounds(dst_reg);
8194 }
8195 
8196 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
8197 			       struct bpf_reg_state *src_reg)
8198 {
8199 	u64 umax_val = src_reg->umax_value;
8200 	u64 umin_val = src_reg->umin_value;
8201 
8202 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8203 	 * be negative, then either:
8204 	 * 1) src_reg might be zero, so the sign bit of the result is
8205 	 *    unknown, so we lose our signed bounds
8206 	 * 2) it's known negative, thus the unsigned bounds capture the
8207 	 *    signed bounds
8208 	 * 3) the signed bounds cross zero, so they tell us nothing
8209 	 *    about the result
8210 	 * If the value in dst_reg is known nonnegative, then again the
8211 	 * unsigned bounds capture the signed bounds.
8212 	 * Thus, in all cases it suffices to blow away our signed bounds
8213 	 * and rely on inferring new ones from the unsigned bounds and
8214 	 * var_off of the result.
8215 	 */
8216 	dst_reg->smin_value = S64_MIN;
8217 	dst_reg->smax_value = S64_MAX;
8218 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
8219 	dst_reg->umin_value >>= umax_val;
8220 	dst_reg->umax_value >>= umin_val;
8221 
8222 	/* It's not easy to operate on alu32 bounds here because it depends
8223 	 * on bits being shifted in. Take the easy way out and mark unbounded
8224 	 * so we can recalculate later from tnum.
8225 	 */
8226 	__mark_reg32_unbounded(dst_reg);
8227 	__update_reg_bounds(dst_reg);
8228 }
8229 
8230 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
8231 				  struct bpf_reg_state *src_reg)
8232 {
8233 	u64 umin_val = src_reg->u32_min_value;
8234 
8235 	/* Upon reaching here, src_known is true and
8236 	 * umax_val is equal to umin_val.
8237 	 */
8238 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
8239 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
8240 
8241 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
8242 
8243 	/* blow away the dst_reg umin_value/umax_value and rely on
8244 	 * dst_reg var_off to refine the result.
8245 	 */
8246 	dst_reg->u32_min_value = 0;
8247 	dst_reg->u32_max_value = U32_MAX;
8248 
8249 	__mark_reg64_unbounded(dst_reg);
8250 	__update_reg32_bounds(dst_reg);
8251 }
8252 
8253 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
8254 				struct bpf_reg_state *src_reg)
8255 {
8256 	u64 umin_val = src_reg->umin_value;
8257 
8258 	/* Upon reaching here, src_known is true and umax_val is equal
8259 	 * to umin_val.
8260 	 */
8261 	dst_reg->smin_value >>= umin_val;
8262 	dst_reg->smax_value >>= umin_val;
8263 
8264 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
8265 
8266 	/* blow away the dst_reg umin_value/umax_value and rely on
8267 	 * dst_reg var_off to refine the result.
8268 	 */
8269 	dst_reg->umin_value = 0;
8270 	dst_reg->umax_value = U64_MAX;
8271 
8272 	/* It's not easy to operate on alu32 bounds here because it depends
8273 	 * on bits being shifted in from the upper 32 bits. Take the easy way out
8274 	 * and mark unbounded so we can recalculate later from tnum.
8275 	 */
8276 	__mark_reg32_unbounded(dst_reg);
8277 	__update_reg_bounds(dst_reg);
8278 }
8279 
8280 /* WARNING: This function does calculations on 64-bit values, but the actual
8281  * execution may occur on 32-bit values. Therefore, things like bitshifts
8282  * need extra checks in the 32-bit case.
8283  */
8284 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
8285 				      struct bpf_insn *insn,
8286 				      struct bpf_reg_state *dst_reg,
8287 				      struct bpf_reg_state src_reg)
8288 {
8289 	struct bpf_reg_state *regs = cur_regs(env);
8290 	u8 opcode = BPF_OP(insn->code);
8291 	bool src_known;
8292 	s64 smin_val, smax_val;
8293 	u64 umin_val, umax_val;
8294 	s32 s32_min_val, s32_max_val;
8295 	u32 u32_min_val, u32_max_val;
8296 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
8297 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
8298 	int ret;
8299 
8300 	smin_val = src_reg.smin_value;
8301 	smax_val = src_reg.smax_value;
8302 	umin_val = src_reg.umin_value;
8303 	umax_val = src_reg.umax_value;
8304 
8305 	s32_min_val = src_reg.s32_min_value;
8306 	s32_max_val = src_reg.s32_max_value;
8307 	u32_min_val = src_reg.u32_min_value;
8308 	u32_max_val = src_reg.u32_max_value;
8309 
8310 	if (alu32) {
8311 		src_known = tnum_subreg_is_const(src_reg.var_off);
8312 		if ((src_known &&
8313 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
8314 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
8315 			/* Taint dst register if offset had invalid bounds
8316 			 * derived from e.g. dead branches.
8317 			 */
8318 			__mark_reg_unknown(env, dst_reg);
8319 			return 0;
8320 		}
8321 	} else {
8322 		src_known = tnum_is_const(src_reg.var_off);
8323 		if ((src_known &&
8324 		     (smin_val != smax_val || umin_val != umax_val)) ||
8325 		    smin_val > smax_val || umin_val > umax_val) {
8326 			/* Taint dst register if offset had invalid bounds
8327 			 * derived from e.g. dead branches.
8328 			 */
8329 			__mark_reg_unknown(env, dst_reg);
8330 			return 0;
8331 		}
8332 	}
8333 
8334 	if (!src_known &&
8335 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
8336 		__mark_reg_unknown(env, dst_reg);
8337 		return 0;
8338 	}
8339 
8340 	if (sanitize_needed(opcode)) {
8341 		ret = sanitize_val_alu(env, insn);
8342 		if (ret < 0)
8343 			return sanitize_err(env, insn, ret, NULL, NULL);
8344 	}
8345 
8346 	/* Calculate signed/unsigned bounds and tnum for alu32 and alu64 bit ops.
8347 	 * There are two classes of instructions: for the first class we track
8348 	 * both alu32 and alu64 signed/unsigned bounds independently; this
8349 	 * provides the greatest amount of precision when alu operations are
8350 	 * mixed with jmp32 operations. These operations are BPF_ADD, BPF_SUB,
8351 	 * BPF_MUL, BPF_AND, BPF_OR, and BPF_XOR, which have easy-to-understand
8352 	 * and easy-to-calculate behavior in both 32-bit and 64-bit alu ops.
8353 	 * See the alu32 verifier tests for examples. The second class of
8354 	 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, is not so easy with
8355 	 * regard to tracking signed/unsigned bounds, because bits may cross
8356 	 * subreg boundaries in the alu64 case. When this happens we mark the
8357 	 * reg unbounded in the subreg bound space and use the resulting tnum
8358 	 * to calculate an approximation of the signed/unsigned bounds.
8359 	 */
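	/* Sketch of the subreg problem for the second class: in an alu64
	 * BPF_RSH, a value such as 0x1_0000_0000 shifted right by 1 becomes
	 * 0x8000_0000, i.e. a bit moves from the upper subreg into the lower
	 * one, so the old 32-bit bounds say nothing about the new ones; hence
	 * the shift helpers below mark the subreg bounds unbounded and
	 * recompute them from the tnum.
	 */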
8360 	switch (opcode) {
8361 	case BPF_ADD:
8362 		scalar32_min_max_add(dst_reg, &src_reg);
8363 		scalar_min_max_add(dst_reg, &src_reg);
8364 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
8365 		break;
8366 	case BPF_SUB:
8367 		scalar32_min_max_sub(dst_reg, &src_reg);
8368 		scalar_min_max_sub(dst_reg, &src_reg);
8369 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
8370 		break;
8371 	case BPF_MUL:
8372 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
8373 		scalar32_min_max_mul(dst_reg, &src_reg);
8374 		scalar_min_max_mul(dst_reg, &src_reg);
8375 		break;
8376 	case BPF_AND:
8377 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
8378 		scalar32_min_max_and(dst_reg, &src_reg);
8379 		scalar_min_max_and(dst_reg, &src_reg);
8380 		break;
8381 	case BPF_OR:
8382 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
8383 		scalar32_min_max_or(dst_reg, &src_reg);
8384 		scalar_min_max_or(dst_reg, &src_reg);
8385 		break;
8386 	case BPF_XOR:
8387 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
8388 		scalar32_min_max_xor(dst_reg, &src_reg);
8389 		scalar_min_max_xor(dst_reg, &src_reg);
8390 		break;
8391 	case BPF_LSH:
8392 		if (umax_val >= insn_bitness) {
8393 			/* Shifts greater than 31 or 63 are undefined.
8394 			 * This includes shifts by a negative number.
8395 			 */
8396 			mark_reg_unknown(env, regs, insn->dst_reg);
8397 			break;
8398 		}
8399 		if (alu32)
8400 			scalar32_min_max_lsh(dst_reg, &src_reg);
8401 		else
8402 			scalar_min_max_lsh(dst_reg, &src_reg);
8403 		break;
8404 	case BPF_RSH:
8405 		if (umax_val >= insn_bitness) {
8406 			/* Shifts greater than 31 or 63 are undefined.
8407 			 * This includes shifts by a negative number.
8408 			 */
8409 			mark_reg_unknown(env, regs, insn->dst_reg);
8410 			break;
8411 		}
8412 		if (alu32)
8413 			scalar32_min_max_rsh(dst_reg, &src_reg);
8414 		else
8415 			scalar_min_max_rsh(dst_reg, &src_reg);
8416 		break;
8417 	case BPF_ARSH:
8418 		if (umax_val >= insn_bitness) {
8419 			/* Shifts greater than 31 or 63 are undefined.
8420 			 * This includes shifts by a negative number.
8421 			 */
8422 			mark_reg_unknown(env, regs, insn->dst_reg);
8423 			break;
8424 		}
8425 		if (alu32)
8426 			scalar32_min_max_arsh(dst_reg, &src_reg);
8427 		else
8428 			scalar_min_max_arsh(dst_reg, &src_reg);
8429 		break;
8430 	default:
8431 		mark_reg_unknown(env, regs, insn->dst_reg);
8432 		break;
8433 	}
8434 
8435 	/* ALU32 ops are zero-extended into the 64-bit register */
8436 	if (alu32)
8437 		zext_32_to_64(dst_reg);
8438 
8439 	__update_reg_bounds(dst_reg);
8440 	__reg_deduce_bounds(dst_reg);
8441 	__reg_bound_offset(dst_reg);
8442 	return 0;
8443 }
8444 
8445 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
8446  * and var_off.
8447  */
8448 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
8449 				   struct bpf_insn *insn)
8450 {
8451 	struct bpf_verifier_state *vstate = env->cur_state;
8452 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
8453 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
8454 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
8455 	u8 opcode = BPF_OP(insn->code);
8456 	int err;
8457 
8458 	dst_reg = &regs[insn->dst_reg];
8459 	src_reg = NULL;
8460 	if (dst_reg->type != SCALAR_VALUE)
8461 		ptr_reg = dst_reg;
8462 	else
8463 		/* Make sure ID is cleared otherwise dst_reg min/max could be
8464 		 * incorrectly propagated into other registers by find_equal_scalars()
8465 		 */
8466 		dst_reg->id = 0;
8467 	if (BPF_SRC(insn->code) == BPF_X) {
8468 		src_reg = &regs[insn->src_reg];
8469 		if (src_reg->type != SCALAR_VALUE) {
8470 			if (dst_reg->type != SCALAR_VALUE) {
8471 				/* Combining two pointers by any ALU op yields
8472 				 * an arbitrary scalar. Disallow all math except
8473 				 * pointer subtraction
8474 				 */
8475 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
8476 					mark_reg_unknown(env, regs, insn->dst_reg);
8477 					return 0;
8478 				}
8479 				verbose(env, "R%d pointer %s pointer prohibited\n",
8480 					insn->dst_reg,
8481 					bpf_alu_string[opcode >> 4]);
8482 				return -EACCES;
8483 			} else {
8484 				/* scalar += pointer
8485 				 * This is legal, but we have to reverse our
8486 				 * src/dest handling in computing the range
8487 				 */
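				/* Illustrative example:
				 *   r1 = 4
				 *   r1 += r10
				 * is a scalar += pointer, so it is evaluated
				 * with the operands swapped, i.e. as the
				 * frame pointer plus a known offset of 4.
				 */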
8488 				err = mark_chain_precision(env, insn->dst_reg);
8489 				if (err)
8490 					return err;
8491 				return adjust_ptr_min_max_vals(env, insn,
8492 							       src_reg, dst_reg);
8493 			}
8494 		} else if (ptr_reg) {
8495 			/* pointer += scalar */
8496 			err = mark_chain_precision(env, insn->src_reg);
8497 			if (err)
8498 				return err;
8499 			return adjust_ptr_min_max_vals(env, insn,
8500 						       dst_reg, src_reg);
8501 		}
8502 	} else {
8503 		/* Pretend the src is a reg with a known value, since we only
8504 		 * need to be able to read from this state.
8505 		 */
8506 		off_reg.type = SCALAR_VALUE;
8507 		__mark_reg_known(&off_reg, insn->imm);
8508 		src_reg = &off_reg;
8509 		if (ptr_reg) /* pointer += K */
8510 			return adjust_ptr_min_max_vals(env, insn,
8511 						       ptr_reg, src_reg);
8512 	}
8513 
8514 	/* Got here implies adding two SCALAR_VALUEs */
8515 	if (WARN_ON_ONCE(ptr_reg)) {
8516 		print_verifier_state(env, state, true);
8517 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
8518 		return -EINVAL;
8519 	}
8520 	if (WARN_ON(!src_reg)) {
8521 		print_verifier_state(env, state, true);
8522 		verbose(env, "verifier internal error: no src_reg\n");
8523 		return -EINVAL;
8524 	}
8525 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
8526 }
8527 
8528 /* check validity of 32-bit and 64-bit arithmetic operations */
8529 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
8530 {
8531 	struct bpf_reg_state *regs = cur_regs(env);
8532 	u8 opcode = BPF_OP(insn->code);
8533 	int err;
8534 
8535 	if (opcode == BPF_END || opcode == BPF_NEG) {
8536 		if (opcode == BPF_NEG) {
8537 			if (BPF_SRC(insn->code) != 0 ||
8538 			    insn->src_reg != BPF_REG_0 ||
8539 			    insn->off != 0 || insn->imm != 0) {
8540 				verbose(env, "BPF_NEG uses reserved fields\n");
8541 				return -EINVAL;
8542 			}
8543 		} else {
8544 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
8545 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
8546 			    BPF_CLASS(insn->code) == BPF_ALU64) {
8547 				verbose(env, "BPF_END uses reserved fields\n");
8548 				return -EINVAL;
8549 			}
8550 		}
8551 
8552 		/* check src operand */
8553 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
8554 		if (err)
8555 			return err;
8556 
8557 		if (is_pointer_value(env, insn->dst_reg)) {
8558 			verbose(env, "R%d pointer arithmetic prohibited\n",
8559 				insn->dst_reg);
8560 			return -EACCES;
8561 		}
8562 
8563 		/* check dest operand */
8564 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
8565 		if (err)
8566 			return err;
8567 
8568 	} else if (opcode == BPF_MOV) {
8569 
8570 		if (BPF_SRC(insn->code) == BPF_X) {
8571 			if (insn->imm != 0 || insn->off != 0) {
8572 				verbose(env, "BPF_MOV uses reserved fields\n");
8573 				return -EINVAL;
8574 			}
8575 
8576 			/* check src operand */
8577 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
8578 			if (err)
8579 				return err;
8580 		} else {
8581 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
8582 				verbose(env, "BPF_MOV uses reserved fields\n");
8583 				return -EINVAL;
8584 			}
8585 		}
8586 
8587 		/* check dest operand, mark as required later */
8588 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
8589 		if (err)
8590 			return err;
8591 
8592 		if (BPF_SRC(insn->code) == BPF_X) {
8593 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
8594 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
8595 
8596 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
8597 				/* case: R1 = R2
8598 				 * copy register state to dest reg
8599 				 */
8600 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
8601 					/* Assign src and dst registers the same ID
8602 					 * that will be used by find_equal_scalars()
8603 					 * to propagate min/max range.
8604 					 */
8605 					src_reg->id = ++env->id_gen;
8606 				*dst_reg = *src_reg;
8607 				dst_reg->live |= REG_LIVE_WRITTEN;
8608 				dst_reg->subreg_def = DEF_NOT_SUBREG;
8609 			} else {
8610 				/* R1 = (u32) R2 */
8611 				if (is_pointer_value(env, insn->src_reg)) {
8612 					verbose(env,
8613 						"R%d partial copy of pointer\n",
8614 						insn->src_reg);
8615 					return -EACCES;
8616 				} else if (src_reg->type == SCALAR_VALUE) {
8617 					*dst_reg = *src_reg;
8618 					/* Make sure ID is cleared otherwise
8619 					 * dst_reg min/max could be incorrectly
8620 					 * propagated into src_reg by find_equal_scalars()
8621 					 */
8622 					dst_reg->id = 0;
8623 					dst_reg->live |= REG_LIVE_WRITTEN;
8624 					dst_reg->subreg_def = env->insn_idx + 1;
8625 				} else {
8626 					mark_reg_unknown(env, regs,
8627 							 insn->dst_reg);
8628 				}
8629 				zext_32_to_64(dst_reg);
8630 
8631 				__update_reg_bounds(dst_reg);
8632 				__reg_deduce_bounds(dst_reg);
8633 				__reg_bound_offset(dst_reg);
8634 			}
8635 		} else {
8636 			/* case: R = imm
8637 			 * remember the value we stored into this reg
8638 			 */
8639 			/* clear any state __mark_reg_known doesn't set */
8640 			mark_reg_unknown(env, regs, insn->dst_reg);
8641 			regs[insn->dst_reg].type = SCALAR_VALUE;
8642 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
8643 				__mark_reg_known(regs + insn->dst_reg,
8644 						 insn->imm);
8645 			} else {
8646 				__mark_reg_known(regs + insn->dst_reg,
8647 						 (u32)insn->imm);
8648 			}
8649 		}
8650 
8651 	} else if (opcode > BPF_END) {
8652 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
8653 		return -EINVAL;
8654 
8655 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
8656 
8657 		if (BPF_SRC(insn->code) == BPF_X) {
8658 			if (insn->imm != 0 || insn->off != 0) {
8659 				verbose(env, "BPF_ALU uses reserved fields\n");
8660 				return -EINVAL;
8661 			}
8662 			/* check src1 operand */
8663 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
8664 			if (err)
8665 				return err;
8666 		} else {
8667 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
8668 				verbose(env, "BPF_ALU uses reserved fields\n");
8669 				return -EINVAL;
8670 			}
8671 		}
8672 
8673 		/* check src2 operand */
8674 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
8675 		if (err)
8676 			return err;
8677 
8678 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
8679 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
8680 			verbose(env, "div by zero\n");
8681 			return -EINVAL;
8682 		}
8683 
8684 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
8685 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
8686 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
8687 
8688 			if (insn->imm < 0 || insn->imm >= size) {
8689 				verbose(env, "invalid shift %d\n", insn->imm);
8690 				return -EINVAL;
8691 			}
8692 		}
8693 
8694 		/* check dest operand */
8695 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
8696 		if (err)
8697 			return err;
8698 
8699 		return adjust_reg_min_max_vals(env, insn);
8700 	}
8701 
8702 	return 0;
8703 }
8704 
8705 static void __find_good_pkt_pointers(struct bpf_func_state *state,
8706 				     struct bpf_reg_state *dst_reg,
8707 				     enum bpf_reg_type type, int new_range)
8708 {
8709 	struct bpf_reg_state *reg;
8710 	int i;
8711 
8712 	for (i = 0; i < MAX_BPF_REG; i++) {
8713 		reg = &state->regs[i];
8714 		if (reg->type == type && reg->id == dst_reg->id)
8715 			/* keep the maximum range already checked */
8716 			reg->range = max(reg->range, new_range);
8717 	}
8718 
8719 	bpf_for_each_spilled_reg(i, state, reg) {
8720 		if (!reg)
8721 			continue;
8722 		if (reg->type == type && reg->id == dst_reg->id)
8723 			reg->range = max(reg->range, new_range);
8724 	}
8725 }
8726 
8727 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
8728 				   struct bpf_reg_state *dst_reg,
8729 				   enum bpf_reg_type type,
8730 				   bool range_right_open)
8731 {
8732 	int new_range, i;
8733 
8734 	if (dst_reg->off < 0 ||
8735 	    (dst_reg->off == 0 && range_right_open))
8736 		/* This doesn't give us any range */
8737 		return;
8738 
8739 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
8740 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
8741 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
8742 		 * than pkt_end, but that's because it's also less than pkt.
8743 		 */
8744 		return;
8745 
8746 	new_range = dst_reg->off;
8747 	if (range_right_open)
8748 		new_range++;
8749 
8750 	/* Examples for register markings:
8751 	 *
8752 	 * pkt_data in dst register:
8753 	 *
8754 	 *   r2 = r3;
8755 	 *   r2 += 8;
8756 	 *   if (r2 > pkt_end) goto <handle exception>
8757 	 *   <access okay>
8758 	 *
8759 	 *   r2 = r3;
8760 	 *   r2 += 8;
8761 	 *   if (r2 < pkt_end) goto <access okay>
8762 	 *   <handle exception>
8763 	 *
8764 	 *   Where:
8765 	 *     r2 == dst_reg, pkt_end == src_reg
8766 	 *     r2=pkt(id=n,off=8,r=0)
8767 	 *     r3=pkt(id=n,off=0,r=0)
8768 	 *
8769 	 * pkt_data in src register:
8770 	 *
8771 	 *   r2 = r3;
8772 	 *   r2 += 8;
8773 	 *   if (pkt_end >= r2) goto <access okay>
8774 	 *   <handle exception>
8775 	 *
8776 	 *   r2 = r3;
8777 	 *   r2 += 8;
8778 	 *   if (pkt_end <= r2) goto <handle exception>
8779 	 *   <access okay>
8780 	 *
8781 	 *   Where:
8782 	 *     pkt_end == dst_reg, r2 == src_reg
8783 	 *     r2=pkt(id=n,off=8,r=0)
8784 	 *     r3=pkt(id=n,off=0,r=0)
8785 	 *
8786 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
8787 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
8788 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
8789 	 * the check.
8790 	 */
8791 
8792 	/* If our ids match, then we must have the same max_value.  And we
8793 	 * don't care about the other reg's fixed offset, since if it's too big
8794 	 * the range won't allow anything.
8795 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
8796 	 */
8797 	for (i = 0; i <= vstate->curframe; i++)
8798 		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
8799 					 new_range);
8800 }
8801 
8802 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
8803 {
8804 	struct tnum subreg = tnum_subreg(reg->var_off);
8805 	s32 sval = (s32)val;
8806 
8807 	switch (opcode) {
8808 	case BPF_JEQ:
8809 		if (tnum_is_const(subreg))
8810 			return !!tnum_equals_const(subreg, val);
8811 		break;
8812 	case BPF_JNE:
8813 		if (tnum_is_const(subreg))
8814 			return !tnum_equals_const(subreg, val);
8815 		break;
8816 	case BPF_JSET:
8817 		if ((~subreg.mask & subreg.value) & val)
8818 			return 1;
8819 		if (!((subreg.mask | subreg.value) & val))
8820 			return 0;
8821 		break;
8822 	case BPF_JGT:
8823 		if (reg->u32_min_value > val)
8824 			return 1;
8825 		else if (reg->u32_max_value <= val)
8826 			return 0;
8827 		break;
8828 	case BPF_JSGT:
8829 		if (reg->s32_min_value > sval)
8830 			return 1;
8831 		else if (reg->s32_max_value <= sval)
8832 			return 0;
8833 		break;
8834 	case BPF_JLT:
8835 		if (reg->u32_max_value < val)
8836 			return 1;
8837 		else if (reg->u32_min_value >= val)
8838 			return 0;
8839 		break;
8840 	case BPF_JSLT:
8841 		if (reg->s32_max_value < sval)
8842 			return 1;
8843 		else if (reg->s32_min_value >= sval)
8844 			return 0;
8845 		break;
8846 	case BPF_JGE:
8847 		if (reg->u32_min_value >= val)
8848 			return 1;
8849 		else if (reg->u32_max_value < val)
8850 			return 0;
8851 		break;
8852 	case BPF_JSGE:
8853 		if (reg->s32_min_value >= sval)
8854 			return 1;
8855 		else if (reg->s32_max_value < sval)
8856 			return 0;
8857 		break;
8858 	case BPF_JLE:
8859 		if (reg->u32_max_value <= val)
8860 			return 1;
8861 		else if (reg->u32_min_value > val)
8862 			return 0;
8863 		break;
8864 	case BPF_JSLE:
8865 		if (reg->s32_max_value <= sval)
8866 			return 1;
8867 		else if (reg->s32_min_value > sval)
8868 			return 0;
8869 		break;
8870 	}
8871 
8872 	return -1;
8873 }
8874 
8875 
8876 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
8877 {
8878 	s64 sval = (s64)val;
8879 
8880 	switch (opcode) {
8881 	case BPF_JEQ:
8882 		if (tnum_is_const(reg->var_off))
8883 			return !!tnum_equals_const(reg->var_off, val);
8884 		break;
8885 	case BPF_JNE:
8886 		if (tnum_is_const(reg->var_off))
8887 			return !tnum_equals_const(reg->var_off, val);
8888 		break;
8889 	case BPF_JSET:
8890 		if ((~reg->var_off.mask & reg->var_off.value) & val)
8891 			return 1;
8892 		if (!((reg->var_off.mask | reg->var_off.value) & val))
8893 			return 0;
8894 		break;
8895 	case BPF_JGT:
8896 		if (reg->umin_value > val)
8897 			return 1;
8898 		else if (reg->umax_value <= val)
8899 			return 0;
8900 		break;
8901 	case BPF_JSGT:
8902 		if (reg->smin_value > sval)
8903 			return 1;
8904 		else if (reg->smax_value <= sval)
8905 			return 0;
8906 		break;
8907 	case BPF_JLT:
8908 		if (reg->umax_value < val)
8909 			return 1;
8910 		else if (reg->umin_value >= val)
8911 			return 0;
8912 		break;
8913 	case BPF_JSLT:
8914 		if (reg->smax_value < sval)
8915 			return 1;
8916 		else if (reg->smin_value >= sval)
8917 			return 0;
8918 		break;
8919 	case BPF_JGE:
8920 		if (reg->umin_value >= val)
8921 			return 1;
8922 		else if (reg->umax_value < val)
8923 			return 0;
8924 		break;
8925 	case BPF_JSGE:
8926 		if (reg->smin_value >= sval)
8927 			return 1;
8928 		else if (reg->smax_value < sval)
8929 			return 0;
8930 		break;
8931 	case BPF_JLE:
8932 		if (reg->umax_value <= val)
8933 			return 1;
8934 		else if (reg->umin_value > val)
8935 			return 0;
8936 		break;
8937 	case BPF_JSLE:
8938 		if (reg->smax_value <= sval)
8939 			return 1;
8940 		else if (reg->smin_value > sval)
8941 			return 0;
8942 		break;
8943 	}
8944 
8945 	return -1;
8946 }
8947 
8948 /* compute branch direction of the expression "if (reg opcode val) goto target;"
8949  * and return:
8950  *  1 - branch will be taken and "goto target" will be executed
8951  *  0 - branch will not be taken and fall-through to next insn
8952  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
8953  *      value range is [0,10]
8954  */
8955 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
8956 			   bool is_jmp32)
8957 {
8958 	if (__is_pointer_value(false, reg)) {
8959 		if (!reg_type_not_null(reg->type))
8960 			return -1;
8961 
8962 		/* If the pointer is valid, tests against zero will fail, so we
8963 		 * can use this to decide the branch direction.
8964 		 */
8965 		if (val != 0)
8966 			return -1;
8967 
8968 		switch (opcode) {
8969 		case BPF_JEQ:
8970 			return 0;
8971 		case BPF_JNE:
8972 			return 1;
8973 		default:
8974 			return -1;
8975 		}
8976 	}
8977 
8978 	if (is_jmp32)
8979 		return is_branch32_taken(reg, val, opcode);
8980 	return is_branch64_taken(reg, val, opcode);
8981 }
8982 
8983 static int flip_opcode(u32 opcode)
8984 {
8985 	/* How can we transform "a <op> b" into "b <op> a"? */
8986 	static const u8 opcode_flip[16] = {
8987 		/* these stay the same */
8988 		[BPF_JEQ  >> 4] = BPF_JEQ,
8989 		[BPF_JNE  >> 4] = BPF_JNE,
8990 		[BPF_JSET >> 4] = BPF_JSET,
8991 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
8992 		[BPF_JGE  >> 4] = BPF_JLE,
8993 		[BPF_JGT  >> 4] = BPF_JLT,
8994 		[BPF_JLE  >> 4] = BPF_JGE,
8995 		[BPF_JLT  >> 4] = BPF_JGT,
8996 		[BPF_JSGE >> 4] = BPF_JSLE,
8997 		[BPF_JSGT >> 4] = BPF_JSLT,
8998 		[BPF_JSLE >> 4] = BPF_JSGE,
8999 		[BPF_JSLT >> 4] = BPF_JSGT
9000 	};
9001 	return opcode_flip[opcode >> 4];
9002 }
9003 
9004 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
9005 				   struct bpf_reg_state *src_reg,
9006 				   u8 opcode)
9007 {
9008 	struct bpf_reg_state *pkt;
9009 
9010 	if (src_reg->type == PTR_TO_PACKET_END) {
9011 		pkt = dst_reg;
9012 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
9013 		pkt = src_reg;
9014 		opcode = flip_opcode(opcode);
9015 	} else {
9016 		return -1;
9017 	}
9018 
9019 	if (pkt->range >= 0)
9020 		return -1;
9021 
9022 	switch (opcode) {
9023 	case BPF_JLE:
9024 		/* pkt <= pkt_end */
9025 		fallthrough;
9026 	case BPF_JGT:
9027 		/* pkt > pkt_end */
9028 		if (pkt->range == BEYOND_PKT_END)
9029 			/* pkt has at least one extra byte beyond pkt_end */
9030 			return opcode == BPF_JGT;
9031 		break;
9032 	case BPF_JLT:
9033 		/* pkt < pkt_end */
9034 		fallthrough;
9035 	case BPF_JGE:
9036 		/* pkt >= pkt_end */
9037 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
9038 			return opcode == BPF_JGE;
9039 		break;
9040 	}
9041 	return -1;
9042 }
9043 
9044 /* Adjusts the register min/max values in the case that the dst_reg is the
9045  * variable register that we are working on, and src_reg is a constant or we're
9046  * simply doing a BPF_K check.
9047  * In JEQ/JNE cases we also adjust the var_off values.
9048  */
9049 static void reg_set_min_max(struct bpf_reg_state *true_reg,
9050 			    struct bpf_reg_state *false_reg,
9051 			    u64 val, u32 val32,
9052 			    u8 opcode, bool is_jmp32)
9053 {
9054 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
9055 	struct tnum false_64off = false_reg->var_off;
9056 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
9057 	struct tnum true_64off = true_reg->var_off;
9058 	s64 sval = (s64)val;
9059 	s32 sval32 = (s32)val32;
9060 
9061 	/* If the dst_reg is a pointer, we can't learn anything about its
9062 	 * variable offset from the compare (unless src_reg were a pointer into
9063 	 * the same object, but we don't bother with that).
9064 	 * Since false_reg and true_reg have the same type by construction, we
9065 	 * only need to check one of them for pointerness.
9066 	 */
9067 	if (__is_pointer_value(false, false_reg))
9068 		return;
9069 
9070 	switch (opcode) {
9071 	case BPF_JEQ:
9072 	case BPF_JNE:
9073 	{
9074 		struct bpf_reg_state *reg =
9075 			opcode == BPF_JEQ ? true_reg : false_reg;
9076 
9077 		/* JEQ/JNE comparison doesn't change the register equivalence.
9078 		 * r1 = r2;
9079 		 * if (r1 == 42) goto label;
9080 		 * ...
9081 		 * label: // here both r1 and r2 are known to be 42.
9082 		 *
9083 		 * Hence when marking a register as known, preserve its ID.
9084 		 */
9085 		if (is_jmp32)
9086 			__mark_reg32_known(reg, val32);
9087 		else
9088 			___mark_reg_known(reg, val);
9089 		break;
9090 	}
9091 	case BPF_JSET:
9092 		if (is_jmp32) {
9093 			false_32off = tnum_and(false_32off, tnum_const(~val32));
9094 			if (is_power_of_2(val32))
9095 				true_32off = tnum_or(true_32off,
9096 						     tnum_const(val32));
9097 		} else {
9098 			false_64off = tnum_and(false_64off, tnum_const(~val));
9099 			if (is_power_of_2(val))
9100 				true_64off = tnum_or(true_64off,
9101 						     tnum_const(val));
9102 		}
9103 		break;
9104 	case BPF_JGE:
9105 	case BPF_JGT:
9106 	{
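		/* Illustrative example: for "if r1 > 10" (BPF_JGT, val = 10)
		 * the false branch clamps umax_value to 10 while the true
		 * branch raises umin_value to 11; for BPF_JGE the cut-off
		 * shifts by one accordingly.
		 */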
9107 		if (is_jmp32) {
9108 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
9109 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
9110 
9111 			false_reg->u32_max_value = min(false_reg->u32_max_value,
9112 						       false_umax);
9113 			true_reg->u32_min_value = max(true_reg->u32_min_value,
9114 						      true_umin);
9115 		} else {
9116 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
9117 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
9118 
9119 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
9120 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
9121 		}
9122 		break;
9123 	}
9124 	case BPF_JSGE:
9125 	case BPF_JSGT:
9126 	{
9127 		if (is_jmp32) {
9128 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
9129 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
9130 
9131 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
9132 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
9133 		} else {
9134 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
9135 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
9136 
9137 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
9138 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
9139 		}
9140 		break;
9141 	}
9142 	case BPF_JLE:
9143 	case BPF_JLT:
9144 	{
9145 		if (is_jmp32) {
9146 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
9147 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
9148 
9149 			false_reg->u32_min_value = max(false_reg->u32_min_value,
9150 						       false_umin);
9151 			true_reg->u32_max_value = min(true_reg->u32_max_value,
9152 						      true_umax);
9153 		} else {
9154 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
9155 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
9156 
9157 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
9158 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
9159 		}
9160 		break;
9161 	}
9162 	case BPF_JSLE:
9163 	case BPF_JSLT:
9164 	{
9165 		if (is_jmp32) {
9166 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
9167 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
9168 
9169 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
9170 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
9171 		} else {
9172 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
9173 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
9174 
9175 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
9176 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
9177 		}
9178 		break;
9179 	}
9180 	default:
9181 		return;
9182 	}
9183 
9184 	if (is_jmp32) {
9185 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
9186 					     tnum_subreg(false_32off));
9187 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
9188 					    tnum_subreg(true_32off));
9189 		__reg_combine_32_into_64(false_reg);
9190 		__reg_combine_32_into_64(true_reg);
9191 	} else {
9192 		false_reg->var_off = false_64off;
9193 		true_reg->var_off = true_64off;
9194 		__reg_combine_64_into_32(false_reg);
9195 		__reg_combine_64_into_32(true_reg);
9196 	}
9197 }
9198 
9199 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
9200  * the variable reg.
9201  */
9202 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
9203 				struct bpf_reg_state *false_reg,
9204 				u64 val, u32 val32,
9205 				u8 opcode, bool is_jmp32)
9206 {
9207 	opcode = flip_opcode(opcode);
9208 	/* This uses zero as "not present in table"; luckily the zero opcode,
9209 	 * BPF_JA, can't get here.
9210 	 */
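	/* For example (illustrative): if r2 is known to be 10, then for
	 * "if r2 > r1" the constant sits on the dst side, so the test is
	 * flipped to the equivalent "if r1 < 10" and handed to
	 * reg_set_min_max() with r1 as the variable register.
	 */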
9211 	if (opcode)
9212 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
9213 }
9214 
9215 /* Regs are known to be equal, so intersect their min/max/var_off */
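/* For example (illustrative): if r1 is known to be in [0, 100] and r2 in
 * [50, 200], then in the branch where "if r1 == r2" is taken both registers
 * end up with the intersected range [50, 100].
 */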
9216 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
9217 				  struct bpf_reg_state *dst_reg)
9218 {
9219 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
9220 							dst_reg->umin_value);
9221 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
9222 							dst_reg->umax_value);
9223 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
9224 							dst_reg->smin_value);
9225 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
9226 							dst_reg->smax_value);
9227 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
9228 							     dst_reg->var_off);
9229 	/* We might have learned new bounds from the var_off. */
9230 	__update_reg_bounds(src_reg);
9231 	__update_reg_bounds(dst_reg);
9232 	/* We might have learned something about the sign bit. */
9233 	__reg_deduce_bounds(src_reg);
9234 	__reg_deduce_bounds(dst_reg);
9235 	/* We might have learned some bits from the bounds. */
9236 	__reg_bound_offset(src_reg);
9237 	__reg_bound_offset(dst_reg);
9238 	/* Intersecting with the old var_off might have improved our bounds
9239 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
9240 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
9241 	 */
9242 	__update_reg_bounds(src_reg);
9243 	__update_reg_bounds(dst_reg);
9244 }
9245 
9246 static void reg_combine_min_max(struct bpf_reg_state *true_src,
9247 				struct bpf_reg_state *true_dst,
9248 				struct bpf_reg_state *false_src,
9249 				struct bpf_reg_state *false_dst,
9250 				u8 opcode)
9251 {
9252 	switch (opcode) {
9253 	case BPF_JEQ:
9254 		__reg_combine_min_max(true_src, true_dst);
9255 		break;
9256 	case BPF_JNE:
9257 		__reg_combine_min_max(false_src, false_dst);
9258 		break;
9259 	}
9260 }
9261 
9262 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
9263 				 struct bpf_reg_state *reg, u32 id,
9264 				 bool is_null)
9265 {
9266 	if (type_may_be_null(reg->type) && reg->id == id &&
9267 	    !WARN_ON_ONCE(!reg->id)) {
9268 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
9269 				 !tnum_equals_const(reg->var_off, 0) ||
9270 				 reg->off)) {
9271 			/* Old offset (both fixed and variable parts) should
9272 			 * have been known-zero, because we don't allow pointer
9273 			 * arithmetic on pointers that might be NULL. If we
9274 			 * see this happening, don't convert the register.
9275 			 */
9276 			return;
9277 		}
9278 		if (is_null) {
9279 			reg->type = SCALAR_VALUE;
9280 			/* We don't need id and ref_obj_id from this point
9281 			 * onwards anymore, thus we should better reset it,
9282 			 * so that state pruning has chances to take effect.
9283 			 */
9284 			reg->id = 0;
9285 			reg->ref_obj_id = 0;
9286 
9287 			return;
9288 		}
9289 
9290 		mark_ptr_not_null_reg(reg);
9291 
9292 		if (!reg_may_point_to_spin_lock(reg)) {
9293 			/* For not-NULL ptr, reg->ref_obj_id will be reset
9294 			 * in release_reg_references().
9295 			 *
9296 			 * reg->id is still used by spin_lock ptr. Other
9297 			 * than spin_lock ptr type, reg->id can be reset.
9298 			 */
9299 			reg->id = 0;
9300 		}
9301 	}
9302 }
9303 
9304 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
9305 				    bool is_null)
9306 {
9307 	struct bpf_reg_state *reg;
9308 	int i;
9309 
9310 	for (i = 0; i < MAX_BPF_REG; i++)
9311 		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
9312 
9313 	bpf_for_each_spilled_reg(i, state, reg) {
9314 		if (!reg)
9315 			continue;
9316 		mark_ptr_or_null_reg(state, reg, id, is_null);
9317 	}
9318 }
9319 
9320 /* The logic is similar to find_good_pkt_pointers(); both could eventually
9321  * be folded together at some point.
9322  */
9323 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
9324 				  bool is_null)
9325 {
9326 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9327 	struct bpf_reg_state *regs = state->regs;
9328 	u32 ref_obj_id = regs[regno].ref_obj_id;
9329 	u32 id = regs[regno].id;
9330 	int i;
9331 
9332 	if (ref_obj_id && ref_obj_id == id && is_null)
9333 		/* regs[regno] is in the " == NULL" branch.
9334 		 * No one could have freed the reference state before
9335 		 * doing the NULL check.
9336 		 */
9337 		WARN_ON_ONCE(release_reference_state(state, id));
9338 
9339 	for (i = 0; i <= vstate->curframe; i++)
9340 		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
9341 }
9342 
9343 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
9344 				   struct bpf_reg_state *dst_reg,
9345 				   struct bpf_reg_state *src_reg,
9346 				   struct bpf_verifier_state *this_branch,
9347 				   struct bpf_verifier_state *other_branch)
9348 {
9349 	if (BPF_SRC(insn->code) != BPF_X)
9350 		return false;
9351 
9352 	/* Pointers are always 64-bit. */
9353 	if (BPF_CLASS(insn->code) == BPF_JMP32)
9354 		return false;
9355 
9356 	switch (BPF_OP(insn->code)) {
9357 	case BPF_JGT:
9358 		if ((dst_reg->type == PTR_TO_PACKET &&
9359 		     src_reg->type == PTR_TO_PACKET_END) ||
9360 		    (dst_reg->type == PTR_TO_PACKET_META &&
9361 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9362 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
9363 			find_good_pkt_pointers(this_branch, dst_reg,
9364 					       dst_reg->type, false);
9365 			mark_pkt_end(other_branch, insn->dst_reg, true);
9366 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9367 			    src_reg->type == PTR_TO_PACKET) ||
9368 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9369 			    src_reg->type == PTR_TO_PACKET_META)) {
9370 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
9371 			find_good_pkt_pointers(other_branch, src_reg,
9372 					       src_reg->type, true);
9373 			mark_pkt_end(this_branch, insn->src_reg, false);
9374 		} else {
9375 			return false;
9376 		}
9377 		break;
9378 	case BPF_JLT:
9379 		if ((dst_reg->type == PTR_TO_PACKET &&
9380 		     src_reg->type == PTR_TO_PACKET_END) ||
9381 		    (dst_reg->type == PTR_TO_PACKET_META &&
9382 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9383 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
9384 			find_good_pkt_pointers(other_branch, dst_reg,
9385 					       dst_reg->type, true);
9386 			mark_pkt_end(this_branch, insn->dst_reg, false);
9387 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9388 			    src_reg->type == PTR_TO_PACKET) ||
9389 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9390 			    src_reg->type == PTR_TO_PACKET_META)) {
9391 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
9392 			find_good_pkt_pointers(this_branch, src_reg,
9393 					       src_reg->type, false);
9394 			mark_pkt_end(other_branch, insn->src_reg, true);
9395 		} else {
9396 			return false;
9397 		}
9398 		break;
9399 	case BPF_JGE:
9400 		if ((dst_reg->type == PTR_TO_PACKET &&
9401 		     src_reg->type == PTR_TO_PACKET_END) ||
9402 		    (dst_reg->type == PTR_TO_PACKET_META &&
9403 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9404 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
9405 			find_good_pkt_pointers(this_branch, dst_reg,
9406 					       dst_reg->type, true);
9407 			mark_pkt_end(other_branch, insn->dst_reg, false);
9408 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9409 			    src_reg->type == PTR_TO_PACKET) ||
9410 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9411 			    src_reg->type == PTR_TO_PACKET_META)) {
9412 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
9413 			find_good_pkt_pointers(other_branch, src_reg,
9414 					       src_reg->type, false);
9415 			mark_pkt_end(this_branch, insn->src_reg, true);
9416 		} else {
9417 			return false;
9418 		}
9419 		break;
9420 	case BPF_JLE:
9421 		if ((dst_reg->type == PTR_TO_PACKET &&
9422 		     src_reg->type == PTR_TO_PACKET_END) ||
9423 		    (dst_reg->type == PTR_TO_PACKET_META &&
9424 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9425 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
9426 			find_good_pkt_pointers(other_branch, dst_reg,
9427 					       dst_reg->type, false);
9428 			mark_pkt_end(this_branch, insn->dst_reg, true);
9429 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
9430 			    src_reg->type == PTR_TO_PACKET) ||
9431 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9432 			    src_reg->type == PTR_TO_PACKET_META)) {
9433 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
9434 			find_good_pkt_pointers(this_branch, src_reg,
9435 					       src_reg->type, true);
9436 			mark_pkt_end(other_branch, insn->src_reg, false);
9437 		} else {
9438 			return false;
9439 		}
9440 		break;
9441 	default:
9442 		return false;
9443 	}
9444 
9445 	return true;
9446 }
9447 
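/* Illustrative example of scalar ID propagation:
 *   r2 = r1              // r1 and r2 now share the same ID
 *   if r1 > 7 goto ...   // fall-through: r1 is known to be <= 7
 * find_equal_scalars() copies the refined state of r1 to r2 (and to any
 * spilled copies sharing the ID) so later uses of r2 get the same bounds.
 */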
9448 static void find_equal_scalars(struct bpf_verifier_state *vstate,
9449 			       struct bpf_reg_state *known_reg)
9450 {
9451 	struct bpf_func_state *state;
9452 	struct bpf_reg_state *reg;
9453 	int i, j;
9454 
9455 	for (i = 0; i <= vstate->curframe; i++) {
9456 		state = vstate->frame[i];
9457 		for (j = 0; j < MAX_BPF_REG; j++) {
9458 			reg = &state->regs[j];
9459 			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9460 				*reg = *known_reg;
9461 		}
9462 
9463 		bpf_for_each_spilled_reg(j, state, reg) {
9464 			if (!reg)
9465 				continue;
9466 			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9467 				*reg = *known_reg;
9468 		}
9469 	}
9470 }
9471 
9472 static int check_cond_jmp_op(struct bpf_verifier_env *env,
9473 			     struct bpf_insn *insn, int *insn_idx)
9474 {
9475 	struct bpf_verifier_state *this_branch = env->cur_state;
9476 	struct bpf_verifier_state *other_branch;
9477 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
9478 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
9479 	u8 opcode = BPF_OP(insn->code);
9480 	bool is_jmp32;
9481 	int pred = -1;
9482 	int err;
9483 
9484 	/* Only conditional jumps are expected to reach here. */
9485 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
9486 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
9487 		return -EINVAL;
9488 	}
9489 
9490 	if (BPF_SRC(insn->code) == BPF_X) {
9491 		if (insn->imm != 0) {
9492 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
9493 			return -EINVAL;
9494 		}
9495 
9496 		/* check src1 operand */
9497 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
9498 		if (err)
9499 			return err;
9500 
9501 		if (is_pointer_value(env, insn->src_reg)) {
9502 			verbose(env, "R%d pointer comparison prohibited\n",
9503 				insn->src_reg);
9504 			return -EACCES;
9505 		}
9506 		src_reg = &regs[insn->src_reg];
9507 	} else {
9508 		if (insn->src_reg != BPF_REG_0) {
9509 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
9510 			return -EINVAL;
9511 		}
9512 	}
9513 
9514 	/* check src2 operand */
9515 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9516 	if (err)
9517 		return err;
9518 
9519 	dst_reg = &regs[insn->dst_reg];
9520 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
9521 
9522 	if (BPF_SRC(insn->code) == BPF_K) {
9523 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
9524 	} else if (src_reg->type == SCALAR_VALUE &&
9525 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
9526 		pred = is_branch_taken(dst_reg,
9527 				       tnum_subreg(src_reg->var_off).value,
9528 				       opcode,
9529 				       is_jmp32);
9530 	} else if (src_reg->type == SCALAR_VALUE &&
9531 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
9532 		pred = is_branch_taken(dst_reg,
9533 				       src_reg->var_off.value,
9534 				       opcode,
9535 				       is_jmp32);
9536 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
9537 		   reg_is_pkt_pointer_any(src_reg) &&
9538 		   !is_jmp32) {
9539 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
9540 	}
9541 
9542 	if (pred >= 0) {
9543 		/* If we get here with a dst_reg pointer type it is because
9544 		 * is_branch_taken() above special-cased the 0 comparison.
9545 		 */
9546 		if (!__is_pointer_value(false, dst_reg))
9547 			err = mark_chain_precision(env, insn->dst_reg);
9548 		if (BPF_SRC(insn->code) == BPF_X && !err &&
9549 		    !__is_pointer_value(false, src_reg))
9550 			err = mark_chain_precision(env, insn->src_reg);
9551 		if (err)
9552 			return err;
9553 	}
9554 
9555 	if (pred == 1) {
9556 		/* Only follow the goto, ignore fall-through. If needed, push
9557 		 * the fall-through branch for simulation under speculative
9558 		 * execution.
9559 		 */
9560 		if (!env->bypass_spec_v1 &&
9561 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
9562 					       *insn_idx))
9563 			return -EFAULT;
9564 		*insn_idx += insn->off;
9565 		return 0;
9566 	} else if (pred == 0) {
9567 		/* Only follow the fall-through branch, since that's where the
9568 		 * program will go. If needed, push the goto branch for
9569 		 * simulation under speculative execution.
9570 		 */
9571 		if (!env->bypass_spec_v1 &&
9572 		    !sanitize_speculative_path(env, insn,
9573 					       *insn_idx + insn->off + 1,
9574 					       *insn_idx))
9575 			return -EFAULT;
9576 		return 0;
9577 	}
9578 
9579 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
9580 				  false);
9581 	if (!other_branch)
9582 		return -EFAULT;
9583 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
9584 
9585 	/* detect if we are comparing against a constant value so we can adjust
9586 	 * our min/max values for our dst register.
9587 	 * this is only legit if both are scalars (or pointers to the same
9588 	 * object, I suppose, but we don't support that right now), because
9589 	 * otherwise the different base pointers mean the offsets aren't
9590 	 * comparable.
9591 	 */
9592 	if (BPF_SRC(insn->code) == BPF_X) {
9593 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
9594 
9595 		if (dst_reg->type == SCALAR_VALUE &&
9596 		    src_reg->type == SCALAR_VALUE) {
9597 			if (tnum_is_const(src_reg->var_off) ||
9598 			    (is_jmp32 &&
9599 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
9600 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
9601 						dst_reg,
9602 						src_reg->var_off.value,
9603 						tnum_subreg(src_reg->var_off).value,
9604 						opcode, is_jmp32);
9605 			else if (tnum_is_const(dst_reg->var_off) ||
9606 				 (is_jmp32 &&
9607 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
9608 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
9609 						    src_reg,
9610 						    dst_reg->var_off.value,
9611 						    tnum_subreg(dst_reg->var_off).value,
9612 						    opcode, is_jmp32);
9613 			else if (!is_jmp32 &&
9614 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
9615 				/* Comparing for equality, we can combine knowledge */
9616 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
9617 						    &other_branch_regs[insn->dst_reg],
9618 						    src_reg, dst_reg, opcode);
9619 			if (src_reg->id &&
9620 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
9621 				find_equal_scalars(this_branch, src_reg);
9622 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
9623 			}
9624 
9625 		}
9626 	} else if (dst_reg->type == SCALAR_VALUE) {
9627 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
9628 					dst_reg, insn->imm, (u32)insn->imm,
9629 					opcode, is_jmp32);
9630 	}
9631 
9632 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
9633 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
9634 		find_equal_scalars(this_branch, dst_reg);
9635 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
9636 	}
9637 
9638 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
9639 	 * NOTE: these optimizations below are related to pointer comparisons,
9640 	 *       which will never be JMP32.
9641 	 */
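	/* Typical pattern handled here (illustrative):
	 *   r0 = bpf_map_lookup_elem(...)  // PTR_TO_MAP_VALUE_OR_NULL
	 *   if r0 == 0 goto out            // "== NULL" branch: r0 -> scalar
	 *   *(u32 *)(r0 + 0) = 1           // here r0 is known non-NULL
	 */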
9642 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
9643 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
9644 	    type_may_be_null(dst_reg->type)) {
9645 		/* Mark all identical registers in each branch as either
9646 		 * safe or unknown depending on the R == 0 or R != 0 condition.
9647 		 */
9648 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
9649 				      opcode == BPF_JNE);
9650 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
9651 				      opcode == BPF_JEQ);
9652 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
9653 					   this_branch, other_branch) &&
9654 		   is_pointer_value(env, insn->dst_reg)) {
9655 		verbose(env, "R%d pointer comparison prohibited\n",
9656 			insn->dst_reg);
9657 		return -EACCES;
9658 	}
9659 	if (env->log.level & BPF_LOG_LEVEL)
9660 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
9661 	return 0;
9662 }
9663 
9664 /* verify BPF_LD_IMM64 instruction */
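/* BPF_LD_IMM64 is a two-instruction encoding (illustrative sketch):
 *   insn[0]: BPF_LD | BPF_DW | BPF_IMM, imm = lower 32 bits
 *   insn[1]: imm = upper 32 bits
 * The src_reg field selects the flavor: 0 for a plain 64-bit constant, or
 * pseudo values such as BPF_PSEUDO_MAP_FD / BPF_PSEUDO_BTF_ID that have
 * been resolved against maps or BTF objects earlier in bpf_check().
 */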
9665 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
9666 {
9667 	struct bpf_insn_aux_data *aux = cur_aux(env);
9668 	struct bpf_reg_state *regs = cur_regs(env);
9669 	struct bpf_reg_state *dst_reg;
9670 	struct bpf_map *map;
9671 	int err;
9672 
9673 	if (BPF_SIZE(insn->code) != BPF_DW) {
9674 		verbose(env, "invalid BPF_LD_IMM insn\n");
9675 		return -EINVAL;
9676 	}
9677 	if (insn->off != 0) {
9678 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
9679 		return -EINVAL;
9680 	}
9681 
9682 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
9683 	if (err)
9684 		return err;
9685 
9686 	dst_reg = &regs[insn->dst_reg];
9687 	if (insn->src_reg == 0) {
9688 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
9689 
9690 		dst_reg->type = SCALAR_VALUE;
9691 		__mark_reg_known(&regs[insn->dst_reg], imm);
9692 		return 0;
9693 	}
9694 
9695 	/* All special src_reg cases are listed below. From this point onwards
9696 	 * we either succeed and assign a corresponding dst_reg->type after
9697 	 * zeroing the offset, or fail and reject the program.
9698 	 */
9699 	mark_reg_known_zero(env, regs, insn->dst_reg);
9700 
9701 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
9702 		dst_reg->type = aux->btf_var.reg_type;
9703 		switch (base_type(dst_reg->type)) {
9704 		case PTR_TO_MEM:
9705 			dst_reg->mem_size = aux->btf_var.mem_size;
9706 			break;
9707 		case PTR_TO_BTF_ID:
9708 			dst_reg->btf = aux->btf_var.btf;
9709 			dst_reg->btf_id = aux->btf_var.btf_id;
9710 			break;
9711 		default:
9712 			verbose(env, "bpf verifier is misconfigured\n");
9713 			return -EFAULT;
9714 		}
9715 		return 0;
9716 	}
9717 
9718 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
9719 		struct bpf_prog_aux *aux = env->prog->aux;
9720 		u32 subprogno = find_subprog(env,
9721 					     env->insn_idx + insn->imm + 1);
9722 
9723 		if (!aux->func_info) {
9724 			verbose(env, "missing btf func_info\n");
9725 			return -EINVAL;
9726 		}
9727 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
9728 			verbose(env, "callback function not static\n");
9729 			return -EINVAL;
9730 		}
9731 
9732 		dst_reg->type = PTR_TO_FUNC;
9733 		dst_reg->subprogno = subprogno;
9734 		return 0;
9735 	}
9736 
9737 	map = env->used_maps[aux->map_index];
9738 	dst_reg->map_ptr = map;
9739 
9740 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
9741 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
9742 		dst_reg->type = PTR_TO_MAP_VALUE;
9743 		dst_reg->off = aux->map_off;
9744 		if (map_value_has_spin_lock(map))
9745 			dst_reg->id = ++env->id_gen;
9746 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
9747 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
9748 		dst_reg->type = CONST_PTR_TO_MAP;
9749 	} else {
9750 		verbose(env, "bpf verifier is misconfigured\n");
9751 		return -EINVAL;
9752 	}
9753 
9754 	return 0;
9755 }
9756 
9757 static bool may_access_skb(enum bpf_prog_type type)
9758 {
9759 	switch (type) {
9760 	case BPF_PROG_TYPE_SOCKET_FILTER:
9761 	case BPF_PROG_TYPE_SCHED_CLS:
9762 	case BPF_PROG_TYPE_SCHED_ACT:
9763 		return true;
9764 	default:
9765 		return false;
9766 	}
9767 }
9768 
9769 /* verify safety of LD_ABS|LD_IND instructions:
9770  * - they can only appear in programs where ctx == skb
9771  * - since they are wrappers of function calls, they scratch R1-R5 registers,
9772  *   preserve R6-R9, and store return value into R0
9773  *
9774  * Implicit input:
9775  *   ctx == skb == R6 == CTX
9776  *
9777  * Explicit input:
9778  *   SRC == any register
9779  *   IMM == 32-bit immediate
9780  *
9781  * Output:
9782  *   R0 - 8/16/32-bit skb data converted to cpu endianness
9783  */
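/* Classic usage, for illustration only:
 *   r6 = r1                  // ctx/skb must be kept in R6
 *   r0 = *(u16 *)skb[12]     // BPF_LD_ABS | BPF_H: load the EtherType
 *   if r0 != 0x0800 goto drop
 * As with a helper call, R1-R5 are scratched and only R0 holds the result.
 */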
9784 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
9785 {
9786 	struct bpf_reg_state *regs = cur_regs(env);
9787 	static const int ctx_reg = BPF_REG_6;
9788 	u8 mode = BPF_MODE(insn->code);
9789 	int i, err;
9790 
9791 	if (!may_access_skb(resolve_prog_type(env->prog))) {
9792 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
9793 		return -EINVAL;
9794 	}
9795 
9796 	if (!env->ops->gen_ld_abs) {
9797 		verbose(env, "bpf verifier is misconfigured\n");
9798 		return -EINVAL;
9799 	}
9800 
9801 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
9802 	    BPF_SIZE(insn->code) == BPF_DW ||
9803 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
9804 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
9805 		return -EINVAL;
9806 	}
9807 
9808 	/* check whether implicit source operand (register R6) is readable */
9809 	err = check_reg_arg(env, ctx_reg, SRC_OP);
9810 	if (err)
9811 		return err;
9812 
9813 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
9814 	 * gen_ld_abs() may terminate the program at runtime, leading to
9815 	 * reference leak.
9816 	 */
9817 	err = check_reference_leak(env);
9818 	if (err) {
9819 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
9820 		return err;
9821 	}
9822 
9823 	if (env->cur_state->active_spin_lock) {
9824 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
9825 		return -EINVAL;
9826 	}
9827 
9828 	if (regs[ctx_reg].type != PTR_TO_CTX) {
9829 		verbose(env,
9830 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
9831 		return -EINVAL;
9832 	}
9833 
9834 	if (mode == BPF_IND) {
9835 		/* check explicit source operand */
9836 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
9837 		if (err)
9838 			return err;
9839 	}
9840 
9841 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
9842 	if (err < 0)
9843 		return err;
9844 
9845 	/* reset caller saved regs to unreadable */
9846 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
9847 		mark_reg_not_init(env, regs, caller_saved[i]);
9848 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
9849 	}
9850 
9851 	/* mark destination R0 register as readable, since it contains
9852 	 * the value fetched from the packet.
9853 	 * Already marked as written above.
9854 	 */
9855 	mark_reg_unknown(env, regs, BPF_REG_0);
9856 	/* ld_abs loads up to 32 bits of skb data. */
9857 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
9858 	return 0;
9859 }
9860 
9861 static int check_return_code(struct bpf_verifier_env *env)
9862 {
9863 	struct tnum enforce_attach_type_range = tnum_unknown;
9864 	const struct bpf_prog *prog = env->prog;
9865 	struct bpf_reg_state *reg;
9866 	struct tnum range = tnum_range(0, 1);
9867 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
9868 	int err;
9869 	struct bpf_func_state *frame = env->cur_state->frame[0];
9870 	const bool is_subprog = frame->subprogno;
9871 
9872 	/* LSM and struct_ops func-ptr's return type could be "void" */
9873 	if (!is_subprog &&
9874 	    (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
9875 	     prog_type == BPF_PROG_TYPE_LSM) &&
9876 	    !prog->aux->attach_func_proto->type)
9877 		return 0;
9878 
9879 	/* The eBPF calling convention is such that R0 is used
9880 	 * to return the value from an eBPF program.
9881 	 * Make sure that it is readable at the time
9882 	 * of bpf_exit, which means that the program wrote
9883 	 * something into it earlier.
9884 	 */
9885 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
9886 	if (err)
9887 		return err;
9888 
9889 	if (is_pointer_value(env, BPF_REG_0)) {
9890 		verbose(env, "R0 leaks addr as return value\n");
9891 		return -EACCES;
9892 	}
9893 
9894 	reg = cur_regs(env) + BPF_REG_0;
9895 
9896 	if (frame->in_async_callback_fn) {
9897 		/* enforce return zero from async callbacks like timer */
9898 		if (reg->type != SCALAR_VALUE) {
9899 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
9900 				reg_type_str(env, reg->type));
9901 			return -EINVAL;
9902 		}
9903 
9904 		if (!tnum_in(tnum_const(0), reg->var_off)) {
9905 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
9906 			return -EINVAL;
9907 		}
9908 		return 0;
9909 	}
9910 
9911 	if (is_subprog) {
9912 		if (reg->type != SCALAR_VALUE) {
9913 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
9914 				reg_type_str(env, reg->type));
9915 			return -EINVAL;
9916 		}
9917 		return 0;
9918 	}
9919 
9920 	switch (prog_type) {
9921 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
9922 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
9923 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
9924 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
9925 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
9926 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
9927 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
9928 			range = tnum_range(1, 1);
9929 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
9930 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
9931 			range = tnum_range(0, 3);
9932 		break;
9933 	case BPF_PROG_TYPE_CGROUP_SKB:
9934 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
9935 			range = tnum_range(0, 3);
9936 			enforce_attach_type_range = tnum_range(2, 3);
9937 		}
9938 		break;
9939 	case BPF_PROG_TYPE_CGROUP_SOCK:
9940 	case BPF_PROG_TYPE_SOCK_OPS:
9941 	case BPF_PROG_TYPE_CGROUP_DEVICE:
9942 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
9943 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
9944 		break;
9945 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
9946 		if (!env->prog->aux->attach_btf_id)
9947 			return 0;
9948 		range = tnum_const(0);
9949 		break;
9950 	case BPF_PROG_TYPE_TRACING:
9951 		switch (env->prog->expected_attach_type) {
9952 		case BPF_TRACE_FENTRY:
9953 		case BPF_TRACE_FEXIT:
9954 			range = tnum_const(0);
9955 			break;
9956 		case BPF_TRACE_RAW_TP:
9957 		case BPF_MODIFY_RETURN:
9958 			return 0;
9959 		case BPF_TRACE_ITER:
9960 			break;
9961 		default:
9962 			return -ENOTSUPP;
9963 		}
9964 		break;
9965 	case BPF_PROG_TYPE_SK_LOOKUP:
9966 		range = tnum_range(SK_DROP, SK_PASS);
9967 		break;
9968 	case BPF_PROG_TYPE_EXT:
9969 		/* freplace program can return anything, as its return value
9970 		 * depends on the to-be-replaced kernel func or bpf program.
9971 		 */
9972 	default:
9973 		return 0;
9974 	}
9975 
9976 	if (reg->type != SCALAR_VALUE) {
9977 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
9978 			reg_type_str(env, reg->type));
9979 		return -EINVAL;
9980 	}
9981 
9982 	if (!tnum_in(range, reg->var_off)) {
9983 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
9984 		return -EINVAL;
9985 	}
9986 
9987 	if (!tnum_is_unknown(enforce_attach_type_range) &&
9988 	    tnum_in(enforce_attach_type_range, reg->var_off))
9989 		env->prog->enforce_expected_attach_type = 1;
9990 	return 0;
9991 }
9992 
9993 /* non-recursive DFS pseudo code
9994  * 1  procedure DFS-iterative(G,v):
9995  * 2      label v as discovered
9996  * 3      let S be a stack
9997  * 4      S.push(v)
9998  * 5      while S is not empty
9999  * 6            t <- S.pop()
10000  * 7            if t is what we're looking for:
10001  * 8                return t
10002  * 9            for all edges e in G.adjacentEdges(t) do
10003  * 10               if edge e is already labelled
10004  * 11                   continue with the next edge
10005  * 12               w <- G.adjacentVertex(t,e)
10006  * 13               if vertex w is not discovered and not explored
10007  * 14                   label e as tree-edge
10008  * 15                   label w as discovered
10009  * 16                   S.push(w)
10010  * 17                   continue at 5
10011  * 18               else if vertex w is discovered
10012  * 19                   label e as back-edge
10013  * 20               else
10014  * 21                   // vertex w is explored
10015  * 22                   label e as forward- or cross-edge
10016  * 23           label t as explored
10017  * 24           S.pop()
10018  *
10019  * convention:
10020  * 0x10 - discovered
10021  * 0x11 - discovered and fall-through edge labelled
10022  * 0x12 - discovered and fall-through and branch edges labelled
10023  * 0x20 - explored
10024  */
10025 
10026 enum {
10027 	DISCOVERED = 0x10,
10028 	EXPLORED = 0x20,
10029 	FALLTHROUGH = 1,
10030 	BRANCH = 2,
10031 };
10032 
10033 static u32 state_htab_size(struct bpf_verifier_env *env)
10034 {
10035 	return env->prog->len;
10036 }
10037 
10038 static struct bpf_verifier_state_list **explored_state(
10039 					struct bpf_verifier_env *env,
10040 					int idx)
10041 {
10042 	struct bpf_verifier_state *cur = env->cur_state;
10043 	struct bpf_func_state *state = cur->frame[cur->curframe];
10044 
10045 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
10046 }
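/* Illustrative note (hypothetical numbers): states queued at the same
 * instruction index but reached through different call sites usually hash
 * to different buckets, e.g. a prune point at insn 7 entered from a call at
 * insn 100 vs. one at insn 200 yields different (idx ^ callsite) values,
 * which keeps the per-bucket lists short.
 */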
10047 
10048 static void init_explored_state(struct bpf_verifier_env *env, int idx)
10049 {
10050 	env->insn_aux_data[idx].prune_point = true;
10051 }
10052 
10053 enum {
10054 	DONE_EXPLORING = 0,
10055 	KEEP_EXPLORING = 1,
10056 };
10057 
10058 /* t, w, e - match pseudo-code above:
10059  * t - index of current instruction
10060  * w - next instruction
10061  * e - edge
10062  */
10063 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
10064 		     bool loop_ok)
10065 {
10066 	int *insn_stack = env->cfg.insn_stack;
10067 	int *insn_state = env->cfg.insn_state;
10068 
10069 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
10070 		return DONE_EXPLORING;
10071 
10072 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
10073 		return DONE_EXPLORING;
10074 
10075 	if (w < 0 || w >= env->prog->len) {
10076 		verbose_linfo(env, t, "%d: ", t);
10077 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
10078 		return -EINVAL;
10079 	}
10080 
10081 	if (e == BRANCH)
10082 		/* mark branch target for state pruning */
10083 		init_explored_state(env, w);
10084 
10085 	if (insn_state[w] == 0) {
10086 		/* tree-edge */
10087 		insn_state[t] = DISCOVERED | e;
10088 		insn_state[w] = DISCOVERED;
10089 		if (env->cfg.cur_stack >= env->prog->len)
10090 			return -E2BIG;
10091 		insn_stack[env->cfg.cur_stack++] = w;
10092 		return KEEP_EXPLORING;
10093 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
10094 		if (loop_ok && env->bpf_capable)
10095 			return DONE_EXPLORING;
10096 		verbose_linfo(env, t, "%d: ", t);
10097 		verbose_linfo(env, w, "%d: ", w);
10098 		verbose(env, "back-edge from insn %d to %d\n", t, w);
10099 		return -EINVAL;
10100 	} else if (insn_state[w] == EXPLORED) {
10101 		/* forward- or cross-edge */
10102 		insn_state[t] = DISCOVERED | e;
10103 	} else {
10104 		verbose(env, "insn state internal bug\n");
10105 		return -EFAULT;
10106 	}
10107 	return DONE_EXPLORING;
10108 }
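/* A sketch of how the above is driven for a conditional jump, using a
 * hypothetical insn 3 whose branch target is insn 7:
 *   - visit_insn(3) first calls push_insn(3, 4, FALLTHROUGH, ...), which sets
 *     insn_state[3] = DISCOVERED | FALLTHROUGH and pushes insn 4;
 *   - insn 3 stays on the stack; once the subtree under insn 4 is EXPLORED
 *     and insn 3 is back on top, the FALLTHROUGH push is a no-op and
 *     push_insn(3, 7, BRANCH, ...) runs, labelling the branch edge and
 *     pushing insn 7.
 */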
10109 
10110 static int visit_func_call_insn(int t, int insn_cnt,
10111 				struct bpf_insn *insns,
10112 				struct bpf_verifier_env *env,
10113 				bool visit_callee)
10114 {
10115 	int ret;
10116 
10117 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
10118 	if (ret)
10119 		return ret;
10120 
10121 	if (t + 1 < insn_cnt)
10122 		init_explored_state(env, t + 1);
10123 	if (visit_callee) {
10124 		init_explored_state(env, t);
10125 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
10126 				/* It's ok to allow recursion from CFG point of
10127 				 * view. __check_func_call() will do the actual
10128 				 * check.
10129 				 */
10130 				bpf_pseudo_func(insns + t));
10131 	}
10132 	return ret;
10133 }
10134 
10135 /* Visits the instruction at index t and returns one of the following:
10136  *  < 0 - an error occurred
10137  *  DONE_EXPLORING - the instruction was fully explored
10138  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
10139  */
10140 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
10141 {
10142 	struct bpf_insn *insns = env->prog->insnsi;
10143 	int ret;
10144 
10145 	if (bpf_pseudo_func(insns + t))
10146 		return visit_func_call_insn(t, insn_cnt, insns, env, true);
10147 
10148 	/* All non-branch instructions have a single fall-through edge. */
10149 	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
10150 	    BPF_CLASS(insns[t].code) != BPF_JMP32)
10151 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
10152 
10153 	switch (BPF_OP(insns[t].code)) {
10154 	case BPF_EXIT:
10155 		return DONE_EXPLORING;
10156 
10157 	case BPF_CALL:
10158 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
10159 			/* Mark this call insn to trigger is_state_visited() check
10160 			 * before the call itself is processed by __check_func_call().
10161 			 * Otherwise a new async state will be pushed for further
10162 			 * exploration.
10163 			 */
10164 			init_explored_state(env, t);
10165 		return visit_func_call_insn(t, insn_cnt, insns, env,
10166 					    insns[t].src_reg == BPF_PSEUDO_CALL);
10167 
10168 	case BPF_JA:
10169 		if (BPF_SRC(insns[t].code) != BPF_K)
10170 			return -EINVAL;
10171 
10172 		/* unconditional jump with single edge */
10173 		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
10174 				true);
10175 		if (ret)
10176 			return ret;
10177 
10178 		/* unconditional jmp is not a good pruning point,
10179 		 * but it's marked, since backtracking needs
10180 		 * to record jmp history in is_state_visited().
10181 		 */
10182 		init_explored_state(env, t + insns[t].off + 1);
10183 		/* tell verifier to check for equivalent states
10184 		 * after every call and jump
10185 		 */
10186 		if (t + 1 < insn_cnt)
10187 			init_explored_state(env, t + 1);
10188 
10189 		return ret;
10190 
10191 	default:
10192 		/* conditional jump with two edges */
10193 		init_explored_state(env, t);
10194 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
10195 		if (ret)
10196 			return ret;
10197 
10198 		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
10199 	}
10200 }
10201 
10202 /* non-recursive depth-first-search to detect loops in BPF program
10203  * loop == back-edge in directed graph
10204  */
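/* A minimal sketch of what gets rejected here, with made-up instructions:
 *
 *   0: r0 += 1
 *   1: if r0 < 10 goto pc-2   // back-edge to insn 0
 *   2: exit
 *
 * For a loader without bpf_capable the back-edge at insn 1 fails this pass
 * ("back-edge from insn 1 to 0"); with bpf_capable the edge is tolerated
 * here and bounded-loop checking is left to the state-exploration pass.
 * An instruction that no path can reach is likewise rejected as
 * "unreachable insn".
 */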
10205 static int check_cfg(struct bpf_verifier_env *env)
10206 {
10207 	int insn_cnt = env->prog->len;
10208 	int *insn_stack, *insn_state;
10209 	int ret = 0;
10210 	int i;
10211 
10212 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10213 	if (!insn_state)
10214 		return -ENOMEM;
10215 
10216 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10217 	if (!insn_stack) {
10218 		kvfree(insn_state);
10219 		return -ENOMEM;
10220 	}
10221 
10222 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
10223 	insn_stack[0] = 0; /* 0 is the first instruction */
10224 	env->cfg.cur_stack = 1;
10225 
10226 	while (env->cfg.cur_stack > 0) {
10227 		int t = insn_stack[env->cfg.cur_stack - 1];
10228 
10229 		ret = visit_insn(t, insn_cnt, env);
10230 		switch (ret) {
10231 		case DONE_EXPLORING:
10232 			insn_state[t] = EXPLORED;
10233 			env->cfg.cur_stack--;
10234 			break;
10235 		case KEEP_EXPLORING:
10236 			break;
10237 		default:
10238 			if (ret > 0) {
10239 				verbose(env, "visit_insn internal bug\n");
10240 				ret = -EFAULT;
10241 			}
10242 			goto err_free;
10243 		}
10244 	}
10245 
10246 	if (env->cfg.cur_stack < 0) {
10247 		verbose(env, "pop stack internal bug\n");
10248 		ret = -EFAULT;
10249 		goto err_free;
10250 	}
10251 
10252 	for (i = 0; i < insn_cnt; i++) {
10253 		if (insn_state[i] != EXPLORED) {
10254 			verbose(env, "unreachable insn %d\n", i);
10255 			ret = -EINVAL;
10256 			goto err_free;
10257 		}
10258 	}
10259 	ret = 0; /* cfg looks good */
10260 
10261 err_free:
10262 	kvfree(insn_state);
10263 	kvfree(insn_stack);
10264 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
10265 	return ret;
10266 }
10267 
10268 static int check_abnormal_return(struct bpf_verifier_env *env)
10269 {
10270 	int i;
10271 
10272 	for (i = 1; i < env->subprog_cnt; i++) {
10273 		if (env->subprog_info[i].has_ld_abs) {
10274 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
10275 			return -EINVAL;
10276 		}
10277 		if (env->subprog_info[i].has_tail_call) {
10278 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
10279 			return -EINVAL;
10280 		}
10281 	}
10282 	return 0;
10283 }
10284 
10285 /* The minimum supported BTF func info size */
10286 #define MIN_BPF_FUNCINFO_SIZE	8
10287 #define MAX_FUNCINFO_REC_SIZE	252
10288 
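/* The min/max bounds above implement the usual extensible-record scheme
 * (a sketch of the contract, not normative): user space may pass records
 * larger or smaller than the kernel's struct bpf_func_info.  Only
 * min(kernel size, user size) bytes of each record are copied, and
 * bpf_check_uarg_tail_zero() requires any extra user bytes beyond what the
 * kernel understands to be zero, so an older kernel safely rejects records
 * that set fields it does not know about.
 */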
10289 static int check_btf_func(struct bpf_verifier_env *env,
10290 			  const union bpf_attr *attr,
10291 			  bpfptr_t uattr)
10292 {
10293 	const struct btf_type *type, *func_proto, *ret_type;
10294 	u32 i, nfuncs, urec_size, min_size;
10295 	u32 krec_size = sizeof(struct bpf_func_info);
10296 	struct bpf_func_info *krecord;
10297 	struct bpf_func_info_aux *info_aux = NULL;
10298 	struct bpf_prog *prog;
10299 	const struct btf *btf;
10300 	bpfptr_t urecord;
10301 	u32 prev_offset = 0;
10302 	bool scalar_return;
10303 	int ret = -ENOMEM;
10304 
10305 	nfuncs = attr->func_info_cnt;
10306 	if (!nfuncs) {
10307 		if (check_abnormal_return(env))
10308 			return -EINVAL;
10309 		return 0;
10310 	}
10311 
10312 	if (nfuncs != env->subprog_cnt) {
10313 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
10314 		return -EINVAL;
10315 	}
10316 
10317 	urec_size = attr->func_info_rec_size;
10318 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
10319 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
10320 	    urec_size % sizeof(u32)) {
10321 		verbose(env, "invalid func info rec size %u\n", urec_size);
10322 		return -EINVAL;
10323 	}
10324 
10325 	prog = env->prog;
10326 	btf = prog->aux->btf;
10327 
10328 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
10329 	min_size = min_t(u32, krec_size, urec_size);
10330 
10331 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
10332 	if (!krecord)
10333 		return -ENOMEM;
10334 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
10335 	if (!info_aux)
10336 		goto err_free;
10337 
10338 	for (i = 0; i < nfuncs; i++) {
10339 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
10340 		if (ret) {
10341 			if (ret == -E2BIG) {
10342 				verbose(env, "nonzero tailing record in func info");
10343 				/* set the size kernel expects so loader can zero
10344 				 * out the rest of the record.
10345 				 */
10346 				if (copy_to_bpfptr_offset(uattr,
10347 							  offsetof(union bpf_attr, func_info_rec_size),
10348 							  &min_size, sizeof(min_size)))
10349 					ret = -EFAULT;
10350 			}
10351 			goto err_free;
10352 		}
10353 
10354 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
10355 			ret = -EFAULT;
10356 			goto err_free;
10357 		}
10358 
10359 		/* check insn_off */
10360 		ret = -EINVAL;
10361 		if (i == 0) {
10362 			if (krecord[i].insn_off) {
10363 				verbose(env,
10364 					"nonzero insn_off %u for the first func info record",
10365 					krecord[i].insn_off);
10366 				goto err_free;
10367 			}
10368 		} else if (krecord[i].insn_off <= prev_offset) {
10369 			verbose(env,
10370 				"same or smaller insn offset (%u) than previous func info record (%u)",
10371 				krecord[i].insn_off, prev_offset);
10372 			goto err_free;
10373 		}
10374 
10375 		if (env->subprog_info[i].start != krecord[i].insn_off) {
10376 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
10377 			goto err_free;
10378 		}
10379 
10380 		/* check type_id */
10381 		type = btf_type_by_id(btf, krecord[i].type_id);
10382 		if (!type || !btf_type_is_func(type)) {
10383 			verbose(env, "invalid type id %d in func info",
10384 				krecord[i].type_id);
10385 			goto err_free;
10386 		}
10387 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
10388 
10389 		func_proto = btf_type_by_id(btf, type->type);
10390 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
10391 			/* btf_func_check() already verified it during BTF load */
10392 			goto err_free;
10393 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
10394 		scalar_return =
10395 			btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
10396 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
10397 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
10398 			goto err_free;
10399 		}
10400 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
10401 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
10402 			goto err_free;
10403 		}
10404 
10405 		prev_offset = krecord[i].insn_off;
10406 		bpfptr_add(&urecord, urec_size);
10407 	}
10408 
10409 	prog->aux->func_info = krecord;
10410 	prog->aux->func_info_cnt = nfuncs;
10411 	prog->aux->func_info_aux = info_aux;
10412 	return 0;
10413 
10414 err_free:
10415 	kvfree(krecord);
10416 	kfree(info_aux);
10417 	return ret;
10418 }
10419 
10420 static void adjust_btf_func(struct bpf_verifier_env *env)
10421 {
10422 	struct bpf_prog_aux *aux = env->prog->aux;
10423 	int i;
10424 
10425 	if (!aux->func_info)
10426 		return;
10427 
10428 	for (i = 0; i < env->subprog_cnt; i++)
10429 		aux->func_info[i].insn_off = env->subprog_info[i].start;
10430 }
10431 
10432 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
10433 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
10434 
10435 static int check_btf_line(struct bpf_verifier_env *env,
10436 			  const union bpf_attr *attr,
10437 			  bpfptr_t uattr)
10438 {
10439 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
10440 	struct bpf_subprog_info *sub;
10441 	struct bpf_line_info *linfo;
10442 	struct bpf_prog *prog;
10443 	const struct btf *btf;
10444 	bpfptr_t ulinfo;
10445 	int err;
10446 
10447 	nr_linfo = attr->line_info_cnt;
10448 	if (!nr_linfo)
10449 		return 0;
10450 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
10451 		return -EINVAL;
10452 
10453 	rec_size = attr->line_info_rec_size;
10454 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
10455 	    rec_size > MAX_LINEINFO_REC_SIZE ||
10456 	    rec_size & (sizeof(u32) - 1))
10457 		return -EINVAL;
10458 
10459 	/* Need to zero it because userspace may
10460 	 * pass in a smaller bpf_line_info object.
10461 	 */
10462 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
10463 			 GFP_KERNEL | __GFP_NOWARN);
10464 	if (!linfo)
10465 		return -ENOMEM;
10466 
10467 	prog = env->prog;
10468 	btf = prog->aux->btf;
10469 
10470 	s = 0;
10471 	sub = env->subprog_info;
10472 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
10473 	expected_size = sizeof(struct bpf_line_info);
10474 	ncopy = min_t(u32, expected_size, rec_size);
10475 	for (i = 0; i < nr_linfo; i++) {
10476 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
10477 		if (err) {
10478 			if (err == -E2BIG) {
10479 				verbose(env, "nonzero tailing record in line_info");
10480 				if (copy_to_bpfptr_offset(uattr,
10481 							  offsetof(union bpf_attr, line_info_rec_size),
10482 							  &expected_size, sizeof(expected_size)))
10483 					err = -EFAULT;
10484 			}
10485 			goto err_free;
10486 		}
10487 
10488 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
10489 			err = -EFAULT;
10490 			goto err_free;
10491 		}
10492 
10493 		/*
10494 		 * Check insn_off to ensure
10495 		 * 1) strictly increasing AND
10496 		 * 2) bounded by prog->len
10497 		 *
10498 		 * The linfo[0].insn_off == 0 check logically falls into
10499 		 * the later "missing bpf_line_info for func..." case
10500 		 * because the first linfo[0].insn_off must also be the
10501 		 * start of the first subprog, and the first subprog must have
10502 		 * subprog_info[0].start == 0.
10503 		 */
10504 		if ((i && linfo[i].insn_off <= prev_offset) ||
10505 		    linfo[i].insn_off >= prog->len) {
10506 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
10507 				i, linfo[i].insn_off, prev_offset,
10508 				prog->len);
10509 			err = -EINVAL;
10510 			goto err_free;
10511 		}
10512 
10513 		if (!prog->insnsi[linfo[i].insn_off].code) {
10514 			verbose(env,
10515 				"Invalid insn code at line_info[%u].insn_off\n",
10516 				i);
10517 			err = -EINVAL;
10518 			goto err_free;
10519 		}
10520 
10521 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
10522 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
10523 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
10524 			err = -EINVAL;
10525 			goto err_free;
10526 		}
10527 
10528 		if (s != env->subprog_cnt) {
10529 			if (linfo[i].insn_off == sub[s].start) {
10530 				sub[s].linfo_idx = i;
10531 				s++;
10532 			} else if (sub[s].start < linfo[i].insn_off) {
10533 				verbose(env, "missing bpf_line_info for func#%u\n", s);
10534 				err = -EINVAL;
10535 				goto err_free;
10536 			}
10537 		}
10538 
10539 		prev_offset = linfo[i].insn_off;
10540 		bpfptr_add(&ulinfo, rec_size);
10541 	}
10542 
10543 	if (s != env->subprog_cnt) {
10544 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
10545 			env->subprog_cnt - s, s);
10546 		err = -EINVAL;
10547 		goto err_free;
10548 	}
10549 
10550 	prog->aux->linfo = linfo;
10551 	prog->aux->nr_linfo = nr_linfo;
10552 
10553 	return 0;
10554 
10555 err_free:
10556 	kvfree(linfo);
10557 	return err;
10558 }
10559 
10560 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
10561 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
10562 
10563 static int check_core_relo(struct bpf_verifier_env *env,
10564 			   const union bpf_attr *attr,
10565 			   bpfptr_t uattr)
10566 {
10567 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
10568 	struct bpf_core_relo core_relo = {};
10569 	struct bpf_prog *prog = env->prog;
10570 	const struct btf *btf = prog->aux->btf;
10571 	struct bpf_core_ctx ctx = {
10572 		.log = &env->log,
10573 		.btf = btf,
10574 	};
10575 	bpfptr_t u_core_relo;
10576 	int err;
10577 
10578 	nr_core_relo = attr->core_relo_cnt;
10579 	if (!nr_core_relo)
10580 		return 0;
10581 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
10582 		return -EINVAL;
10583 
10584 	rec_size = attr->core_relo_rec_size;
10585 	if (rec_size < MIN_CORE_RELO_SIZE ||
10586 	    rec_size > MAX_CORE_RELO_SIZE ||
10587 	    rec_size % sizeof(u32))
10588 		return -EINVAL;
10589 
10590 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
10591 	expected_size = sizeof(struct bpf_core_relo);
10592 	ncopy = min_t(u32, expected_size, rec_size);
10593 
10594 	/* Unlike func_info and line_info, copy and apply each CO-RE
10595 	 * relocation record one at a time.
10596 	 */
10597 	for (i = 0; i < nr_core_relo; i++) {
10598 		/* future proofing when sizeof(bpf_core_relo) changes */
10599 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
10600 		if (err) {
10601 			if (err == -E2BIG) {
10602 				verbose(env, "nonzero tailing record in core_relo");
10603 				if (copy_to_bpfptr_offset(uattr,
10604 							  offsetof(union bpf_attr, core_relo_rec_size),
10605 							  &expected_size, sizeof(expected_size)))
10606 					err = -EFAULT;
10607 			}
10608 			break;
10609 		}
10610 
10611 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
10612 			err = -EFAULT;
10613 			break;
10614 		}
10615 
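		/* core_relo.insn_off is expressed in bytes (unlike func_info and
		 * line_info, which use instruction indices), hence the % 8
		 * alignment test and the / 8 conversion to an instruction index
		 * below.
		 */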
10616 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
10617 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
10618 				i, core_relo.insn_off, prog->len);
10619 			err = -EINVAL;
10620 			break;
10621 		}
10622 
10623 		err = bpf_core_apply(&ctx, &core_relo, i,
10624 				     &prog->insnsi[core_relo.insn_off / 8]);
10625 		if (err)
10626 			break;
10627 		bpfptr_add(&u_core_relo, rec_size);
10628 	}
10629 	return err;
10630 }
10631 
10632 static int check_btf_info(struct bpf_verifier_env *env,
10633 			  const union bpf_attr *attr,
10634 			  bpfptr_t uattr)
10635 {
10636 	struct btf *btf;
10637 	int err;
10638 
10639 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
10640 		if (check_abnormal_return(env))
10641 			return -EINVAL;
10642 		return 0;
10643 	}
10644 
10645 	btf = btf_get_by_fd(attr->prog_btf_fd);
10646 	if (IS_ERR(btf))
10647 		return PTR_ERR(btf);
10648 	if (btf_is_kernel(btf)) {
10649 		btf_put(btf);
10650 		return -EACCES;
10651 	}
10652 	env->prog->aux->btf = btf;
10653 
10654 	err = check_btf_func(env, attr, uattr);
10655 	if (err)
10656 		return err;
10657 
10658 	err = check_btf_line(env, attr, uattr);
10659 	if (err)
10660 		return err;
10661 
10662 	err = check_core_relo(env, attr, uattr);
10663 	if (err)
10664 		return err;
10665 
10666 	return 0;
10667 }
10668 
10669 /* check %cur's range satisfies %old's */
10670 static bool range_within(struct bpf_reg_state *old,
10671 			 struct bpf_reg_state *cur)
10672 {
10673 	return old->umin_value <= cur->umin_value &&
10674 	       old->umax_value >= cur->umax_value &&
10675 	       old->smin_value <= cur->smin_value &&
10676 	       old->smax_value >= cur->smax_value &&
10677 	       old->u32_min_value <= cur->u32_min_value &&
10678 	       old->u32_max_value >= cur->u32_max_value &&
10679 	       old->s32_min_value <= cur->s32_min_value &&
10680 	       old->s32_max_value >= cur->s32_max_value;
10681 }
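/* For instance (hypothetical bounds): an old register proven to be in
 * [0, 100] is satisfied by a current register proven to be in [10, 20],
 * since every value the current register may hold was already accepted when
 * the old state was verified; a current range of [50, 200] would not satisfy
 * it.  The same containment must hold for the signed and 32-bit bounds.
 */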
10682 
10683 /* If in the old state two registers had the same id, then they need to have
10684  * the same id in the new state as well.  But that id could be different from
10685  * the old state, so we need to track the mapping from old to new ids.
10686  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
10687  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
10688  * regs with a different old id could still have new id 9, we don't care about
10689  * that.
10690  * So we look through our idmap to see if this old id has been seen before.  If
10691  * so, we require the new id to match; otherwise, we add the id pair to the map.
10692  */
10693 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
10694 {
10695 	unsigned int i;
10696 
10697 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
10698 		if (!idmap[i].old) {
10699 			/* Reached an empty slot; haven't seen this id before */
10700 			idmap[i].old = old_id;
10701 			idmap[i].cur = cur_id;
10702 			return true;
10703 		}
10704 		if (idmap[i].old == old_id)
10705 			return idmap[i].cur == cur_id;
10706 	}
10707 	/* We ran out of idmap slots, which should be impossible */
10708 	WARN_ON_ONCE(1);
10709 	return false;
10710 }
10711 
10712 static void clean_func_state(struct bpf_verifier_env *env,
10713 			     struct bpf_func_state *st)
10714 {
10715 	enum bpf_reg_liveness live;
10716 	int i, j;
10717 
10718 	for (i = 0; i < BPF_REG_FP; i++) {
10719 		live = st->regs[i].live;
10720 		/* liveness must not touch this register anymore */
10721 		st->regs[i].live |= REG_LIVE_DONE;
10722 		if (!(live & REG_LIVE_READ))
10723 			/* since the register is unused, clear its state
10724 			 * to make further comparison simpler
10725 			 */
10726 			__mark_reg_not_init(env, &st->regs[i]);
10727 	}
10728 
10729 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
10730 		live = st->stack[i].spilled_ptr.live;
10731 		/* liveness must not touch this stack slot anymore */
10732 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
10733 		if (!(live & REG_LIVE_READ)) {
10734 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
10735 			for (j = 0; j < BPF_REG_SIZE; j++)
10736 				st->stack[i].slot_type[j] = STACK_INVALID;
10737 		}
10738 	}
10739 }
10740 
10741 static void clean_verifier_state(struct bpf_verifier_env *env,
10742 				 struct bpf_verifier_state *st)
10743 {
10744 	int i;
10745 
10746 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
10747 		/* all regs in this state in all frames were already marked */
10748 		return;
10749 
10750 	for (i = 0; i <= st->curframe; i++)
10751 		clean_func_state(env, st->frame[i]);
10752 }
10753 
10754 /* the parentage chains form a tree.
10755  * the verifier states are added to state lists at given insn and
10756  * pushed into state stack for future exploration.
10757  * when the verifier reaches bpf_exit insn some of the verifier states
10758  * stored in the state lists have their final liveness state already,
10759  * but a lot of states will get revised from liveness point of view when
10760  * the verifier explores other branches.
10761  * Example:
10762  * 1: r0 = 1
10763  * 2: if r1 == 100 goto pc+1
10764  * 3: r0 = 2
10765  * 4: exit
10766  * when the verifier reaches exit insn the register r0 in the state list of
10767  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
10768  * of insn 2 and goes exploring further. At the insn 4 it will walk the
10769  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
10770  *
10771  * Since the verifier pushes the branch states as it sees them while exploring
10772  * the program the condition of walking the branch instruction for the second
10773  * time means that all states below this branch were already explored and
10774  * their final liveness marks are already propagated.
10775  * Hence when the verifier completes the search of state list in is_state_visited()
10776  * we can call this clean_live_states() function to mark all liveness states
10777  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
10778  * will not be used.
10779  * This function also clears the registers and stack for states that !READ
10780  * to simplify state merging.
10781  *
10782  * An important note here is that walking the same branch instruction in the
10783  * callee doesn't mean that the states are DONE. The verifier has to compare
10784  * the callsites.
10785  */
10786 static void clean_live_states(struct bpf_verifier_env *env, int insn,
10787 			      struct bpf_verifier_state *cur)
10788 {
10789 	struct bpf_verifier_state_list *sl;
10790 	int i;
10791 
10792 	sl = *explored_state(env, insn);
10793 	while (sl) {
10794 		if (sl->state.branches)
10795 			goto next;
10796 		if (sl->state.insn_idx != insn ||
10797 		    sl->state.curframe != cur->curframe)
10798 			goto next;
10799 		for (i = 0; i <= cur->curframe; i++)
10800 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
10801 				goto next;
10802 		clean_verifier_state(env, &sl->state);
10803 next:
10804 		sl = sl->next;
10805 	}
10806 }
10807 
10808 /* Returns true if (rold safe implies rcur safe) */
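/* A worked scalar example (hypothetical values): if the old, already-verified
 * register was a SCALAR_VALUE known to be in [0, 10] and the current one is a
 * SCALAR_VALUE known to be in [2, 5], then everything proven safe for the old
 * register also holds for the current one and pruning may proceed (assuming
 * env->explore_alu_limits is clear; if neither register is marked precise the
 * ranges don't even need to be compared).  The opposite direction, old [2, 5]
 * vs. current [0, 10], is not safe.
 */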
10809 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
10810 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
10811 {
10812 	bool equal;
10813 
10814 	if (!(rold->live & REG_LIVE_READ))
10815 		/* explored state didn't use this */
10816 		return true;
10817 
10818 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
10819 
10820 	if (rold->type == PTR_TO_STACK)
10821 		/* two stack pointers are equal only if they're pointing to
10822 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
10823 		 */
10824 		return equal && rold->frameno == rcur->frameno;
10825 
10826 	if (equal)
10827 		return true;
10828 
10829 	if (rold->type == NOT_INIT)
10830 		/* explored state can't have used this */
10831 		return true;
10832 	if (rcur->type == NOT_INIT)
10833 		return false;
10834 	switch (base_type(rold->type)) {
10835 	case SCALAR_VALUE:
10836 		if (env->explore_alu_limits)
10837 			return false;
10838 		if (rcur->type == SCALAR_VALUE) {
10839 			if (!rold->precise && !rcur->precise)
10840 				return true;
10841 			/* new val must satisfy old val knowledge */
10842 			return range_within(rold, rcur) &&
10843 			       tnum_in(rold->var_off, rcur->var_off);
10844 		} else {
10845 			/* We're trying to use a pointer in place of a scalar.
10846 			 * Even if the scalar was unbounded, this could lead to
10847 			 * pointer leaks because scalars are allowed to leak
10848 			 * while pointers are not. We could make this safe in
10849 			 * special cases if root is calling us, but it's
10850 			 * probably not worth the hassle.
10851 			 */
10852 			return false;
10853 		}
10854 	case PTR_TO_MAP_KEY:
10855 	case PTR_TO_MAP_VALUE:
10856 		/* a PTR_TO_MAP_VALUE could be safe to use as a
10857 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
10858 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
10859 		 * checked, doing so could have affected others with the same
10860 		 * id, and we can't check for that because we lost the id when
10861 		 * we converted to a PTR_TO_MAP_VALUE.
10862 		 */
10863 		if (type_may_be_null(rold->type)) {
10864 			if (!type_may_be_null(rcur->type))
10865 				return false;
10866 			if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
10867 				return false;
10868 			/* Check our ids match any regs they're supposed to */
10869 			return check_ids(rold->id, rcur->id, idmap);
10870 		}
10871 
10872 		/* If the new min/max/var_off satisfy the old ones and
10873 		 * everything else matches, we are OK.
10874 		 * 'id' is not compared, since it's only used for maps with
10875 		 * bpf_spin_lock inside map element and in such cases if
10876 		 * the rest of the prog is valid for one map element then
10877 		 * it's valid for all map elements regardless of the key
10878 		 * used in bpf_map_lookup()
10879 		 */
10880 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
10881 		       range_within(rold, rcur) &&
10882 		       tnum_in(rold->var_off, rcur->var_off);
10883 	case PTR_TO_PACKET_META:
10884 	case PTR_TO_PACKET:
10885 		if (rcur->type != rold->type)
10886 			return false;
10887 		/* We must have at least as much range as the old ptr
10888 		 * did, so that any accesses which were safe before are
10889 		 * still safe.  This is true even if old range < old off,
10890 		 * since someone could have accessed through (ptr - k), or
10891 		 * even done ptr -= k in a register, to get a safe access.
10892 		 */
10893 		if (rold->range > rcur->range)
10894 			return false;
10895 		/* If the offsets don't match, we can't trust our alignment;
10896 		 * nor can we be sure that we won't fall out of range.
10897 		 */
10898 		if (rold->off != rcur->off)
10899 			return false;
10900 		/* id relations must be preserved */
10901 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
10902 			return false;
10903 		/* new val must satisfy old val knowledge */
10904 		return range_within(rold, rcur) &&
10905 		       tnum_in(rold->var_off, rcur->var_off);
10906 	case PTR_TO_CTX:
10907 	case CONST_PTR_TO_MAP:
10908 	case PTR_TO_PACKET_END:
10909 	case PTR_TO_FLOW_KEYS:
10910 	case PTR_TO_SOCKET:
10911 	case PTR_TO_SOCK_COMMON:
10912 	case PTR_TO_TCP_SOCK:
10913 	case PTR_TO_XDP_SOCK:
10914 		/* Only valid matches are exact, which memcmp() above
10915 		 * would have accepted
10916 		 */
10917 	default:
10918 		/* Don't know what's going on, just say it's not safe */
10919 		return false;
10920 	}
10921 
10922 	/* Shouldn't get here; if we do, say it's not safe */
10923 	WARN_ON_ONCE(1);
10924 	return false;
10925 }
10926 
10927 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
10928 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
10929 {
10930 	int i, spi;
10931 
10932 	/* walk slots of the explored stack and ignore any additional
10933 	 * slots in the current stack, since explored(safe) state
10934 	 * didn't use them
10935 	 */
10936 	for (i = 0; i < old->allocated_stack; i++) {
10937 		spi = i / BPF_REG_SIZE;
10938 
10939 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
10940 			i += BPF_REG_SIZE - 1;
10941 			/* explored state didn't use this */
10942 			continue;
10943 		}
10944 
10945 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
10946 			continue;
10947 
10948 		/* explored stack has more populated slots than current stack
10949 		 * and these slots were used
10950 		 */
10951 		if (i >= cur->allocated_stack)
10952 			return false;
10953 
10954 		/* if old state was safe with misc data in the stack
10955 		 * it will be safe with zero-initialized stack.
10956 		 * The opposite is not true
10957 		 */
10958 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
10959 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
10960 			continue;
10961 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
10962 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
10963 			/* Ex: old explored (safe) state has STACK_SPILL in
10964 			 * this stack slot, but current has STACK_MISC ->
10965 			 * these verifier states are not equivalent,
10966 			 * return false to continue verification of this path
10967 			 */
10968 			return false;
10969 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
10970 			continue;
10971 		if (!is_spilled_reg(&old->stack[spi]))
10972 			continue;
10973 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
10974 			     &cur->stack[spi].spilled_ptr, idmap))
10975 			/* when explored and current stack slot are both storing
10976 			 * spilled registers, check that the stored pointer types
10977 			 * are the same as well.
10978 			 * Ex: explored safe path could have stored
10979 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
10980 			 * but current path has stored:
10981 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
10982 			 * such verifier states are not equivalent.
10983 			 * return false to continue verification of this path
10984 			 */
10985 			return false;
10986 	}
10987 	return true;
10988 }
10989 
10990 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
10991 {
10992 	if (old->acquired_refs != cur->acquired_refs)
10993 		return false;
10994 	return !memcmp(old->refs, cur->refs,
10995 		       sizeof(*old->refs) * old->acquired_refs);
10996 }
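/* For example (a sketch, not from the source): a path that still owns a
 * socket reference acquired via bpf_sk_lookup_tcp() must not be pruned
 * against an otherwise-equivalent state that has already released it;
 * refsafe() makes the set of acquired references part of the equivalence
 * check for exactly this reason.
 */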
10997 
10998 /* compare two verifier states
10999  *
11000  * all states stored in state_list are known to be valid, since
11001  * verifier reached 'bpf_exit' instruction through them
11002  *
11003  * this function is called when verifier exploring different branches of
11004  * execution popped from the state stack. If it sees an old state that has
11005  * more strict register state and more strict stack state then this execution
11006  * branch doesn't need to be explored further, since verifier already
11007  * concluded that more strict state leads to valid finish.
11008  *
11009  * Therefore two states are equivalent if register state is more conservative
11010  * and explored stack state is more conservative than the current one.
11011  * Example:
11012  *       explored                   current
11013  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
11014  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
11015  *
11016  * In other words if current stack state (one being explored) has more
11017  * valid slots than old one that already passed validation, it means
11018  * the verifier can stop exploring and conclude that current state is valid too
11019  *
11020  * Similarly with registers. If explored state has register type as invalid
11021  * whereas register type in current state is meaningful, it means that
11022  * the current state will reach 'bpf_exit' instruction safely
11023  */
11024 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
11025 			      struct bpf_func_state *cur)
11026 {
11027 	int i;
11028 
11029 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
11030 	for (i = 0; i < MAX_BPF_REG; i++)
11031 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
11032 			     env->idmap_scratch))
11033 			return false;
11034 
11035 	if (!stacksafe(env, old, cur, env->idmap_scratch))
11036 		return false;
11037 
11038 	if (!refsafe(old, cur))
11039 		return false;
11040 
11041 	return true;
11042 }
11043 
11044 static bool states_equal(struct bpf_verifier_env *env,
11045 			 struct bpf_verifier_state *old,
11046 			 struct bpf_verifier_state *cur)
11047 {
11048 	int i;
11049 
11050 	if (old->curframe != cur->curframe)
11051 		return false;
11052 
11053 	/* Verification state from speculative execution simulation
11054 	 * must never prune a non-speculative execution one.
11055 	 */
11056 	if (old->speculative && !cur->speculative)
11057 		return false;
11058 
11059 	if (old->active_spin_lock != cur->active_spin_lock)
11060 		return false;
11061 
11062 	/* for states to be equal callsites have to be the same
11063 	 * and all frame states need to be equivalent
11064 	 */
11065 	for (i = 0; i <= old->curframe; i++) {
11066 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
11067 			return false;
11068 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
11069 			return false;
11070 	}
11071 	return true;
11072 }
11073 
11074 /* Return 0 if no propagation happened. Return negative error code if error
11075  * happened. Otherwise, return the propagated bit.
11076  */
11077 static int propagate_liveness_reg(struct bpf_verifier_env *env,
11078 				  struct bpf_reg_state *reg,
11079 				  struct bpf_reg_state *parent_reg)
11080 {
11081 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
11082 	u8 flag = reg->live & REG_LIVE_READ;
11083 	int err;
11084 
11085 	/* When we get here, the read flags of PARENT_REG or REG could be any of
11086 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
11087 	 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
11088 	 */
11089 	if (parent_flag == REG_LIVE_READ64 ||
11090 	    /* Or if there is no read flag from REG. */
11091 	    !flag ||
11092 	    /* Or if the read flag from REG is the same as PARENT_REG. */
11093 	    parent_flag == flag)
11094 		return 0;
11095 
11096 	err = mark_reg_read(env, reg, parent_reg, flag);
11097 	if (err)
11098 		return err;
11099 
11100 	return flag;
11101 }
11102 
11103 /* A write screens off any subsequent reads; but write marks come from the
11104  * straight-line code between a state and its parent.  When we arrive at an
11105  * equivalent state (jump target or such) we didn't arrive by the straight-line
11106  * code, so read marks in the state must propagate to the parent regardless
11107  * of the state's write marks. That's what 'parent == state->parent' comparison
11108  * in mark_reg_read() is for.
11109  */
11110 static int propagate_liveness(struct bpf_verifier_env *env,
11111 			      const struct bpf_verifier_state *vstate,
11112 			      struct bpf_verifier_state *vparent)
11113 {
11114 	struct bpf_reg_state *state_reg, *parent_reg;
11115 	struct bpf_func_state *state, *parent;
11116 	int i, frame, err = 0;
11117 
11118 	if (vparent->curframe != vstate->curframe) {
11119 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
11120 		     vparent->curframe, vstate->curframe);
11121 		return -EFAULT;
11122 	}
11123 	/* Propagate read liveness of registers... */
11124 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
11125 	for (frame = 0; frame <= vstate->curframe; frame++) {
11126 		parent = vparent->frame[frame];
11127 		state = vstate->frame[frame];
11128 		parent_reg = parent->regs;
11129 		state_reg = state->regs;
11130 		/* We don't need to worry about FP liveness, it's read-only */
11131 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
11132 			err = propagate_liveness_reg(env, &state_reg[i],
11133 						     &parent_reg[i]);
11134 			if (err < 0)
11135 				return err;
11136 			if (err == REG_LIVE_READ64)
11137 				mark_insn_zext(env, &parent_reg[i]);
11138 		}
11139 
11140 		/* Propagate stack slots. */
11141 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
11142 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
11143 			parent_reg = &parent->stack[i].spilled_ptr;
11144 			state_reg = &state->stack[i].spilled_ptr;
11145 			err = propagate_liveness_reg(env, state_reg,
11146 						     parent_reg);
11147 			if (err < 0)
11148 				return err;
11149 		}
11150 	}
11151 	return 0;
11152 }
11153 
11154 /* find precise scalars in the previous equivalent state and
11155  * propagate them into the current state
11156  */
11157 static int propagate_precision(struct bpf_verifier_env *env,
11158 			       const struct bpf_verifier_state *old)
11159 {
11160 	struct bpf_reg_state *state_reg;
11161 	struct bpf_func_state *state;
11162 	int i, err = 0;
11163 
11164 	state = old->frame[old->curframe];
11165 	state_reg = state->regs;
11166 	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
11167 		if (state_reg->type != SCALAR_VALUE ||
11168 		    !state_reg->precise)
11169 			continue;
11170 		if (env->log.level & BPF_LOG_LEVEL2)
11171 			verbose(env, "propagating r%d\n", i);
11172 		err = mark_chain_precision(env, i);
11173 		if (err < 0)
11174 			return err;
11175 	}
11176 
11177 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
11178 		if (!is_spilled_reg(&state->stack[i]))
11179 			continue;
11180 		state_reg = &state->stack[i].spilled_ptr;
11181 		if (state_reg->type != SCALAR_VALUE ||
11182 		    !state_reg->precise)
11183 			continue;
11184 		if (env->log.level & BPF_LOG_LEVEL2)
11185 			verbose(env, "propagating fp%d\n",
11186 				(-i - 1) * BPF_REG_SIZE);
11187 		err = mark_chain_precision_stack(env, i);
11188 		if (err < 0)
11189 			return err;
11190 	}
11191 	return 0;
11192 }
11193 
11194 static bool states_maybe_looping(struct bpf_verifier_state *old,
11195 				 struct bpf_verifier_state *cur)
11196 {
11197 	struct bpf_func_state *fold, *fcur;
11198 	int i, fr = cur->curframe;
11199 
11200 	if (old->curframe != fr)
11201 		return false;
11202 
11203 	fold = old->frame[fr];
11204 	fcur = cur->frame[fr];
11205 	for (i = 0; i < MAX_BPF_REG; i++)
11206 		if (memcmp(&fold->regs[i], &fcur->regs[i],
11207 			   offsetof(struct bpf_reg_state, parent)))
11208 			return false;
11209 	return true;
11210 }
11211 
11212 
11213 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
11214 {
11215 	struct bpf_verifier_state_list *new_sl;
11216 	struct bpf_verifier_state_list *sl, **pprev;
11217 	struct bpf_verifier_state *cur = env->cur_state, *new;
11218 	int i, j, err, states_cnt = 0;
11219 	bool add_new_state = env->test_state_freq ? true : false;
11220 
11221 	cur->last_insn_idx = env->prev_insn_idx;
11222 	if (!env->insn_aux_data[insn_idx].prune_point)
11223 		/* this 'insn_idx' instruction wasn't marked, so we will not
11224 		 * be doing state search here
11225 		 */
11226 		return 0;
11227 
11228 	/* bpf progs typically have a pruning point every 4 instructions
11229 	 * http://vger.kernel.org/bpfconf2019.html#session-1
11230 	 * Do not add new state for future pruning if the verifier hasn't seen
11231 	 * at least 2 jumps and at least 8 instructions.
11232 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
11233 	 * In tests that amounts to up to a 50% reduction in total verifier
11234 	 * memory consumption and a 20% verifier time speedup.
11235 	 */
11236 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
11237 	    env->insn_processed - env->prev_insn_processed >= 8)
11238 		add_new_state = true;
11239 
11240 	pprev = explored_state(env, insn_idx);
11241 	sl = *pprev;
11242 
11243 	clean_live_states(env, insn_idx, cur);
11244 
11245 	while (sl) {
11246 		states_cnt++;
11247 		if (sl->state.insn_idx != insn_idx)
11248 			goto next;
11249 
11250 		if (sl->state.branches) {
11251 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
11252 
11253 			if (frame->in_async_callback_fn &&
11254 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
11255 				/* Different async_entry_cnt means that the verifier is
11256 				 * processing another entry into async callback.
11257 				 * Seeing the same state is not an indication of infinite
11258 				 * loop or infinite recursion.
11259 				 * But finding the same state doesn't mean that it's safe
11260 				 * to stop processing the current state. The previous state
11261 				 * hasn't yet reached bpf_exit, since state.branches > 0.
11262 				 * Checking in_async_callback_fn alone is not enough either,
11263 				 * since the verifier still needs to catch infinite loops
11264 				 * inside async callbacks.
11265 				 */
11266 			} else if (states_maybe_looping(&sl->state, cur) &&
11267 				   states_equal(env, &sl->state, cur)) {
11268 				verbose_linfo(env, insn_idx, "; ");
11269 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
11270 				return -EINVAL;
11271 			}
11272 			/* if the verifier is processing a loop, avoid adding new state
11273 			 * too often, since different loop iterations have distinct
11274 			 * states and may not help future pruning.
11275 			 * This threshold shouldn't be too low to make sure that
11276 			 * a loop with large bound will be rejected quickly.
11277 			 * The most abusive loop will be:
11278 			 * r1 += 1
11279 			 * if r1 < 1000000 goto pc-2
11280 			 * 1M insn_processed limit / 100 == 10k peak states.
11281 			 * This threshold shouldn't be too high either, since states
11282 			 * at the end of the loop are likely to be useful in pruning.
11283 			 */
11284 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
11285 			    env->insn_processed - env->prev_insn_processed < 100)
11286 				add_new_state = false;
11287 			goto miss;
11288 		}
11289 		if (states_equal(env, &sl->state, cur)) {
11290 			sl->hit_cnt++;
11291 			/* reached equivalent register/stack state,
11292 			 * prune the search.
11293 			 * Registers read by the continuation are read by us.
11294 			 * If we have any write marks in env->cur_state, they
11295 			 * will prevent corresponding reads in the continuation
11296 			 * from reaching our parent (an explored_state).  Our
11297 			 * own state will get the read marks recorded, but
11298 			 * they'll be immediately forgotten as we're pruning
11299 			 * this state and will pop a new one.
11300 			 */
11301 			err = propagate_liveness(env, &sl->state, cur);
11302 
11303 			/* if previous state reached the exit with precision and
11304 			 * current state is equivalent to it (except precision marks),
11305 			 * the precision needs to be propagated back in
11306 			 * the current state.
11307 			 */
11308 			err = err ? : push_jmp_history(env, cur);
11309 			err = err ? : propagate_precision(env, &sl->state);
11310 			if (err)
11311 				return err;
11312 			return 1;
11313 		}
11314 miss:
11315 		/* when a new state is not going to be added, do not increase the miss count.
11316 		 * Otherwise several loop iterations will remove the state
11317 		 * recorded earlier. The goal of these heuristics is to have
11318 		 * states from some iterations of the loop (some in the beginning
11319 		 * and some at the end) to help pruning.
11320 		 */
11321 		if (add_new_state)
11322 			sl->miss_cnt++;
11323 		/* heuristic to determine whether this state is beneficial
11324 		 * to keep checking from state equivalence point of view.
11325 		 * Higher numbers increase max_states_per_insn and verification time,
11326 		 * but do not meaningfully decrease insn_processed.
11327 		 */
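		/* e.g. (hypothetical counts) a state walked past ten times without
		 * a single equivalence hit has miss_cnt 10 > 3 * 0 + 3 and is
		 * dropped.
		 */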
11328 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
11329 			/* the state is unlikely to be useful. Remove it to
11330 			 * speed up verification
11331 			 */
11332 			*pprev = sl->next;
11333 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
11334 				u32 br = sl->state.branches;
11335 
11336 				WARN_ONCE(br,
11337 					  "BUG live_done but branches_to_explore %d\n",
11338 					  br);
11339 				free_verifier_state(&sl->state, false);
11340 				kfree(sl);
11341 				env->peak_states--;
11342 			} else {
11343 				/* cannot free this state, since parentage chain may
11344 				 * walk it later. Add it to the free_list instead to
11345 				 * be freed at the end of verification
11346 				 */
11347 				sl->next = env->free_list;
11348 				env->free_list = sl;
11349 			}
11350 			sl = *pprev;
11351 			continue;
11352 		}
11353 next:
11354 		pprev = &sl->next;
11355 		sl = *pprev;
11356 	}
11357 
11358 	if (env->max_states_per_insn < states_cnt)
11359 		env->max_states_per_insn = states_cnt;
11360 
11361 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
11362 		return push_jmp_history(env, cur);
11363 
11364 	if (!add_new_state)
11365 		return push_jmp_history(env, cur);
11366 
11367 	/* There were no equivalent states, remember the current one.
11368 	 * Technically the current state is not proven to be safe yet,
11369 	 * but it will either reach the outermost bpf_exit (which means it's safe)
11370 	 * or it will be rejected. When there are no loops the verifier won't be
11371 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
11372 	 * again on the way to bpf_exit.
11373 	 * When looping the sl->state.branches will be > 0 and this state
11374 	 * will not be considered for equivalence until branches == 0.
11375 	 */
11376 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
11377 	if (!new_sl)
11378 		return -ENOMEM;
11379 	env->total_states++;
11380 	env->peak_states++;
11381 	env->prev_jmps_processed = env->jmps_processed;
11382 	env->prev_insn_processed = env->insn_processed;
11383 
11384 	/* add new state to the head of linked list */
11385 	new = &new_sl->state;
11386 	err = copy_verifier_state(new, cur);
11387 	if (err) {
11388 		free_verifier_state(new, false);
11389 		kfree(new_sl);
11390 		return err;
11391 	}
11392 	new->insn_idx = insn_idx;
11393 	WARN_ONCE(new->branches != 1,
11394 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
11395 
11396 	cur->parent = new;
11397 	cur->first_insn_idx = insn_idx;
11398 	clear_jmp_history(cur);
11399 	new_sl->next = *explored_state(env, insn_idx);
11400 	*explored_state(env, insn_idx) = new_sl;
11401 	/* connect new state to parentage chain. Current frame needs all
11402 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
11403 	 * to the stack implicitly by JITs) so in callers' frames connect just
11404 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
11405 	 * the state of the call instruction (with WRITTEN set), and r0 comes
11406 	 * from callee with its full parentage chain, anyway.
11407 	 */
11408 	/* clear write marks in current state: the writes we did are not writes
11409 	 * our child did, so they don't screen off its reads from us.
11410 	 * (There are no read marks in current state, because reads always mark
11411 	 * their parent and current state never has children yet.  Only
11412 	 * explored_states can get read marks.)
11413 	 */
11414 	for (j = 0; j <= cur->curframe; j++) {
11415 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
11416 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
11417 		for (i = 0; i < BPF_REG_FP; i++)
11418 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
11419 	}
11420 
11421 	/* all stack frames are accessible from callee, clear them all */
11422 	for (j = 0; j <= cur->curframe; j++) {
11423 		struct bpf_func_state *frame = cur->frame[j];
11424 		struct bpf_func_state *newframe = new->frame[j];
11425 
11426 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
11427 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
11428 			frame->stack[i].spilled_ptr.parent =
11429 						&newframe->stack[i].spilled_ptr;
11430 		}
11431 	}
11432 	return 0;
11433 }
11434 
11435 /* Return true if it's OK to have the same insn return a different type. */
11436 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
11437 {
11438 	switch (base_type(type)) {
11439 	case PTR_TO_CTX:
11440 	case PTR_TO_SOCKET:
11441 	case PTR_TO_SOCK_COMMON:
11442 	case PTR_TO_TCP_SOCK:
11443 	case PTR_TO_XDP_SOCK:
11444 	case PTR_TO_BTF_ID:
11445 		return false;
11446 	default:
11447 		return true;
11448 	}
11449 }
11450 
11451 /* If an instruction was previously used with particular pointer types, then we
11452  * need to be careful to avoid cases such as the below, where it may be ok
11453  * for one branch accessing the pointer, but not ok for the other branch:
11454  *
11455  * R1 = sock_ptr
11456  * goto X;
11457  * ...
11458  * R1 = some_other_valid_ptr;
11459  * goto X;
11460  * ...
11461  * R2 = *(u32 *)(R1 + 0);
11462  */
11463 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
11464 {
11465 	return src != prev && (!reg_type_mismatch_ok(src) ||
11466 			       !reg_type_mismatch_ok(prev));
11467 }
11468 
11469 static int do_check(struct bpf_verifier_env *env)
11470 {
11471 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
11472 	struct bpf_verifier_state *state = env->cur_state;
11473 	struct bpf_insn *insns = env->prog->insnsi;
11474 	struct bpf_reg_state *regs;
11475 	int insn_cnt = env->prog->len;
11476 	bool do_print_state = false;
11477 	int prev_insn_idx = -1;
11478 
11479 	for (;;) {
11480 		struct bpf_insn *insn;
11481 		u8 class;
11482 		int err;
11483 
11484 		env->prev_insn_idx = prev_insn_idx;
11485 		if (env->insn_idx >= insn_cnt) {
11486 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
11487 				env->insn_idx, insn_cnt);
11488 			return -EFAULT;
11489 		}
11490 
11491 		insn = &insns[env->insn_idx];
11492 		class = BPF_CLASS(insn->code);
11493 
11494 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
11495 			verbose(env,
11496 				"BPF program is too large. Processed %d insn\n",
11497 				env->insn_processed);
11498 			return -E2BIG;
11499 		}
11500 
11501 		err = is_state_visited(env, env->insn_idx);
11502 		if (err < 0)
11503 			return err;
11504 		if (err == 1) {
11505 			/* found equivalent state, can prune the search */
11506 			if (env->log.level & BPF_LOG_LEVEL) {
11507 				if (do_print_state)
11508 					verbose(env, "\nfrom %d to %d%s: safe\n",
11509 						env->prev_insn_idx, env->insn_idx,
11510 						env->cur_state->speculative ?
11511 						" (speculative execution)" : "");
11512 				else
11513 					verbose(env, "%d: safe\n", env->insn_idx);
11514 			}
11515 			goto process_bpf_exit;
11516 		}
11517 
11518 		if (signal_pending(current))
11519 			return -EAGAIN;
11520 
11521 		if (need_resched())
11522 			cond_resched();
11523 
11524 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
11525 			verbose(env, "\nfrom %d to %d%s:",
11526 				env->prev_insn_idx, env->insn_idx,
11527 				env->cur_state->speculative ?
11528 				" (speculative execution)" : "");
11529 			print_verifier_state(env, state->frame[state->curframe], true);
11530 			do_print_state = false;
11531 		}
11532 
11533 		if (env->log.level & BPF_LOG_LEVEL) {
11534 			const struct bpf_insn_cbs cbs = {
11535 				.cb_call	= disasm_kfunc_name,
11536 				.cb_print	= verbose,
11537 				.private_data	= env,
11538 			};
11539 
11540 			if (verifier_state_scratched(env))
11541 				print_insn_state(env, state->frame[state->curframe]);
11542 
11543 			verbose_linfo(env, env->insn_idx, "; ");
11544 			env->prev_log_len = env->log.len_used;
11545 			verbose(env, "%d: ", env->insn_idx);
11546 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
11547 			env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
11548 			env->prev_log_len = env->log.len_used;
11549 		}
11550 
11551 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
11552 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
11553 							   env->prev_insn_idx);
11554 			if (err)
11555 				return err;
11556 		}
11557 
11558 		regs = cur_regs(env);
11559 		sanitize_mark_insn_seen(env);
11560 		prev_insn_idx = env->insn_idx;
11561 
11562 		if (class == BPF_ALU || class == BPF_ALU64) {
11563 			err = check_alu_op(env, insn);
11564 			if (err)
11565 				return err;
11566 
11567 		} else if (class == BPF_LDX) {
11568 			enum bpf_reg_type *prev_src_type, src_reg_type;
11569 
11570 			/* check for reserved fields is already done */
11571 
11572 			/* check src operand */
11573 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11574 			if (err)
11575 				return err;
11576 
11577 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
11578 			if (err)
11579 				return err;
11580 
11581 			src_reg_type = regs[insn->src_reg].type;
11582 
11583 			/* check that memory (src_reg + off) is readable,
11584 			 * the state of dst_reg will be updated by this func
11585 			 */
11586 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
11587 					       insn->off, BPF_SIZE(insn->code),
11588 					       BPF_READ, insn->dst_reg, false);
11589 			if (err)
11590 				return err;
11591 
11592 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
11593 
11594 			if (*prev_src_type == NOT_INIT) {
11595 				/* saw a valid insn
11596 				 * dst_reg = *(u32 *)(src_reg + off)
11597 				 * save type to validate intersecting paths
11598 				 */
11599 				*prev_src_type = src_reg_type;
11600 
11601 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
11602 				/* An abusive program is trying to use the same insn
11603 				 * dst_reg = *(u32*) (src_reg + off)
11604 				 * with different pointer types:
11605 				 * src_reg == ctx in one branch and
11606 				 * src_reg == stack|map in some other branch.
11607 				 * Reject it.
11608 				 */
11609 				verbose(env, "same insn cannot be used with different pointers\n");
11610 				return -EINVAL;
11611 			}
11612 
11613 		} else if (class == BPF_STX) {
11614 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
11615 
11616 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
11617 				err = check_atomic(env, env->insn_idx, insn);
11618 				if (err)
11619 					return err;
11620 				env->insn_idx++;
11621 				continue;
11622 			}
11623 
11624 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
11625 				verbose(env, "BPF_STX uses reserved fields\n");
11626 				return -EINVAL;
11627 			}
11628 
11629 			/* check src1 operand */
11630 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
11631 			if (err)
11632 				return err;
11633 			/* check src2 operand */
11634 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11635 			if (err)
11636 				return err;
11637 
11638 			dst_reg_type = regs[insn->dst_reg].type;
11639 
11640 			/* check that memory (dst_reg + off) is writeable */
11641 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
11642 					       insn->off, BPF_SIZE(insn->code),
11643 					       BPF_WRITE, insn->src_reg, false);
11644 			if (err)
11645 				return err;
11646 
11647 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
11648 
11649 			if (*prev_dst_type == NOT_INIT) {
11650 				*prev_dst_type = dst_reg_type;
11651 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
11652 				verbose(env, "same insn cannot be used with different pointers\n");
11653 				return -EINVAL;
11654 			}
11655 
11656 		} else if (class == BPF_ST) {
11657 			if (BPF_MODE(insn->code) != BPF_MEM ||
11658 			    insn->src_reg != BPF_REG_0) {
11659 				verbose(env, "BPF_ST uses reserved fields\n");
11660 				return -EINVAL;
11661 			}
11662 			/* check src operand */
11663 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
11664 			if (err)
11665 				return err;
11666 
11667 			if (is_ctx_reg(env, insn->dst_reg)) {
11668 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
11669 					insn->dst_reg,
11670 					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
11671 				return -EACCES;
11672 			}
11673 
11674 			/* check that memory (dst_reg + off) is writeable */
11675 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
11676 					       insn->off, BPF_SIZE(insn->code),
11677 					       BPF_WRITE, -1, false);
11678 			if (err)
11679 				return err;
11680 
11681 		} else if (class == BPF_JMP || class == BPF_JMP32) {
11682 			u8 opcode = BPF_OP(insn->code);
11683 
11684 			env->jmps_processed++;
11685 			if (opcode == BPF_CALL) {
11686 				if (BPF_SRC(insn->code) != BPF_K ||
11687 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
11688 				     && insn->off != 0) ||
11689 				    (insn->src_reg != BPF_REG_0 &&
11690 				     insn->src_reg != BPF_PSEUDO_CALL &&
11691 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
11692 				    insn->dst_reg != BPF_REG_0 ||
11693 				    class == BPF_JMP32) {
11694 					verbose(env, "BPF_CALL uses reserved fields\n");
11695 					return -EINVAL;
11696 				}
11697 
11698 				if (env->cur_state->active_spin_lock &&
11699 				    (insn->src_reg == BPF_PSEUDO_CALL ||
11700 				     insn->imm != BPF_FUNC_spin_unlock)) {
11701 					verbose(env, "function calls are not allowed while holding a lock\n");
11702 					return -EINVAL;
11703 				}
11704 				if (insn->src_reg == BPF_PSEUDO_CALL)
11705 					err = check_func_call(env, insn, &env->insn_idx);
11706 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
11707 					err = check_kfunc_call(env, insn, &env->insn_idx);
11708 				else
11709 					err = check_helper_call(env, insn, &env->insn_idx);
11710 				if (err)
11711 					return err;
11712 			} else if (opcode == BPF_JA) {
11713 				if (BPF_SRC(insn->code) != BPF_K ||
11714 				    insn->imm != 0 ||
11715 				    insn->src_reg != BPF_REG_0 ||
11716 				    insn->dst_reg != BPF_REG_0 ||
11717 				    class == BPF_JMP32) {
11718 					verbose(env, "BPF_JA uses reserved fields\n");
11719 					return -EINVAL;
11720 				}
11721 
11722 				env->insn_idx += insn->off + 1;
11723 				continue;
11724 
11725 			} else if (opcode == BPF_EXIT) {
11726 				if (BPF_SRC(insn->code) != BPF_K ||
11727 				    insn->imm != 0 ||
11728 				    insn->src_reg != BPF_REG_0 ||
11729 				    insn->dst_reg != BPF_REG_0 ||
11730 				    class == BPF_JMP32) {
11731 					verbose(env, "BPF_EXIT uses reserved fields\n");
11732 					return -EINVAL;
11733 				}
11734 
11735 				if (env->cur_state->active_spin_lock) {
11736 					verbose(env, "bpf_spin_unlock is missing\n");
11737 					return -EINVAL;
11738 				}
11739 
11740 				if (state->curframe) {
11741 					/* exit from nested function */
11742 					err = prepare_func_exit(env, &env->insn_idx);
11743 					if (err)
11744 						return err;
11745 					do_print_state = true;
11746 					continue;
11747 				}
11748 
11749 				err = check_reference_leak(env);
11750 				if (err)
11751 					return err;
11752 
11753 				err = check_return_code(env);
11754 				if (err)
11755 					return err;
11756 process_bpf_exit:
11757 				mark_verifier_state_scratched(env);
11758 				update_branch_counts(env, env->cur_state);
11759 				err = pop_stack(env, &prev_insn_idx,
11760 						&env->insn_idx, pop_log);
11761 				if (err < 0) {
11762 					if (err != -ENOENT)
11763 						return err;
11764 					break;
11765 				} else {
11766 					do_print_state = true;
11767 					continue;
11768 				}
11769 			} else {
11770 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
11771 				if (err)
11772 					return err;
11773 			}
11774 		} else if (class == BPF_LD) {
11775 			u8 mode = BPF_MODE(insn->code);
11776 
11777 			if (mode == BPF_ABS || mode == BPF_IND) {
11778 				err = check_ld_abs(env, insn);
11779 				if (err)
11780 					return err;
11781 
11782 			} else if (mode == BPF_IMM) {
11783 				err = check_ld_imm(env, insn);
11784 				if (err)
11785 					return err;
11786 
11787 				env->insn_idx++;
11788 				sanitize_mark_insn_seen(env);
11789 			} else {
11790 				verbose(env, "invalid BPF_LD mode\n");
11791 				return -EINVAL;
11792 			}
11793 		} else {
11794 			verbose(env, "unknown insn class %d\n", class);
11795 			return -EINVAL;
11796 		}
11797 
11798 		env->insn_idx++;
11799 	}
11800 
11801 	return 0;
11802 }
11803 
11804 static int find_btf_percpu_datasec(struct btf *btf)
11805 {
11806 	const struct btf_type *t;
11807 	const char *tname;
11808 	int i, n;
11809 
11810 	/*
11811 	 * Both vmlinux and module each have their own ".data..percpu"
11812 	 * DATASECs in BTF. So in the module's case, we need to skip the vmlinux
11813 	 * BTF types and look only at the module's own BTF types.
11814 	 */
11815 	n = btf_nr_types(btf);
11816 	if (btf_is_module(btf))
11817 		i = btf_nr_types(btf_vmlinux);
11818 	else
11819 		i = 1;
11820 
11821 	for(; i < n; i++) {
11822 		t = btf_type_by_id(btf, i);
11823 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
11824 			continue;
11825 
11826 		tname = btf_name_by_offset(btf, t->name_off);
11827 		if (!strcmp(tname, ".data..percpu"))
11828 			return i;
11829 	}
11830 
11831 	return -ENOENT;
11832 }
11833 
11834 /* replace pseudo btf_id with kernel symbol address */
11835 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
11836 			       struct bpf_insn *insn,
11837 			       struct bpf_insn_aux_data *aux)
11838 {
11839 	const struct btf_var_secinfo *vsi;
11840 	const struct btf_type *datasec;
11841 	struct btf_mod_pair *btf_mod;
11842 	const struct btf_type *t;
11843 	const char *sym_name;
11844 	bool percpu = false;
11845 	u32 type, id = insn->imm;
11846 	struct btf *btf;
11847 	s32 datasec_id;
11848 	u64 addr;
11849 	int i, btf_fd, err;
11850 
11851 	btf_fd = insn[1].imm;
11852 	if (btf_fd) {
11853 		btf = btf_get_by_fd(btf_fd);
11854 		if (IS_ERR(btf)) {
11855 			verbose(env, "invalid module BTF object FD specified.\n");
11856 			return -EINVAL;
11857 		}
11858 	} else {
11859 		if (!btf_vmlinux) {
11860 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
11861 			return -EINVAL;
11862 		}
11863 		btf = btf_vmlinux;
11864 		btf_get(btf);
11865 	}
11866 
11867 	t = btf_type_by_id(btf, id);
11868 	if (!t) {
11869 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
11870 		err = -ENOENT;
11871 		goto err_put;
11872 	}
11873 
11874 	if (!btf_type_is_var(t)) {
11875 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
11876 		err = -EINVAL;
11877 		goto err_put;
11878 	}
11879 
11880 	sym_name = btf_name_by_offset(btf, t->name_off);
11881 	addr = kallsyms_lookup_name(sym_name);
11882 	if (!addr) {
11883 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
11884 			sym_name);
11885 		err = -ENOENT;
11886 		goto err_put;
11887 	}
11888 
11889 	datasec_id = find_btf_percpu_datasec(btf);
11890 	if (datasec_id > 0) {
11891 		datasec = btf_type_by_id(btf, datasec_id);
11892 		for_each_vsi(i, datasec, vsi) {
11893 			if (vsi->type == id) {
11894 				percpu = true;
11895 				break;
11896 			}
11897 		}
11898 	}
11899 
11900 	insn[0].imm = (u32)addr;
11901 	insn[1].imm = addr >> 32;
11902 
11903 	type = t->type;
11904 	t = btf_type_skip_modifiers(btf, type, NULL);
11905 	if (percpu) {
11906 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
11907 		aux->btf_var.btf = btf;
11908 		aux->btf_var.btf_id = type;
11909 	} else if (!btf_type_is_struct(t)) {
11910 		const struct btf_type *ret;
11911 		const char *tname;
11912 		u32 tsize;
11913 
11914 		/* resolve the type size of ksym. */
11915 		ret = btf_resolve_size(btf, t, &tsize);
11916 		if (IS_ERR(ret)) {
11917 			tname = btf_name_by_offset(btf, t->name_off);
11918 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
11919 				tname, PTR_ERR(ret));
11920 			err = -EINVAL;
11921 			goto err_put;
11922 		}
11923 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
11924 		aux->btf_var.mem_size = tsize;
11925 	} else {
11926 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
11927 		aux->btf_var.btf = btf;
11928 		aux->btf_var.btf_id = type;
11929 	}
11930 
11931 	/* check whether we recorded this BTF (and maybe module) already */
11932 	for (i = 0; i < env->used_btf_cnt; i++) {
11933 		if (env->used_btfs[i].btf == btf) {
11934 			btf_put(btf);
11935 			return 0;
11936 		}
11937 	}
11938 
11939 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
11940 		err = -E2BIG;
11941 		goto err_put;
11942 	}
11943 
11944 	btf_mod = &env->used_btfs[env->used_btf_cnt];
11945 	btf_mod->btf = btf;
11946 	btf_mod->module = NULL;
11947 
11948 	/* if we reference variables from a kernel module, bump its refcount */
11949 	if (btf_is_module(btf)) {
11950 		btf_mod->module = btf_try_get_module(btf);
11951 		if (!btf_mod->module) {
11952 			err = -ENXIO;
11953 			goto err_put;
11954 		}
11955 	}
11956 
11957 	env->used_btf_cnt++;
11958 
11959 	return 0;
11960 err_put:
11961 	btf_put(btf);
11962 	return err;
11963 }
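
/* A minimal sketch of the ldimm64 form handled above (the symbol name is
 * just an example of a per-cpu kernel variable):
 *
 *   insn[0]: code = BPF_LD | BPF_DW | BPF_IMM, src_reg = BPF_PSEUDO_BTF_ID,
 *            imm = btf_id of the VAR "bpf_prog_active"
 *   insn[1]: imm = 0 for vmlinux BTF, or a module BTF object FD
 *
 * After check_pseudo_btf_id() the pair simply carries the 64-bit address
 * found via kallsyms_lookup_name(): insn[0].imm holds the lower 32 bits,
 * insn[1].imm the upper 32 bits, and aux->btf_var records the resulting
 * register type (e.g. PTR_TO_BTF_ID | MEM_PERCPU for a per-cpu variable).
 */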
11964 
11965 static int check_map_prealloc(struct bpf_map *map)
11966 {
11967 	return (map->map_type != BPF_MAP_TYPE_HASH &&
11968 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
11969 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
11970 		!(map->map_flags & BPF_F_NO_PREALLOC);
11971 }
11972 
11973 static bool is_tracing_prog_type(enum bpf_prog_type type)
11974 {
11975 	switch (type) {
11976 	case BPF_PROG_TYPE_KPROBE:
11977 	case BPF_PROG_TYPE_TRACEPOINT:
11978 	case BPF_PROG_TYPE_PERF_EVENT:
11979 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
11980 		return true;
11981 	default:
11982 		return false;
11983 	}
11984 }
11985 
11986 static bool is_preallocated_map(struct bpf_map *map)
11987 {
11988 	if (!check_map_prealloc(map))
11989 		return false;
11990 	if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
11991 		return false;
11992 	return true;
11993 }
11994 
11995 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
11996 					struct bpf_map *map,
11997 					struct bpf_prog *prog)
11998 
11999 {
12000 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
12001 	/*
12002 	 * Validate that trace type programs use preallocated hash maps.
12003 	 *
12004 	 * For programs attached to PERF events this is mandatory as the
12005 	 * perf NMI can hit any arbitrary code sequence.
12006 	 *
12007 	 * All other trace types using run-time allocated (non-preallocated)
12008 	 * hash maps are unsafe as well, because a tracepoint or kprobe can hit
12009 	 * inside locked regions of the memory allocator, or at a place where a
12010 	 * recursion into the memory allocator would see inconsistent state.
12011 	 *
12012 	 * On RT enabled kernels run-time allocation of all trace type
12013 	 * programs is strictly prohibited due to lock type constraints. On
12014 	 * !RT kernels it is allowed for backwards compatibility reasons for
12015 	 * now, but warnings are emitted so developers are made aware of
12016 	 * the unsafety and can fix their programs before this is enforced.
12017 	 */
12018 	if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
12019 		if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
12020 			verbose(env, "perf_event programs can only use preallocated hash map\n");
12021 			return -EINVAL;
12022 		}
12023 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
12024 			verbose(env, "trace type programs can only use preallocated hash map\n");
12025 			return -EINVAL;
12026 		}
12027 		WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
12028 		verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
12029 	}
12030 
12031 	if (map_value_has_spin_lock(map)) {
12032 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
12033 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
12034 			return -EINVAL;
12035 		}
12036 
12037 		if (is_tracing_prog_type(prog_type)) {
12038 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
12039 			return -EINVAL;
12040 		}
12041 
12042 		if (prog->aux->sleepable) {
12043 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
12044 			return -EINVAL;
12045 		}
12046 	}
12047 
12048 	if (map_value_has_timer(map)) {
12049 		if (is_tracing_prog_type(prog_type)) {
12050 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
12051 			return -EINVAL;
12052 		}
12053 	}
12054 
12055 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
12056 	    !bpf_offload_prog_map_match(prog, map)) {
12057 		verbose(env, "offload device mismatch between prog and map\n");
12058 		return -EINVAL;
12059 	}
12060 
12061 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
12062 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
12063 		return -EINVAL;
12064 	}
12065 
12066 	if (prog->aux->sleepable)
12067 		switch (map->map_type) {
12068 		case BPF_MAP_TYPE_HASH:
12069 		case BPF_MAP_TYPE_LRU_HASH:
12070 		case BPF_MAP_TYPE_ARRAY:
12071 		case BPF_MAP_TYPE_PERCPU_HASH:
12072 		case BPF_MAP_TYPE_PERCPU_ARRAY:
12073 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
12074 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
12075 		case BPF_MAP_TYPE_HASH_OF_MAPS:
12076 			if (!is_preallocated_map(map)) {
12077 				verbose(env,
12078 					"Sleepable programs can only use preallocated maps\n");
12079 				return -EINVAL;
12080 			}
12081 			break;
12082 		case BPF_MAP_TYPE_RINGBUF:
12083 		case BPF_MAP_TYPE_INODE_STORAGE:
12084 		case BPF_MAP_TYPE_SK_STORAGE:
12085 		case BPF_MAP_TYPE_TASK_STORAGE:
12086 			break;
12087 		default:
12088 			verbose(env,
12089 				"Sleepable programs can only use array, hash, and ringbuf maps\n");
12090 			return -EINVAL;
12091 		}
12092 
12093 	return 0;
12094 }
12095 
12096 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
12097 {
12098 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
12099 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
12100 }
12101 
12102 /* find and rewrite pseudo imm in ld_imm64 instructions:
12103  *
12104  * 1. if it accesses a map FD, replace it with the actual map pointer.
12105  * 2. if it accesses the btf_id of a VAR, replace it with a pointer to the var.
12106  *
12107  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
12108  */
12109 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
12110 {
12111 	struct bpf_insn *insn = env->prog->insnsi;
12112 	int insn_cnt = env->prog->len;
12113 	int i, j, err;
12114 
12115 	err = bpf_prog_calc_tag(env->prog);
12116 	if (err)
12117 		return err;
12118 
12119 	for (i = 0; i < insn_cnt; i++, insn++) {
12120 		if (BPF_CLASS(insn->code) == BPF_LDX &&
12121 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
12122 			verbose(env, "BPF_LDX uses reserved fields\n");
12123 			return -EINVAL;
12124 		}
12125 
12126 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
12127 			struct bpf_insn_aux_data *aux;
12128 			struct bpf_map *map;
12129 			struct fd f;
12130 			u64 addr;
12131 			u32 fd;
12132 
12133 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
12134 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
12135 			    insn[1].off != 0) {
12136 				verbose(env, "invalid bpf_ld_imm64 insn\n");
12137 				return -EINVAL;
12138 			}
12139 
12140 			if (insn[0].src_reg == 0)
12141 				/* valid generic load 64-bit imm */
12142 				goto next_insn;
12143 
12144 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
12145 				aux = &env->insn_aux_data[i];
12146 				err = check_pseudo_btf_id(env, insn, aux);
12147 				if (err)
12148 					return err;
12149 				goto next_insn;
12150 			}
12151 
12152 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
12153 				aux = &env->insn_aux_data[i];
12154 				aux->ptr_type = PTR_TO_FUNC;
12155 				goto next_insn;
12156 			}
12157 
12158 			/* In the final convert_pseudo_ld_imm64() step, this is
12159 			 * converted into a regular 64-bit imm load insn.
12160 			 */
12161 			switch (insn[0].src_reg) {
12162 			case BPF_PSEUDO_MAP_VALUE:
12163 			case BPF_PSEUDO_MAP_IDX_VALUE:
12164 				break;
12165 			case BPF_PSEUDO_MAP_FD:
12166 			case BPF_PSEUDO_MAP_IDX:
12167 				if (insn[1].imm == 0)
12168 					break;
12169 				fallthrough;
12170 			default:
12171 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
12172 				return -EINVAL;
12173 			}
12174 
12175 			switch (insn[0].src_reg) {
12176 			case BPF_PSEUDO_MAP_IDX_VALUE:
12177 			case BPF_PSEUDO_MAP_IDX:
12178 				if (bpfptr_is_null(env->fd_array)) {
12179 					verbose(env, "fd_idx without fd_array is invalid\n");
12180 					return -EPROTO;
12181 				}
12182 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
12183 							    insn[0].imm * sizeof(fd),
12184 							    sizeof(fd)))
12185 					return -EFAULT;
12186 				break;
12187 			default:
12188 				fd = insn[0].imm;
12189 				break;
12190 			}
12191 
12192 			f = fdget(fd);
12193 			map = __bpf_map_get(f);
12194 			if (IS_ERR(map)) {
12195 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
12196 					insn[0].imm);
12197 				return PTR_ERR(map);
12198 			}
12199 
12200 			err = check_map_prog_compatibility(env, map, env->prog);
12201 			if (err) {
12202 				fdput(f);
12203 				return err;
12204 			}
12205 
12206 			aux = &env->insn_aux_data[i];
12207 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
12208 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
12209 				addr = (unsigned long)map;
12210 			} else {
12211 				u32 off = insn[1].imm;
12212 
12213 				if (off >= BPF_MAX_VAR_OFF) {
12214 					verbose(env, "direct value offset of %u is not allowed\n", off);
12215 					fdput(f);
12216 					return -EINVAL;
12217 				}
12218 
12219 				if (!map->ops->map_direct_value_addr) {
12220 					verbose(env, "no direct value access support for this map type\n");
12221 					fdput(f);
12222 					return -EINVAL;
12223 				}
12224 
12225 				err = map->ops->map_direct_value_addr(map, &addr, off);
12226 				if (err) {
12227 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
12228 						map->value_size, off);
12229 					fdput(f);
12230 					return err;
12231 				}
12232 
12233 				aux->map_off = off;
12234 				addr += off;
12235 			}
12236 
12237 			insn[0].imm = (u32)addr;
12238 			insn[1].imm = addr >> 32;
12239 
12240 			/* check whether we recorded this map already */
12241 			for (j = 0; j < env->used_map_cnt; j++) {
12242 				if (env->used_maps[j] == map) {
12243 					aux->map_index = j;
12244 					fdput(f);
12245 					goto next_insn;
12246 				}
12247 			}
12248 
12249 			if (env->used_map_cnt >= MAX_USED_MAPS) {
12250 				fdput(f);
12251 				return -E2BIG;
12252 			}
12253 
12254 			/* hold the map. If the program is rejected by the verifier,
12255 			 * the map will be released by release_maps(); otherwise it
12256 			 * will be used by the valid program until it's unloaded
12257 			 * and all maps are released in free_used_maps()
12258 			 */
12259 			bpf_map_inc(map);
12260 
12261 			aux->map_index = env->used_map_cnt;
12262 			env->used_maps[env->used_map_cnt++] = map;
12263 
12264 			if (bpf_map_is_cgroup_storage(map) &&
12265 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
12266 				verbose(env, "only one cgroup storage of each type is allowed\n");
12267 				fdput(f);
12268 				return -EBUSY;
12269 			}
12270 
12271 			fdput(f);
12272 next_insn:
12273 			insn++;
12274 			i++;
12275 			continue;
12276 		}
12277 
12278 		/* Basic sanity check before we invest more work here. */
12279 		if (!bpf_opcode_in_insntable(insn->code)) {
12280 			verbose(env, "unknown opcode %02x\n", insn->code);
12281 			return -EINVAL;
12282 		}
12283 	}
12284 
12285 	/* now all pseudo BPF_LD_IMM64 instructions load valid
12286 	 * 'struct bpf_map *' into a register instead of user map_fd.
12287 	 * These pointers will be used later by verifier to validate map access.
12288 	 */
12289 	return 0;
12290 }
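
/* A minimal sketch of the map-FD case handled above (map_fd is whatever FD
 * user space got back from BPF_MAP_CREATE):
 *
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *
 * expands to a two-insn ldimm64 with src_reg = BPF_PSEUDO_MAP_FD and the FD
 * in insn[0].imm.  resolve_pseudo_ldimm64() looks up the FD, takes a
 * reference with bpf_map_inc(), records the map in env->used_maps[] and
 * overwrites insn[0].imm/insn[1].imm with the low/high halves of the
 * 'struct bpf_map *' pointer, so later passes see a map pointer, not an FD.
 */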
12291 
12292 /* drop refcnt of maps used by the rejected program */
12293 static void release_maps(struct bpf_verifier_env *env)
12294 {
12295 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
12296 			     env->used_map_cnt);
12297 }
12298 
12299 /* drop refcnt of maps used by the rejected program */
12300 static void release_btfs(struct bpf_verifier_env *env)
12301 {
12302 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
12303 			     env->used_btf_cnt);
12304 }
12305 
12306 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
12307 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
12308 {
12309 	struct bpf_insn *insn = env->prog->insnsi;
12310 	int insn_cnt = env->prog->len;
12311 	int i;
12312 
12313 	for (i = 0; i < insn_cnt; i++, insn++) {
12314 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
12315 			continue;
12316 		if (insn->src_reg == BPF_PSEUDO_FUNC)
12317 			continue;
12318 		insn->src_reg = 0;
12319 	}
12320 }
12321 
12322 /* single env->prog->insnsi[off] instruction was replaced with the range
12323  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
12324  * [0, off) and [off, end) to new locations, so the patched range stays zeroed
12325  */
12326 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
12327 				 struct bpf_insn_aux_data *new_data,
12328 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
12329 {
12330 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
12331 	struct bpf_insn *insn = new_prog->insnsi;
12332 	u32 old_seen = old_data[off].seen;
12333 	u32 prog_len;
12334 	int i;
12335 
12336 	/* aux info at OFF always needs adjustment, whether or not the fast path
12337 	 * (cnt == 1) is taken. There is no guarantee that the insn at OFF is
12338 	 * still the original insn from the old prog.
12339 	 */
12340 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
12341 
12342 	if (cnt == 1)
12343 		return;
12344 	prog_len = new_prog->len;
12345 
12346 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
12347 	memcpy(new_data + off + cnt - 1, old_data + off,
12348 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
12349 	for (i = off; i < off + cnt - 1; i++) {
12350 		/* Expand insni[off]'s seen count to the patched range. */
12351 		new_data[i].seen = old_seen;
12352 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
12353 	}
12354 	env->insn_aux_data = new_data;
12355 	vfree(old_data);
12356 }
12357 
12358 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
12359 {
12360 	int i;
12361 
12362 	if (len == 1)
12363 		return;
12364 	/* NOTE: fake 'exit' subprog should be updated as well. */
12365 	for (i = 0; i <= env->subprog_cnt; i++) {
12366 		if (env->subprog_info[i].start <= off)
12367 			continue;
12368 		env->subprog_info[i].start += len - 1;
12369 	}
12370 }
12371 
12372 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
12373 {
12374 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
12375 	int i, sz = prog->aux->size_poke_tab;
12376 	struct bpf_jit_poke_descriptor *desc;
12377 
12378 	for (i = 0; i < sz; i++) {
12379 		desc = &tab[i];
12380 		if (desc->insn_idx <= off)
12381 			continue;
12382 		desc->insn_idx += len - 1;
12383 	}
12384 }
12385 
12386 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
12387 					    const struct bpf_insn *patch, u32 len)
12388 {
12389 	struct bpf_prog *new_prog;
12390 	struct bpf_insn_aux_data *new_data = NULL;
12391 
12392 	if (len > 1) {
12393 		new_data = vzalloc(array_size(env->prog->len + len - 1,
12394 					      sizeof(struct bpf_insn_aux_data)));
12395 		if (!new_data)
12396 			return NULL;
12397 	}
12398 
12399 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
12400 	if (IS_ERR(new_prog)) {
12401 		if (PTR_ERR(new_prog) == -ERANGE)
12402 			verbose(env,
12403 				"insn %d cannot be patched due to 16-bit range\n",
12404 				env->insn_aux_data[off].orig_idx);
12405 		vfree(new_data);
12406 		return NULL;
12407 	}
12408 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
12409 	adjust_subprog_starts(env, off, len);
12410 	adjust_poke_descs(new_prog, off, len);
12411 	return new_prog;
12412 }
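
/* A sketch of the bookkeeping: if insn 10 is replaced by a two-insn patch,
 * the program grows by len - 1 == 1 slot, so adjust_subprog_starts() and
 * adjust_poke_descs() shift every subprog start and poke descriptor index
 * located after insn 10 by one, and adjust_insn_aux_data() lets the new
 * slots inherit the 'seen' mark of the original insn so that dead code
 * elimination keeps treating the patched range as live.
 */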
12413 
12414 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
12415 					      u32 off, u32 cnt)
12416 {
12417 	int i, j;
12418 
12419 	/* find first prog starting at or after off (first to remove) */
12420 	for (i = 0; i < env->subprog_cnt; i++)
12421 		if (env->subprog_info[i].start >= off)
12422 			break;
12423 	/* find first prog starting at or after off + cnt (first to stay) */
12424 	for (j = i; j < env->subprog_cnt; j++)
12425 		if (env->subprog_info[j].start >= off + cnt)
12426 			break;
12427 	/* if j doesn't start exactly at off + cnt, we are just removing
12428 	 * the front of previous prog
12429 	 */
12430 	if (env->subprog_info[j].start != off + cnt)
12431 		j--;
12432 
12433 	if (j > i) {
12434 		struct bpf_prog_aux *aux = env->prog->aux;
12435 		int move;
12436 
12437 		/* move fake 'exit' subprog as well */
12438 		move = env->subprog_cnt + 1 - j;
12439 
12440 		memmove(env->subprog_info + i,
12441 			env->subprog_info + j,
12442 			sizeof(*env->subprog_info) * move);
12443 		env->subprog_cnt -= j - i;
12444 
12445 		/* remove func_info */
12446 		if (aux->func_info) {
12447 			move = aux->func_info_cnt - j;
12448 
12449 			memmove(aux->func_info + i,
12450 				aux->func_info + j,
12451 				sizeof(*aux->func_info) * move);
12452 			aux->func_info_cnt -= j - i;
12453 			/* func_info->insn_off is set after all code rewrites,
12454 			 * in adjust_btf_func() - no need to adjust
12455 			 */
12456 		}
12457 	} else {
12458 		/* convert i from "first prog to remove" to "first to adjust" */
12459 		if (env->subprog_info[i].start == off)
12460 			i++;
12461 	}
12462 
12463 	/* update fake 'exit' subprog as well */
12464 	for (; i <= env->subprog_cnt; i++)
12465 		env->subprog_info[i].start -= cnt;
12466 
12467 	return 0;
12468 }
12469 
12470 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
12471 				      u32 cnt)
12472 {
12473 	struct bpf_prog *prog = env->prog;
12474 	u32 i, l_off, l_cnt, nr_linfo;
12475 	struct bpf_line_info *linfo;
12476 
12477 	nr_linfo = prog->aux->nr_linfo;
12478 	if (!nr_linfo)
12479 		return 0;
12480 
12481 	linfo = prog->aux->linfo;
12482 
12483 	/* find first line info to remove, count lines to be removed */
12484 	for (i = 0; i < nr_linfo; i++)
12485 		if (linfo[i].insn_off >= off)
12486 			break;
12487 
12488 	l_off = i;
12489 	l_cnt = 0;
12490 	for (; i < nr_linfo; i++)
12491 		if (linfo[i].insn_off < off + cnt)
12492 			l_cnt++;
12493 		else
12494 			break;
12495 
12496 	/* If the first live insn doesn't match the first live linfo, it needs to
12497 	 * "inherit" the last removed linfo.  prog is already modified, so
12498 	 * prog->len == off means there are no live instructions after off (the
12499 	 * tail of the program was removed).
12499 	 */
12500 	if (prog->len != off && l_cnt &&
12501 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
12502 		l_cnt--;
12503 		linfo[--i].insn_off = off + cnt;
12504 	}
12505 
12506 	/* remove the line info which refer to the removed instructions */
12507 	if (l_cnt) {
12508 		memmove(linfo + l_off, linfo + i,
12509 			sizeof(*linfo) * (nr_linfo - i));
12510 
12511 		prog->aux->nr_linfo -= l_cnt;
12512 		nr_linfo = prog->aux->nr_linfo;
12513 	}
12514 
12515 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
12516 	for (i = l_off; i < nr_linfo; i++)
12517 		linfo[i].insn_off -= cnt;
12518 
12519 	/* fix up all subprogs (incl. 'exit') which start >= off */
12520 	for (i = 0; i <= env->subprog_cnt; i++)
12521 		if (env->subprog_info[i].linfo_idx > l_off) {
12522 			/* program may have started in the removed region but
12523 			 * may not be fully removed
12524 			 */
12525 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
12526 				env->subprog_info[i].linfo_idx -= l_cnt;
12527 			else
12528 				env->subprog_info[i].linfo_idx = l_off;
12529 		}
12530 
12531 	return 0;
12532 }
12533 
12534 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
12535 {
12536 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12537 	unsigned int orig_prog_len = env->prog->len;
12538 	int err;
12539 
12540 	if (bpf_prog_is_dev_bound(env->prog->aux))
12541 		bpf_prog_offload_remove_insns(env, off, cnt);
12542 
12543 	err = bpf_remove_insns(env->prog, off, cnt);
12544 	if (err)
12545 		return err;
12546 
12547 	err = adjust_subprog_starts_after_remove(env, off, cnt);
12548 	if (err)
12549 		return err;
12550 
12551 	err = bpf_adj_linfo_after_remove(env, off, cnt);
12552 	if (err)
12553 		return err;
12554 
12555 	memmove(aux_data + off,	aux_data + off + cnt,
12556 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
12557 
12558 	return 0;
12559 }
12560 
12561 /* The verifier does more data flow analysis than llvm and will not
12562  * explore branches that are dead at run time. Malicious programs can
12563  * have dead code too. Therefore replace all dead at-run-time code
12564  * with 'ja -1'.
12565  *
12566  * Plain nops would not be optimal: if dead code sat at the end of the
12567  * program and, through another bug, we managed to jump there, we would
12568  * execute past the end of program memory. Returning an exception code
12569  * wouldn't work either, since the dead code can be located inside
12570  * subprogs.
12571  */
12572 static void sanitize_dead_code(struct bpf_verifier_env *env)
12573 {
12574 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12575 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
12576 	struct bpf_insn *insn = env->prog->insnsi;
12577 	const int insn_cnt = env->prog->len;
12578 	int i;
12579 
12580 	for (i = 0; i < insn_cnt; i++) {
12581 		if (aux_data[i].seen)
12582 			continue;
12583 		memcpy(insn + i, &trap, sizeof(trap));
12584 		aux_data[i].zext_dst = false;
12585 	}
12586 }
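
/* For illustration, a sketch of the effect (insn macros from linux/filter.h):
 *
 *   0: BPF_MOV64_IMM(BPF_REG_0, 0),
 *   1: BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),   always taken, R0 is known 0
 *   2: BPF_MOV64_IMM(BPF_REG_0, 1),             dead at run time
 *   3: BPF_EXIT_INSN(),
 *
 * Insn 2 passes the reachability check of the first pass, but is never marked
 * 'seen' by do_check(), so it is overwritten with BPF_JMP_IMM(BPF_JA, 0, 0, -1):
 * a jump to itself, so that if a bug ever sent execution there it would spin
 * in place instead of running past the end of the program.
 */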
12587 
12588 static bool insn_is_cond_jump(u8 code)
12589 {
12590 	u8 op;
12591 
12592 	if (BPF_CLASS(code) == BPF_JMP32)
12593 		return true;
12594 
12595 	if (BPF_CLASS(code) != BPF_JMP)
12596 		return false;
12597 
12598 	op = BPF_OP(code);
12599 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
12600 }
12601 
12602 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
12603 {
12604 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12605 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
12606 	struct bpf_insn *insn = env->prog->insnsi;
12607 	const int insn_cnt = env->prog->len;
12608 	int i;
12609 
12610 	for (i = 0; i < insn_cnt; i++, insn++) {
12611 		if (!insn_is_cond_jump(insn->code))
12612 			continue;
12613 
12614 		if (!aux_data[i + 1].seen)
12615 			ja.off = insn->off;
12616 		else if (!aux_data[i + 1 + insn->off].seen)
12617 			ja.off = 0;
12618 		else
12619 			continue;
12620 
12621 		if (bpf_prog_is_dev_bound(env->prog->aux))
12622 			bpf_prog_offload_replace_insn(env, i, &ja);
12623 
12624 		memcpy(insn, &ja, sizeof(ja));
12625 	}
12626 }
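
/* A sketch of the hard-wiring above: if the branch target of
 *
 *   BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 5, 3),
 *
 * was never marked 'seen' (the condition can never be true), the insn is
 * overwritten with BPF_JMP_IMM(BPF_JA, 0, 0, 0), i.e. always fall through;
 * if instead the fall-through insn is the dead one, it becomes
 * BPF_JMP_IMM(BPF_JA, 0, 0, 3), i.e. always take the former branch target.
 */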
12627 
12628 static int opt_remove_dead_code(struct bpf_verifier_env *env)
12629 {
12630 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
12631 	int insn_cnt = env->prog->len;
12632 	int i, err;
12633 
12634 	for (i = 0; i < insn_cnt; i++) {
12635 		int j;
12636 
12637 		j = 0;
12638 		while (i + j < insn_cnt && !aux_data[i + j].seen)
12639 			j++;
12640 		if (!j)
12641 			continue;
12642 
12643 		err = verifier_remove_insns(env, i, j);
12644 		if (err)
12645 			return err;
12646 		insn_cnt = env->prog->len;
12647 	}
12648 
12649 	return 0;
12650 }
12651 
12652 static int opt_remove_nops(struct bpf_verifier_env *env)
12653 {
12654 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
12655 	struct bpf_insn *insn = env->prog->insnsi;
12656 	int insn_cnt = env->prog->len;
12657 	int i, err;
12658 
12659 	for (i = 0; i < insn_cnt; i++) {
12660 		if (memcmp(&insn[i], &ja, sizeof(ja)))
12661 			continue;
12662 
12663 		err = verifier_remove_insns(env, i, 1);
12664 		if (err)
12665 			return err;
12666 		insn_cnt--;
12667 		i--;
12668 	}
12669 
12670 	return 0;
12671 }
12672 
12673 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
12674 					 const union bpf_attr *attr)
12675 {
12676 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
12677 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
12678 	int i, patch_len, delta = 0, len = env->prog->len;
12679 	struct bpf_insn *insns = env->prog->insnsi;
12680 	struct bpf_prog *new_prog;
12681 	bool rnd_hi32;
12682 
12683 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
12684 	zext_patch[1] = BPF_ZEXT_REG(0);
12685 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
12686 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
12687 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
12688 	for (i = 0; i < len; i++) {
12689 		int adj_idx = i + delta;
12690 		struct bpf_insn insn;
12691 		int load_reg;
12692 
12693 		insn = insns[adj_idx];
12694 		load_reg = insn_def_regno(&insn);
12695 		if (!aux[adj_idx].zext_dst) {
12696 			u8 code, class;
12697 			u32 imm_rnd;
12698 
12699 			if (!rnd_hi32)
12700 				continue;
12701 
12702 			code = insn.code;
12703 			class = BPF_CLASS(code);
12704 			if (load_reg == -1)
12705 				continue;
12706 
12707 			/* NOTE: arg "reg" (the fourth one) is only used for
12708 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
12709 			 *       here.
12710 			 */
12711 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
12712 				if (class == BPF_LD &&
12713 				    BPF_MODE(code) == BPF_IMM)
12714 					i++;
12715 				continue;
12716 			}
12717 
12718 			/* ctx load could be transformed into wider load. */
12719 			if (class == BPF_LDX &&
12720 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
12721 				continue;
12722 
12723 			imm_rnd = get_random_int();
12724 			rnd_hi32_patch[0] = insn;
12725 			rnd_hi32_patch[1].imm = imm_rnd;
12726 			rnd_hi32_patch[3].dst_reg = load_reg;
12727 			patch = rnd_hi32_patch;
12728 			patch_len = 4;
12729 			goto apply_patch_buffer;
12730 		}
12731 
12732 		/* Add in a zero-extend instruction if a) the JIT has requested
12733 		 * it or b) it's a CMPXCHG.
12734 		 *
12735 		 * The latter is because: BPF_CMPXCHG always loads a value into
12736 		 * R0, therefore always zero-extends. However some archs'
12737 		 * equivalent instruction only does this load when the
12738 		 * comparison is successful. This detail of CMPXCHG is
12739 		 * orthogonal to the general zero-extension behaviour of the
12740 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
12741 		 */
12742 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
12743 			continue;
12744 
12745 		if (WARN_ON(load_reg == -1)) {
12746 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
12747 			return -EFAULT;
12748 		}
12749 
12750 		zext_patch[0] = insn;
12751 		zext_patch[1].dst_reg = load_reg;
12752 		zext_patch[1].src_reg = load_reg;
12753 		patch = zext_patch;
12754 		patch_len = 2;
12755 apply_patch_buffer:
12756 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
12757 		if (!new_prog)
12758 			return -ENOMEM;
12759 		env->prog = new_prog;
12760 		insns = new_prog->insnsi;
12761 		aux = env->insn_aux_data;
12762 		delta += patch_len - 1;
12763 	}
12764 
12765 	return 0;
12766 }
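
/* A sketch of the two patch flavours above: for a 32-bit def such as
 *
 *   BPF_ALU32_IMM(BPF_MOV, BPF_REG_2, 7),
 *
 * the zext patch appends BPF_ZEXT_REG(BPF_REG_2), a specially flagged mov32
 * of the register onto itself that the JIT turns into an explicit zero
 * extension of the upper 32 bits.  With BPF_F_TEST_RND_HI32, sub-register
 * defs that were proven not to need zero extension instead get random bits
 * OR-ed into their high half via BPF_REG_AX, to flush out programs that
 * wrongly rely on implicit zero extension.
 */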
12767 
12768 /* convert load instructions that access fields of a context type into a
12769  * sequence of instructions that access fields of the underlying structure:
12770  *     struct __sk_buff    -> struct sk_buff
12771  *     struct bpf_sock_ops -> struct sock
12772  */
12773 static int convert_ctx_accesses(struct bpf_verifier_env *env)
12774 {
12775 	const struct bpf_verifier_ops *ops = env->ops;
12776 	int i, cnt, size, ctx_field_size, delta = 0;
12777 	const int insn_cnt = env->prog->len;
12778 	struct bpf_insn insn_buf[16], *insn;
12779 	u32 target_size, size_default, off;
12780 	struct bpf_prog *new_prog;
12781 	enum bpf_access_type type;
12782 	bool is_narrower_load;
12783 
12784 	if (ops->gen_prologue || env->seen_direct_write) {
12785 		if (!ops->gen_prologue) {
12786 			verbose(env, "bpf verifier is misconfigured\n");
12787 			return -EINVAL;
12788 		}
12789 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
12790 					env->prog);
12791 		if (cnt >= ARRAY_SIZE(insn_buf)) {
12792 			verbose(env, "bpf verifier is misconfigured\n");
12793 			return -EINVAL;
12794 		} else if (cnt) {
12795 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
12796 			if (!new_prog)
12797 				return -ENOMEM;
12798 
12799 			env->prog = new_prog;
12800 			delta += cnt - 1;
12801 		}
12802 	}
12803 
12804 	if (bpf_prog_is_dev_bound(env->prog->aux))
12805 		return 0;
12806 
12807 	insn = env->prog->insnsi + delta;
12808 
12809 	for (i = 0; i < insn_cnt; i++, insn++) {
12810 		bpf_convert_ctx_access_t convert_ctx_access;
12811 		bool ctx_access;
12812 
12813 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
12814 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
12815 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
12816 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
12817 			type = BPF_READ;
12818 			ctx_access = true;
12819 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
12820 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
12821 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
12822 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
12823 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
12824 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
12825 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
12826 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
12827 			type = BPF_WRITE;
12828 			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
12829 		} else {
12830 			continue;
12831 		}
12832 
12833 		if (type == BPF_WRITE &&
12834 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
12835 			struct bpf_insn patch[] = {
12836 				*insn,
12837 				BPF_ST_NOSPEC(),
12838 			};
12839 
12840 			cnt = ARRAY_SIZE(patch);
12841 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
12842 			if (!new_prog)
12843 				return -ENOMEM;
12844 
12845 			delta    += cnt - 1;
12846 			env->prog = new_prog;
12847 			insn      = new_prog->insnsi + i + delta;
12848 			continue;
12849 		}
12850 
12851 		if (!ctx_access)
12852 			continue;
12853 
12854 		switch (env->insn_aux_data[i + delta].ptr_type) {
12855 		case PTR_TO_CTX:
12856 			if (!ops->convert_ctx_access)
12857 				continue;
12858 			convert_ctx_access = ops->convert_ctx_access;
12859 			break;
12860 		case PTR_TO_SOCKET:
12861 		case PTR_TO_SOCK_COMMON:
12862 			convert_ctx_access = bpf_sock_convert_ctx_access;
12863 			break;
12864 		case PTR_TO_TCP_SOCK:
12865 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
12866 			break;
12867 		case PTR_TO_XDP_SOCK:
12868 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
12869 			break;
12870 		case PTR_TO_BTF_ID:
12871 			if (type == BPF_READ) {
12872 				insn->code = BPF_LDX | BPF_PROBE_MEM |
12873 					BPF_SIZE((insn)->code);
12874 				env->prog->aux->num_exentries++;
12875 			} else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
12876 				verbose(env, "Writes through BTF pointers are not allowed\n");
12877 				return -EINVAL;
12878 			}
12879 			continue;
12880 		default:
12881 			continue;
12882 		}
12883 
12884 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
12885 		size = BPF_LDST_BYTES(insn);
12886 
12887 		/* If the read access is a narrower load of the field,
12888 		 * convert it to a 4/8-byte load to minimize program type specific
12889 		 * convert_ctx_access changes. If the conversion is successful,
12890 		 * we will apply the proper mask to the result.
12891 		 */
12892 		is_narrower_load = size < ctx_field_size;
12893 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
12894 		off = insn->off;
12895 		if (is_narrower_load) {
12896 			u8 size_code;
12897 
12898 			if (type == BPF_WRITE) {
12899 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
12900 				return -EINVAL;
12901 			}
12902 
12903 			size_code = BPF_H;
12904 			if (ctx_field_size == 4)
12905 				size_code = BPF_W;
12906 			else if (ctx_field_size == 8)
12907 				size_code = BPF_DW;
12908 
12909 			insn->off = off & ~(size_default - 1);
12910 			insn->code = BPF_LDX | BPF_MEM | size_code;
12911 		}
12912 
12913 		target_size = 0;
12914 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
12915 					 &target_size);
12916 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
12917 		    (ctx_field_size && !target_size)) {
12918 			verbose(env, "bpf verifier is misconfigured\n");
12919 			return -EINVAL;
12920 		}
12921 
12922 		if (is_narrower_load && size < target_size) {
12923 			u8 shift = bpf_ctx_narrow_access_offset(
12924 				off, size, size_default) * 8;
12925 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
12926 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
12927 				return -EINVAL;
12928 			}
12929 			if (ctx_field_size <= 4) {
12930 				if (shift)
12931 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
12932 									insn->dst_reg,
12933 									shift);
12934 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
12935 								(1 << size * 8) - 1);
12936 			} else {
12937 				if (shift)
12938 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
12939 									insn->dst_reg,
12940 									shift);
12941 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
12942 								(1ULL << size * 8) - 1);
12943 			}
12944 		}
12945 
12946 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
12947 		if (!new_prog)
12948 			return -ENOMEM;
12949 
12950 		delta += cnt - 1;
12951 
12952 		/* keep walking new program and skip insns we just inserted */
12953 		env->prog = new_prog;
12954 		insn      = new_prog->insnsi + i + delta;
12955 	}
12956 
12957 	return 0;
12958 }
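
/* A sketch of the classic rewrite performed above (the exact sequence is
 * produced by the prog type's convert_ctx_access callback, so details vary):
 *
 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, len))
 *
 * becomes a load from the real socket buffer,
 *
 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct sk_buff, len)),
 *
 * and a narrower read (e.g. one byte of the same field) is first widened to
 * the full field load and then shifted/masked back down by the insns
 * appended in the is_narrower_load path.
 */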
12959 
12960 static int jit_subprogs(struct bpf_verifier_env *env)
12961 {
12962 	struct bpf_prog *prog = env->prog, **func, *tmp;
12963 	int i, j, subprog_start, subprog_end = 0, len, subprog;
12964 	struct bpf_map *map_ptr;
12965 	struct bpf_insn *insn;
12966 	void *old_bpf_func;
12967 	int err, num_exentries;
12968 
12969 	if (env->subprog_cnt <= 1)
12970 		return 0;
12971 
12972 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
12973 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
12974 			continue;
12975 
12976 		/* Upon error here we cannot fall back to interpreter but
12977 		 * need a hard reject of the program. Thus -EFAULT is
12978 		 * propagated in any case.
12979 		 */
12980 		subprog = find_subprog(env, i + insn->imm + 1);
12981 		if (subprog < 0) {
12982 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
12983 				  i + insn->imm + 1);
12984 			return -EFAULT;
12985 		}
12986 		/* temporarily remember subprog id inside insn instead of
12987 		 * aux_data, since next loop will split up all insns into funcs
12988 		 */
12989 		insn->off = subprog;
12990 		/* remember the original imm in case the JIT fails and a fallback
12991 		 * to the interpreter is needed
12992 		 */
12993 		env->insn_aux_data[i].call_imm = insn->imm;
12994 		/* point imm to __bpf_call_base+1 from JITs point of view */
12995 		insn->imm = 1;
12996 		if (bpf_pseudo_func(insn))
12997 			/* jit (e.g. x86_64) may emit fewer instructions
12998 			 * if it learns a u32 imm is the same as a u64 imm.
12999 			 * Force a non zero here.
13000 			 */
13001 			insn[1].imm = 1;
13002 	}
13003 
13004 	err = bpf_prog_alloc_jited_linfo(prog);
13005 	if (err)
13006 		goto out_undo_insn;
13007 
13008 	err = -ENOMEM;
13009 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
13010 	if (!func)
13011 		goto out_undo_insn;
13012 
13013 	for (i = 0; i < env->subprog_cnt; i++) {
13014 		subprog_start = subprog_end;
13015 		subprog_end = env->subprog_info[i + 1].start;
13016 
13017 		len = subprog_end - subprog_start;
13018 		/* bpf_prog_run() doesn't call subprogs directly,
13019 		 * hence main prog stats include the runtime of subprogs.
13020 		 * subprogs don't have IDs and are not reachable via prog_get_next_id;
13021 		 * func[i]->stats will never be accessed and stays NULL
13022 		 */
13023 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
13024 		if (!func[i])
13025 			goto out_free;
13026 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
13027 		       len * sizeof(struct bpf_insn));
13028 		func[i]->type = prog->type;
13029 		func[i]->len = len;
13030 		if (bpf_prog_calc_tag(func[i]))
13031 			goto out_free;
13032 		func[i]->is_func = 1;
13033 		func[i]->aux->func_idx = i;
13034 		/* the members below are freed only when the main prog->aux is freed */
13035 		func[i]->aux->btf = prog->aux->btf;
13036 		func[i]->aux->func_info = prog->aux->func_info;
13037 		func[i]->aux->poke_tab = prog->aux->poke_tab;
13038 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
13039 
13040 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
13041 			struct bpf_jit_poke_descriptor *poke;
13042 
13043 			poke = &prog->aux->poke_tab[j];
13044 			if (poke->insn_idx < subprog_end &&
13045 			    poke->insn_idx >= subprog_start)
13046 				poke->aux = func[i]->aux;
13047 		}
13048 
13049 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
13050 		 * Long term would need debug info to populate names
13051 		 * Long term, debug info would be needed to populate the names
13052 		func[i]->aux->name[0] = 'F';
13053 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
13054 		func[i]->jit_requested = 1;
13055 		func[i]->blinding_requested = prog->blinding_requested;
13056 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
13057 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
13058 		func[i]->aux->linfo = prog->aux->linfo;
13059 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
13060 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
13061 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
13062 		num_exentries = 0;
13063 		insn = func[i]->insnsi;
13064 		for (j = 0; j < func[i]->len; j++, insn++) {
13065 			if (BPF_CLASS(insn->code) == BPF_LDX &&
13066 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
13067 				num_exentries++;
13068 		}
13069 		func[i]->aux->num_exentries = num_exentries;
13070 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
13071 		func[i] = bpf_int_jit_compile(func[i]);
13072 		if (!func[i]->jited) {
13073 			err = -ENOTSUPP;
13074 			goto out_free;
13075 		}
13076 		cond_resched();
13077 	}
13078 
13079 	/* at this point all bpf functions were successfully JITed
13080 	 * now populate all bpf_calls with correct addresses and
13081 	 * run last pass of JIT
13082 	 */
13083 	for (i = 0; i < env->subprog_cnt; i++) {
13084 		insn = func[i]->insnsi;
13085 		for (j = 0; j < func[i]->len; j++, insn++) {
13086 			if (bpf_pseudo_func(insn)) {
13087 				subprog = insn->off;
13088 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
13089 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
13090 				continue;
13091 			}
13092 			if (!bpf_pseudo_call(insn))
13093 				continue;
13094 			subprog = insn->off;
13095 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
13096 		}
13097 
13098 		/* we use the aux data to keep a list of the start addresses
13099 		 * of the JITed images for each function in the program
13100 		 *
13101 		 * for some architectures, such as powerpc64, the imm field
13102 		 * might not be large enough to hold the offset of the start
13103 		 * address of the callee's JITed image from __bpf_call_base
13104 		 *
13105 		 * in such cases, we can lookup the start address of a callee
13106 		 * by using its subprog id, available from the off field of
13107 		 * the call instruction, as an index for this list
13108 		 */
13109 		func[i]->aux->func = func;
13110 		func[i]->aux->func_cnt = env->subprog_cnt;
13111 	}
13112 	for (i = 0; i < env->subprog_cnt; i++) {
13113 		old_bpf_func = func[i]->bpf_func;
13114 		tmp = bpf_int_jit_compile(func[i]);
13115 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
13116 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
13117 			err = -ENOTSUPP;
13118 			goto out_free;
13119 		}
13120 		cond_resched();
13121 	}
13122 
13123 	/* finally lock prog and jit images for all functions and
13124 	 * populate kallsysm
13125 	 * populate kallsyms
13126 	for (i = 0; i < env->subprog_cnt; i++) {
13127 		bpf_prog_lock_ro(func[i]);
13128 		bpf_prog_kallsyms_add(func[i]);
13129 	}
13130 
13131 	/* Last step: make now unused interpreter insns from main
13132 	 * prog consistent for later dump requests, so they look the
13133 	 * same as if the prog had only ever been interpreted.
13134 	 */
13135 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13136 		if (bpf_pseudo_func(insn)) {
13137 			insn[0].imm = env->insn_aux_data[i].call_imm;
13138 			insn[1].imm = insn->off;
13139 			insn->off = 0;
13140 			continue;
13141 		}
13142 		if (!bpf_pseudo_call(insn))
13143 			continue;
13144 		insn->off = env->insn_aux_data[i].call_imm;
13145 		subprog = find_subprog(env, i + insn->off + 1);
13146 		insn->imm = subprog;
13147 	}
13148 
13149 	prog->jited = 1;
13150 	prog->bpf_func = func[0]->bpf_func;
13151 	prog->jited_len = func[0]->jited_len;
13152 	prog->aux->func = func;
13153 	prog->aux->func_cnt = env->subprog_cnt;
13154 	bpf_prog_jit_attempt_done(prog);
13155 	return 0;
13156 out_free:
13157 	/* We failed JIT'ing, so at this point we need to unregister poke
13158 	 * descriptors from subprogs, so that the kernel does not attempt to
13159 	 * patch them anymore as we're freeing the subprog JIT memory.
13160 	 */
13161 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
13162 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
13163 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
13164 	}
13165 	/* At this point we're guaranteed that poke descriptors are not
13166 	 * live anymore. We can just unlink its descriptor table as it's
13167 	 * released with the main prog.
13168 	 */
13169 	for (i = 0; i < env->subprog_cnt; i++) {
13170 		if (!func[i])
13171 			continue;
13172 		func[i]->aux->poke_tab = NULL;
13173 		bpf_jit_free(func[i]);
13174 	}
13175 	kfree(func);
13176 out_undo_insn:
13177 	/* cleanup main prog to be interpreted */
13178 	prog->jit_requested = 0;
13179 	prog->blinding_requested = 0;
13180 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13181 		if (!bpf_pseudo_call(insn))
13182 			continue;
13183 		insn->off = 0;
13184 		insn->imm = env->insn_aux_data[i].call_imm;
13185 	}
13186 	bpf_prog_jit_attempt_done(prog);
13187 	return err;
13188 }
13189 
13190 static int fixup_call_args(struct bpf_verifier_env *env)
13191 {
13192 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13193 	struct bpf_prog *prog = env->prog;
13194 	struct bpf_insn *insn = prog->insnsi;
13195 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
13196 	int i, depth;
13197 #endif
13198 	int err = 0;
13199 
13200 	if (env->prog->jit_requested &&
13201 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
13202 		err = jit_subprogs(env);
13203 		if (err == 0)
13204 			return 0;
13205 		if (err == -EFAULT)
13206 			return err;
13207 	}
13208 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13209 	if (has_kfunc_call) {
13210 		verbose(env, "calling kernel functions are not allowed in non-JITed programs\n");
13211 		return -EINVAL;
13212 	}
13213 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
13214 		/* When JIT fails the progs with bpf2bpf calls and tail_calls
13215 		 * have to be rejected, since interpreter doesn't support them yet.
13216 		 */
13217 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
13218 		return -EINVAL;
13219 	}
13220 	for (i = 0; i < prog->len; i++, insn++) {
13221 		if (bpf_pseudo_func(insn)) {
13222 			/* When JIT fails the progs with callback calls
13223 			 * have to be rejected, since interpreter doesn't support them yet.
13224 			 */
13225 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
13226 			return -EINVAL;
13227 		}
13228 
13229 		if (!bpf_pseudo_call(insn))
13230 			continue;
13231 		depth = get_callee_stack_depth(env, insn, i);
13232 		if (depth < 0)
13233 			return depth;
13234 		bpf_patch_call_args(insn, depth);
13235 	}
13236 	err = 0;
13237 #endif
13238 	return err;
13239 }
13240 
13241 static int fixup_kfunc_call(struct bpf_verifier_env *env,
13242 			    struct bpf_insn *insn)
13243 {
13244 	const struct bpf_kfunc_desc *desc;
13245 
13246 	if (!insn->imm) {
13247 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
13248 		return -EINVAL;
13249 	}
13250 
13251 	/* insn->imm has the btf func_id. Replace it with
13252 	 * an address (relative to __bpf_call_base).
13253 	 */
13254 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
13255 	if (!desc) {
13256 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
13257 			insn->imm);
13258 		return -EFAULT;
13259 	}
13260 
13261 	insn->imm = desc->imm;
13262 
13263 	return 0;
13264 }
13265 
13266 /* Do various post-verification rewrites in a single program pass.
13267  * These rewrites simplify JIT and interpreter implementations.
13268  */
13269 static int do_misc_fixups(struct bpf_verifier_env *env)
13270 {
13271 	struct bpf_prog *prog = env->prog;
13272 	enum bpf_attach_type eatype = prog->expected_attach_type;
13273 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
13274 	struct bpf_insn *insn = prog->insnsi;
13275 	const struct bpf_func_proto *fn;
13276 	const int insn_cnt = prog->len;
13277 	const struct bpf_map_ops *ops;
13278 	struct bpf_insn_aux_data *aux;
13279 	struct bpf_insn insn_buf[16];
13280 	struct bpf_prog *new_prog;
13281 	struct bpf_map *map_ptr;
13282 	int i, ret, cnt, delta = 0;
13283 
13284 	for (i = 0; i < insn_cnt; i++, insn++) {
13285 		/* Make divide-by-zero exceptions impossible. */
13286 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
13287 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
13288 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
13289 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
13290 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
13291 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
13292 			struct bpf_insn *patchlet;
13293 			struct bpf_insn chk_and_div[] = {
13294 				/* [R,W]x div 0 -> 0 */
13295 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13296 					     BPF_JNE | BPF_K, insn->src_reg,
13297 					     0, 2, 0),
13298 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
13299 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13300 				*insn,
13301 			};
13302 			struct bpf_insn chk_and_mod[] = {
13303 				/* [R,W]x mod 0 -> [R,W]x */
13304 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13305 					     BPF_JEQ | BPF_K, insn->src_reg,
13306 					     0, 1 + (is64 ? 0 : 1), 0),
13307 				*insn,
13308 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13309 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
13310 			};
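			/* A rough sketch of what the patchlets above expand to for a
			 * 64-bit "r0 /= r1" (div) and "r0 %= r1" (mod):
			 *   div: if r1 != 0 goto +2;  r0 = 0;  goto +1;  r0 /= r1
			 *   mod: if r1 == 0 goto +1;  r0 %= r1
			 * i.e. a zero divisor yields 0 for div and leaves the dividend
			 * untouched for mod; the trailing mov32 in the 32-bit mod case
			 * only zero-extends the destination as the skipped op would.
			 */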
13311 
13312 			patchlet = isdiv ? chk_and_div : chk_and_mod;
13313 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
13314 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
13315 
13316 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
13317 			if (!new_prog)
13318 				return -ENOMEM;
13319 
13320 			delta    += cnt - 1;
13321 			env->prog = prog = new_prog;
13322 			insn      = new_prog->insnsi + i + delta;
13323 			continue;
13324 		}
13325 
13326 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
13327 		if (BPF_CLASS(insn->code) == BPF_LD &&
13328 		    (BPF_MODE(insn->code) == BPF_ABS ||
13329 		     BPF_MODE(insn->code) == BPF_IND)) {
13330 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
13331 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
13332 				verbose(env, "bpf verifier is misconfigured\n");
13333 				return -EINVAL;
13334 			}
13335 
13336 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13337 			if (!new_prog)
13338 				return -ENOMEM;
13339 
13340 			delta    += cnt - 1;
13341 			env->prog = prog = new_prog;
13342 			insn      = new_prog->insnsi + i + delta;
13343 			continue;
13344 		}
13345 
13346 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
13347 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
13348 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
13349 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
13350 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
13351 			struct bpf_insn *patch = &insn_buf[0];
13352 			bool issrc, isneg, isimm;
13353 			u32 off_reg;
13354 
13355 			aux = &env->insn_aux_data[i + delta];
13356 			if (!aux->alu_state ||
13357 			    aux->alu_state == BPF_ALU_NON_POINTER)
13358 				continue;
13359 
13360 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
13361 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
13362 				BPF_ALU_SANITIZE_SRC;
13363 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
13364 
13365 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
13366 			if (isimm) {
13367 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13368 			} else {
13369 				if (isneg)
13370 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13371 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13372 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
13373 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
13374 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
13375 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
13376 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
13377 			}
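			/* Sketch of the non-immediate masking sequence emitted above:
			 *   AX = alu_limit - off_reg
			 *   AX |= off_reg          (sign bit set if off_reg < 0 or
			 *                           off_reg > alu_limit)
			 *   AX = -AX;  AX s>>= 63  (all-ones mask iff off_reg was
			 *                           within [0, alu_limit])
			 *   AX &= off_reg
			 * so AX holds off_reg when it is inside the verified bounds and
			 * 0 otherwise, without any branch the CPU could mis-speculate.
			 */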
13378 			if (!issrc)
13379 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
13380 			insn->src_reg = BPF_REG_AX;
13381 			if (isneg)
13382 				insn->code = insn->code == code_add ?
13383 					     code_sub : code_add;
13384 			*patch++ = *insn;
13385 			if (issrc && isneg && !isimm)
13386 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13387 			cnt = patch - insn_buf;
13388 
13389 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13390 			if (!new_prog)
13391 				return -ENOMEM;
13392 
13393 			delta    += cnt - 1;
13394 			env->prog = prog = new_prog;
13395 			insn      = new_prog->insnsi + i + delta;
13396 			continue;
13397 		}
13398 
13399 		if (insn->code != (BPF_JMP | BPF_CALL))
13400 			continue;
13401 		if (insn->src_reg == BPF_PSEUDO_CALL)
13402 			continue;
13403 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
13404 			ret = fixup_kfunc_call(env, insn);
13405 			if (ret)
13406 				return ret;
13407 			continue;
13408 		}
13409 
13410 		if (insn->imm == BPF_FUNC_get_route_realm)
13411 			prog->dst_needed = 1;
13412 		if (insn->imm == BPF_FUNC_get_prandom_u32)
13413 			bpf_user_rnd_init_once();
13414 		if (insn->imm == BPF_FUNC_override_return)
13415 			prog->kprobe_override = 1;
13416 		if (insn->imm == BPF_FUNC_tail_call) {
13417 			/* If we tail call into other programs, we
13418 			 * cannot make any assumptions since they can
13419 			 * be replaced dynamically during runtime in
13420 			 * the program array.
13421 			 */
13422 			prog->cb_access = 1;
13423 			if (!allow_tail_call_in_subprogs(env))
13424 				prog->aux->stack_depth = MAX_BPF_STACK;
13425 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
13426 
13427 			/* mark bpf_tail_call as different opcode to avoid
13428 			 * conditional branch in the interpreter for every normal
13429 			 * call and to prevent accidental JITing by JIT compiler
13430 			 * that doesn't support bpf_tail_call yet
13431 			 */
13432 			insn->imm = 0;
13433 			insn->code = BPF_JMP | BPF_TAIL_CALL;
13434 
13435 			aux = &env->insn_aux_data[i + delta];
13436 			if (env->bpf_capable && !prog->blinding_requested &&
13437 			    prog->jit_requested &&
13438 			    !bpf_map_key_poisoned(aux) &&
13439 			    !bpf_map_ptr_poisoned(aux) &&
13440 			    !bpf_map_ptr_unpriv(aux)) {
13441 				struct bpf_jit_poke_descriptor desc = {
13442 					.reason = BPF_POKE_REASON_TAIL_CALL,
13443 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
13444 					.tail_call.key = bpf_map_key_immediate(aux),
13445 					.insn_idx = i + delta,
13446 				};
13447 
13448 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
13449 				if (ret < 0) {
13450 					verbose(env, "adding tail call poke descriptor failed\n");
13451 					return ret;
13452 				}
13453 
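				/* The poke table slot is stored 1-based so that imm == 0
				 * keeps meaning "no poke descriptor"; JITs with direct
				 * tail-call support are expected to index
				 * poke_tab[imm - 1] again.
				 */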
13454 				insn->imm = ret + 1;
13455 				continue;
13456 			}
13457 
13458 			if (!bpf_map_ptr_unpriv(aux))
13459 				continue;
13460 
13461 			/* instead of changing every JIT dealing with tail_call
13462 			 * emit two extra insns:
13463 			 * if (index >= max_entries) goto out;
13464 			 * index &= array->index_mask;
13465 			 * to avoid out-of-bounds cpu speculation
13466 			 */
13467 			if (bpf_map_ptr_poisoned(aux)) {
13468 				verbose(env, "tail_call abusing map_ptr\n");
13469 				return -EINVAL;
13470 			}
13471 
13472 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
13473 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
13474 						  map_ptr->max_entries, 2);
13475 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
13476 						    container_of(map_ptr,
13477 								 struct bpf_array,
13478 								 map)->index_mask);
13479 			insn_buf[2] = *insn;
13480 			cnt = 3;
13481 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13482 			if (!new_prog)
13483 				return -ENOMEM;
13484 
13485 			delta    += cnt - 1;
13486 			env->prog = prog = new_prog;
13487 			insn      = new_prog->insnsi + i + delta;
13488 			continue;
13489 		}
13490 
13491 		if (insn->imm == BPF_FUNC_timer_set_callback) {
13492 			/* The verifier will process callback_fn as many times as necessary
13493 			 * with different maps and the register states prepared by
13494 			 * set_timer_callback_state will be accurate.
13495 			 *
13496 			 * The following use case is valid:
13497 			 *   map1 is shared by prog1, prog2, prog3.
13498 			 *   prog1 calls bpf_timer_init for some map1 elements
13499 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
13500 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
13501 			 *   prog3 calls bpf_timer_start for some map1 elements.
13502 			 *     Those that were not both bpf_timer_init-ed and
13503 			 *     bpf_timer_set_callback-ed will return -EINVAL.
13504 			 */
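			/* bpf_timer_set_callback() also takes the owning program's aux
			 * pointer as a hidden third argument (so the helper can keep
			 * the program alive while its callback is armed); materialize
			 * prog->aux in R3 via a two-insn BPF_LD_IMM64 before the call.
			 */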
13505 			struct bpf_insn ld_addrs[2] = {
13506 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
13507 			};
13508 
13509 			insn_buf[0] = ld_addrs[0];
13510 			insn_buf[1] = ld_addrs[1];
13511 			insn_buf[2] = *insn;
13512 			cnt = 3;
13513 
13514 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13515 			if (!new_prog)
13516 				return -ENOMEM;
13517 
13518 			delta    += cnt - 1;
13519 			env->prog = prog = new_prog;
13520 			insn      = new_prog->insnsi + i + delta;
13521 			goto patch_call_imm;
13522 		}
13523 
13524 		if (insn->imm == BPF_FUNC_task_storage_get ||
13525 		    insn->imm == BPF_FUNC_sk_storage_get ||
13526 		    insn->imm == BPF_FUNC_inode_storage_get) {
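			/* These storage helpers take a hidden fifth argument carrying
			 * the allocation flags: sleepable programs may block and get
			 * GFP_KERNEL, everything else must use GFP_ATOMIC.
			 */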
13527 			if (env->prog->aux->sleepable)
13528 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
13529 			else
13530 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
13531 			insn_buf[1] = *insn;
13532 			cnt = 2;
13533 
13534 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13535 			if (!new_prog)
13536 				return -ENOMEM;
13537 
13538 			delta += cnt - 1;
13539 			env->prog = prog = new_prog;
13540 			insn = new_prog->insnsi + i + delta;
13541 			goto patch_call_imm;
13542 		}
13543 
13544 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
13545 		 * and other inlining handlers are currently limited to 64 bit
13546 		 * only.
13547 		 */
13548 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
13549 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
13550 		     insn->imm == BPF_FUNC_map_update_elem ||
13551 		     insn->imm == BPF_FUNC_map_delete_elem ||
13552 		     insn->imm == BPF_FUNC_map_push_elem   ||
13553 		     insn->imm == BPF_FUNC_map_pop_elem    ||
13554 		     insn->imm == BPF_FUNC_map_peek_elem   ||
13555 		     insn->imm == BPF_FUNC_redirect_map    ||
13556 		     insn->imm == BPF_FUNC_for_each_map_elem)) {
13557 			aux = &env->insn_aux_data[i + delta];
13558 			if (bpf_map_ptr_poisoned(aux))
13559 				goto patch_call_imm;
13560 
13561 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
13562 			ops = map_ptr->ops;
13563 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
13564 			    ops->map_gen_lookup) {
13565 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
13566 				if (cnt == -EOPNOTSUPP)
13567 					goto patch_map_ops_generic;
13568 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
13569 					verbose(env, "bpf verifier is misconfigured\n");
13570 					return -EINVAL;
13571 				}
13572 
13573 				new_prog = bpf_patch_insn_data(env, i + delta,
13574 							       insn_buf, cnt);
13575 				if (!new_prog)
13576 					return -ENOMEM;
13577 
13578 				delta    += cnt - 1;
13579 				env->prog = prog = new_prog;
13580 				insn      = new_prog->insnsi + i + delta;
13581 				continue;
13582 			}
13583 
13584 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
13585 				     (void *(*)(struct bpf_map *map, void *key))NULL));
13586 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
13587 				     (int (*)(struct bpf_map *map, void *key))NULL));
13588 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
13589 				     (int (*)(struct bpf_map *map, void *key, void *value,
13590 					      u64 flags))NULL));
13591 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
13592 				     (int (*)(struct bpf_map *map, void *value,
13593 					      u64 flags))NULL));
13594 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
13595 				     (int (*)(struct bpf_map *map, void *value))NULL));
13596 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
13597 				     (int (*)(struct bpf_map *map, void *value))NULL));
13598 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
13599 				     (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
13600 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
13601 				     (int (*)(struct bpf_map *map,
13602 					      bpf_callback_t callback_fn,
13603 					      void *callback_ctx,
13604 					      u64 flags))NULL));
13605 
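			/* The BUILD_BUG_ON()s above pin the map_ops callbacks to the
			 * prototypes assumed by the direct calls patched in below, so
			 * rewriting insn->imm to BPF_CALL_IMM(ops->...) stays
			 * ABI-compatible with the original helper call.
			 */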
13606 patch_map_ops_generic:
13607 			switch (insn->imm) {
13608 			case BPF_FUNC_map_lookup_elem:
13609 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
13610 				continue;
13611 			case BPF_FUNC_map_update_elem:
13612 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
13613 				continue;
13614 			case BPF_FUNC_map_delete_elem:
13615 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
13616 				continue;
13617 			case BPF_FUNC_map_push_elem:
13618 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
13619 				continue;
13620 			case BPF_FUNC_map_pop_elem:
13621 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
13622 				continue;
13623 			case BPF_FUNC_map_peek_elem:
13624 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
13625 				continue;
13626 			case BPF_FUNC_redirect_map:
13627 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
13628 				continue;
13629 			case BPF_FUNC_for_each_map_elem:
13630 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
13631 				continue;
13632 			}
13633 
13634 			goto patch_call_imm;
13635 		}
13636 
13637 		/* Implement bpf_jiffies64 inline. */
13638 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
13639 		    insn->imm == BPF_FUNC_jiffies64) {
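			/* Presumably safe because on BITS_PER_LONG == 64 kernels
			 * 'jiffies' aliases 'jiffies_64', so an LD_IMM64 of its address
			 * followed by an 8-byte load yields the full 64-bit counter.
			 */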
13640 			struct bpf_insn ld_jiffies_addr[2] = {
13641 				BPF_LD_IMM64(BPF_REG_0,
13642 					     (unsigned long)&jiffies),
13643 			};
13644 
13645 			insn_buf[0] = ld_jiffies_addr[0];
13646 			insn_buf[1] = ld_jiffies_addr[1];
13647 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
13648 						  BPF_REG_0, 0);
13649 			cnt = 3;
13650 
13651 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
13652 						       cnt);
13653 			if (!new_prog)
13654 				return -ENOMEM;
13655 
13656 			delta    += cnt - 1;
13657 			env->prog = prog = new_prog;
13658 			insn      = new_prog->insnsi + i + delta;
13659 			continue;
13660 		}
13661 
13662 		/* Implement bpf_get_func_arg inline. */
13663 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13664 		    insn->imm == BPF_FUNC_get_func_arg) {
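			/* Rough C equivalent of the sequence below, with R1 = tracing
			 * ctx, R2 = argument index n, R3 = output pointer:
			 *   nr_args = *(u64 *)(ctx - 8);
			 *   if (n >= nr_args) return -EINVAL;
			 *   *value = ((u64 *)ctx)[n];
			 *   return 0;
			 */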
13665 			/* Load nr_args from ctx - 8 */
13666 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13667 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
13668 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
13669 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
13670 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
13671 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
13672 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
13673 			insn_buf[7] = BPF_JMP_A(1);
13674 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
13675 			cnt = 9;
13676 
13677 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13678 			if (!new_prog)
13679 				return -ENOMEM;
13680 
13681 			delta    += cnt - 1;
13682 			env->prog = prog = new_prog;
13683 			insn      = new_prog->insnsi + i + delta;
13684 			continue;
13685 		}
13686 
13687 		/* Implement bpf_get_func_ret inline. */
13688 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13689 		    insn->imm == BPF_FUNC_get_func_ret) {
13690 			if (eatype == BPF_TRACE_FEXIT ||
13691 			    eatype == BPF_MODIFY_RETURN) {
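				/* Rough C equivalent: the return value slot follows the
				 * nr_args argument slots in the tracing ctx:
				 *   nr_args = *(u64 *)(ctx - 8);
				 *   *value = ((u64 *)ctx)[nr_args];
				 *   return 0;
				 */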
13692 				/* Load nr_args from ctx - 8 */
13693 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13694 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
13695 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
13696 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
13697 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
13698 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
13699 				cnt = 6;
13700 			} else {
13701 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
13702 				cnt = 1;
13703 			}
13704 
13705 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13706 			if (!new_prog)
13707 				return -ENOMEM;
13708 
13709 			delta    += cnt - 1;
13710 			env->prog = prog = new_prog;
13711 			insn      = new_prog->insnsi + i + delta;
13712 			continue;
13713 		}
13714 
13715 		/* Implement get_func_arg_cnt inline. */
13716 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13717 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
13718 			/* Load nr_args from ctx - 8 */
13719 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
13720 
13721 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
13722 			if (!new_prog)
13723 				return -ENOMEM;
13724 
13725 			env->prog = prog = new_prog;
13726 			insn      = new_prog->insnsi + i + delta;
13727 			continue;
13728 		}
13729 
13730 		/* Implement bpf_get_func_ip inline. */
13731 		if (prog_type == BPF_PROG_TYPE_TRACING &&
13732 		    insn->imm == BPF_FUNC_get_func_ip) {
13733 			/* Load IP address from ctx - 16 */
13734 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
13735 
13736 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
13737 			if (!new_prog)
13738 				return -ENOMEM;
13739 
13740 			env->prog = prog = new_prog;
13741 			insn      = new_prog->insnsi + i + delta;
13742 			continue;
13743 		}
13744 
13745 patch_call_imm:
13746 		fn = env->ops->get_func_proto(insn->imm, env->prog);
13747 		/* all functions that have a prototype and that the verifier
13748 		 * allowed programs to call must be real in-kernel functions
13749 		 */
13750 		if (!fn->func) {
13751 			verbose(env,
13752 				"kernel subsystem misconfigured func %s#%d\n",
13753 				func_id_name(insn->imm), insn->imm);
13754 			return -EFAULT;
13755 		}
13756 		insn->imm = fn->func - __bpf_call_base;
13757 	}
13758 
13759 	/* Since poke tab is now finalized, publish aux to tracker. */
13760 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
13761 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
13762 		if (!map_ptr->ops->map_poke_track ||
13763 		    !map_ptr->ops->map_poke_untrack ||
13764 		    !map_ptr->ops->map_poke_run) {
13765 			verbose(env, "bpf verifier is misconfigured\n");
13766 			return -EINVAL;
13767 		}
13768 
13769 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
13770 		if (ret < 0) {
13771 			verbose(env, "tracking tail call prog failed\n");
13772 			return ret;
13773 		}
13774 	}
13775 
13776 	sort_kfunc_descs_by_imm(env->prog);
13777 
13778 	return 0;
13779 }
13780 
13781 static void free_states(struct bpf_verifier_env *env)
13782 {
13783 	struct bpf_verifier_state_list *sl, *sln;
13784 	int i;
13785 
13786 	sl = env->free_list;
13787 	while (sl) {
13788 		sln = sl->next;
13789 		free_verifier_state(&sl->state, false);
13790 		kfree(sl);
13791 		sl = sln;
13792 	}
13793 	env->free_list = NULL;
13794 
13795 	if (!env->explored_states)
13796 		return;
13797 
13798 	for (i = 0; i < state_htab_size(env); i++) {
13799 		sl = env->explored_states[i];
13800 
13801 		while (sl) {
13802 			sln = sl->next;
13803 			free_verifier_state(&sl->state, false);
13804 			kfree(sl);
13805 			sl = sln;
13806 		}
13807 		env->explored_states[i] = NULL;
13808 	}
13809 }
13810 
13811 static int do_check_common(struct bpf_verifier_env *env, int subprog)
13812 {
13813 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
13814 	struct bpf_verifier_state *state;
13815 	struct bpf_reg_state *regs;
13816 	int ret, i;
13817 
13818 	env->prev_linfo = NULL;
13819 	env->pass_cnt++;
13820 
13821 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
13822 	if (!state)
13823 		return -ENOMEM;
13824 	state->curframe = 0;
13825 	state->speculative = false;
13826 	state->branches = 1;
13827 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
13828 	if (!state->frame[0]) {
13829 		kfree(state);
13830 		return -ENOMEM;
13831 	}
13832 	env->cur_state = state;
13833 	init_func_state(env, state->frame[0],
13834 			BPF_MAIN_FUNC /* callsite */,
13835 			0 /* frameno */,
13836 			subprog);
13837 
13838 	regs = state->frame[state->curframe]->regs;
13839 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
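		/* Global subprogs and freplace programs are verified against their
		 * BTF signature: btf_prepare_func_args() seeds R1-R5 from the
		 * prototype and the loop below only refines the ctx, scalar and
		 * memory argument registers it produced.
		 */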
13840 		ret = btf_prepare_func_args(env, subprog, regs);
13841 		if (ret)
13842 			goto out;
13843 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
13844 			if (regs[i].type == PTR_TO_CTX)
13845 				mark_reg_known_zero(env, regs, i);
13846 			else if (regs[i].type == SCALAR_VALUE)
13847 				mark_reg_unknown(env, regs, i);
13848 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
13849 				const u32 mem_size = regs[i].mem_size;
13850 
13851 				mark_reg_known_zero(env, regs, i);
13852 				regs[i].mem_size = mem_size;
13853 				regs[i].id = ++env->id_gen;
13854 			}
13855 		}
13856 	} else {
13857 		/* 1st arg to a function */
13858 		regs[BPF_REG_1].type = PTR_TO_CTX;
13859 		mark_reg_known_zero(env, regs, BPF_REG_1);
13860 		ret = btf_check_subprog_arg_match(env, subprog, regs);
13861 		if (ret == -EFAULT)
13862 			/* unlikely verifier bug. abort.
13863 			 * ret == 0 and ret < 0 are sadly acceptable for
13864 			 * main() function due to backward compatibility.
13865 			 * E.g. a socket filter program may be written as:
13866 			 * int bpf_prog(struct pt_regs *ctx)
13867 			 * and never dereference that ctx in the program.
13868 			 * 'struct pt_regs' is a type mismatch for a socket
13869 			 * filter, which should be using 'struct __sk_buff'.
13870 			 */
13871 			goto out;
13872 	}
13873 
13874 	ret = do_check(env);
13875 out:
13876 	/* check for NULL is necessary, since cur_state can be freed inside
13877 	 * do_check() under memory pressure.
13878 	 */
13879 	if (env->cur_state) {
13880 		free_verifier_state(env->cur_state, true);
13881 		env->cur_state = NULL;
13882 	}
13883 	while (!pop_stack(env, NULL, NULL, false));
13884 	if (!ret && pop_log)
13885 		bpf_vlog_reset(&env->log, 0);
13886 	free_states(env);
13887 	return ret;
13888 }
13889 
13890 /* Verify all global functions in a BPF program one by one based on their BTF.
13891  * All global functions must pass verification. Otherwise the whole program is rejected.
13892  * Consider:
13893  * int bar(int);
13894  * int foo(int f)
13895  * {
13896  *    return bar(f);
13897  * }
13898  * int bar(int b)
13899  * {
13900  *    ...
13901  * }
13902  * foo() will be verified first for R1=any_scalar_value. During verification it
13903  * will be assumed that bar() already verified successfully and call to bar()
13904  * from foo() will be checked for type match only. Later bar() will be verified
13905  * independently to check that it's safe for R1=any_scalar_value.
13906  */
13907 static int do_check_subprogs(struct bpf_verifier_env *env)
13908 {
13909 	struct bpf_prog_aux *aux = env->prog->aux;
13910 	int i, ret;
13911 
13912 	if (!aux->func_info)
13913 		return 0;
13914 
13915 	for (i = 1; i < env->subprog_cnt; i++) {
13916 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
13917 			continue;
13918 		env->insn_idx = env->subprog_info[i].start;
13919 		WARN_ON_ONCE(env->insn_idx == 0);
13920 		ret = do_check_common(env, i);
13921 		if (ret) {
13922 			return ret;
13923 		} else if (env->log.level & BPF_LOG_LEVEL) {
13924 			verbose(env,
13925 				"Func#%d is safe for any args that match its prototype\n",
13926 				i);
13927 		}
13928 	}
13929 	return 0;
13930 }
13931 
13932 static int do_check_main(struct bpf_verifier_env *env)
13933 {
13934 	int ret;
13935 
13936 	env->insn_idx = 0;
13937 	ret = do_check_common(env, 0);
13938 	if (!ret)
13939 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
13940 	return ret;
13941 }
13942 
13944 static void print_verification_stats(struct bpf_verifier_env *env)
13945 {
13946 	int i;
13947 
13948 	if (env->log.level & BPF_LOG_STATS) {
13949 		verbose(env, "verification time %lld usec\n",
13950 			div_u64(env->verification_time, 1000));
13951 		verbose(env, "stack depth ");
13952 		for (i = 0; i < env->subprog_cnt; i++) {
13953 			u32 depth = env->subprog_info[i].stack_depth;
13954 
13955 			verbose(env, "%d", depth);
13956 			if (i + 1 < env->subprog_cnt)
13957 				verbose(env, "+");
13958 		}
13959 		verbose(env, "\n");
13960 	}
13961 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
13962 		"total_states %d peak_states %d mark_read %d\n",
13963 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
13964 		env->max_states_per_insn, env->total_states,
13965 		env->peak_states, env->longest_mark_read_walk);
13966 }
13967 
13968 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
13969 {
13970 	const struct btf_type *t, *func_proto;
13971 	const struct bpf_struct_ops *st_ops;
13972 	const struct btf_member *member;
13973 	struct bpf_prog *prog = env->prog;
13974 	u32 btf_id, member_idx;
13975 	const char *mname;
13976 
13977 	if (!prog->gpl_compatible) {
13978 		verbose(env, "struct ops programs must have a GPL compatible license\n");
13979 		return -EINVAL;
13980 	}
13981 
13982 	btf_id = prog->aux->attach_btf_id;
13983 	st_ops = bpf_struct_ops_find(btf_id);
13984 	if (!st_ops) {
13985 		verbose(env, "attach_btf_id %u is not a supported struct\n",
13986 			btf_id);
13987 		return -ENOTSUPP;
13988 	}
13989 
13990 	t = st_ops->type;
13991 	member_idx = prog->expected_attach_type;
13992 	if (member_idx >= btf_type_vlen(t)) {
13993 		verbose(env, "attach to invalid member idx %u of struct %s\n",
13994 			member_idx, st_ops->name);
13995 		return -EINVAL;
13996 	}
13997 
13998 	member = &btf_type_member(t)[member_idx];
13999 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
14000 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
14001 					       NULL);
14002 	if (!func_proto) {
14003 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
14004 			mname, member_idx, st_ops->name);
14005 		return -EINVAL;
14006 	}
14007 
14008 	if (st_ops->check_member) {
14009 		int err = st_ops->check_member(t, member);
14010 
14011 		if (err) {
14012 			verbose(env, "attach to unsupported member %s of struct %s\n",
14013 				mname, st_ops->name);
14014 			return err;
14015 		}
14016 	}
14017 
14018 	prog->aux->attach_func_proto = func_proto;
14019 	prog->aux->attach_func_name = mname;
14020 	env->ops = st_ops->verifier_ops;
14021 
14022 	return 0;
14023 }
14024 #define SECURITY_PREFIX "security_"
14025 
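/* BPF_MODIFY_RETURN may only attach to functions that are either on the
 * error-injection allow list or are security_*() LSM hooks (matched by the
 * SECURITY_PREFIX name check below).
 */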
14026 static int check_attach_modify_return(unsigned long addr, const char *func_name)
14027 {
14028 	if (within_error_injection_list(addr) ||
14029 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
14030 		return 0;
14031 
14032 	return -EINVAL;
14033 }
14034 
14035 /* list of non-sleepable functions that are otherwise on
14036  * ALLOW_ERROR_INJECTION list
14037  */
14038 BTF_SET_START(btf_non_sleepable_error_inject)
14039 /* Three functions below can be called from sleepable and non-sleepable context.
14040  * Assume non-sleepable from bpf safety point of view.
14041  */
14042 BTF_ID(func, __filemap_add_folio)
14043 BTF_ID(func, should_fail_alloc_page)
14044 BTF_ID(func, should_failslab)
14045 BTF_SET_END(btf_non_sleepable_error_inject)
14046 
14047 static int check_non_sleepable_error_inject(u32 btf_id)
14048 {
14049 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
14050 }
14051 
14052 int bpf_check_attach_target(struct bpf_verifier_log *log,
14053 			    const struct bpf_prog *prog,
14054 			    const struct bpf_prog *tgt_prog,
14055 			    u32 btf_id,
14056 			    struct bpf_attach_target_info *tgt_info)
14057 {
14058 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
14059 	const char prefix[] = "btf_trace_";
14060 	int ret = 0, subprog = -1, i;
14061 	const struct btf_type *t;
14062 	bool conservative = true;
14063 	const char *tname;
14064 	struct btf *btf;
14065 	long addr = 0;
14066 
14067 	if (!btf_id) {
14068 		bpf_log(log, "Tracing programs must provide btf_id\n");
14069 		return -EINVAL;
14070 	}
14071 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
14072 	if (!btf) {
14073 		bpf_log(log,
14074 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
14075 		return -EINVAL;
14076 	}
14077 	t = btf_type_by_id(btf, btf_id);
14078 	if (!t) {
14079 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
14080 		return -EINVAL;
14081 	}
14082 	tname = btf_name_by_offset(btf, t->name_off);
14083 	if (!tname) {
14084 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
14085 		return -EINVAL;
14086 	}
14087 	if (tgt_prog) {
14088 		struct bpf_prog_aux *aux = tgt_prog->aux;
14089 
14090 		for (i = 0; i < aux->func_info_cnt; i++)
14091 			if (aux->func_info[i].type_id == btf_id) {
14092 				subprog = i;
14093 				break;
14094 			}
14095 		if (subprog == -1) {
14096 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
14097 			return -EINVAL;
14098 		}
14099 		conservative = aux->func_info_aux[subprog].unreliable;
14100 		if (prog_extension) {
14101 			if (conservative) {
14102 				bpf_log(log,
14103 					"Cannot replace static functions\n");
14104 				return -EINVAL;
14105 			}
14106 			if (!prog->jit_requested) {
14107 				bpf_log(log,
14108 					"Extension programs should be JITed\n");
14109 				return -EINVAL;
14110 			}
14111 		}
14112 		if (!tgt_prog->jited) {
14113 			bpf_log(log, "Can attach to only JITed progs\n");
14114 			return -EINVAL;
14115 		}
14116 		if (tgt_prog->type == prog->type) {
14117 			/* Cannot fentry/fexit another fentry/fexit program.
14118 			 * Cannot attach program extension to another extension.
14119 			 * It's ok to attach fentry/fexit to extension program.
14120 			 */
14121 			bpf_log(log, "Cannot recursively attach\n");
14122 			return -EINVAL;
14123 		}
14124 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
14125 		    prog_extension &&
14126 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
14127 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
14128 			/* Program extensions can extend all program types
14129 			 * except fentry/fexit. The reason is the following.
14130 			 * The fentry/fexit programs are used for performance
14131 			 * analysis, stats and can be attached to any program
14132 			 * type except themselves. When extension program is
14133 			 * replacing XDP function it is necessary to allow
14134 			 * performance analysis of all functions. Both original
14135 			 * XDP program and its program extension. Hence
14136 			 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
14137 			 * allowed. If extending of fentry/fexit was allowed it
14138 			 * would be possible to create long call chain
14139 			 * fentry->extension->fentry->extension beyond
14140 			 * reasonable stack size. Hence extending fentry is not
14141 			 * allowed.
14142 			 */
14143 			bpf_log(log, "Cannot extend fentry/fexit\n");
14144 			return -EINVAL;
14145 		}
14146 	} else {
14147 		if (prog_extension) {
14148 			bpf_log(log, "Cannot replace kernel functions\n");
14149 			return -EINVAL;
14150 		}
14151 	}
14152 
14153 	switch (prog->expected_attach_type) {
14154 	case BPF_TRACE_RAW_TP:
14155 		if (tgt_prog) {
14156 			bpf_log(log,
14157 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
14158 			return -EINVAL;
14159 		}
14160 		if (!btf_type_is_typedef(t)) {
14161 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
14162 				btf_id);
14163 			return -EINVAL;
14164 		}
14165 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
14166 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
14167 				btf_id, tname);
14168 			return -EINVAL;
14169 		}
14170 		tname += sizeof(prefix) - 1;
14171 		t = btf_type_by_id(btf, t->type);
14172 		if (!btf_type_is_ptr(t))
14173 			/* should never happen in valid vmlinux build */
14174 			return -EINVAL;
14175 		t = btf_type_by_id(btf, t->type);
14176 		if (!btf_type_is_func_proto(t))
14177 			/* should never happen in valid vmlinux build */
14178 			return -EINVAL;
14179 
14180 		break;
14181 	case BPF_TRACE_ITER:
14182 		if (!btf_type_is_func(t)) {
14183 			bpf_log(log, "attach_btf_id %u is not a function\n",
14184 				btf_id);
14185 			return -EINVAL;
14186 		}
14187 		t = btf_type_by_id(btf, t->type);
14188 		if (!btf_type_is_func_proto(t))
14189 			return -EINVAL;
14190 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14191 		if (ret)
14192 			return ret;
14193 		break;
14194 	default:
14195 		if (!prog_extension)
14196 			return -EINVAL;
14197 		fallthrough;
14198 	case BPF_MODIFY_RETURN:
14199 	case BPF_LSM_MAC:
14200 	case BPF_TRACE_FENTRY:
14201 	case BPF_TRACE_FEXIT:
14202 		if (!btf_type_is_func(t)) {
14203 			bpf_log(log, "attach_btf_id %u is not a function\n",
14204 				btf_id);
14205 			return -EINVAL;
14206 		}
14207 		if (prog_extension &&
14208 		    btf_check_type_match(log, prog, btf, t))
14209 			return -EINVAL;
14210 		t = btf_type_by_id(btf, t->type);
14211 		if (!btf_type_is_func_proto(t))
14212 			return -EINVAL;
14213 
14214 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
14215 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
14216 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
14217 			return -EINVAL;
14218 
14219 		if (tgt_prog && conservative)
14220 			t = NULL;
14221 
14222 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14223 		if (ret < 0)
14224 			return ret;
14225 
14226 		if (tgt_prog) {
14227 			if (subprog == 0)
14228 				addr = (long) tgt_prog->bpf_func;
14229 			else
14230 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
14231 		} else {
14232 			addr = kallsyms_lookup_name(tname);
14233 			if (!addr) {
14234 				bpf_log(log,
14235 					"The address of function %s cannot be found\n",
14236 					tname);
14237 				return -ENOENT;
14238 			}
14239 		}
14240 
14241 		if (prog->aux->sleepable) {
14242 			ret = -EINVAL;
14243 			switch (prog->type) {
14244 			case BPF_PROG_TYPE_TRACING:
14245 				/* fentry/fexit/fmod_ret progs can be sleepable only if they are
14246 				 * attached to ALLOW_ERROR_INJECTION and are not in denylist.
14247 				 */
14248 				if (!check_non_sleepable_error_inject(btf_id) &&
14249 				    within_error_injection_list(addr))
14250 					ret = 0;
14251 				break;
14252 			case BPF_PROG_TYPE_LSM:
14253 				/* LSM progs check that they are attached to bpf_lsm_*() funcs.
14254 				 * Only some of them are sleepable.
14255 				 */
14256 				if (bpf_lsm_is_sleepable_hook(btf_id))
14257 					ret = 0;
14258 				break;
14259 			default:
14260 				break;
14261 			}
14262 			if (ret) {
14263 				bpf_log(log, "%s is not sleepable\n", tname);
14264 				return ret;
14265 			}
14266 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
14267 			if (tgt_prog) {
14268 				bpf_log(log, "can't modify return codes of BPF programs\n");
14269 				return -EINVAL;
14270 			}
14271 			ret = check_attach_modify_return(addr, tname);
14272 			if (ret) {
14273 				bpf_log(log, "%s() is not modifiable\n", tname);
14274 				return ret;
14275 			}
14276 		}
14277 
14278 		break;
14279 	}
14280 	tgt_info->tgt_addr = addr;
14281 	tgt_info->tgt_name = tname;
14282 	tgt_info->tgt_type = t;
14283 	return 0;
14284 }
14285 
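/* Functions in this set are denied as attach targets for tracing programs,
 * presumably because BPF program invocation itself relies on them (e.g. the
 * migrate_disable()/migrate_enable() pair wrapped around program runs).
 */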
14286 BTF_SET_START(btf_id_deny)
14287 BTF_ID_UNUSED
14288 #ifdef CONFIG_SMP
14289 BTF_ID(func, migrate_disable)
14290 BTF_ID(func, migrate_enable)
14291 #endif
14292 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
14293 BTF_ID(func, rcu_read_unlock_strict)
14294 #endif
14295 BTF_SET_END(btf_id_deny)
14296 
14297 static int check_attach_btf_id(struct bpf_verifier_env *env)
14298 {
14299 	struct bpf_prog *prog = env->prog;
14300 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
14301 	struct bpf_attach_target_info tgt_info = {};
14302 	u32 btf_id = prog->aux->attach_btf_id;
14303 	struct bpf_trampoline *tr;
14304 	int ret;
14305 	u64 key;
14306 
14307 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
14308 		if (prog->aux->sleepable)
14309 			/* attach_btf_id checked to be zero already */
14310 			return 0;
14311 		verbose(env, "Syscall programs can only be sleepable\n");
14312 		return -EINVAL;
14313 	}
14314 
14315 	if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
14316 	    prog->type != BPF_PROG_TYPE_LSM) {
14317 		verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
14318 		return -EINVAL;
14319 	}
14320 
14321 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
14322 		return check_struct_ops_btf_id(env);
14323 
14324 	if (prog->type != BPF_PROG_TYPE_TRACING &&
14325 	    prog->type != BPF_PROG_TYPE_LSM &&
14326 	    prog->type != BPF_PROG_TYPE_EXT)
14327 		return 0;
14328 
14329 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
14330 	if (ret)
14331 		return ret;
14332 
14333 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
14334 		/* to make freplace equivalent to their targets, they need to
14335 		 * inherit env->ops and expected_attach_type for the rest of the
14336 		 * verification
14337 		 */
14338 		env->ops = bpf_verifier_ops[tgt_prog->type];
14339 		prog->expected_attach_type = tgt_prog->expected_attach_type;
14340 	}
14341 
14342 	/* store info about the attachment target that will be used later */
14343 	prog->aux->attach_func_proto = tgt_info.tgt_type;
14344 	prog->aux->attach_func_name = tgt_info.tgt_name;
14345 
14346 	if (tgt_prog) {
14347 		prog->aux->saved_dst_prog_type = tgt_prog->type;
14348 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
14349 	}
14350 
14351 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
14352 		prog->aux->attach_btf_trace = true;
14353 		return 0;
14354 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
14355 		if (!bpf_iter_prog_supported(prog))
14356 			return -EINVAL;
14357 		return 0;
14358 	}
14359 
14360 	if (prog->type == BPF_PROG_TYPE_LSM) {
14361 		ret = bpf_lsm_verify_prog(&env->log, prog);
14362 		if (ret < 0)
14363 			return ret;
14364 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
14365 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
14366 		return -EINVAL;
14367 	}
14368 
14369 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
14370 	tr = bpf_trampoline_get(key, &tgt_info);
14371 	if (!tr)
14372 		return -ENOMEM;
14373 
14374 	prog->aux->dst_trampoline = tr;
14375 	return 0;
14376 }
14377 
14378 struct btf *bpf_get_btf_vmlinux(void)
14379 {
14380 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
14381 		mutex_lock(&bpf_verifier_lock);
14382 		if (!btf_vmlinux)
14383 			btf_vmlinux = btf_parse_vmlinux();
14384 		mutex_unlock(&bpf_verifier_lock);
14385 	}
14386 	return btf_vmlinux;
14387 }
14388 
14389 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
14390 {
14391 	u64 start_time = ktime_get_ns();
14392 	struct bpf_verifier_env *env;
14393 	struct bpf_verifier_log *log;
14394 	int i, len, ret = -EINVAL;
14395 	bool is_priv;
14396 
14397 	/* no program is valid */
14398 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
14399 		return -EINVAL;
14400 
14401 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
14402 	 * allocate/free it every time bpf_check() is called
14403 	 */
14404 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
14405 	if (!env)
14406 		return -ENOMEM;
14407 	log = &env->log;
14408 
14409 	len = (*prog)->len;
14410 	env->insn_aux_data =
14411 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
14412 	ret = -ENOMEM;
14413 	if (!env->insn_aux_data)
14414 		goto err_free_env;
14415 	for (i = 0; i < len; i++)
14416 		env->insn_aux_data[i].orig_idx = i;
14417 	env->prog = *prog;
14418 	env->ops = bpf_verifier_ops[env->prog->type];
14419 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
14420 	is_priv = bpf_capable();
14421 
14422 	bpf_get_btf_vmlinux();
14423 
14424 	/* grab the mutex to protect a few globals used by the verifier */
14425 	if (!is_priv)
14426 		mutex_lock(&bpf_verifier_lock);
14427 
14428 	if (attr->log_level || attr->log_buf || attr->log_size) {
14429 		/* user requested verbose verifier output
14430 		 * and supplied buffer to store the verification trace
14431 		 */
14432 		log->level = attr->log_level;
14433 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
14434 		log->len_total = attr->log_size;
14435 
14436 		/* log attributes have to be sane */
14437 		if (!bpf_verifier_log_attr_valid(log)) {
14438 			ret = -EINVAL;
14439 			goto err_unlock;
14440 		}
14441 	}
14442 
14443 	mark_verifier_state_clean(env);
14444 
14445 	if (IS_ERR(btf_vmlinux)) {
14446 		/* Either gcc or pahole or the kernel is broken. */
14447 		verbose(env, "in-kernel BTF is malformed\n");
14448 		ret = PTR_ERR(btf_vmlinux);
14449 		goto skip_full_check;
14450 	}
14451 
14452 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
14453 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
14454 		env->strict_alignment = true;
14455 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
14456 		env->strict_alignment = false;
14457 
14458 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
14459 	env->allow_uninit_stack = bpf_allow_uninit_stack();
14460 	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
14461 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
14462 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
14463 	env->bpf_capable = bpf_capable();
14464 
14465 	if (is_priv)
14466 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
14467 
14468 	env->explored_states = kvcalloc(state_htab_size(env),
14469 				       sizeof(struct bpf_verifier_state_list *),
14470 				       GFP_USER);
14471 	ret = -ENOMEM;
14472 	if (!env->explored_states)
14473 		goto skip_full_check;
14474 
14475 	ret = add_subprog_and_kfunc(env);
14476 	if (ret < 0)
14477 		goto skip_full_check;
14478 
14479 	ret = check_subprogs(env);
14480 	if (ret < 0)
14481 		goto skip_full_check;
14482 
14483 	ret = check_btf_info(env, attr, uattr);
14484 	if (ret < 0)
14485 		goto skip_full_check;
14486 
14487 	ret = check_attach_btf_id(env);
14488 	if (ret)
14489 		goto skip_full_check;
14490 
14491 	ret = resolve_pseudo_ldimm64(env);
14492 	if (ret < 0)
14493 		goto skip_full_check;
14494 
14495 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
14496 		ret = bpf_prog_offload_verifier_prep(env->prog);
14497 		if (ret)
14498 			goto skip_full_check;
14499 	}
14500 
14501 	ret = check_cfg(env);
14502 	if (ret < 0)
14503 		goto skip_full_check;
14504 
14505 	ret = do_check_subprogs(env);
14506 	ret = ret ?: do_check_main(env);
14507 
14508 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
14509 		ret = bpf_prog_offload_finalize(env);
14510 
14511 skip_full_check:
14512 	kvfree(env->explored_states);
14513 
14514 	if (ret == 0)
14515 		ret = check_max_stack_depth(env);
14516 
14517 	/* instruction rewrites happen after this point */
14518 	if (is_priv) {
14519 		if (ret == 0)
14520 			opt_hard_wire_dead_code_branches(env);
14521 		if (ret == 0)
14522 			ret = opt_remove_dead_code(env);
14523 		if (ret == 0)
14524 			ret = opt_remove_nops(env);
14525 	} else {
14526 		if (ret == 0)
14527 			sanitize_dead_code(env);
14528 	}
14529 
14530 	if (ret == 0)
14531 		/* program is valid, convert *(u32*)(ctx + off) accesses */
14532 		ret = convert_ctx_accesses(env);
14533 
14534 	if (ret == 0)
14535 		ret = do_misc_fixups(env);
14536 
14537 	/* do the 32-bit optimization after insn patching is done so that the
14538 	 * patched insns are handled correctly.
14539 	 */
14540 	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
14541 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
14542 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
14543 								     : false;
14544 	}
14545 
14546 	if (ret == 0)
14547 		ret = fixup_call_args(env);
14548 
14549 	env->verification_time = ktime_get_ns() - start_time;
14550 	print_verification_stats(env);
14551 	env->prog->aux->verified_insns = env->insn_processed;
14552 
14553 	if (log->level && bpf_verifier_log_full(log))
14554 		ret = -ENOSPC;
14555 	if (log->level && !log->ubuf) {
14556 		ret = -EFAULT;
14557 		goto err_release_maps;
14558 	}
14559 
14560 	if (ret)
14561 		goto err_release_maps;
14562 
14563 	if (env->used_map_cnt) {
14564 		/* if program passed verifier, update used_maps in bpf_prog_info */
14565 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
14566 							  sizeof(env->used_maps[0]),
14567 							  GFP_KERNEL);
14568 
14569 		if (!env->prog->aux->used_maps) {
14570 			ret = -ENOMEM;
14571 			goto err_release_maps;
14572 		}
14573 
14574 		memcpy(env->prog->aux->used_maps, env->used_maps,
14575 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
14576 		env->prog->aux->used_map_cnt = env->used_map_cnt;
14577 	}
14578 	if (env->used_btf_cnt) {
14579 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
14580 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
14581 							  sizeof(env->used_btfs[0]),
14582 							  GFP_KERNEL);
14583 		if (!env->prog->aux->used_btfs) {
14584 			ret = -ENOMEM;
14585 			goto err_release_maps;
14586 		}
14587 
14588 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
14589 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
14590 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
14591 	}
14592 	if (env->used_map_cnt || env->used_btf_cnt) {
14593 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
14594 		 * bpf_ld_imm64 instructions
14595 		 */
14596 		convert_pseudo_ld_imm64(env);
14597 	}
14598 
14599 	adjust_btf_func(env);
14600 
14601 err_release_maps:
14602 	if (!env->prog->aux->used_maps)
14603 		/* if we didn't copy map pointers into bpf_prog_info, release
14604 		 * them now. Otherwise free_used_maps() will release them.
14605 		 */
14606 		release_maps(env);
14607 	if (!env->prog->aux->used_btfs)
14608 		release_btfs(env);
14609 
14610 	/* extension progs temporarily inherit the attach_type of their targets
14611 	 * for verification purposes, so set it back to zero before returning
14612 	 */
14613 	if (env->prog->type == BPF_PROG_TYPE_EXT)
14614 		env->prog->expected_attach_type = 0;
14615 
14616 	*prog = env->prog;
14617 err_unlock:
14618 	if (!is_priv)
14619 		mutex_unlock(&bpf_verifier_lock);
14620 	vfree(env->insn_aux_data);
14621 err_free_env:
14622 	kfree(env);
14623 	return ret;
14624 }
14625