xref: /linux/kernel/bpf/verifier.c (revision e5d3a64e650c721f9e9b1f76e5df8c62f16b734d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/bpf-cgroup.h>
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
12 #include <linux/btf.h>
13 #include <linux/bpf_verifier.h>
14 #include <linux/filter.h>
15 #include <net/netlink.h>
16 #include <linux/file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/stringify.h>
19 #include <linux/bsearch.h>
20 #include <linux/sort.h>
21 #include <linux/perf_event.h>
22 #include <linux/ctype.h>
23 #include <linux/error-injection.h>
24 #include <linux/bpf_lsm.h>
25 #include <linux/btf_ids.h>
26 #include <linux/poison.h>
27 
28 #include "disasm.h"
29 
30 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
31 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
32 	[_id] = & _name ## _verifier_ops,
33 #define BPF_MAP_TYPE(_id, _ops)
34 #define BPF_LINK_TYPE(_id, _name)
35 #include <linux/bpf_types.h>
36 #undef BPF_PROG_TYPE
37 #undef BPF_MAP_TYPE
38 #undef BPF_LINK_TYPE
39 };
40 
41 /* bpf_check() is a static code analyzer that walks eBPF program
42  * instruction by instruction and updates register/stack state.
43  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
44  *
45  * The first pass is depth-first-search to check that the program is a DAG.
46  * It rejects the following programs:
47  * - larger than BPF_MAXINSNS insns
48  * - if loop is present (detected via back-edge)
49  * - unreachable insns exist (shouldn't be a forest. program = one function)
50  * - out of bounds or malformed jumps
51  * The second pass is all possible path descent from the 1st insn.
52  * Since it's analyzing all paths through the program, the length of the
53  * analysis is limited to 64k insn, which may be hit even if the total number of
54  * insn is less than 4K, because there are too many branches that change stack/regs.
55  * Number of 'branches to be analyzed' is limited to 1k
56  *
57  * On entry to each instruction, each register has a type, and the instruction
58  * changes the types of the registers depending on instruction semantics.
59  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
60  * copied to R1.
61  *
62  * All registers are 64-bit.
63  * R0 - return register
64  * R1-R5 argument passing registers
65  * R6-R9 callee saved registers
66  * R10 - frame pointer read-only
67  *
68  * At the start of BPF program the register R1 contains a pointer to bpf_context
69  * and has type PTR_TO_CTX.
70  *
71  * Verifier tracks arithmetic operations on pointers in case:
72  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
74  * 1st insn copies R10 (which has FRAME_PTR) type into R1
75  * and 2nd arithmetic instruction is pattern matched to recognize
76  * that it wants to construct a pointer to some element within stack.
77  * So after 2nd insn, the register R1 has type PTR_TO_STACK
78  * (and -20 constant is saved for further stack bounds checking).
79  * Meaning that this reg is a pointer to stack plus known immediate constant.
80  *
81  * Most of the time the registers have SCALAR_VALUE type, which
82  * means the register has some value, but it's not a valid pointer.
83  * (like pointer plus pointer becomes SCALAR_VALUE type)
84  *
85  * When verifier sees load or store instructions the type of base register
86  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
87  * four of the pointer types recognized by the check_mem_access() function.
88  *
89  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
90  * and the range of [ptr, ptr + map's value_size) is accessible.
91  *
92  * Registers used to pass values to function calls are checked against
93  * function argument constraints.
94  *
95  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
96  * It means that the register type passed to this function must be
97  * PTR_TO_STACK and it will be used inside the function as
98  * 'pointer to map element key'
99  *
100  * For example the argument constraints for bpf_map_lookup_elem():
101  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
102  *   .arg1_type = ARG_CONST_MAP_PTR,
103  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
104  *
105  * ret_type says that this function returns 'pointer to map elem value or null'
106  * the function expects the 1st argument to be a const pointer to 'struct bpf_map' and
107  * the 2nd argument to be a pointer to the stack, which will be used inside
108  * the helper function as a pointer to map element key.
109  *
110  * On the kernel side the helper function looks like:
111  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
112  * {
113  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
114  *    void *key = (void *) (unsigned long) r2;
115  *    void *value;
116  *
117  *    here kernel can access 'key' and 'map' pointers safely, knowing that
118  *    [key, key + map->key_size) bytes are valid and were initialized on
119  *    the stack of eBPF program.
120  * }
121  *
122  * Corresponding eBPF program may look like:
123  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
124  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
125  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
126  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
127  * here verifier looks at prototype of map_lookup_elem() and sees:
128  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
129  * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
130  *
131  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
132  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
133  * and were initialized prior to this call.
134  * If it's ok, then verifier allows this BPF_CALL insn and looks at
135  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
136  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
137  * returns either pointer to map value or NULL.
138  *
139  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
140  * insn, the register holding that pointer in the true branch changes state to
141  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
142  * branch. See check_cond_jmp_op().
143  *
144  * After the call R0 is set to return type of the function and registers R1-R5
145  * are set to NOT_INIT to indicate that they are no longer readable.
146  *
147  * The following reference types represent a potential reference to a kernel
148  * resource which, after first being allocated, must be checked and freed by
149  * the BPF program:
150  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
151  *
152  * When the verifier sees a helper call return a reference type, it allocates a
153  * pointer id for the reference and stores it in the current function state.
154  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
155  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
156  * passes through a NULL-check conditional. For the branch wherein the state is
157  * changed to CONST_IMM, the verifier releases the reference.
158  *
159  * For each helper function that allocates a reference, such as
160  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
161  * bpf_sk_release(). When a reference type passes into the release function,
162  * the verifier also releases the reference. If any unchecked or unreleased
163  * reference remains at the end of the program, the verifier rejects it.
164  */
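
/* For illustration only (a hedged sketch, not part of the verifier): the
 * example above can be continued with the NULL check described earlier.
 * Assuming the map's value_size is at least 4 bytes:
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),       // R0 == NULL? skip 2 insns
 *    // fall-through branch: R0 is now PTR_TO_MAP_VALUE and may be accessed
 *    BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), // load 4 bytes of the value
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
 *    BPF_MOV64_IMM(BPF_REG_0, 0),                 // both branches meet here
 *    BPF_EXIT_INSN(),
 *
 * In the branch that skipped the access, R0 was known to be NULL, so its
 * state was changed to a scalar as described above.
 */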
165 
166 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
167 struct bpf_verifier_stack_elem {
168 	/* verifier state is 'st'
169 	 * before processing instruction 'insn_idx'
170 	 * and after processing instruction 'prev_insn_idx'
171 	 */
172 	struct bpf_verifier_state st;
173 	int insn_idx;
174 	int prev_insn_idx;
175 	struct bpf_verifier_stack_elem *next;
176 	/* length of verifier log at the time this state was pushed on stack */
177 	u32 log_pos;
178 };
179 
180 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
181 #define BPF_COMPLEXITY_LIMIT_STATES	64
182 
183 #define BPF_MAP_KEY_POISON	(1ULL << 63)
184 #define BPF_MAP_KEY_SEEN	(1ULL << 62)
185 
186 #define BPF_MAP_PTR_UNPRIV	1UL
187 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
188 					  POISON_POINTER_DELTA))
189 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
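
/* Illustrative sketch of how map_ptr_state is packed (values assumed, not
 * taken from a real run): a struct bpf_map pointer is at least word aligned,
 * so its low bit is free to carry BPF_MAP_PTR_UNPRIV:
 *
 *   aux->map_ptr_state = (unsigned long)map | BPF_MAP_PTR_UNPRIV;
 *   BPF_MAP_PTR(aux->map_ptr_state);  // recovers 'map' with the bit cleared
 *   bpf_map_ptr_unpriv(aux);          // true, the low bit is set
 *   bpf_map_ptr_poisoned(aux);        // true only when BPF_MAP_PTR_POISON
 *                                     // was stored instead of a real map
 */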
190 
191 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
192 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
193 
194 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
195 {
196 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
197 }
198 
199 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
200 {
201 	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
202 }
203 
204 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
205 			      const struct bpf_map *map, bool unpriv)
206 {
207 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
208 	unpriv |= bpf_map_ptr_unpriv(aux);
209 	aux->map_ptr_state = (unsigned long)map |
210 			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
211 }
212 
213 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
214 {
215 	return aux->map_key_state & BPF_MAP_KEY_POISON;
216 }
217 
218 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
219 {
220 	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
221 }
222 
223 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
224 {
225 	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
226 }
227 
228 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
229 {
230 	bool poisoned = bpf_map_key_poisoned(aux);
231 
232 	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
233 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
234 }
235 
236 static bool bpf_pseudo_call(const struct bpf_insn *insn)
237 {
238 	return insn->code == (BPF_JMP | BPF_CALL) &&
239 	       insn->src_reg == BPF_PSEUDO_CALL;
240 }
241 
242 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
243 {
244 	return insn->code == (BPF_JMP | BPF_CALL) &&
245 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
246 }
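
/* For reference, both helpers above key off the instruction encoding alone.
 * Illustrative sketch of the two pseudo-call forms (not verifier code):
 *
 *   // call to another BPF function in the same program; imm holds the
 *   // relative instruction offset of the callee
 *   BPF_CALL_REL(0)                        // src_reg == BPF_PSEUDO_CALL
 *
 *   // call to a kernel function (kfunc); imm holds the BTF id of the target
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, btf_id)
 */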
247 
248 struct bpf_call_arg_meta {
249 	struct bpf_map *map_ptr;
250 	bool raw_mode;
251 	bool pkt_access;
252 	u8 release_regno;
253 	int regno;
254 	int access_size;
255 	int mem_size;
256 	u64 msize_max_value;
257 	int ref_obj_id;
258 	int map_uid;
259 	int func_id;
260 	struct btf *btf;
261 	u32 btf_id;
262 	struct btf *ret_btf;
263 	u32 ret_btf_id;
264 	u32 subprogno;
265 	struct bpf_map_value_off_desc *kptr_off_desc;
266 	u8 uninit_dynptr_regno;
267 };
268 
269 struct btf *btf_vmlinux;
270 
271 static DEFINE_MUTEX(bpf_verifier_lock);
272 
273 static const struct bpf_line_info *
274 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
275 {
276 	const struct bpf_line_info *linfo;
277 	const struct bpf_prog *prog;
278 	u32 i, nr_linfo;
279 
280 	prog = env->prog;
281 	nr_linfo = prog->aux->nr_linfo;
282 
283 	if (!nr_linfo || insn_off >= prog->len)
284 		return NULL;
285 
286 	linfo = prog->aux->linfo;
287 	for (i = 1; i < nr_linfo; i++)
288 		if (insn_off < linfo[i].insn_off)
289 			break;
290 
291 	return &linfo[i - 1];
292 }
293 
294 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
295 		       va_list args)
296 {
297 	unsigned int n;
298 
299 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
300 
301 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
302 		  "verifier log line truncated - local buffer too short\n");
303 
304 	if (log->level == BPF_LOG_KERNEL) {
305 		bool newline = n > 0 && log->kbuf[n - 1] == '\n';
306 
307 		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
308 		return;
309 	}
310 
311 	n = min(log->len_total - log->len_used - 1, n);
312 	log->kbuf[n] = '\0';
313 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
314 		log->len_used += n;
315 	else
316 		log->ubuf = NULL;
317 }
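
/* The user-space side of this log is just a buffer handed in at program load
 * time. Hedged sketch using the raw uapi (field names are from
 * include/uapi/linux/bpf.h; a real loader such as libbpf wraps this):
 *
 *   char log_buf[64 * 1024];
 *   union bpf_attr attr = {
 *           .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *           .insns     = (unsigned long)insns,
 *           .insn_cnt  = insn_cnt,
 *           .license   = (unsigned long)"GPL",
 *           .log_buf   = (unsigned long)log_buf,
 *           .log_size  = sizeof(log_buf),
 *           .log_level = 1,       // 0 disables the log, higher is chattier
 *   };
 *   int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *   // on failure, log_buf holds the NUL-terminated verifier trace
 */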
318 
319 static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
320 {
321 	char zero = 0;
322 
323 	if (!bpf_verifier_log_needed(log))
324 		return;
325 
326 	log->len_used = new_pos;
327 	if (put_user(zero, log->ubuf + new_pos))
328 		log->ubuf = NULL;
329 }
330 
331 /* log_level controls verbosity level of eBPF verifier.
332  * bpf_verifier_log_write() is used to dump the verification trace to the log,
333  * so the user can figure out what's wrong with the program
334  */
335 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
336 					   const char *fmt, ...)
337 {
338 	va_list args;
339 
340 	if (!bpf_verifier_log_needed(&env->log))
341 		return;
342 
343 	va_start(args, fmt);
344 	bpf_verifier_vlog(&env->log, fmt, args);
345 	va_end(args);
346 }
347 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
348 
349 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
350 {
351 	struct bpf_verifier_env *env = private_data;
352 	va_list args;
353 
354 	if (!bpf_verifier_log_needed(&env->log))
355 		return;
356 
357 	va_start(args, fmt);
358 	bpf_verifier_vlog(&env->log, fmt, args);
359 	va_end(args);
360 }
361 
362 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
363 			    const char *fmt, ...)
364 {
365 	va_list args;
366 
367 	if (!bpf_verifier_log_needed(log))
368 		return;
369 
370 	va_start(args, fmt);
371 	bpf_verifier_vlog(log, fmt, args);
372 	va_end(args);
373 }
374 EXPORT_SYMBOL_GPL(bpf_log);
375 
376 static const char *ltrim(const char *s)
377 {
378 	while (isspace(*s))
379 		s++;
380 
381 	return s;
382 }
383 
384 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
385 					 u32 insn_off,
386 					 const char *prefix_fmt, ...)
387 {
388 	const struct bpf_line_info *linfo;
389 
390 	if (!bpf_verifier_log_needed(&env->log))
391 		return;
392 
393 	linfo = find_linfo(env, insn_off);
394 	if (!linfo || linfo == env->prev_linfo)
395 		return;
396 
397 	if (prefix_fmt) {
398 		va_list args;
399 
400 		va_start(args, prefix_fmt);
401 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
402 		va_end(args);
403 	}
404 
405 	verbose(env, "%s\n",
406 		ltrim(btf_name_by_offset(env->prog->aux->btf,
407 					 linfo->line_off)));
408 
409 	env->prev_linfo = linfo;
410 }
411 
412 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
413 				   struct bpf_reg_state *reg,
414 				   struct tnum *range, const char *ctx,
415 				   const char *reg_name)
416 {
417 	char tn_buf[48];
418 
419 	verbose(env, "At %s the register %s ", ctx, reg_name);
420 	if (!tnum_is_unknown(reg->var_off)) {
421 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
422 		verbose(env, "has value %s", tn_buf);
423 	} else {
424 		verbose(env, "has unknown scalar value");
425 	}
426 	tnum_strn(tn_buf, sizeof(tn_buf), *range);
427 	verbose(env, " should have been in %s\n", tn_buf);
428 }
429 
430 static bool type_is_pkt_pointer(enum bpf_reg_type type)
431 {
432 	type = base_type(type);
433 	return type == PTR_TO_PACKET ||
434 	       type == PTR_TO_PACKET_META;
435 }
436 
437 static bool type_is_sk_pointer(enum bpf_reg_type type)
438 {
439 	return type == PTR_TO_SOCKET ||
440 		type == PTR_TO_SOCK_COMMON ||
441 		type == PTR_TO_TCP_SOCK ||
442 		type == PTR_TO_XDP_SOCK;
443 }
444 
445 static bool reg_type_not_null(enum bpf_reg_type type)
446 {
447 	return type == PTR_TO_SOCKET ||
448 		type == PTR_TO_TCP_SOCK ||
449 		type == PTR_TO_MAP_VALUE ||
450 		type == PTR_TO_MAP_KEY ||
451 		type == PTR_TO_SOCK_COMMON;
452 }
453 
454 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
455 {
456 	return reg->type == PTR_TO_MAP_VALUE &&
457 		map_value_has_spin_lock(reg->map_ptr);
458 }
459 
460 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
461 {
462 	type = base_type(type);
463 	return type == PTR_TO_SOCKET || type == PTR_TO_TCP_SOCK ||
464 		type == PTR_TO_MEM || type == PTR_TO_BTF_ID;
465 }
466 
467 static bool type_is_rdonly_mem(u32 type)
468 {
469 	return type & MEM_RDONLY;
470 }
471 
472 static bool type_may_be_null(u32 type)
473 {
474 	return type & PTR_MAYBE_NULL;
475 }
476 
477 static bool is_acquire_function(enum bpf_func_id func_id,
478 				const struct bpf_map *map)
479 {
480 	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
481 
482 	if (func_id == BPF_FUNC_sk_lookup_tcp ||
483 	    func_id == BPF_FUNC_sk_lookup_udp ||
484 	    func_id == BPF_FUNC_skc_lookup_tcp ||
485 	    func_id == BPF_FUNC_ringbuf_reserve ||
486 	    func_id == BPF_FUNC_kptr_xchg)
487 		return true;
488 
489 	if (func_id == BPF_FUNC_map_lookup_elem &&
490 	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
491 	     map_type == BPF_MAP_TYPE_SOCKHASH))
492 		return true;
493 
494 	return false;
495 }
496 
497 static bool is_ptr_cast_function(enum bpf_func_id func_id)
498 {
499 	return func_id == BPF_FUNC_tcp_sock ||
500 		func_id == BPF_FUNC_sk_fullsock ||
501 		func_id == BPF_FUNC_skc_to_tcp_sock ||
502 		func_id == BPF_FUNC_skc_to_tcp6_sock ||
503 		func_id == BPF_FUNC_skc_to_udp6_sock ||
504 		func_id == BPF_FUNC_skc_to_mptcp_sock ||
505 		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
506 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
507 }
508 
509 static bool is_dynptr_ref_function(enum bpf_func_id func_id)
510 {
511 	return func_id == BPF_FUNC_dynptr_data;
512 }
513 
514 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
515 					const struct bpf_map *map)
516 {
517 	int ref_obj_uses = 0;
518 
519 	if (is_ptr_cast_function(func_id))
520 		ref_obj_uses++;
521 	if (is_acquire_function(func_id, map))
522 		ref_obj_uses++;
523 	if (is_dynptr_ref_function(func_id))
524 		ref_obj_uses++;
525 
526 	return ref_obj_uses > 1;
527 }
528 
529 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
530 {
531 	return BPF_CLASS(insn->code) == BPF_STX &&
532 	       BPF_MODE(insn->code) == BPF_ATOMIC &&
533 	       insn->imm == BPF_CMPXCHG;
534 }
535 
536 /* string representation of 'enum bpf_reg_type'
537  *
538  * Note that reg_type_str() cannot appear more than once in a single verbose()
539  * statement.
540  */
541 static const char *reg_type_str(struct bpf_verifier_env *env,
542 				enum bpf_reg_type type)
543 {
544 	char postfix[16] = {0}, prefix[32] = {0};
545 	static const char * const str[] = {
546 		[NOT_INIT]		= "?",
547 		[SCALAR_VALUE]		= "scalar",
548 		[PTR_TO_CTX]		= "ctx",
549 		[CONST_PTR_TO_MAP]	= "map_ptr",
550 		[PTR_TO_MAP_VALUE]	= "map_value",
551 		[PTR_TO_STACK]		= "fp",
552 		[PTR_TO_PACKET]		= "pkt",
553 		[PTR_TO_PACKET_META]	= "pkt_meta",
554 		[PTR_TO_PACKET_END]	= "pkt_end",
555 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
556 		[PTR_TO_SOCKET]		= "sock",
557 		[PTR_TO_SOCK_COMMON]	= "sock_common",
558 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
559 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
560 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
561 		[PTR_TO_BTF_ID]		= "ptr_",
562 		[PTR_TO_MEM]		= "mem",
563 		[PTR_TO_BUF]		= "buf",
564 		[PTR_TO_FUNC]		= "func",
565 		[PTR_TO_MAP_KEY]	= "map_key",
566 		[PTR_TO_DYNPTR]		= "dynptr_ptr",
567 	};
568 
569 	if (type & PTR_MAYBE_NULL) {
570 		if (base_type(type) == PTR_TO_BTF_ID)
571 			strncpy(postfix, "or_null_", 16);
572 		else
573 			strncpy(postfix, "_or_null", 16);
574 	}
575 
576 	if (type & MEM_RDONLY)
577 		strncpy(prefix, "rdonly_", 32);
578 	if (type & MEM_ALLOC)
579 		strncpy(prefix, "alloc_", 32);
580 	if (type & MEM_USER)
581 		strncpy(prefix, "user_", 32);
582 	if (type & MEM_PERCPU)
583 		strncpy(prefix, "percpu_", 32);
584 	if (type & PTR_UNTRUSTED)
585 		strncpy(prefix, "untrusted_", 32);
586 
587 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
588 		 prefix, str[base_type(type)], postfix);
589 	return env->type_str_buf;
590 }
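
/* Examples of the strings composed above (illustration only):
 *   PTR_TO_MAP_VALUE | PTR_MAYBE_NULL -> "map_value_or_null"
 *   PTR_TO_BUF | MEM_RDONLY           -> "rdonly_buf"
 *   PTR_TO_MEM | MEM_ALLOC            -> "alloc_mem"
 *   PTR_TO_BTF_ID | PTR_MAYBE_NULL    -> "ptr_or_null_" (the BTF type name is
 *                                        appended separately by the caller)
 */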
591 
592 static char slot_type_char[] = {
593 	[STACK_INVALID]	= '?',
594 	[STACK_SPILL]	= 'r',
595 	[STACK_MISC]	= 'm',
596 	[STACK_ZERO]	= '0',
597 	[STACK_DYNPTR]	= 'd',
598 };
599 
600 static void print_liveness(struct bpf_verifier_env *env,
601 			   enum bpf_reg_liveness live)
602 {
603 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
604 		verbose(env, "_");
605 	if (live & REG_LIVE_READ)
606 		verbose(env, "r");
607 	if (live & REG_LIVE_WRITTEN)
608 		verbose(env, "w");
609 	if (live & REG_LIVE_DONE)
610 		verbose(env, "D");
611 }
612 
613 static int get_spi(s32 off)
614 {
615 	return (-off - 1) / BPF_REG_SIZE;
616 }
617 
618 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
619 {
620 	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
621 
622 	/* We need to check that slots between [spi - nr_slots + 1, spi] are
623 	 * within [0, allocated_stack).
624 	 *
625 	 * Please note that the spi grows downwards. For example, a dynptr
626 	 * takes the size of two stack slots; the first slot will be at
627 	 * spi and the second slot will be at spi - 1.
628 	 */
629 	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
630 }
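
/* Worked example (illustration only): for a register with off == -16,
 * get_spi() returns (16 - 1) / 8 == 1, i.e. stack slot index 1. A dynptr
 * occupies BPF_DYNPTR_NR_SLOTS == 2 slots, so is_spi_bounds_valid(state, 1, 2)
 * needs slots [0, 1] to exist, i.e. at least 16 bytes of allocated_stack.
 */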
631 
632 static struct bpf_func_state *func(struct bpf_verifier_env *env,
633 				   const struct bpf_reg_state *reg)
634 {
635 	struct bpf_verifier_state *cur = env->cur_state;
636 
637 	return cur->frame[reg->frameno];
638 }
639 
640 static const char *kernel_type_name(const struct btf *btf, u32 id)
641 {
642 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
643 }
644 
645 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
646 {
647 	env->scratched_regs |= 1U << regno;
648 }
649 
650 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
651 {
652 	env->scratched_stack_slots |= 1ULL << spi;
653 }
654 
655 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
656 {
657 	return (env->scratched_regs >> regno) & 1;
658 }
659 
660 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
661 {
662 	return (env->scratched_stack_slots >> regno) & 1;
663 }
664 
665 static bool verifier_state_scratched(const struct bpf_verifier_env *env)
666 {
667 	return env->scratched_regs || env->scratched_stack_slots;
668 }
669 
670 static void mark_verifier_state_clean(struct bpf_verifier_env *env)
671 {
672 	env->scratched_regs = 0U;
673 	env->scratched_stack_slots = 0ULL;
674 }
675 
676 /* Used for printing the entire verifier state. */
677 static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
678 {
679 	env->scratched_regs = ~0U;
680 	env->scratched_stack_slots = ~0ULL;
681 }
682 
683 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
684 {
685 	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
686 	case DYNPTR_TYPE_LOCAL:
687 		return BPF_DYNPTR_TYPE_LOCAL;
688 	case DYNPTR_TYPE_RINGBUF:
689 		return BPF_DYNPTR_TYPE_RINGBUF;
690 	default:
691 		return BPF_DYNPTR_TYPE_INVALID;
692 	}
693 }
694 
695 static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
696 {
697 	return type == BPF_DYNPTR_TYPE_RINGBUF;
698 }
699 
700 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
701 				   enum bpf_arg_type arg_type, int insn_idx)
702 {
703 	struct bpf_func_state *state = func(env, reg);
704 	enum bpf_dynptr_type type;
705 	int spi, i, id;
706 
707 	spi = get_spi(reg->off);
708 
709 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
710 		return -EINVAL;
711 
712 	for (i = 0; i < BPF_REG_SIZE; i++) {
713 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
714 		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
715 	}
716 
717 	type = arg_to_dynptr_type(arg_type);
718 	if (type == BPF_DYNPTR_TYPE_INVALID)
719 		return -EINVAL;
720 
721 	state->stack[spi].spilled_ptr.dynptr.first_slot = true;
722 	state->stack[spi].spilled_ptr.dynptr.type = type;
723 	state->stack[spi - 1].spilled_ptr.dynptr.type = type;
724 
725 	if (dynptr_type_refcounted(type)) {
726 		/* The id is used to track proper releasing */
727 		id = acquire_reference_state(env, insn_idx);
728 		if (id < 0)
729 			return id;
730 
731 		state->stack[spi].spilled_ptr.id = id;
732 		state->stack[spi - 1].spilled_ptr.id = id;
733 	}
734 
735 	return 0;
736 }
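
/* Illustrative stack layout after mark_stack_slots_dynptr() for a dynptr at
 * fp-16, i.e. spi == 1 (values assumed):
 *
 *   stack[1] (fp-16..fp-9): all 8 slot_type bytes STACK_DYNPTR, first_slot set
 *   stack[0] (fp-8 ..fp-1): all 8 slot_type bytes STACK_DYNPTR
 *
 * For a refcounted (ringbuf) dynptr both slots also share one reference id,
 * so a later release can find and invalidate them together.
 */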
737 
738 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
739 {
740 	struct bpf_func_state *state = func(env, reg);
741 	int spi, i;
742 
743 	spi = get_spi(reg->off);
744 
745 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
746 		return -EINVAL;
747 
748 	for (i = 0; i < BPF_REG_SIZE; i++) {
749 		state->stack[spi].slot_type[i] = STACK_INVALID;
750 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
751 	}
752 
753 	/* Invalidate any slices associated with this dynptr */
754 	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
755 		release_reference(env, state->stack[spi].spilled_ptr.id);
756 		state->stack[spi].spilled_ptr.id = 0;
757 		state->stack[spi - 1].spilled_ptr.id = 0;
758 	}
759 
760 	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
761 	state->stack[spi].spilled_ptr.dynptr.type = 0;
762 	state->stack[spi - 1].spilled_ptr.dynptr.type = 0;
763 
764 	return 0;
765 }
766 
767 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
768 {
769 	struct bpf_func_state *state = func(env, reg);
770 	int spi = get_spi(reg->off);
771 	int i;
772 
773 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
774 		return true;
775 
776 	for (i = 0; i < BPF_REG_SIZE; i++) {
777 		if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
778 		    state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
779 			return false;
780 	}
781 
782 	return true;
783 }
784 
785 bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env,
786 			      struct bpf_reg_state *reg)
787 {
788 	struct bpf_func_state *state = func(env, reg);
789 	int spi = get_spi(reg->off);
790 	int i;
791 
792 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
793 	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
794 		return false;
795 
796 	for (i = 0; i < BPF_REG_SIZE; i++) {
797 		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
798 		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
799 			return false;
800 	}
801 
802 	return true;
803 }
804 
805 bool is_dynptr_type_expected(struct bpf_verifier_env *env,
806 			     struct bpf_reg_state *reg,
807 			     enum bpf_arg_type arg_type)
808 {
809 	struct bpf_func_state *state = func(env, reg);
810 	enum bpf_dynptr_type dynptr_type;
811 	int spi = get_spi(reg->off);
812 
813 	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
814 	if (arg_type == ARG_PTR_TO_DYNPTR)
815 		return true;
816 
817 	dynptr_type = arg_to_dynptr_type(arg_type);
818 
819 	return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
820 }
821 
822 /* The reg state of a pointer or a bounded scalar was saved when
823  * it was spilled to the stack.
824  */
825 static bool is_spilled_reg(const struct bpf_stack_state *stack)
826 {
827 	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
828 }
829 
830 static void scrub_spilled_slot(u8 *stype)
831 {
832 	if (*stype != STACK_INVALID)
833 		*stype = STACK_MISC;
834 }
835 
836 static void print_verifier_state(struct bpf_verifier_env *env,
837 				 const struct bpf_func_state *state,
838 				 bool print_all)
839 {
840 	const struct bpf_reg_state *reg;
841 	enum bpf_reg_type t;
842 	int i;
843 
844 	if (state->frameno)
845 		verbose(env, " frame%d:", state->frameno);
846 	for (i = 0; i < MAX_BPF_REG; i++) {
847 		reg = &state->regs[i];
848 		t = reg->type;
849 		if (t == NOT_INIT)
850 			continue;
851 		if (!print_all && !reg_scratched(env, i))
852 			continue;
853 		verbose(env, " R%d", i);
854 		print_liveness(env, reg->live);
855 		verbose(env, "=");
856 		if (t == SCALAR_VALUE && reg->precise)
857 			verbose(env, "P");
858 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
859 		    tnum_is_const(reg->var_off)) {
860 			/* reg->off should be 0 for SCALAR_VALUE */
861 			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
862 			verbose(env, "%lld", reg->var_off.value + reg->off);
863 		} else {
864 			const char *sep = "";
865 
866 			verbose(env, "%s", reg_type_str(env, t));
867 			if (base_type(t) == PTR_TO_BTF_ID)
868 				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
869 			verbose(env, "(");
870 /*
871  * '_a' stands for append; it was shortened to avoid multi-line statements below.
872  * This macro is used to output a comma-separated list of attributes.
873  */
874 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
875 
876 			if (reg->id)
877 				verbose_a("id=%d", reg->id);
878 			if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
879 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
880 			if (t != SCALAR_VALUE)
881 				verbose_a("off=%d", reg->off);
882 			if (type_is_pkt_pointer(t))
883 				verbose_a("r=%d", reg->range);
884 			else if (base_type(t) == CONST_PTR_TO_MAP ||
885 				 base_type(t) == PTR_TO_MAP_KEY ||
886 				 base_type(t) == PTR_TO_MAP_VALUE)
887 				verbose_a("ks=%d,vs=%d",
888 					  reg->map_ptr->key_size,
889 					  reg->map_ptr->value_size);
890 			if (tnum_is_const(reg->var_off)) {
891 				/* Typically an immediate SCALAR_VALUE, but
892 				 * could be a pointer whose offset is too big
893 				 * for reg->off
894 				 */
895 				verbose_a("imm=%llx", reg->var_off.value);
896 			} else {
897 				if (reg->smin_value != reg->umin_value &&
898 				    reg->smin_value != S64_MIN)
899 					verbose_a("smin=%lld", (long long)reg->smin_value);
900 				if (reg->smax_value != reg->umax_value &&
901 				    reg->smax_value != S64_MAX)
902 					verbose_a("smax=%lld", (long long)reg->smax_value);
903 				if (reg->umin_value != 0)
904 					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
905 				if (reg->umax_value != U64_MAX)
906 					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
907 				if (!tnum_is_unknown(reg->var_off)) {
908 					char tn_buf[48];
909 
910 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
911 					verbose_a("var_off=%s", tn_buf);
912 				}
913 				if (reg->s32_min_value != reg->smin_value &&
914 				    reg->s32_min_value != S32_MIN)
915 					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
916 				if (reg->s32_max_value != reg->smax_value &&
917 				    reg->s32_max_value != S32_MAX)
918 					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
919 				if (reg->u32_min_value != reg->umin_value &&
920 				    reg->u32_min_value != U32_MIN)
921 					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
922 				if (reg->u32_max_value != reg->umax_value &&
923 				    reg->u32_max_value != U32_MAX)
924 					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
925 			}
926 #undef verbose_a
927 
928 			verbose(env, ")");
929 		}
930 	}
931 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
932 		char types_buf[BPF_REG_SIZE + 1];
933 		bool valid = false;
934 		int j;
935 
936 		for (j = 0; j < BPF_REG_SIZE; j++) {
937 			if (state->stack[i].slot_type[j] != STACK_INVALID)
938 				valid = true;
939 			types_buf[j] = slot_type_char[
940 					state->stack[i].slot_type[j]];
941 		}
942 		types_buf[BPF_REG_SIZE] = 0;
943 		if (!valid)
944 			continue;
945 		if (!print_all && !stack_slot_scratched(env, i))
946 			continue;
947 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
948 		print_liveness(env, state->stack[i].spilled_ptr.live);
949 		if (is_spilled_reg(&state->stack[i])) {
950 			reg = &state->stack[i].spilled_ptr;
951 			t = reg->type;
952 			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
953 			if (t == SCALAR_VALUE && reg->precise)
954 				verbose(env, "P");
955 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
956 				verbose(env, "%lld", reg->var_off.value + reg->off);
957 		} else {
958 			verbose(env, "=%s", types_buf);
959 		}
960 	}
961 	if (state->acquired_refs && state->refs[0].id) {
962 		verbose(env, " refs=%d", state->refs[0].id);
963 		for (i = 1; i < state->acquired_refs; i++)
964 			if (state->refs[i].id)
965 				verbose(env, ",%d", state->refs[i].id);
966 	}
967 	if (state->in_callback_fn)
968 		verbose(env, " cb");
969 	if (state->in_async_callback_fn)
970 		verbose(env, " async_cb");
971 	verbose(env, "\n");
972 	mark_verifier_state_clean(env);
973 }
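
/* A resulting log line looks roughly like this (illustrative only; exact
 * contents depend on the program, the instruction and the log level):
 *
 *   5: R0_w=map_value_or_null(id=1,off=0,ks=4,vs=8,imm=0) R10=fp0 fp-8_w=mmmmmmmm
 *
 * "R0_w" is register 0 carrying the REG_LIVE_WRITTEN liveness mark, "fp0" is
 * the frame pointer, and "fp-8=mmmmmmmm" shows the eight per-byte slot_type
 * characters from slot_type_char[] (here all STACK_MISC) for that stack slot.
 */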
974 
975 static inline u32 vlog_alignment(u32 pos)
976 {
977 	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
978 			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
979 }
980 
981 static void print_insn_state(struct bpf_verifier_env *env,
982 			     const struct bpf_func_state *state)
983 {
984 	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
985 		/* remove new line character */
986 		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
987 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
988 	} else {
989 		verbose(env, "%d:", env->insn_idx);
990 	}
991 	print_verifier_state(env, state, false);
992 }
993 
994 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too
995  * small to hold src. This is different from krealloc since we don't want to preserve
996  * the contents of dst.
997  *
998  * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
999  * not be allocated.
1000  */
1001 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
1002 {
1003 	size_t bytes;
1004 
1005 	if (ZERO_OR_NULL_PTR(src))
1006 		goto out;
1007 
1008 	if (unlikely(check_mul_overflow(n, size, &bytes)))
1009 		return NULL;
1010 
1011 	if (ksize(dst) < bytes) {
1012 		kfree(dst);
1013 		dst = kmalloc_track_caller(bytes, flags);
1014 		if (!dst)
1015 			return NULL;
1016 	}
1017 
1018 	memcpy(dst, src, bytes);
1019 out:
1020 	return dst ? dst : ZERO_SIZE_PTR;
1021 }
1022 
1023 /* resize an array from old_n items to new_n items. the array is reallocated if it's too
1024  * small to hold new_n items. new items are zeroed out if the array grows.
1025  *
1026  * Contrary to krealloc_array, does not free arr if new_n is zero.
1027  */
1028 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1029 {
1030 	if (!new_n || old_n == new_n)
1031 		goto out;
1032 
1033 	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
1034 	if (!arr)
1035 		return NULL;
1036 
1037 	if (new_n > old_n)
1038 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
1039 
1040 out:
1041 	return arr ? arr : ZERO_SIZE_PTR;
1042 }
1043 
1044 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1045 {
1046 	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1047 			       sizeof(struct bpf_reference_state), GFP_KERNEL);
1048 	if (!dst->refs)
1049 		return -ENOMEM;
1050 
1051 	dst->acquired_refs = src->acquired_refs;
1052 	return 0;
1053 }
1054 
1055 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1056 {
1057 	size_t n = src->allocated_stack / BPF_REG_SIZE;
1058 
1059 	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1060 				GFP_KERNEL);
1061 	if (!dst->stack)
1062 		return -ENOMEM;
1063 
1064 	dst->allocated_stack = src->allocated_stack;
1065 	return 0;
1066 }
1067 
1068 static int resize_reference_state(struct bpf_func_state *state, size_t n)
1069 {
1070 	state->refs = realloc_array(state->refs, state->acquired_refs, n,
1071 				    sizeof(struct bpf_reference_state));
1072 	if (!state->refs)
1073 		return -ENOMEM;
1074 
1075 	state->acquired_refs = n;
1076 	return 0;
1077 }
1078 
1079 static int grow_stack_state(struct bpf_func_state *state, int size)
1080 {
1081 	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
1082 
1083 	if (old_n >= n)
1084 		return 0;
1085 
1086 	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1087 	if (!state->stack)
1088 		return -ENOMEM;
1089 
1090 	state->allocated_stack = size;
1091 	return 0;
1092 }
1093 
1094 /* Acquire a pointer id from the env and update the state->refs to include
1095  * this new pointer reference.
1096  * On success, returns a valid pointer id to associate with the register
1097  * On failure, returns a negative errno.
1098  */
1099 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1100 {
1101 	struct bpf_func_state *state = cur_func(env);
1102 	int new_ofs = state->acquired_refs;
1103 	int id, err;
1104 
1105 	err = resize_reference_state(state, state->acquired_refs + 1);
1106 	if (err)
1107 		return err;
1108 	id = ++env->id_gen;
1109 	state->refs[new_ofs].id = id;
1110 	state->refs[new_ofs].insn_idx = insn_idx;
1111 	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
1112 
1113 	return id;
1114 }
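
/* Hedged usage sketch (not an actual call site): when an acquiring helper
 * such as bpf_sk_lookup_tcp() is verified, the call handling does roughly
 *
 *   id = acquire_reference_state(env, insn_idx);
 *   regs[BPF_REG_0].id = id;
 *   regs[BPF_REG_0].ref_obj_id = id;
 *
 * and the matching bpf_sk_release() later triggers release_reference(), which
 * drops the entry from state->refs[] and invalidates every register that
 * still carries that ref_obj_id.
 */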
1115 
1116 /* release function corresponding to acquire_reference_state(). Idempotent. */
1117 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
1118 {
1119 	int i, last_idx;
1120 
1121 	last_idx = state->acquired_refs - 1;
1122 	for (i = 0; i < state->acquired_refs; i++) {
1123 		if (state->refs[i].id == ptr_id) {
1124 			/* Cannot release caller references in callbacks */
1125 			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1126 				return -EINVAL;
1127 			if (last_idx && i != last_idx)
1128 				memcpy(&state->refs[i], &state->refs[last_idx],
1129 				       sizeof(*state->refs));
1130 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1131 			state->acquired_refs--;
1132 			return 0;
1133 		}
1134 	}
1135 	return -EINVAL;
1136 }
1137 
1138 static void free_func_state(struct bpf_func_state *state)
1139 {
1140 	if (!state)
1141 		return;
1142 	kfree(state->refs);
1143 	kfree(state->stack);
1144 	kfree(state);
1145 }
1146 
1147 static void clear_jmp_history(struct bpf_verifier_state *state)
1148 {
1149 	kfree(state->jmp_history);
1150 	state->jmp_history = NULL;
1151 	state->jmp_history_cnt = 0;
1152 }
1153 
1154 static void free_verifier_state(struct bpf_verifier_state *state,
1155 				bool free_self)
1156 {
1157 	int i;
1158 
1159 	for (i = 0; i <= state->curframe; i++) {
1160 		free_func_state(state->frame[i]);
1161 		state->frame[i] = NULL;
1162 	}
1163 	clear_jmp_history(state);
1164 	if (free_self)
1165 		kfree(state);
1166 }
1167 
1168 /* copy verifier state from src to dst growing dst stack space
1169  * when necessary to accommodate larger src stack
1170  */
1171 static int copy_func_state(struct bpf_func_state *dst,
1172 			   const struct bpf_func_state *src)
1173 {
1174 	int err;
1175 
1176 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1177 	err = copy_reference_state(dst, src);
1178 	if (err)
1179 		return err;
1180 	return copy_stack_state(dst, src);
1181 }
1182 
1183 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1184 			       const struct bpf_verifier_state *src)
1185 {
1186 	struct bpf_func_state *dst;
1187 	int i, err;
1188 
1189 	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1190 					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1191 					    GFP_USER);
1192 	if (!dst_state->jmp_history)
1193 		return -ENOMEM;
1194 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
1195 
1196 	/* if dst has more stack frames then src frame, free them */
1197 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1198 		free_func_state(dst_state->frame[i]);
1199 		dst_state->frame[i] = NULL;
1200 	}
1201 	dst_state->speculative = src->speculative;
1202 	dst_state->curframe = src->curframe;
1203 	dst_state->active_spin_lock = src->active_spin_lock;
1204 	dst_state->branches = src->branches;
1205 	dst_state->parent = src->parent;
1206 	dst_state->first_insn_idx = src->first_insn_idx;
1207 	dst_state->last_insn_idx = src->last_insn_idx;
1208 	for (i = 0; i <= src->curframe; i++) {
1209 		dst = dst_state->frame[i];
1210 		if (!dst) {
1211 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1212 			if (!dst)
1213 				return -ENOMEM;
1214 			dst_state->frame[i] = dst;
1215 		}
1216 		err = copy_func_state(dst, src->frame[i]);
1217 		if (err)
1218 			return err;
1219 	}
1220 	return 0;
1221 }
1222 
1223 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1224 {
1225 	while (st) {
1226 		u32 br = --st->branches;
1227 
1228 		/* WARN_ON(br > 1) technically makes sense here,
1229 		 * but see comment in push_stack(), hence:
1230 		 */
1231 		WARN_ONCE((int)br < 0,
1232 			  "BUG update_branch_counts:branches_to_explore=%d\n",
1233 			  br);
1234 		if (br)
1235 			break;
1236 		st = st->parent;
1237 	}
1238 }
1239 
1240 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
1241 		     int *insn_idx, bool pop_log)
1242 {
1243 	struct bpf_verifier_state *cur = env->cur_state;
1244 	struct bpf_verifier_stack_elem *elem, *head = env->head;
1245 	int err;
1246 
1247 	if (env->head == NULL)
1248 		return -ENOENT;
1249 
1250 	if (cur) {
1251 		err = copy_verifier_state(cur, &head->st);
1252 		if (err)
1253 			return err;
1254 	}
1255 	if (pop_log)
1256 		bpf_vlog_reset(&env->log, head->log_pos);
1257 	if (insn_idx)
1258 		*insn_idx = head->insn_idx;
1259 	if (prev_insn_idx)
1260 		*prev_insn_idx = head->prev_insn_idx;
1261 	elem = head->next;
1262 	free_verifier_state(&head->st, false);
1263 	kfree(head);
1264 	env->head = elem;
1265 	env->stack_size--;
1266 	return 0;
1267 }
1268 
1269 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
1270 					     int insn_idx, int prev_insn_idx,
1271 					     bool speculative)
1272 {
1273 	struct bpf_verifier_state *cur = env->cur_state;
1274 	struct bpf_verifier_stack_elem *elem;
1275 	int err;
1276 
1277 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1278 	if (!elem)
1279 		goto err;
1280 
1281 	elem->insn_idx = insn_idx;
1282 	elem->prev_insn_idx = prev_insn_idx;
1283 	elem->next = env->head;
1284 	elem->log_pos = env->log.len_used;
1285 	env->head = elem;
1286 	env->stack_size++;
1287 	err = copy_verifier_state(&elem->st, cur);
1288 	if (err)
1289 		goto err;
1290 	elem->st.speculative |= speculative;
1291 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1292 		verbose(env, "The sequence of %d jumps is too complex.\n",
1293 			env->stack_size);
1294 		goto err;
1295 	}
1296 	if (elem->st.parent) {
1297 		++elem->st.parent->branches;
1298 		/* WARN_ON(branches > 2) technically makes sense here,
1299 		 * but
1300 		 * 1. speculative states will bump 'branches' for non-branch
1301 		 * instructions
1302 		 * 2. is_state_visited() heuristics may decide not to create
1303 		 * a new state for a sequence of branches and all such current
1304 		 * and cloned states will be pointing to a single parent state
1305 		 * which might have large 'branches' count.
1306 		 */
1307 	}
1308 	return &elem->st;
1309 err:
1310 	free_verifier_state(env->cur_state, true);
1311 	env->cur_state = NULL;
1312 	/* pop all elements and return */
1313 	while (!pop_stack(env, NULL, NULL, false));
1314 	return NULL;
1315 }
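
/* Hedged sketch of how branch exploration uses push_stack()/pop_stack()
 * (simplified; the real logic lives in check_cond_jmp_op() and do_check()):
 *
 *   // at a conditional jump that can go both ways, queue the taken path
 *   other_branch = push_stack(env, env->insn_idx + insn->off + 1,
 *                             env->insn_idx, false);
 *   if (!other_branch)
 *           return -EFAULT;
 *   // ... keep verifying the fall-through path; when it reaches BPF_EXIT,
 *   // the main loop resumes exploration from the most recently pushed state:
 *   err = pop_stack(env, &prev_insn_idx, &env->insn_idx, pop_log);
 *   if (err == -ENOENT)
 *           break;       // nothing left to explore, verification is complete
 */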
1316 
1317 #define CALLER_SAVED_REGS 6
1318 static const int caller_saved[CALLER_SAVED_REGS] = {
1319 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1320 };
1321 
1322 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1323 				struct bpf_reg_state *reg);
1324 
1325 /* This helper doesn't clear reg->id */
1326 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1327 {
1328 	reg->var_off = tnum_const(imm);
1329 	reg->smin_value = (s64)imm;
1330 	reg->smax_value = (s64)imm;
1331 	reg->umin_value = imm;
1332 	reg->umax_value = imm;
1333 
1334 	reg->s32_min_value = (s32)imm;
1335 	reg->s32_max_value = (s32)imm;
1336 	reg->u32_min_value = (u32)imm;
1337 	reg->u32_max_value = (u32)imm;
1338 }
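
/* Worked example (illustration only): ___mark_reg_known(reg, -1) sets
 *   var_off          = tnum_const(0xffffffffffffffff)   (all bits known 1)
 *   smin/smax        = -1
 *   umin/umax        = 0xffffffffffffffff
 *   s32_min/s32_max  = -1
 *   u32_min/u32_max  = 0xffffffff
 * i.e. the 64-bit and 32-bit views stay consistent for the same constant.
 */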
1339 
1340 /* Mark the unknown part of a register (variable offset or scalar value) as
1341  * known to have the value @imm.
1342  */
1343 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1344 {
1345 	/* Clear id, off, and union(map_ptr, range) */
1346 	memset(((u8 *)reg) + sizeof(reg->type), 0,
1347 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
1348 	___mark_reg_known(reg, imm);
1349 }
1350 
1351 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1352 {
1353 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
1354 	reg->s32_min_value = (s32)imm;
1355 	reg->s32_max_value = (s32)imm;
1356 	reg->u32_min_value = (u32)imm;
1357 	reg->u32_max_value = (u32)imm;
1358 }
1359 
1360 /* Mark the 'variable offset' part of a register as zero.  This should be
1361  * used only on registers holding a pointer type.
1362  */
1363 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
1364 {
1365 	__mark_reg_known(reg, 0);
1366 }
1367 
1368 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1369 {
1370 	__mark_reg_known(reg, 0);
1371 	reg->type = SCALAR_VALUE;
1372 }
1373 
1374 static void mark_reg_known_zero(struct bpf_verifier_env *env,
1375 				struct bpf_reg_state *regs, u32 regno)
1376 {
1377 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1378 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1379 		/* Something bad happened, let's kill all regs */
1380 		for (regno = 0; regno < MAX_BPF_REG; regno++)
1381 			__mark_reg_not_init(env, regs + regno);
1382 		return;
1383 	}
1384 	__mark_reg_known_zero(regs + regno);
1385 }
1386 
1387 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1388 {
1389 	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
1390 		const struct bpf_map *map = reg->map_ptr;
1391 
1392 		if (map->inner_map_meta) {
1393 			reg->type = CONST_PTR_TO_MAP;
1394 			reg->map_ptr = map->inner_map_meta;
1395 			/* transfer reg's id which is unique for every map_lookup_elem
1396 			 * as UID of the inner map.
1397 			 */
1398 			if (map_value_has_timer(map->inner_map_meta))
1399 				reg->map_uid = reg->id;
1400 		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1401 			reg->type = PTR_TO_XDP_SOCK;
1402 		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1403 			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1404 			reg->type = PTR_TO_SOCKET;
1405 		} else {
1406 			reg->type = PTR_TO_MAP_VALUE;
1407 		}
1408 		return;
1409 	}
1410 
1411 	reg->type &= ~PTR_MAYBE_NULL;
1412 }
1413 
1414 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1415 {
1416 	return type_is_pkt_pointer(reg->type);
1417 }
1418 
1419 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1420 {
1421 	return reg_is_pkt_pointer(reg) ||
1422 	       reg->type == PTR_TO_PACKET_END;
1423 }
1424 
1425 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1426 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1427 				    enum bpf_reg_type which)
1428 {
1429 	/* The register can already have a range from prior markings.
1430 	 * This is fine as long as it hasn't been advanced from its
1431 	 * origin.
1432 	 */
1433 	return reg->type == which &&
1434 	       reg->id == 0 &&
1435 	       reg->off == 0 &&
1436 	       tnum_equals_const(reg->var_off, 0);
1437 }
1438 
1439 /* Reset the min/max bounds of a register */
1440 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1441 {
1442 	reg->smin_value = S64_MIN;
1443 	reg->smax_value = S64_MAX;
1444 	reg->umin_value = 0;
1445 	reg->umax_value = U64_MAX;
1446 
1447 	reg->s32_min_value = S32_MIN;
1448 	reg->s32_max_value = S32_MAX;
1449 	reg->u32_min_value = 0;
1450 	reg->u32_max_value = U32_MAX;
1451 }
1452 
1453 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
1454 {
1455 	reg->smin_value = S64_MIN;
1456 	reg->smax_value = S64_MAX;
1457 	reg->umin_value = 0;
1458 	reg->umax_value = U64_MAX;
1459 }
1460 
1461 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
1462 {
1463 	reg->s32_min_value = S32_MIN;
1464 	reg->s32_max_value = S32_MAX;
1465 	reg->u32_min_value = 0;
1466 	reg->u32_max_value = U32_MAX;
1467 }
1468 
1469 static void __update_reg32_bounds(struct bpf_reg_state *reg)
1470 {
1471 	struct tnum var32_off = tnum_subreg(reg->var_off);
1472 
1473 	/* min signed is max(sign bit) | min(other bits) */
1474 	reg->s32_min_value = max_t(s32, reg->s32_min_value,
1475 			var32_off.value | (var32_off.mask & S32_MIN));
1476 	/* max signed is min(sign bit) | max(other bits) */
1477 	reg->s32_max_value = min_t(s32, reg->s32_max_value,
1478 			var32_off.value | (var32_off.mask & S32_MAX));
1479 	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
1480 	reg->u32_max_value = min(reg->u32_max_value,
1481 				 (u32)(var32_off.value | var32_off.mask));
1482 }
1483 
1484 static void __update_reg64_bounds(struct bpf_reg_state *reg)
1485 {
1486 	/* min signed is max(sign bit) | min(other bits) */
1487 	reg->smin_value = max_t(s64, reg->smin_value,
1488 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
1489 	/* max signed is min(sign bit) | max(other bits) */
1490 	reg->smax_value = min_t(s64, reg->smax_value,
1491 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
1492 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
1493 	reg->umax_value = min(reg->umax_value,
1494 			      reg->var_off.value | reg->var_off.mask);
1495 }
1496 
1497 static void __update_reg_bounds(struct bpf_reg_state *reg)
1498 {
1499 	__update_reg32_bounds(reg);
1500 	__update_reg64_bounds(reg);
1501 }
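
/* Worked example (illustration only): starting from an unbounded register
 * whose var_off is (value = 0x0, mask = 0xff), i.e. only the low byte is
 * unknown, __update_reg64_bounds() tightens umax to min(umax, 0x0 | 0xff)
 * = 0xff and umin to max(umin, 0x0) = 0; the signed bounds likewise become
 * [0, 0xff] since the sign bit is known to be zero.
 */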
1502 
1503 /* Uses signed min/max values to inform unsigned, and vice-versa */
1504 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
1505 {
1506 	/* Learn sign from signed bounds.
1507 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1508 	 * are the same, so combine.  This works even in the negative case, e.g.
1509 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1510 	 */
1511 	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
1512 		reg->s32_min_value = reg->u32_min_value =
1513 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1514 		reg->s32_max_value = reg->u32_max_value =
1515 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1516 		return;
1517 	}
1518 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1519 	 * boundary, so we must be careful.
1520 	 */
1521 	if ((s32)reg->u32_max_value >= 0) {
1522 		/* Positive.  We can't learn anything from the smin, but smax
1523 		 * is positive, hence safe.
1524 		 */
1525 		reg->s32_min_value = reg->u32_min_value;
1526 		reg->s32_max_value = reg->u32_max_value =
1527 			min_t(u32, reg->s32_max_value, reg->u32_max_value);
1528 	} else if ((s32)reg->u32_min_value < 0) {
1529 		/* Negative.  We can't learn anything from the smax, but smin
1530 		 * is negative, hence safe.
1531 		 */
1532 		reg->s32_min_value = reg->u32_min_value =
1533 			max_t(u32, reg->s32_min_value, reg->u32_min_value);
1534 		reg->s32_max_value = reg->u32_max_value;
1535 	}
1536 }
1537 
1538 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
1539 {
1540 	/* Learn sign from signed bounds.
1541 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
1542 	 * are the same, so combine.  This works even in the negative case, e.g.
1543 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1544 	 */
1545 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
1546 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1547 							  reg->umin_value);
1548 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1549 							  reg->umax_value);
1550 		return;
1551 	}
1552 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
1553 	 * boundary, so we must be careful.
1554 	 */
1555 	if ((s64)reg->umax_value >= 0) {
1556 		/* Positive.  We can't learn anything from the smin, but smax
1557 		 * is positive, hence safe.
1558 		 */
1559 		reg->smin_value = reg->umin_value;
1560 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1561 							  reg->umax_value);
1562 	} else if ((s64)reg->umin_value < 0) {
1563 		/* Negative.  We can't learn anything from the smax, but smin
1564 		 * is negative, hence safe.
1565 		 */
1566 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1567 							  reg->umin_value);
1568 		reg->smax_value = reg->umax_value;
1569 	}
1570 }
1571 
1572 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
1573 {
1574 	__reg32_deduce_bounds(reg);
1575 	__reg64_deduce_bounds(reg);
1576 }
1577 
1578 /* Attempts to improve var_off based on unsigned min/max information */
1579 static void __reg_bound_offset(struct bpf_reg_state *reg)
1580 {
1581 	struct tnum var64_off = tnum_intersect(reg->var_off,
1582 					       tnum_range(reg->umin_value,
1583 							  reg->umax_value));
1584 	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
1585 						tnum_range(reg->u32_min_value,
1586 							   reg->u32_max_value));
1587 
1588 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
1589 }
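
/* Worked example (illustration only): with umin_value == 16 and
 * umax_value == 31, tnum_range(16, 31) is (value = 0x10, mask = 0xf): bit 4
 * is known to be set and bits 0-3 are unknown. Intersecting that with the
 * current var_off can only add known bits, never discard them.
 */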
1590 
1591 static void reg_bounds_sync(struct bpf_reg_state *reg)
1592 {
1593 	/* We might have learned new bounds from the var_off. */
1594 	__update_reg_bounds(reg);
1595 	/* We might have learned something about the sign bit. */
1596 	__reg_deduce_bounds(reg);
1597 	/* We might have learned some bits from the bounds. */
1598 	__reg_bound_offset(reg);
1599 	/* Intersecting with the old var_off might have improved our bounds
1600 	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1601 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
1602 	 */
1603 	__update_reg_bounds(reg);
1604 }
1605 
1606 static bool __reg32_bound_s64(s32 a)
1607 {
1608 	return a >= 0 && a <= S32_MAX;
1609 }
1610 
1611 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
1612 {
1613 	reg->umin_value = reg->u32_min_value;
1614 	reg->umax_value = reg->u32_max_value;
1615 
1616 	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds; they
1617 	 * must both be non-negative, otherwise set worst-case bounds and refine
1618 	 * them later from the tnum.
1619 	 */
1620 	if (__reg32_bound_s64(reg->s32_min_value) &&
1621 	    __reg32_bound_s64(reg->s32_max_value)) {
1622 		reg->smin_value = reg->s32_min_value;
1623 		reg->smax_value = reg->s32_max_value;
1624 	} else {
1625 		reg->smin_value = 0;
1626 		reg->smax_value = U32_MAX;
1627 	}
1628 }
1629 
1630 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
1631 {
1632 	/* special case when the 64-bit register has its upper 32 bits zeroed.
1633 	 * Typically happens after a zext or a <<32, >>32 sequence, allowing us
1634 	 * to use the 32-bit bounds directly.
1635 	 */
1636 	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
1637 		__reg_assign_32_into_64(reg);
1638 	} else {
1639 		/* Otherwise the best we can do is push the lower 32-bit known and
1640 		 * unknown bits into the register (var_off set from jmp logic),
1641 		 * then learn as much as possible from the 64-bit tnum's
1642 		 * known and unknown bits. The previous smin/smax bounds are
1643 		 * invalid here because of the jmp32 compare, so mark them unknown
1644 		 * so they do not impact the tnum bounds calculation.
1645 		 */
1646 		__mark_reg64_unbounded(reg);
1647 	}
1648 	reg_bounds_sync(reg);
1649 }
1650 
1651 static bool __reg64_bound_s32(s64 a)
1652 {
1653 	return a >= S32_MIN && a <= S32_MAX;
1654 }
1655 
1656 static bool __reg64_bound_u32(u64 a)
1657 {
1658 	return a >= U32_MIN && a <= U32_MAX;
1659 }
1660 
1661 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1662 {
1663 	__mark_reg32_unbounded(reg);
1664 	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
1665 		reg->s32_min_value = (s32)reg->smin_value;
1666 		reg->s32_max_value = (s32)reg->smax_value;
1667 	}
1668 	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
1669 		reg->u32_min_value = (u32)reg->umin_value;
1670 		reg->u32_max_value = (u32)reg->umax_value;
1671 	}
1672 	reg_bounds_sync(reg);
1673 }
1674 
1675 /* Mark a register as having a completely unknown (scalar) value. */
1676 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1677 			       struct bpf_reg_state *reg)
1678 {
1679 	/*
1680 	 * Clear type, id, off, and union(map_ptr, range) and
1681 	 * padding between 'type' and union
1682 	 */
1683 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1684 	reg->type = SCALAR_VALUE;
1685 	reg->var_off = tnum_unknown;
1686 	reg->frameno = 0;
1687 	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
1688 	__mark_reg_unbounded(reg);
1689 }
1690 
1691 static void mark_reg_unknown(struct bpf_verifier_env *env,
1692 			     struct bpf_reg_state *regs, u32 regno)
1693 {
1694 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1695 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1696 		/* Something bad happened, let's kill all regs except FP */
1697 		for (regno = 0; regno < BPF_REG_FP; regno++)
1698 			__mark_reg_not_init(env, regs + regno);
1699 		return;
1700 	}
1701 	__mark_reg_unknown(env, regs + regno);
1702 }
1703 
1704 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1705 				struct bpf_reg_state *reg)
1706 {
1707 	__mark_reg_unknown(env, reg);
1708 	reg->type = NOT_INIT;
1709 }
1710 
1711 static void mark_reg_not_init(struct bpf_verifier_env *env,
1712 			      struct bpf_reg_state *regs, u32 regno)
1713 {
1714 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1715 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1716 		/* Something bad happened, let's kill all regs except FP */
1717 		for (regno = 0; regno < BPF_REG_FP; regno++)
1718 			__mark_reg_not_init(env, regs + regno);
1719 		return;
1720 	}
1721 	__mark_reg_not_init(env, regs + regno);
1722 }
1723 
1724 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1725 			    struct bpf_reg_state *regs, u32 regno,
1726 			    enum bpf_reg_type reg_type,
1727 			    struct btf *btf, u32 btf_id,
1728 			    enum bpf_type_flag flag)
1729 {
1730 	if (reg_type == SCALAR_VALUE) {
1731 		mark_reg_unknown(env, regs, regno);
1732 		return;
1733 	}
1734 	mark_reg_known_zero(env, regs, regno);
1735 	regs[regno].type = PTR_TO_BTF_ID | flag;
1736 	regs[regno].btf = btf;
1737 	regs[regno].btf_id = btf_id;
1738 }
1739 
1740 #define DEF_NOT_SUBREG	(0)
1741 static void init_reg_state(struct bpf_verifier_env *env,
1742 			   struct bpf_func_state *state)
1743 {
1744 	struct bpf_reg_state *regs = state->regs;
1745 	int i;
1746 
1747 	for (i = 0; i < MAX_BPF_REG; i++) {
1748 		mark_reg_not_init(env, regs, i);
1749 		regs[i].live = REG_LIVE_NONE;
1750 		regs[i].parent = NULL;
1751 		regs[i].subreg_def = DEF_NOT_SUBREG;
1752 	}
1753 
1754 	/* frame pointer */
1755 	regs[BPF_REG_FP].type = PTR_TO_STACK;
1756 	mark_reg_known_zero(env, regs, BPF_REG_FP);
1757 	regs[BPF_REG_FP].frameno = state->frameno;
1758 }
1759 
1760 #define BPF_MAIN_FUNC (-1)
1761 static void init_func_state(struct bpf_verifier_env *env,
1762 			    struct bpf_func_state *state,
1763 			    int callsite, int frameno, int subprogno)
1764 {
1765 	state->callsite = callsite;
1766 	state->frameno = frameno;
1767 	state->subprogno = subprogno;
1768 	state->callback_ret_range = tnum_range(0, 0);
1769 	init_reg_state(env, state);
1770 	mark_verifier_state_scratched(env);
1771 }
1772 
1773 /* Similar to push_stack(), but for async callbacks */
1774 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1775 						int insn_idx, int prev_insn_idx,
1776 						int subprog)
1777 {
1778 	struct bpf_verifier_stack_elem *elem;
1779 	struct bpf_func_state *frame;
1780 
1781 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1782 	if (!elem)
1783 		goto err;
1784 
1785 	elem->insn_idx = insn_idx;
1786 	elem->prev_insn_idx = prev_insn_idx;
1787 	elem->next = env->head;
1788 	elem->log_pos = env->log.len_used;
1789 	env->head = elem;
1790 	env->stack_size++;
1791 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1792 		verbose(env,
1793 			"The sequence of %d jumps is too complex for async cb.\n",
1794 			env->stack_size);
1795 		goto err;
1796 	}
1797 	/* Unlike push_stack() do not copy_verifier_state().
1798 	 * The caller state doesn't matter.
1799 	 * This is async callback. It starts in a fresh stack.
1800 	 * Initialize it similar to do_check_common().
1801 	 */
1802 	elem->st.branches = 1;
1803 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1804 	if (!frame)
1805 		goto err;
1806 	init_func_state(env, frame,
1807 			BPF_MAIN_FUNC /* callsite */,
1808 			0 /* frameno within this callchain */,
1809 			subprog /* subprog number within this prog */);
1810 	elem->st.frame[0] = frame;
1811 	return &elem->st;
1812 err:
1813 	free_verifier_state(env->cur_state, true);
1814 	env->cur_state = NULL;
1815 	/* pop all elements and return */
1816 	while (!pop_stack(env, NULL, NULL, false));
1817 	return NULL;
1818 }
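/* Illustration: an async callback such as one armed via
 * bpf_timer_set_callback() may run long after the arming program has
 * returned, so none of the caller's registers or stack can be relied upon.
 * Verification of the callback therefore starts from a pristine frame 0,
 * much like a separate program entry point.
 */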
1819 
1820 
1821 enum reg_arg_type {
1822 	SRC_OP,		/* register is used as source operand */
1823 	DST_OP,		/* register is used as destination operand */
1824 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1825 };
1826 
1827 static int cmp_subprogs(const void *a, const void *b)
1828 {
1829 	return ((struct bpf_subprog_info *)a)->start -
1830 	       ((struct bpf_subprog_info *)b)->start;
1831 }
1832 
1833 static int find_subprog(struct bpf_verifier_env *env, int off)
1834 {
1835 	struct bpf_subprog_info *p;
1836 
1837 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1838 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1839 	if (!p)
1840 		return -ENOENT;
1841 	return p - env->subprog_info;
1842 
1843 }
1844 
1845 static int add_subprog(struct bpf_verifier_env *env, int off)
1846 {
1847 	int insn_cnt = env->prog->len;
1848 	int ret;
1849 
1850 	if (off >= insn_cnt || off < 0) {
1851 		verbose(env, "call to invalid destination\n");
1852 		return -EINVAL;
1853 	}
1854 	ret = find_subprog(env, off);
1855 	if (ret >= 0)
1856 		return ret;
1857 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1858 		verbose(env, "too many subprograms\n");
1859 		return -E2BIG;
1860 	}
1861 	/* determine subprog starts. The end is one before the next starts */
1862 	env->subprog_info[env->subprog_cnt++].start = off;
1863 	sort(env->subprog_info, env->subprog_cnt,
1864 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1865 	return env->subprog_cnt - 1;
1866 }
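/* Example: for a pseudo call "call pc+3" at insn 5 the target is
 * insn 5 + 3 + 1 = 9, so add_subprog(env, 9) records a subprogram starting
 * at insn 9 (see add_subprog_and_kfunc() below). A second call to the same
 * target just returns the already existing index.
 */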
1867 
1868 #define MAX_KFUNC_DESCS 256
1869 #define MAX_KFUNC_BTFS	256
1870 
1871 struct bpf_kfunc_desc {
1872 	struct btf_func_model func_model;
1873 	u32 func_id;
1874 	s32 imm;
1875 	u16 offset;
1876 };
1877 
1878 struct bpf_kfunc_btf {
1879 	struct btf *btf;
1880 	struct module *module;
1881 	u16 offset;
1882 };
1883 
1884 struct bpf_kfunc_desc_tab {
1885 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
1886 	u32 nr_descs;
1887 };
1888 
1889 struct bpf_kfunc_btf_tab {
1890 	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
1891 	u32 nr_descs;
1892 };
1893 
1894 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
1895 {
1896 	const struct bpf_kfunc_desc *d0 = a;
1897 	const struct bpf_kfunc_desc *d1 = b;
1898 
1899 	/* func_id is not greater than BTF_MAX_TYPE */
1900 	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
1901 }
1902 
1903 static int kfunc_btf_cmp_by_off(const void *a, const void *b)
1904 {
1905 	const struct bpf_kfunc_btf *d0 = a;
1906 	const struct bpf_kfunc_btf *d1 = b;
1907 
1908 	return d0->offset - d1->offset;
1909 }
1910 
1911 static const struct bpf_kfunc_desc *
1912 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
1913 {
1914 	struct bpf_kfunc_desc desc = {
1915 		.func_id = func_id,
1916 		.offset = offset,
1917 	};
1918 	struct bpf_kfunc_desc_tab *tab;
1919 
1920 	tab = prog->aux->kfunc_tab;
1921 	return bsearch(&desc, tab->descs, tab->nr_descs,
1922 		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
1923 }
1924 
1925 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
1926 					 s16 offset)
1927 {
1928 	struct bpf_kfunc_btf kf_btf = { .offset = offset };
1929 	struct bpf_kfunc_btf_tab *tab;
1930 	struct bpf_kfunc_btf *b;
1931 	struct module *mod;
1932 	struct btf *btf;
1933 	int btf_fd;
1934 
1935 	tab = env->prog->aux->kfunc_btf_tab;
1936 	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
1937 		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
1938 	if (!b) {
1939 		if (tab->nr_descs == MAX_KFUNC_BTFS) {
1940 			verbose(env, "too many different module BTFs\n");
1941 			return ERR_PTR(-E2BIG);
1942 		}
1943 
1944 		if (bpfptr_is_null(env->fd_array)) {
1945 			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
1946 			return ERR_PTR(-EPROTO);
1947 		}
1948 
1949 		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
1950 					    offset * sizeof(btf_fd),
1951 					    sizeof(btf_fd)))
1952 			return ERR_PTR(-EFAULT);
1953 
1954 		btf = btf_get_by_fd(btf_fd);
1955 		if (IS_ERR(btf)) {
1956 			verbose(env, "invalid module BTF fd specified\n");
1957 			return btf;
1958 		}
1959 
1960 		if (!btf_is_module(btf)) {
1961 			verbose(env, "BTF fd for kfunc is not a module BTF\n");
1962 			btf_put(btf);
1963 			return ERR_PTR(-EINVAL);
1964 		}
1965 
1966 		mod = btf_try_get_module(btf);
1967 		if (!mod) {
1968 			btf_put(btf);
1969 			return ERR_PTR(-ENXIO);
1970 		}
1971 
1972 		b = &tab->descs[tab->nr_descs++];
1973 		b->btf = btf;
1974 		b->module = mod;
1975 		b->offset = offset;
1976 
1977 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1978 		     kfunc_btf_cmp_by_off, NULL);
1979 	}
1980 	return b->btf;
1981 }
1982 
1983 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
1984 {
1985 	if (!tab)
1986 		return;
1987 
1988 	while (tab->nr_descs--) {
1989 		module_put(tab->descs[tab->nr_descs].module);
1990 		btf_put(tab->descs[tab->nr_descs].btf);
1991 	}
1992 	kfree(tab);
1993 }
1994 
1995 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
1996 {
1997 	if (offset) {
1998 		if (offset < 0) {
1999 			/* In the future, this can be relaxed to increase the limit
2000 			 * of the fd index into fd_array, interpreting it as a u16.
2001 			 */
2002 			verbose(env, "negative offset disallowed for kernel module function call\n");
2003 			return ERR_PTR(-EINVAL);
2004 		}
2005 
2006 		return __find_kfunc_desc_btf(env, offset);
2007 	}
2008 	return btf_vmlinux ?: ERR_PTR(-ENOENT);
2009 }
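/* In other words, for a kfunc call instruction insn->off selects the BTF:
 * off == 0 means the kernel's own btf_vmlinux, while a positive off is an
 * index into the fd_array supplied at load time and resolves to a module's
 * BTF (with a reference taken on both the BTF and the module above).
 */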
2010 
2011 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
2012 {
2013 	const struct btf_type *func, *func_proto;
2014 	struct bpf_kfunc_btf_tab *btf_tab;
2015 	struct bpf_kfunc_desc_tab *tab;
2016 	struct bpf_prog_aux *prog_aux;
2017 	struct bpf_kfunc_desc *desc;
2018 	const char *func_name;
2019 	struct btf *desc_btf;
2020 	unsigned long call_imm;
2021 	unsigned long addr;
2022 	int err;
2023 
2024 	prog_aux = env->prog->aux;
2025 	tab = prog_aux->kfunc_tab;
2026 	btf_tab = prog_aux->kfunc_btf_tab;
2027 	if (!tab) {
2028 		if (!btf_vmlinux) {
2029 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2030 			return -ENOTSUPP;
2031 		}
2032 
2033 		if (!env->prog->jit_requested) {
2034 			verbose(env, "JIT is required for calling kernel function\n");
2035 			return -ENOTSUPP;
2036 		}
2037 
2038 		if (!bpf_jit_supports_kfunc_call()) {
2039 			verbose(env, "JIT does not support calling kernel function\n");
2040 			return -ENOTSUPP;
2041 		}
2042 
2043 		if (!env->prog->gpl_compatible) {
2044 			verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2045 			return -EINVAL;
2046 		}
2047 
2048 		tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2049 		if (!tab)
2050 			return -ENOMEM;
2051 		prog_aux->kfunc_tab = tab;
2052 	}
2053 
2054 	/* func_id == 0 is always invalid, but instead of returning an error, be
2055 	 * conservative and wait until the code elimination pass before returning
2056 	 * the error, so that invalid calls that get pruned out may remain in BPF
2057 	 * programs loaded from userspace.  It is also required that the offset be
2058 	 * untouched for such calls.
2059 	 */
2060 	if (!func_id && !offset)
2061 		return 0;
2062 
2063 	if (!btf_tab && offset) {
2064 		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2065 		if (!btf_tab)
2066 			return -ENOMEM;
2067 		prog_aux->kfunc_btf_tab = btf_tab;
2068 	}
2069 
2070 	desc_btf = find_kfunc_desc_btf(env, offset);
2071 	if (IS_ERR(desc_btf)) {
2072 		verbose(env, "failed to find BTF for kernel function\n");
2073 		return PTR_ERR(desc_btf);
2074 	}
2075 
2076 	if (find_kfunc_desc(env->prog, func_id, offset))
2077 		return 0;
2078 
2079 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
2080 		verbose(env, "too many different kernel function calls\n");
2081 		return -E2BIG;
2082 	}
2083 
2084 	func = btf_type_by_id(desc_btf, func_id);
2085 	if (!func || !btf_type_is_func(func)) {
2086 		verbose(env, "kernel btf_id %u is not a function\n",
2087 			func_id);
2088 		return -EINVAL;
2089 	}
2090 	func_proto = btf_type_by_id(desc_btf, func->type);
2091 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2092 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2093 			func_id);
2094 		return -EINVAL;
2095 	}
2096 
2097 	func_name = btf_name_by_offset(desc_btf, func->name_off);
2098 	addr = kallsyms_lookup_name(func_name);
2099 	if (!addr) {
2100 		verbose(env, "cannot find address for kernel function %s\n",
2101 			func_name);
2102 		return -EINVAL;
2103 	}
2104 
2105 	call_imm = BPF_CALL_IMM(addr);
2106 	/* Check whether or not the relative offset overflows desc->imm */
2107 	if ((unsigned long)(s32)call_imm != call_imm) {
2108 		verbose(env, "address of kernel function %s is out of range\n",
2109 			func_name);
2110 		return -EINVAL;
2111 	}
2112 
2113 	desc = &tab->descs[tab->nr_descs++];
2114 	desc->func_id = func_id;
2115 	desc->imm = call_imm;
2116 	desc->offset = offset;
2117 	err = btf_distill_func_proto(&env->log, desc_btf,
2118 				     func_proto, func_name,
2119 				     &desc->func_model);
2120 	if (!err)
2121 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2122 		     kfunc_desc_cmp_by_id_off, NULL);
2123 	return err;
2124 }
2125 
2126 static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
2127 {
2128 	const struct bpf_kfunc_desc *d0 = a;
2129 	const struct bpf_kfunc_desc *d1 = b;
2130 
2131 	if (d0->imm > d1->imm)
2132 		return 1;
2133 	else if (d0->imm < d1->imm)
2134 		return -1;
2135 	return 0;
2136 }
2137 
2138 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
2139 {
2140 	struct bpf_kfunc_desc_tab *tab;
2141 
2142 	tab = prog->aux->kfunc_tab;
2143 	if (!tab)
2144 		return;
2145 
2146 	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2147 	     kfunc_desc_cmp_by_imm, NULL);
2148 }
2149 
2150 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2151 {
2152 	return !!prog->aux->kfunc_tab;
2153 }
2154 
2155 const struct btf_func_model *
2156 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2157 			 const struct bpf_insn *insn)
2158 {
2159 	const struct bpf_kfunc_desc desc = {
2160 		.imm = insn->imm,
2161 	};
2162 	const struct bpf_kfunc_desc *res;
2163 	struct bpf_kfunc_desc_tab *tab;
2164 
2165 	tab = prog->aux->kfunc_tab;
2166 	res = bsearch(&desc, tab->descs, tab->nr_descs,
2167 		      sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
2168 
2169 	return res ? &res->func_model : NULL;
2170 }
2171 
2172 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
2173 {
2174 	struct bpf_subprog_info *subprog = env->subprog_info;
2175 	struct bpf_insn *insn = env->prog->insnsi;
2176 	int i, ret, insn_cnt = env->prog->len;
2177 
2178 	/* Add entry function. */
2179 	ret = add_subprog(env, 0);
2180 	if (ret)
2181 		return ret;
2182 
2183 	for (i = 0; i < insn_cnt; i++, insn++) {
2184 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2185 		    !bpf_pseudo_kfunc_call(insn))
2186 			continue;
2187 
2188 		if (!env->bpf_capable) {
2189 			verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
2190 			return -EPERM;
2191 		}
2192 
2193 		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
2194 			ret = add_subprog(env, i + insn->imm + 1);
2195 		else
2196 			ret = add_kfunc_call(env, insn->imm, insn->off);
2197 
2198 		if (ret < 0)
2199 			return ret;
2200 	}
2201 
2202 	/* Add a fake 'exit' subprog which could simplify subprog iteration
2203 	 * logic. 'subprog_cnt' should not be increased.
2204 	 */
2205 	subprog[env->subprog_cnt].start = insn_cnt;
2206 
2207 	if (env->log.level & BPF_LOG_LEVEL2)
2208 		for (i = 0; i < env->subprog_cnt; i++)
2209 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
2210 
2211 	return 0;
2212 }
2213 
2214 static int check_subprogs(struct bpf_verifier_env *env)
2215 {
2216 	int i, subprog_start, subprog_end, off, cur_subprog = 0;
2217 	struct bpf_subprog_info *subprog = env->subprog_info;
2218 	struct bpf_insn *insn = env->prog->insnsi;
2219 	int insn_cnt = env->prog->len;
2220 
2221 	/* now check that all jumps are within the same subprog */
2222 	subprog_start = subprog[cur_subprog].start;
2223 	subprog_end = subprog[cur_subprog + 1].start;
2224 	for (i = 0; i < insn_cnt; i++) {
2225 		u8 code = insn[i].code;
2226 
2227 		if (code == (BPF_JMP | BPF_CALL) &&
2228 		    insn[i].imm == BPF_FUNC_tail_call &&
2229 		    insn[i].src_reg != BPF_PSEUDO_CALL)
2230 			subprog[cur_subprog].has_tail_call = true;
2231 		if (BPF_CLASS(code) == BPF_LD &&
2232 		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2233 			subprog[cur_subprog].has_ld_abs = true;
2234 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
2235 			goto next;
2236 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2237 			goto next;
2238 		off = i + insn[i].off + 1;
2239 		if (off < subprog_start || off >= subprog_end) {
2240 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
2241 			return -EINVAL;
2242 		}
2243 next:
2244 		if (i == subprog_end - 1) {
2245 			/* to avoid fall-through from one subprog into another
2246 			 * the last insn of the subprog should be either exit
2247 			 * or unconditional jump back
2248 			 */
2249 			if (code != (BPF_JMP | BPF_EXIT) &&
2250 			    code != (BPF_JMP | BPF_JA)) {
2251 				verbose(env, "last insn is not an exit or jmp\n");
2252 				return -EINVAL;
2253 			}
2254 			subprog_start = subprog_end;
2255 			cur_subprog++;
2256 			if (cur_subprog < env->subprog_cnt)
2257 				subprog_end = subprog[cur_subprog + 1].start;
2258 		}
2259 	}
2260 	return 0;
2261 }
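/* Example layout after add_subprog_and_kfunc(): a program with one
 * bpf-to-bpf call could have subprog starts {0, 9} plus the fake 'exit'
 * entry at insn_cnt. The loop above then checks that no jump inside
 * insns [0, 8] targets insn 9 or beyond, and that insn 8 (the last insn
 * of subprog 0) is an exit or an unconditional jump.
 */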
2262 
2263 /* Parentage chain of this register (or stack slot) should take care of all
2264  * issues like callee-saved registers, stack slot allocation time, etc.
2265  */
2266 static int mark_reg_read(struct bpf_verifier_env *env,
2267 			 const struct bpf_reg_state *state,
2268 			 struct bpf_reg_state *parent, u8 flag)
2269 {
2270 	bool writes = parent == state->parent; /* Observe write marks */
2271 	int cnt = 0;
2272 
2273 	while (parent) {
2274 		/* if read wasn't screened by an earlier write ... */
2275 		if (writes && state->live & REG_LIVE_WRITTEN)
2276 			break;
2277 		if (parent->live & REG_LIVE_DONE) {
2278 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
2279 				reg_type_str(env, parent->type),
2280 				parent->var_off.value, parent->off);
2281 			return -EFAULT;
2282 		}
2283 		/* The first condition is more likely to be true than the
2284 		 * second, so check it first.
2285 		 */
2286 		if ((parent->live & REG_LIVE_READ) == flag ||
2287 		    parent->live & REG_LIVE_READ64)
2288 			/* The parentage chain never changes and
2289 			 * this parent was already marked as LIVE_READ.
2290 			 * There is no need to keep walking the chain again and
2291 			 * keep re-marking all parents as LIVE_READ.
2292 			 * This case happens when the same register is read
2293 			 * multiple times without writes into it in-between.
2294 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
2295 			 * then no need to set the weak REG_LIVE_READ32.
2296 			 */
2297 			break;
2298 		/* ... then we depend on parent's value */
2299 		parent->live |= flag;
2300 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2301 		if (flag == REG_LIVE_READ64)
2302 			parent->live &= ~REG_LIVE_READ32;
2303 		state = parent;
2304 		parent = state->parent;
2305 		writes = true;
2306 		cnt++;
2307 	}
2308 
2309 	if (env->longest_mark_read_walk < cnt)
2310 		env->longest_mark_read_walk = cnt;
2311 	return 0;
2312 }
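/* Example: when a child state reads r6 without having written it first,
 * the walk above ORs REG_LIVE_READ32/READ64 into each ancestor of r6's
 * parentage chain, stopping at a state that fully wrote r6
 * (REG_LIVE_WRITTEN) or that already carries an equally strong read mark.
 * These read marks are what later allows state pruning to ignore registers
 * that nothing downstream ever looks at.
 */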
2313 
2314 /* This function is supposed to be used by the following 32-bit optimization
2315  * code only. It returns TRUE if the source or destination register operates
2316  * on 64 bits, otherwise it returns FALSE.
2317  */
2318 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2319 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2320 {
2321 	u8 code, class, op;
2322 
2323 	code = insn->code;
2324 	class = BPF_CLASS(code);
2325 	op = BPF_OP(code);
2326 	if (class == BPF_JMP) {
2327 		/* BPF_EXIT for "main" will reach here. Return TRUE
2328 		 * conservatively.
2329 		 */
2330 		if (op == BPF_EXIT)
2331 			return true;
2332 		if (op == BPF_CALL) {
2333 			/* A BPF-to-BPF call will reach here because the caller-saved
2334 			 * clobbers are marked with DST_OP_NO_MARK; we don't care about
2335 			 * the register defs because they are already marked as
2336 			 * NOT_INIT anyway.
2337 			 */
2338 			if (insn->src_reg == BPF_PSEUDO_CALL)
2339 				return false;
2340 			/* Helper call will reach here because of arg type
2341 			 * check, conservatively return TRUE.
2342 			 */
2343 			if (t == SRC_OP)
2344 				return true;
2345 
2346 			return false;
2347 		}
2348 	}
2349 
2350 	if (class == BPF_ALU64 || class == BPF_JMP ||
2351 	    /* BPF_END always uses the BPF_ALU class. */
2352 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2353 		return true;
2354 
2355 	if (class == BPF_ALU || class == BPF_JMP32)
2356 		return false;
2357 
2358 	if (class == BPF_LDX) {
2359 		if (t != SRC_OP)
2360 			return BPF_SIZE(code) == BPF_DW;
2361 		/* LDX source must be ptr. */
2362 		return true;
2363 	}
2364 
2365 	if (class == BPF_STX) {
2366 		/* BPF_STX (including atomic variants) has multiple source
2367 		 * operands, one of which is a ptr. Check whether the caller is
2368 		 * asking about it.
2369 		 */
2370 		if (t == SRC_OP && reg->type != SCALAR_VALUE)
2371 			return true;
2372 		return BPF_SIZE(code) == BPF_DW;
2373 	}
2374 
2375 	if (class == BPF_LD) {
2376 		u8 mode = BPF_MODE(code);
2377 
2378 		/* LD_IMM64 */
2379 		if (mode == BPF_IMM)
2380 			return true;
2381 
2382 		/* Both LD_IND and LD_ABS return 32-bit data. */
2383 		if (t != SRC_OP)
2384 			return false;
2385 
2386 		/* Implicit ctx ptr. */
2387 		if (regno == BPF_REG_6)
2388 			return true;
2389 
2390 		/* Explicit source could be any width. */
2391 		return true;
2392 	}
2393 
2394 	if (class == BPF_ST)
2395 		/* The only source register for BPF_ST is a ptr. */
2396 		return true;
2397 
2398 	/* Conservatively return true by default. */
2399 	return true;
2400 }
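/* Examples: "r1 += r2" (BPF_ALU64) always operates on the full 64 bits, so
 * this returns true; "w1 += w2" (BPF_ALU) and jmp32 compares only look at
 * the low 32 bits, so it returns false; an LDX of size BPF_W defines a
 * 32-bit value, while its address operand is of course a full 64-bit
 * pointer.
 */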
2401 
2402 /* Return the regno defined by the insn, or -1. */
2403 static int insn_def_regno(const struct bpf_insn *insn)
2404 {
2405 	switch (BPF_CLASS(insn->code)) {
2406 	case BPF_JMP:
2407 	case BPF_JMP32:
2408 	case BPF_ST:
2409 		return -1;
2410 	case BPF_STX:
2411 		if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2412 		    (insn->imm & BPF_FETCH)) {
2413 			if (insn->imm == BPF_CMPXCHG)
2414 				return BPF_REG_0;
2415 			else
2416 				return insn->src_reg;
2417 		} else {
2418 			return -1;
2419 		}
2420 	default:
2421 		return insn->dst_reg;
2422 	}
2423 }
2424 
2425 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
2426 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2427 {
2428 	int dst_reg = insn_def_regno(insn);
2429 
2430 	if (dst_reg == -1)
2431 		return false;
2432 
2433 	return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
2434 }
2435 
2436 static void mark_insn_zext(struct bpf_verifier_env *env,
2437 			   struct bpf_reg_state *reg)
2438 {
2439 	s32 def_idx = reg->subreg_def;
2440 
2441 	if (def_idx == DEF_NOT_SUBREG)
2442 		return;
2443 
2444 	env->insn_aux_data[def_idx - 1].zext_dst = true;
2445 	/* The dst will be zero extended, so won't be sub-register anymore. */
2446 	reg->subreg_def = DEF_NOT_SUBREG;
2447 }
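/* Sketch of how the mark is used later: after verification, for JITs that
 * do not implicitly zero the upper 32 bits on sub-register writes, the
 * insns flagged with zext_dst get an explicit zero-extension patched in
 * after them. Thanks to mark_insn_zext() this only happens for definitions
 * whose upper half is actually read later (REG_LIVE_READ64).
 */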
2448 
2449 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
2450 			 enum reg_arg_type t)
2451 {
2452 	struct bpf_verifier_state *vstate = env->cur_state;
2453 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2454 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
2455 	struct bpf_reg_state *reg, *regs = state->regs;
2456 	bool rw64;
2457 
2458 	if (regno >= MAX_BPF_REG) {
2459 		verbose(env, "R%d is invalid\n", regno);
2460 		return -EINVAL;
2461 	}
2462 
2463 	mark_reg_scratched(env, regno);
2464 
2465 	reg = &regs[regno];
2466 	rw64 = is_reg64(env, insn, regno, reg, t);
2467 	if (t == SRC_OP) {
2468 		/* check whether register used as source operand can be read */
2469 		if (reg->type == NOT_INIT) {
2470 			verbose(env, "R%d !read_ok\n", regno);
2471 			return -EACCES;
2472 		}
2473 		/* We don't need to worry about FP liveness because it's read-only */
2474 		if (regno == BPF_REG_FP)
2475 			return 0;
2476 
2477 		if (rw64)
2478 			mark_insn_zext(env, reg);
2479 
2480 		return mark_reg_read(env, reg, reg->parent,
2481 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
2482 	} else {
2483 		/* check whether register used as dest operand can be written to */
2484 		if (regno == BPF_REG_FP) {
2485 			verbose(env, "frame pointer is read only\n");
2486 			return -EACCES;
2487 		}
2488 		reg->live |= REG_LIVE_WRITTEN;
2489 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
2490 		if (t == DST_OP)
2491 			mark_reg_unknown(env, regs, regno);
2492 	}
2493 	return 0;
2494 }
2495 
2496 /* for any branch, call, or exit, record the history of jmps in the given state */
2497 static int push_jmp_history(struct bpf_verifier_env *env,
2498 			    struct bpf_verifier_state *cur)
2499 {
2500 	u32 cnt = cur->jmp_history_cnt;
2501 	struct bpf_idx_pair *p;
2502 
2503 	cnt++;
2504 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
2505 	if (!p)
2506 		return -ENOMEM;
2507 	p[cnt - 1].idx = env->insn_idx;
2508 	p[cnt - 1].prev_idx = env->prev_insn_idx;
2509 	cur->jmp_history = p;
2510 	cur->jmp_history_cnt = cnt;
2511 	return 0;
2512 }
2513 
2514 /* Backtrack one insn at a time. If idx is not at the top of the recorded
2515  * history then the previous instruction came from straight-line execution.
2516  */
2517 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2518 			     u32 *history)
2519 {
2520 	u32 cnt = *history;
2521 
2522 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
2523 		i = st->jmp_history[cnt - 1].prev_idx;
2524 		(*history)--;
2525 	} else {
2526 		i--;
2527 	}
2528 	return i;
2529 }
2530 
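/* Example: with a jmp_history entry {idx=10, prev_idx=4} and i == 10, the
 * previous insn on this state's path is 4 (the jump that led here); for any
 * other i the path was straight-line and the previous insn is simply i - 1.
 */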
2531 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2532 {
2533 	const struct btf_type *func;
2534 	struct btf *desc_btf;
2535 
2536 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2537 		return NULL;
2538 
2539 	desc_btf = find_kfunc_desc_btf(data, insn->off);
2540 	if (IS_ERR(desc_btf))
2541 		return "<error>";
2542 
2543 	func = btf_type_by_id(desc_btf, insn->imm);
2544 	return btf_name_by_offset(desc_btf, func->name_off);
2545 }
2546 
2547 /* For a given verifier state backtrack_insn() is called from the last insn to
2548  * the first insn. Its purpose is to compute a bitmask of registers and
2549  * stack slots that need precision in the parent verifier state.
2550  */
2551 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2552 			  u32 *reg_mask, u64 *stack_mask)
2553 {
2554 	const struct bpf_insn_cbs cbs = {
2555 		.cb_call	= disasm_kfunc_name,
2556 		.cb_print	= verbose,
2557 		.private_data	= env,
2558 	};
2559 	struct bpf_insn *insn = env->prog->insnsi + idx;
2560 	u8 class = BPF_CLASS(insn->code);
2561 	u8 opcode = BPF_OP(insn->code);
2562 	u8 mode = BPF_MODE(insn->code);
2563 	u32 dreg = 1u << insn->dst_reg;
2564 	u32 sreg = 1u << insn->src_reg;
2565 	u32 spi;
2566 
2567 	if (insn->code == 0)
2568 		return 0;
2569 	if (env->log.level & BPF_LOG_LEVEL2) {
2570 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2571 		verbose(env, "%d: ", idx);
2572 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2573 	}
2574 
2575 	if (class == BPF_ALU || class == BPF_ALU64) {
2576 		if (!(*reg_mask & dreg))
2577 			return 0;
2578 		if (opcode == BPF_MOV) {
2579 			if (BPF_SRC(insn->code) == BPF_X) {
2580 				/* dreg = sreg
2581 				 * dreg needs precision after this insn
2582 				 * sreg needs precision before this insn
2583 				 */
2584 				*reg_mask &= ~dreg;
2585 				*reg_mask |= sreg;
2586 			} else {
2587 				/* dreg = K
2588 				 * dreg needs precision after this insn.
2589 				 * Corresponding register is already marked
2590 				 * as precise=true in this verifier state.
2591 				 * No further markings in parent are necessary
2592 				 */
2593 				*reg_mask &= ~dreg;
2594 			}
2595 		} else {
2596 			if (BPF_SRC(insn->code) == BPF_X) {
2597 				/* dreg += sreg
2598 				 * both dreg and sreg need precision
2599 				 * before this insn
2600 				 */
2601 				*reg_mask |= sreg;
2602 			} /* else dreg += K
2603 			   * dreg still needs precision before this insn
2604 			   */
2605 		}
2606 	} else if (class == BPF_LDX) {
2607 		if (!(*reg_mask & dreg))
2608 			return 0;
2609 		*reg_mask &= ~dreg;
2610 
2611 		/* scalars can only be spilled into stack w/o losing precision.
2612 		 * Load from any other memory can be zero extended.
2613 		 * The desire to keep that precision is already indicated
2614 		 * by 'precise' mark in corresponding register of this state.
2615 		 * No further tracking necessary.
2616 		 */
2617 		if (insn->src_reg != BPF_REG_FP)
2618 			return 0;
2619 
2620 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
2621 		 * That [fp - off] slot contains a scalar that needs to be
2622 		 * tracked with precision.
2623 		 */
2624 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2625 		if (spi >= 64) {
2626 			verbose(env, "BUG spi %d\n", spi);
2627 			WARN_ONCE(1, "verifier backtracking bug");
2628 			return -EFAULT;
2629 		}
2630 		*stack_mask |= 1ull << spi;
2631 	} else if (class == BPF_STX || class == BPF_ST) {
2632 		if (*reg_mask & dreg)
2633 			/* stx & st shouldn't be using _scalar_ dst_reg
2634 			 * to access memory. It means backtracking
2635 			 * encountered a case of pointer subtraction.
2636 			 */
2637 			return -ENOTSUPP;
2638 		/* scalars can only be spilled into stack */
2639 		if (insn->dst_reg != BPF_REG_FP)
2640 			return 0;
2641 		spi = (-insn->off - 1) / BPF_REG_SIZE;
2642 		if (spi >= 64) {
2643 			verbose(env, "BUG spi %d\n", spi);
2644 			WARN_ONCE(1, "verifier backtracking bug");
2645 			return -EFAULT;
2646 		}
2647 		if (!(*stack_mask & (1ull << spi)))
2648 			return 0;
2649 		*stack_mask &= ~(1ull << spi);
2650 		if (class == BPF_STX)
2651 			*reg_mask |= sreg;
2652 	} else if (class == BPF_JMP || class == BPF_JMP32) {
2653 		if (opcode == BPF_CALL) {
2654 			if (insn->src_reg == BPF_PSEUDO_CALL)
2655 				return -ENOTSUPP;
2656 			/* regular helper call sets R0 */
2657 			*reg_mask &= ~1;
2658 			if (*reg_mask & 0x3f) {
2659 				/* if backtracing was looking for registers R1-R5
2660 				 * they should have been found already.
2661 				 */
2662 				verbose(env, "BUG regs %x\n", *reg_mask);
2663 				WARN_ONCE(1, "verifier backtracking bug");
2664 				return -EFAULT;
2665 			}
2666 		} else if (opcode == BPF_EXIT) {
2667 			return -ENOTSUPP;
2668 		}
2669 	} else if (class == BPF_LD) {
2670 		if (!(*reg_mask & dreg))
2671 			return 0;
2672 		*reg_mask &= ~dreg;
2673 		/* It's ld_imm64 or ld_abs or ld_ind.
2674 		 * For ld_imm64 no further tracking of precision
2675 		 * into parent is necessary
2676 		 */
2677 		if (mode == BPF_IND || mode == BPF_ABS)
2678 			/* to be analyzed */
2679 			return -ENOTSUPP;
2680 	}
2681 	return 0;
2682 }
2683 
2684 /* the scalar precision tracking algorithm:
2685  * . at the start all registers have precise=false.
2686  * . scalar ranges are tracked as normal through alu and jmp insns.
2687  * . once precise value of the scalar register is used in:
2688  *   .  ptr + scalar alu
2689  *   . if (scalar cond K|scalar)
2690  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
2691  *   backtrack through the verifier states and mark as precise all
2692  *   registers and stack slots with spilled constants that these scalar
2693  *   registers were derived from.
2694  * . during state pruning two registers (or spilled stack slots)
2695  *   are equivalent if both are not precise.
2696  *
2697  * Note the verifier cannot simply walk register parentage chain,
2698  * since many different registers and stack slots could have been
2699  * used to compute single precise scalar.
2700  *
2701  * The approach of starting with precise=true for all registers and then
2702  * backtrack to mark a register as not precise when the verifier detects
2703  * backtracking to mark a register as not precise when the verifier detects
2704  * that the program doesn't care about the specific value (e.g., when a helper
2705  * takes the register as an ARG_ANYTHING parameter) is not safe.
2706  * It's ok to walk single parentage chain of the verifier states.
2707  * It's possible that this backtracking will go all the way till 1st insn.
2708  * All other branches will be explored for needing precision later.
2709  *
2710  * The backtracking needs to deal with cases like:
2711  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2712  * r9 -= r8
2713  * r5 = r9
2714  * if r5 > 0x79f goto pc+7
2715  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2716  * r5 += 1
2717  * ...
2718  * call bpf_perf_event_output#25
2719  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
2720  *
2721  * and this case:
2722  * r6 = 1
2723  * call foo // uses callee's r6 inside to compute r0
2724  * r0 += r6
2725  * if r0 == 0 goto
2726  *
2727  * to track the above, reg_mask/stack_mask need to be independent for each frame.
2728  *
2729  * Also if parent's curframe > frame where backtracking started,
2730  * the verifier needs to mark registers in both frames, otherwise callees
2731  * may incorrectly prune callers. This is similar to
2732  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
2733  *
2734  * For now backtracking falls back into conservative marking.
2735  */
2736 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
2737 				     struct bpf_verifier_state *st)
2738 {
2739 	struct bpf_func_state *func;
2740 	struct bpf_reg_state *reg;
2741 	int i, j;
2742 
2743 	/* big hammer: mark all scalars precise in this path.
2744 	 * pop_stack may still get !precise scalars.
2745 	 */
2746 	for (; st; st = st->parent)
2747 		for (i = 0; i <= st->curframe; i++) {
2748 			func = st->frame[i];
2749 			for (j = 0; j < BPF_REG_FP; j++) {
2750 				reg = &func->regs[j];
2751 				if (reg->type != SCALAR_VALUE)
2752 					continue;
2753 				reg->precise = true;
2754 			}
2755 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2756 				if (!is_spilled_reg(&func->stack[j]))
2757 					continue;
2758 				reg = &func->stack[j].spilled_ptr;
2759 				if (reg->type != SCALAR_VALUE)
2760 					continue;
2761 				reg->precise = true;
2762 			}
2763 		}
2764 }
2765 
2766 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2767 				  int spi)
2768 {
2769 	struct bpf_verifier_state *st = env->cur_state;
2770 	int first_idx = st->first_insn_idx;
2771 	int last_idx = env->insn_idx;
2772 	struct bpf_func_state *func;
2773 	struct bpf_reg_state *reg;
2774 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2775 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
2776 	bool skip_first = true;
2777 	bool new_marks = false;
2778 	int i, err;
2779 
2780 	if (!env->bpf_capable)
2781 		return 0;
2782 
2783 	func = st->frame[st->curframe];
2784 	if (regno >= 0) {
2785 		reg = &func->regs[regno];
2786 		if (reg->type != SCALAR_VALUE) {
2787 			WARN_ONCE(1, "backtracing misuse");
2788 			return -EFAULT;
2789 		}
2790 		if (!reg->precise)
2791 			new_marks = true;
2792 		else
2793 			reg_mask = 0;
2794 		reg->precise = true;
2795 	}
2796 
2797 	while (spi >= 0) {
2798 		if (!is_spilled_reg(&func->stack[spi])) {
2799 			stack_mask = 0;
2800 			break;
2801 		}
2802 		reg = &func->stack[spi].spilled_ptr;
2803 		if (reg->type != SCALAR_VALUE) {
2804 			stack_mask = 0;
2805 			break;
2806 		}
2807 		if (!reg->precise)
2808 			new_marks = true;
2809 		else
2810 			stack_mask = 0;
2811 		reg->precise = true;
2812 		break;
2813 	}
2814 
2815 	if (!new_marks)
2816 		return 0;
2817 	if (!reg_mask && !stack_mask)
2818 		return 0;
2819 	for (;;) {
2820 		DECLARE_BITMAP(mask, 64);
2821 		u32 history = st->jmp_history_cnt;
2822 
2823 		if (env->log.level & BPF_LOG_LEVEL2)
2824 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2825 		for (i = last_idx;;) {
2826 			if (skip_first) {
2827 				err = 0;
2828 				skip_first = false;
2829 			} else {
2830 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2831 			}
2832 			if (err == -ENOTSUPP) {
2833 				mark_all_scalars_precise(env, st);
2834 				return 0;
2835 			} else if (err) {
2836 				return err;
2837 			}
2838 			if (!reg_mask && !stack_mask)
2839 				/* Found assignment(s) into tracked register in this state.
2840 				 * Since this state is already marked, just return.
2841 				 * Nothing to be tracked further in the parent state.
2842 				 */
2843 				return 0;
2844 			if (i == first_idx)
2845 				break;
2846 			i = get_prev_insn_idx(st, i, &history);
2847 			if (i >= env->prog->len) {
2848 				/* This can happen if backtracking reached insn 0
2849 				 * and there are still reg_mask or stack_mask
2850 				 * to backtrack.
2851 				 * It means the backtracking missed the spot where
2852 				 * particular register was initialized with a constant.
2853 				 */
2854 				verbose(env, "BUG backtracking idx %d\n", i);
2855 				WARN_ONCE(1, "verifier backtracking bug");
2856 				return -EFAULT;
2857 			}
2858 		}
2859 		st = st->parent;
2860 		if (!st)
2861 			break;
2862 
2863 		new_marks = false;
2864 		func = st->frame[st->curframe];
2865 		bitmap_from_u64(mask, reg_mask);
2866 		for_each_set_bit(i, mask, 32) {
2867 			reg = &func->regs[i];
2868 			if (reg->type != SCALAR_VALUE) {
2869 				reg_mask &= ~(1u << i);
2870 				continue;
2871 			}
2872 			if (!reg->precise)
2873 				new_marks = true;
2874 			reg->precise = true;
2875 		}
2876 
2877 		bitmap_from_u64(mask, stack_mask);
2878 		for_each_set_bit(i, mask, 64) {
2879 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
2880 				/* the sequence of instructions:
2881 				 * 2: (bf) r3 = r10
2882 				 * 3: (7b) *(u64 *)(r3 -8) = r0
2883 				 * 4: (79) r4 = *(u64 *)(r10 -8)
2884 				 * doesn't contain jmps. It's backtracked
2885 				 * as a single block.
2886 				 * During backtracking insn 3 is not recognized as
2887 				 * stack access, so at the end of backtracking
2888 				 * stack slot fp-8 is still marked in stack_mask.
2889 				 * However the parent state may not have accessed
2890 				 * fp-8 and it's "unallocated" stack space.
2891 				 * In such case fallback to conservative.
2892 				 */
2893 				mark_all_scalars_precise(env, st);
2894 				return 0;
2895 			}
2896 
2897 			if (!is_spilled_reg(&func->stack[i])) {
2898 				stack_mask &= ~(1ull << i);
2899 				continue;
2900 			}
2901 			reg = &func->stack[i].spilled_ptr;
2902 			if (reg->type != SCALAR_VALUE) {
2903 				stack_mask &= ~(1ull << i);
2904 				continue;
2905 			}
2906 			if (!reg->precise)
2907 				new_marks = true;
2908 			reg->precise = true;
2909 		}
2910 		if (env->log.level & BPF_LOG_LEVEL2) {
2911 			verbose(env, "parent %s regs=%x stack=%llx marks:",
2912 				new_marks ? "didn't have" : "already had",
2913 				reg_mask, stack_mask);
2914 			print_verifier_state(env, func, true);
2915 		}
2916 
2917 		if (!reg_mask && !stack_mask)
2918 			break;
2919 		if (!new_marks)
2920 			break;
2921 
2922 		last_idx = st->last_insn_idx;
2923 		first_idx = st->first_insn_idx;
2924 	}
2925 	return 0;
2926 }
2927 
2928 int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2929 {
2930 	return __mark_chain_precision(env, regno, -1);
2931 }
2932 
2933 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2934 {
2935 	return __mark_chain_precision(env, -1, spi);
2936 }
2937 
2938 static bool is_spillable_regtype(enum bpf_reg_type type)
2939 {
2940 	switch (base_type(type)) {
2941 	case PTR_TO_MAP_VALUE:
2942 	case PTR_TO_STACK:
2943 	case PTR_TO_CTX:
2944 	case PTR_TO_PACKET:
2945 	case PTR_TO_PACKET_META:
2946 	case PTR_TO_PACKET_END:
2947 	case PTR_TO_FLOW_KEYS:
2948 	case CONST_PTR_TO_MAP:
2949 	case PTR_TO_SOCKET:
2950 	case PTR_TO_SOCK_COMMON:
2951 	case PTR_TO_TCP_SOCK:
2952 	case PTR_TO_XDP_SOCK:
2953 	case PTR_TO_BTF_ID:
2954 	case PTR_TO_BUF:
2955 	case PTR_TO_MEM:
2956 	case PTR_TO_FUNC:
2957 	case PTR_TO_MAP_KEY:
2958 		return true;
2959 	default:
2960 		return false;
2961 	}
2962 }
2963 
2964 /* Does this register contain a constant zero? */
2965 static bool register_is_null(struct bpf_reg_state *reg)
2966 {
2967 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2968 }
2969 
2970 static bool register_is_const(struct bpf_reg_state *reg)
2971 {
2972 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2973 }
2974 
2975 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2976 {
2977 	return tnum_is_unknown(reg->var_off) &&
2978 	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2979 	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
2980 	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
2981 	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2982 }
2983 
2984 static bool register_is_bounded(struct bpf_reg_state *reg)
2985 {
2986 	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2987 }
2988 
2989 static bool __is_pointer_value(bool allow_ptr_leaks,
2990 			       const struct bpf_reg_state *reg)
2991 {
2992 	if (allow_ptr_leaks)
2993 		return false;
2994 
2995 	return reg->type != SCALAR_VALUE;
2996 }
2997 
2998 static void save_register_state(struct bpf_func_state *state,
2999 				int spi, struct bpf_reg_state *reg,
3000 				int size)
3001 {
3002 	int i;
3003 
3004 	state->stack[spi].spilled_ptr = *reg;
3005 	if (size == BPF_REG_SIZE)
3006 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3007 
3008 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
3009 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
3010 
3011 	/* size < 8 bytes spill */
3012 	for (; i; i--)
3013 		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
3014 }
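/* Example: an 8-byte spill marks all eight slot_type bytes STACK_SPILL and
 * flags the slot REG_LIVE_WRITTEN; a narrower spill (e.g. 4 bytes) of a
 * bounded scalar marks only 'size' bytes STACK_SPILL, scrubs the remaining
 * bytes and deliberately leaves REG_LIVE_WRITTEN unset, so that read
 * propagation into parent states is not cut off for the untouched bytes.
 */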
3015 
3016 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
3017  * stack boundary and alignment are checked in check_mem_access()
3018  */
3019 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
3020 				       /* stack frame we're writing to */
3021 				       struct bpf_func_state *state,
3022 				       int off, int size, int value_regno,
3023 				       int insn_idx)
3024 {
3025 	struct bpf_func_state *cur; /* state of the current function */
3026 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
3027 	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
3028 	struct bpf_reg_state *reg = NULL;
3029 
3030 	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
3031 	if (err)
3032 		return err;
3033 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3034 	 * so it's aligned access and [off, off + size) are within stack limits
3035 	 */
3036 	if (!env->allow_ptr_leaks &&
3037 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
3038 	    size != BPF_REG_SIZE) {
3039 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
3040 		return -EACCES;
3041 	}
3042 
3043 	cur = env->cur_state->frame[env->cur_state->curframe];
3044 	if (value_regno >= 0)
3045 		reg = &cur->regs[value_regno];
3046 	if (!env->bypass_spec_v4) {
3047 		bool sanitize = reg && is_spillable_regtype(reg->type);
3048 
3049 		for (i = 0; i < size; i++) {
3050 			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
3051 				sanitize = true;
3052 				break;
3053 			}
3054 		}
3055 
3056 		if (sanitize)
3057 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3058 	}
3059 
3060 	mark_stack_slot_scratched(env, spi);
3061 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
3062 	    !register_is_null(reg) && env->bpf_capable) {
3063 		if (dst_reg != BPF_REG_FP) {
3064 			/* The backtracking logic can only recognize an explicit
3065 			 * stack slot address like [fp - 8]. A spill of a scalar via
3066 			 * a different register has to be handled conservatively.
3067 			 * Backtrack from here and mark as precise all registers
3068 			 * that contributed to 'reg' being a constant.
3069 			 */
3070 			err = mark_chain_precision(env, value_regno);
3071 			if (err)
3072 				return err;
3073 		}
3074 		save_register_state(state, spi, reg, size);
3075 	} else if (reg && is_spillable_regtype(reg->type)) {
3076 		/* register containing pointer is being spilled into stack */
3077 		if (size != BPF_REG_SIZE) {
3078 			verbose_linfo(env, insn_idx, "; ");
3079 			verbose(env, "invalid size of register spill\n");
3080 			return -EACCES;
3081 		}
3082 		if (state != cur && reg->type == PTR_TO_STACK) {
3083 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3084 			return -EINVAL;
3085 		}
3086 		save_register_state(state, spi, reg, size);
3087 	} else {
3088 		u8 type = STACK_MISC;
3089 
3090 		/* regular write of data into stack destroys any spilled ptr */
3091 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3092 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
3093 		if (is_spilled_reg(&state->stack[spi]))
3094 			for (i = 0; i < BPF_REG_SIZE; i++)
3095 				scrub_spilled_slot(&state->stack[spi].slot_type[i]);
3096 
3097 		/* only mark the slot as written if all 8 bytes were written,
3098 		 * otherwise read propagation may incorrectly stop too soon
3099 		 * when stack slots are partially written.
3100 		 * This heuristic means that read propagation will be
3101 		 * conservative, since it will add reg_live_read marks
3102 		 * to stack slots all the way to the first state when a program
3103 		 * writes+reads less than 8 bytes.
3104 		 */
3105 		if (size == BPF_REG_SIZE)
3106 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3107 
3108 		/* when we zero initialize stack slots mark them as such */
3109 		if (reg && register_is_null(reg)) {
3110 			/* backtracking doesn't work for STACK_ZERO yet. */
3111 			err = mark_chain_precision(env, value_regno);
3112 			if (err)
3113 				return err;
3114 			type = STACK_ZERO;
3115 		}
3116 
3117 		/* Mark slots affected by this stack write. */
3118 		for (i = 0; i < size; i++)
3119 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
3120 				type;
3121 	}
3122 	return 0;
3123 }
3124 
3125 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3126  * known to contain a variable offset.
3127  * This function checks whether the write is permitted and conservatively
3128  * tracks the effects of the write, considering that each stack slot in the
3129  * dynamic range is potentially written to.
3130  *
3131  * 'off' includes 'regno->off'.
3132  * 'value_regno' can be -1, meaning that an unknown value is being written to
3133  * the stack.
3134  *
3135  * Spilled pointers in range are not marked as written because we don't know
3136  * what's going to be actually written. This means that read propagation for
3137  * future reads cannot be terminated by this write.
3138  *
3139  * For privileged programs, uninitialized stack slots are considered
3140  * initialized by this write (even though we don't know exactly what offsets
3141  * are going to be written to). The idea is that we don't want the verifier to
3142  * reject future reads that access slots written to through variable offsets.
3143  */
3144 static int check_stack_write_var_off(struct bpf_verifier_env *env,
3145 				     /* func where register points to */
3146 				     struct bpf_func_state *state,
3147 				     int ptr_regno, int off, int size,
3148 				     int value_regno, int insn_idx)
3149 {
3150 	struct bpf_func_state *cur; /* state of the current function */
3151 	int min_off, max_off;
3152 	int i, err;
3153 	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
3154 	bool writing_zero = false;
3155 	/* set if the fact that we're writing a zero is used to let any
3156 	 * stack slots remain STACK_ZERO
3157 	 */
3158 	bool zero_used = false;
3159 
3160 	cur = env->cur_state->frame[env->cur_state->curframe];
3161 	ptr_reg = &cur->regs[ptr_regno];
3162 	min_off = ptr_reg->smin_value + off;
3163 	max_off = ptr_reg->smax_value + off + size;
3164 	if (value_regno >= 0)
3165 		value_reg = &cur->regs[value_regno];
3166 	if (value_reg && register_is_null(value_reg))
3167 		writing_zero = true;
3168 
3169 	err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3170 	if (err)
3171 		return err;
3172 
3173 
3174 	/* Variable offset writes destroy any spilled pointers in range. */
3175 	for (i = min_off; i < max_off; i++) {
3176 		u8 new_type, *stype;
3177 		int slot, spi;
3178 
3179 		slot = -i - 1;
3180 		spi = slot / BPF_REG_SIZE;
3181 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3182 		mark_stack_slot_scratched(env, spi);
3183 
3184 		if (!env->allow_ptr_leaks
3185 				&& *stype != NOT_INIT
3186 				&& *stype != SCALAR_VALUE) {
3187 			/* Reject the write if there are spilled pointers in
3188 			 * range. If we didn't reject here, the ptr status
3189 			 * would be erased below (even though not all slots are
3190 			 * actually overwritten), possibly opening the door to
3191 			 * leaks.
3192 			 */
3193 			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3194 				insn_idx, i);
3195 			return -EINVAL;
3196 		}
3197 
3198 		/* Erase all spilled pointers. */
3199 		state->stack[spi].spilled_ptr.type = NOT_INIT;
3200 
3201 		/* Update the slot type. */
3202 		new_type = STACK_MISC;
3203 		if (writing_zero && *stype == STACK_ZERO) {
3204 			new_type = STACK_ZERO;
3205 			zero_used = true;
3206 		}
3207 		/* If the slot is STACK_INVALID, we check whether it's OK to
3208 		 * pretend that it will be initialized by this write. The slot
3209 		 * might not actually be written to, and so if we mark it as
3210 		 * initialized future reads might leak uninitialized memory.
3211 		 * initialized, future reads might leak uninitialized memory.
3212 		 * For privileged programs, we will accept such reads to slots
3213 		 * that may or may not be written because, if we rejected
3214 		 * them, the error would be too confusing.
3215 		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3216 			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3217 					insn_idx, i);
3218 			return -EINVAL;
3219 		}
3220 		*stype = new_type;
3221 	}
3222 	if (zero_used) {
3223 		/* backtracking doesn't work for STACK_ZERO yet. */
3224 		err = mark_chain_precision(env, value_regno);
3225 		if (err)
3226 			return err;
3227 	}
3228 	return 0;
3229 }
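/* Illustration, assuming r1 is a scalar known to be in [-16, -8]:
 *   r2 = r10
 *   r2 += r1
 *   *(u64 *)(r2 + 0) = r3
 * may touch anything in [fp-16, fp-1], so every slot byte in that window is
 * conservatively turned into STACK_MISC (or kept STACK_ZERO when a zero is
 * being written) and any spilled pointer in the window is invalidated;
 * unprivileged programs are rejected instead when such a pointer is in
 * range.
 */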
3230 
3231 /* When register 'dst_regno' is assigned some values from stack[min_off,
3232  * max_off), we set the register's type according to the types of the
3233  * respective stack slots. If all the stack values are known to be zeros, then
3234  * so is the destination reg. Otherwise, the register is considered to be
3235  * SCALAR. This function does not deal with register filling; the caller must
3236  * ensure that all spilled registers in the stack range have been marked as
3237  * read.
3238  */
3239 static void mark_reg_stack_read(struct bpf_verifier_env *env,
3240 				/* func where src register points to */
3241 				struct bpf_func_state *ptr_state,
3242 				int min_off, int max_off, int dst_regno)
3243 {
3244 	struct bpf_verifier_state *vstate = env->cur_state;
3245 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3246 	int i, slot, spi;
3247 	u8 *stype;
3248 	int zeros = 0;
3249 
3250 	for (i = min_off; i < max_off; i++) {
3251 		slot = -i - 1;
3252 		spi = slot / BPF_REG_SIZE;
3253 		stype = ptr_state->stack[spi].slot_type;
3254 		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3255 			break;
3256 		zeros++;
3257 	}
3258 	if (zeros == max_off - min_off) {
3259 		/* any access_size read into register is zero extended,
3260 		 * so the whole register == const_zero
3261 		 */
3262 		__mark_reg_const_zero(&state->regs[dst_regno]);
3263 		/* backtracking doesn't support STACK_ZERO yet,
3264 		 * so mark the register as precise here, so that later
3265 		 * backtracking can stop at this point.
3266 		 * Backtracking may not need this if this register
3267 		 * doesn't participate in pointer adjustment.
3268 		 * Forward propagation of precise flag is not
3269 		 * necessary either. This mark is only to stop
3270 		 * backtracking. Any register that contributed
3271 		 * to const 0 was marked precise before spill.
3272 		 */
3273 		state->regs[dst_regno].precise = true;
3274 	} else {
3275 		/* have read misc data from the stack */
3276 		mark_reg_unknown(env, state->regs, dst_regno);
3277 	}
3278 	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3279 }
3280 
3281 /* Read the stack at 'off' and put the results into the register indicated by
3282  * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3283  * spilled reg.
3284  *
3285  * 'dst_regno' can be -1, meaning that the read value is not going to a
3286  * register.
3287  *
3288  * The access is assumed to be within the current stack bounds.
3289  */
3290 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3291 				      /* func where src register points to */
3292 				      struct bpf_func_state *reg_state,
3293 				      int off, int size, int dst_regno)
3294 {
3295 	struct bpf_verifier_state *vstate = env->cur_state;
3296 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3297 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
3298 	struct bpf_reg_state *reg;
3299 	u8 *stype, type;
3300 
3301 	stype = reg_state->stack[spi].slot_type;
3302 	reg = &reg_state->stack[spi].spilled_ptr;
3303 
3304 	if (is_spilled_reg(&reg_state->stack[spi])) {
3305 		u8 spill_size = 1;
3306 
3307 		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3308 			spill_size++;
3309 
3310 		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
3311 			if (reg->type != SCALAR_VALUE) {
3312 				verbose_linfo(env, env->insn_idx, "; ");
3313 				verbose(env, "invalid size of register fill\n");
3314 				return -EACCES;
3315 			}
3316 
3317 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3318 			if (dst_regno < 0)
3319 				return 0;
3320 
3321 			if (!(off % BPF_REG_SIZE) && size == spill_size) {
3322 				/* The earlier check_reg_arg() has decided the
3323 				 * subreg_def for this insn.  Save it first.
3324 				 */
3325 				s32 subreg_def = state->regs[dst_regno].subreg_def;
3326 
3327 				state->regs[dst_regno] = *reg;
3328 				state->regs[dst_regno].subreg_def = subreg_def;
3329 			} else {
3330 				for (i = 0; i < size; i++) {
3331 					type = stype[(slot - i) % BPF_REG_SIZE];
3332 					if (type == STACK_SPILL)
3333 						continue;
3334 					if (type == STACK_MISC)
3335 						continue;
3336 					verbose(env, "invalid read from stack off %d+%d size %d\n",
3337 						off, i, size);
3338 					return -EACCES;
3339 				}
3340 				mark_reg_unknown(env, state->regs, dst_regno);
3341 			}
3342 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3343 			return 0;
3344 		}
3345 
3346 		if (dst_regno >= 0) {
3347 			/* restore register state from stack */
3348 			state->regs[dst_regno] = *reg;
3349 			/* mark reg as written since spilled pointer state likely
3350 			 * has its liveness marks cleared by is_state_visited()
3351 			 * which resets stack/reg liveness for state transitions
3352 			 */
3353 			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3354 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
3355 			/* If dst_regno==-1, the caller is asking us whether
3356 			 * it is acceptable to use this value as a SCALAR_VALUE
3357 			 * (e.g. for XADD).
3358 			 * We must not allow unprivileged callers to do that
3359 			 * with spilled pointers.
3360 			 */
3361 			verbose(env, "leaking pointer from stack off %d\n",
3362 				off);
3363 			return -EACCES;
3364 		}
3365 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3366 	} else {
3367 		for (i = 0; i < size; i++) {
3368 			type = stype[(slot - i) % BPF_REG_SIZE];
3369 			if (type == STACK_MISC)
3370 				continue;
3371 			if (type == STACK_ZERO)
3372 				continue;
3373 			verbose(env, "invalid read from stack off %d+%d size %d\n",
3374 				off, i, size);
3375 			return -EACCES;
3376 		}
3377 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3378 		if (dst_regno >= 0)
3379 			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
3380 	}
3381 	return 0;
3382 }
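
/* For illustration, a full-width fill of a spilled register looks like
 * (insn macros as used in selftests; the stack offsets are hypothetical):
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),  // spill R1 to fp-8
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),  // fill fp-8 into R2
 *
 * The 8-byte, 8-byte-aligned read restores R1's tracked state into R2.
 * A narrower read of the same slot (e.g. BPF_W) is only accepted when the
 * spilled value is a scalar; otherwise it fails with
 * "invalid size of register fill".
 */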
3383 
3384 enum bpf_access_src {
3385 	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
3386 	ACCESS_HELPER = 2,  /* the access is performed by a helper */
3387 };
3388 
3389 static int check_stack_range_initialized(struct bpf_verifier_env *env,
3390 					 int regno, int off, int access_size,
3391 					 bool zero_size_allowed,
3392 					 enum bpf_access_src type,
3393 					 struct bpf_call_arg_meta *meta);
3394 
3395 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3396 {
3397 	return cur_regs(env) + regno;
3398 }
3399 
3400 /* Read the stack at 'ptr_regno + off' and put the result into the register
3401  * 'dst_regno'.
3402  * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
3403  * but not its variable offset.
3404  * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3405  *
3406  * As opposed to check_stack_read_fixed_off, this function doesn't deal with
3407  * filling registers (i.e. reads of spilled register cannot be detected when
3408  * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3409  * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3410  * offset; for a fixed offset check_stack_read_fixed_off should be used
3411  * instead.
3412  */
3413 static int check_stack_read_var_off(struct bpf_verifier_env *env,
3414 				    int ptr_regno, int off, int size, int dst_regno)
3415 {
3416 	/* The state of the source register. */
3417 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3418 	struct bpf_func_state *ptr_state = func(env, reg);
3419 	int err;
3420 	int min_off, max_off;
3421 
3422 	/* Note that we pass a NULL meta, so raw access will not be permitted.
3423 	 */
3424 	err = check_stack_range_initialized(env, ptr_regno, off, size,
3425 					    false, ACCESS_DIRECT, NULL);
3426 	if (err)
3427 		return err;
3428 
3429 	min_off = reg->smin_value + off;
3430 	max_off = reg->smax_value + off;
3431 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3432 	return 0;
3433 }
3434 
3435 /* check_stack_read dispatches to check_stack_read_fixed_off or
3436  * check_stack_read_var_off.
3437  *
3438  * The caller must ensure that the offset falls within the allocated stack
3439  * bounds.
3440  *
3441  * 'dst_regno' is a register which will receive the value from the stack. It
3442  * can be -1, meaning that the read value is not going to a register.
3443  */
3444 static int check_stack_read(struct bpf_verifier_env *env,
3445 			    int ptr_regno, int off, int size,
3446 			    int dst_regno)
3447 {
3448 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3449 	struct bpf_func_state *state = func(env, reg);
3450 	int err;
3451 	/* Some accesses are only permitted with a static offset. */
3452 	bool var_off = !tnum_is_const(reg->var_off);
3453 
3454 	/* The offset is required to be static when reads don't go to a
3455 	 * register, in order to not leak pointers (see
3456 	 * check_stack_read_fixed_off).
3457 	 */
3458 	if (dst_regno < 0 && var_off) {
3459 		char tn_buf[48];
3460 
3461 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3462 		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
3463 			tn_buf, off, size);
3464 		return -EACCES;
3465 	}
3466 	/* Variable offset is prohibited for unprivileged mode for simplicity
3467 	 * since it requires corresponding support in Spectre masking for stack
3468 	 * ALU. See also retrieve_ptr_limit().
3469 	 */
3470 	if (!env->bypass_spec_v1 && var_off) {
3471 		char tn_buf[48];
3472 
3473 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3474 		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3475 				ptr_regno, tn_buf);
3476 		return -EACCES;
3477 	}
3478 
3479 	if (!var_off) {
3480 		off += reg->var_off.value;
3481 		err = check_stack_read_fixed_off(env, state, off, size,
3482 						 dst_regno);
3483 	} else {
3484 		/* Variable offset stack reads need more conservative handling
3485 		 * than fixed offset ones. Note that dst_regno >= 0 on this
3486 		 * branch.
3487 		 */
3488 		err = check_stack_read_var_off(env, ptr_regno, off, size,
3489 					       dst_regno);
3490 	}
3491 	return err;
3492 }
3493 
3494 
3495 /* check_stack_write dispatches to check_stack_write_fixed_off or
3496  * check_stack_write_var_off.
3497  *
3498  * 'ptr_regno' is the register used as a pointer into the stack.
3499  * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3500  * 'value_regno' is the register whose value we're writing to the stack. It can
3501  * be -1, meaning that we're not writing from a register.
3502  *
3503  * The caller must ensure that the offset falls within the maximum stack size.
3504  */
3505 static int check_stack_write(struct bpf_verifier_env *env,
3506 			     int ptr_regno, int off, int size,
3507 			     int value_regno, int insn_idx)
3508 {
3509 	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3510 	struct bpf_func_state *state = func(env, reg);
3511 	int err;
3512 
3513 	if (tnum_is_const(reg->var_off)) {
3514 		off += reg->var_off.value;
3515 		err = check_stack_write_fixed_off(env, state, off, size,
3516 						  value_regno, insn_idx);
3517 	} else {
3518 		/* Variable offset stack writes need more conservative handling
3519 		 * than fixed offset ones.
3520 		 */
3521 		err = check_stack_write_var_off(env, state,
3522 						ptr_regno, off, size,
3523 						value_regno, insn_idx);
3524 	}
3525 	return err;
3526 }
3527 
3528 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3529 				 int off, int size, enum bpf_access_type type)
3530 {
3531 	struct bpf_reg_state *regs = cur_regs(env);
3532 	struct bpf_map *map = regs[regno].map_ptr;
3533 	u32 cap = bpf_map_flags_to_cap(map);
3534 
3535 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3536 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3537 			map->value_size, off, size);
3538 		return -EACCES;
3539 	}
3540 
3541 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3542 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3543 			map->value_size, off, size);
3544 		return -EACCES;
3545 	}
3546 
3547 	return 0;
3548 }
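
/* For illustration only: a map declared read-only for the program side
 * (BTF-style libbpf definition; the map name is hypothetical):
 *
 *    struct {
 *            __uint(type, BPF_MAP_TYPE_ARRAY);
 *            __uint(max_entries, 1);
 *            __type(key, __u32);
 *            __type(value, __u64);
 *            __uint(map_flags, BPF_F_RDONLY_PROG);
 *    } ro_cfg SEC(".maps");
 *
 * bpf_map_flags_to_cap() drops BPF_MAP_CAN_WRITE for such a map, so any
 * BPF_WRITE into its value is rejected above with "write into map forbidden".
 */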
3549 
3550 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
3551 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
3552 			      int off, int size, u32 mem_size,
3553 			      bool zero_size_allowed)
3554 {
3555 	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
3556 	struct bpf_reg_state *reg;
3557 
3558 	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
3559 		return 0;
3560 
3561 	reg = &cur_regs(env)[regno];
3562 	switch (reg->type) {
3563 	case PTR_TO_MAP_KEY:
3564 		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
3565 			mem_size, off, size);
3566 		break;
3567 	case PTR_TO_MAP_VALUE:
3568 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
3569 			mem_size, off, size);
3570 		break;
3571 	case PTR_TO_PACKET:
3572 	case PTR_TO_PACKET_META:
3573 	case PTR_TO_PACKET_END:
3574 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
3575 			off, size, regno, reg->id, off, mem_size);
3576 		break;
3577 	case PTR_TO_MEM:
3578 	default:
3579 		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
3580 			mem_size, off, size);
3581 	}
3582 
3583 	return -EACCES;
3584 }
3585 
3586 /* check read/write into a memory region with possible variable offset */
3587 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
3588 				   int off, int size, u32 mem_size,
3589 				   bool zero_size_allowed)
3590 {
3591 	struct bpf_verifier_state *vstate = env->cur_state;
3592 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3593 	struct bpf_reg_state *reg = &state->regs[regno];
3594 	int err;
3595 
3596 	/* We may have adjusted the register pointing to memory region, so we
3597 	 * need to try adding each of min_value and max_value to off
3598 	 * to make sure our theoretical access will be safe.
3599 	 *
3600 	 * The minimum value is only important with signed
3601 	 * comparisons where we can't assume the floor of a
3602 	 * value is 0.  If we are using signed variables for our
3603 	 * indexes we need to make sure that whatever we use
3604 	 * will have a set floor within our range.
3605 	 */
3606 	if (reg->smin_value < 0 &&
3607 	    (reg->smin_value == S64_MIN ||
3608 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3609 	      reg->smin_value + off < 0)) {
3610 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3611 			regno);
3612 		return -EACCES;
3613 	}
3614 	err = __check_mem_access(env, regno, reg->smin_value + off, size,
3615 				 mem_size, zero_size_allowed);
3616 	if (err) {
3617 		verbose(env, "R%d min value is outside of the allowed memory range\n",
3618 			regno);
3619 		return err;
3620 	}
3621 
3622 	/* If we haven't set a max value then we need to bail since we can't be
3623 	 * sure we won't do bad things.
3624 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
3625 	 */
3626 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
3627 		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
3628 			regno);
3629 		return -EACCES;
3630 	}
3631 	err = __check_mem_access(env, regno, reg->umax_value + off, size,
3632 				 mem_size, zero_size_allowed);
3633 	if (err) {
3634 		verbose(env, "R%d max value is outside of the allowed memory range\n",
3635 			regno);
3636 		return err;
3637 	}
3638 
3639 	return 0;
3640 }
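
/* Worked example (hypothetical numbers): with mem_size == 16, off == 4,
 * size == 4 and a register known to be in [0, 12], the min-side probe
 * covers bytes [4, 8) and passes, but the max-side probe covers [16, 20)
 * and fails, so the whole access is rejected as potentially out of range.
 */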
3641 
3642 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
3643 			       const struct bpf_reg_state *reg, int regno,
3644 			       bool fixed_off_ok)
3645 {
3646 	/* Access to this pointer-typed register or passing it to a helper
3647 	 * is only allowed in its original, unmodified form.
3648 	 */
3649 
3650 	if (reg->off < 0) {
3651 		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
3652 			reg_type_str(env, reg->type), regno, reg->off);
3653 		return -EACCES;
3654 	}
3655 
3656 	if (!fixed_off_ok && reg->off) {
3657 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
3658 			reg_type_str(env, reg->type), regno, reg->off);
3659 		return -EACCES;
3660 	}
3661 
3662 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3663 		char tn_buf[48];
3664 
3665 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3666 		verbose(env, "variable %s access var_off=%s disallowed\n",
3667 			reg_type_str(env, reg->type), tn_buf);
3668 		return -EACCES;
3669 	}
3670 
3671 	return 0;
3672 }
3673 
3674 int check_ptr_off_reg(struct bpf_verifier_env *env,
3675 		      const struct bpf_reg_state *reg, int regno)
3676 {
3677 	return __check_ptr_off_reg(env, reg, regno, false);
3678 }
3679 
3680 static int map_kptr_match_type(struct bpf_verifier_env *env,
3681 			       struct bpf_map_value_off_desc *off_desc,
3682 			       struct bpf_reg_state *reg, u32 regno)
3683 {
3684 	const char *targ_name = kernel_type_name(off_desc->kptr.btf, off_desc->kptr.btf_id);
3685 	int perm_flags = PTR_MAYBE_NULL;
3686 	const char *reg_name = "";
3687 
3688 	/* Only unreferenced case accepts untrusted pointers */
3689 	if (off_desc->type == BPF_KPTR_UNREF)
3690 		perm_flags |= PTR_UNTRUSTED;
3691 
3692 	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
3693 		goto bad_type;
3694 
3695 	if (!btf_is_kernel(reg->btf)) {
3696 		verbose(env, "R%d must point to kernel BTF\n", regno);
3697 		return -EINVAL;
3698 	}
3699 	/* We need to verify reg->type and reg->btf before accessing reg->btf_id */
3700 	reg_name = kernel_type_name(reg->btf, reg->btf_id);
3701 
3702 	/* For ref_ptr case, release function check should ensure we get one
3703 	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
3704 	 * normal store of unreferenced kptr, we must ensure var_off is zero.
3705 	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
3706 	 * reg->off and reg->ref_obj_id are not needed here.
3707 	 */
3708 	if (__check_ptr_off_reg(env, reg, regno, true))
3709 		return -EACCES;
3710 
3711 	/* A full type match is needed, as BTF can be vmlinux or module BTF, and
3712 	 * we also need to take into account the reg->off.
3713 	 *
3714 	 * We want to support cases like:
3715 	 *
3716 	 * struct foo {
3717 	 *         struct bar br;
3718 	 *         struct baz bz;
3719 	 * };
3720 	 *
3721 	 * struct foo *v;
3722 	 * v = func();	      // PTR_TO_BTF_ID
3723 	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
3724 	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
3725 	 *                    // first member type of struct after comparison fails
3726 	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
3727 	 *                    // to match type
3728 	 *
3729 	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
3730 	 * is zero. We must also ensure that btf_struct_ids_match does not walk
3731 	 * the struct to match type against first member of struct, i.e. reject
3732 	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
3733 	 * strict mode to true for type match.
3734 	 */
3735 	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
3736 				  off_desc->kptr.btf, off_desc->kptr.btf_id,
3737 				  off_desc->type == BPF_KPTR_REF))
3738 		goto bad_type;
3739 	return 0;
3740 bad_type:
3741 	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
3742 		reg_type_str(env, reg->type), reg_name);
3743 	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
3744 	if (off_desc->type == BPF_KPTR_UNREF)
3745 		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
3746 			targ_name);
3747 	else
3748 		verbose(env, "\n");
3749 	return -EINVAL;
3750 }
3751 
3752 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
3753 				 int value_regno, int insn_idx,
3754 				 struct bpf_map_value_off_desc *off_desc)
3755 {
3756 	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3757 	int class = BPF_CLASS(insn->code);
3758 	struct bpf_reg_state *val_reg;
3759 
3760 	/* Things we already checked for in check_map_access and caller:
3761 	 *  - Reject cases where variable offset may touch kptr
3762 	 *  - size of access (must be BPF_DW)
3763 	 *  - tnum_is_const(reg->var_off)
3764 	 *  - off_desc->offset == off + reg->var_off.value
3765 	 */
3766 	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
3767 	if (BPF_MODE(insn->code) != BPF_MEM) {
3768 		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
3769 		return -EACCES;
3770 	}
3771 
3772 	/* We only allow loading referenced kptr, since it will be marked as
3773 	 * untrusted, similar to unreferenced kptr.
3774 	 */
3775 	if (class != BPF_LDX && off_desc->type == BPF_KPTR_REF) {
3776 		verbose(env, "store to referenced kptr disallowed\n");
3777 		return -EACCES;
3778 	}
3779 
3780 	if (class == BPF_LDX) {
3781 		val_reg = reg_state(env, value_regno);
3782 		/* We can simply mark the value_regno receiving the pointer
3783 		 * value from map as PTR_TO_BTF_ID, with the correct type.
3784 		 */
3785 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, off_desc->kptr.btf,
3786 				off_desc->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED);
3787 		/* For mark_ptr_or_null_reg */
3788 		val_reg->id = ++env->id_gen;
3789 	} else if (class == BPF_STX) {
3790 		val_reg = reg_state(env, value_regno);
3791 		if (!register_is_null(val_reg) &&
3792 		    map_kptr_match_type(env, off_desc, val_reg, value_regno))
3793 			return -EACCES;
3794 	} else if (class == BPF_ST) {
3795 		if (insn->imm) {
3796 			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
3797 				off_desc->offset);
3798 			return -EACCES;
3799 		}
3800 	} else {
3801 		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
3802 		return -EACCES;
3803 	}
3804 	return 0;
3805 }
3806 
3807 /* check read/write into a map element with possible variable offset */
3808 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
3809 			    int off, int size, bool zero_size_allowed,
3810 			    enum bpf_access_src src)
3811 {
3812 	struct bpf_verifier_state *vstate = env->cur_state;
3813 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3814 	struct bpf_reg_state *reg = &state->regs[regno];
3815 	struct bpf_map *map = reg->map_ptr;
3816 	int err;
3817 
3818 	err = check_mem_region_access(env, regno, off, size, map->value_size,
3819 				      zero_size_allowed);
3820 	if (err)
3821 		return err;
3822 
3823 	if (map_value_has_spin_lock(map)) {
3824 		u32 lock = map->spin_lock_off;
3825 
3826 		/* if any part of struct bpf_spin_lock can be touched by
3827 		 * load/store reject this program.
3828 		 * To check that [x1, x2) overlaps with [y1, y2)
3829 		 * it is sufficient to check x1 < y2 && y1 < x2.
3830 		 */
3831 		if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
3832 		     lock < reg->umax_value + off + size) {
3833 			verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
3834 			return -EACCES;
3835 		}
3836 	}
3837 	if (map_value_has_timer(map)) {
3838 		u32 t = map->timer_off;
3839 
3840 		if (reg->smin_value + off < t + sizeof(struct bpf_timer) &&
3841 		     t < reg->umax_value + off + size) {
3842 			verbose(env, "bpf_timer cannot be accessed directly by load/store\n");
3843 			return -EACCES;
3844 		}
3845 	}
3846 	if (map_value_has_kptrs(map)) {
3847 		struct bpf_map_value_off *tab = map->kptr_off_tab;
3848 		int i;
3849 
3850 		for (i = 0; i < tab->nr_off; i++) {
3851 			u32 p = tab->off[i].offset;
3852 
3853 			if (reg->smin_value + off < p + sizeof(u64) &&
3854 			    p < reg->umax_value + off + size) {
3855 				if (src != ACCESS_DIRECT) {
3856 					verbose(env, "kptr cannot be accessed indirectly by helper\n");
3857 					return -EACCES;
3858 				}
3859 				if (!tnum_is_const(reg->var_off)) {
3860 					verbose(env, "kptr access cannot have variable offset\n");
3861 					return -EACCES;
3862 				}
3863 				if (p != off + reg->var_off.value) {
3864 					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
3865 						p, off + reg->var_off.value);
3866 					return -EACCES;
3867 				}
3868 				if (size != bpf_size_to_bytes(BPF_DW)) {
3869 					verbose(env, "kptr access size must be BPF_DW\n");
3870 					return -EACCES;
3871 				}
3872 				break;
3873 			}
3874 		}
3875 	}
3876 	return err;
3877 }
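
/* Worked example of the overlap rule used above (hypothetical numbers):
 * an access covering value bytes [x1, x2) = [8, 16) and a bpf_spin_lock at
 * [y1, y2) = [12, 16) overlap because 8 < 16 && 12 < 16, so the load/store
 * is rejected; an access at [0, 8) does not overlap since 12 < 8 is false.
 */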
3878 
3879 #define MAX_PACKET_OFF 0xffff
3880 
3881 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3882 				       const struct bpf_call_arg_meta *meta,
3883 				       enum bpf_access_type t)
3884 {
3885 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
3886 
3887 	switch (prog_type) {
3888 	/* Program types only with direct read access go here! */
3889 	case BPF_PROG_TYPE_LWT_IN:
3890 	case BPF_PROG_TYPE_LWT_OUT:
3891 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3892 	case BPF_PROG_TYPE_SK_REUSEPORT:
3893 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3894 	case BPF_PROG_TYPE_CGROUP_SKB:
3895 		if (t == BPF_WRITE)
3896 			return false;
3897 		fallthrough;
3898 
3899 	/* Program types with direct read + write access go here! */
3900 	case BPF_PROG_TYPE_SCHED_CLS:
3901 	case BPF_PROG_TYPE_SCHED_ACT:
3902 	case BPF_PROG_TYPE_XDP:
3903 	case BPF_PROG_TYPE_LWT_XMIT:
3904 	case BPF_PROG_TYPE_SK_SKB:
3905 	case BPF_PROG_TYPE_SK_MSG:
3906 		if (meta)
3907 			return meta->pkt_access;
3908 
3909 		env->seen_direct_write = true;
3910 		return true;
3911 
3912 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3913 		if (t == BPF_WRITE)
3914 			env->seen_direct_write = true;
3915 
3916 		return true;
3917 
3918 	default:
3919 		return false;
3920 	}
3921 }
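
/* A sketch of the program-side pattern gated here, for a program type with
 * read+write access (e.g. sched_cls; error handling is abridged):
 *
 *    void *data = (void *)(long)skb->data;
 *    void *data_end = (void *)(long)skb->data_end;
 *    struct ethhdr *eth = data;
 *
 *    if (data + sizeof(*eth) > data_end)
 *            return TC_ACT_SHOT;
 *    eth->h_proto = bpf_htons(ETH_P_IP);  // write allowed only for the
 *                                         // read+write program types above
 */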
3922 
3923 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
3924 			       int size, bool zero_size_allowed)
3925 {
3926 	struct bpf_reg_state *regs = cur_regs(env);
3927 	struct bpf_reg_state *reg = &regs[regno];
3928 	int err;
3929 
3930 	/* We may have added a variable offset to the packet pointer; but any
3931 	 * reg->range we have comes after that.  We are only checking the fixed
3932 	 * offset.
3933 	 */
3934 
3935 	/* We don't allow negative numbers, because we aren't tracking enough
3936 	 * detail to prove they're safe.
3937 	 */
3938 	if (reg->smin_value < 0) {
3939 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3940 			regno);
3941 		return -EACCES;
3942 	}
3943 
3944 	err = reg->range < 0 ? -EINVAL :
3945 	      __check_mem_access(env, regno, off, size, reg->range,
3946 				 zero_size_allowed);
3947 	if (err) {
3948 		verbose(env, "R%d offset is outside of the packet\n", regno);
3949 		return err;
3950 	}
3951 
3952 	/* __check_mem_access has made sure "off + size - 1" is within u16.
3953 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
3954 	 * otherwise find_good_pkt_pointers would have refused to set range info,
3955 	 * in which case __check_mem_access would have rejected this pkt access.
3956 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
3957 	 */
3958 	env->prog->aux->max_pkt_offset =
3959 		max_t(u32, env->prog->aux->max_pkt_offset,
3960 		      off + reg->umax_value + size - 1);
3961 
3962 	return err;
3963 }
3964 
3965 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
3966 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
3967 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
3968 			    struct btf **btf, u32 *btf_id)
3969 {
3970 	struct bpf_insn_access_aux info = {
3971 		.reg_type = *reg_type,
3972 		.log = &env->log,
3973 	};
3974 
3975 	if (env->ops->is_valid_access &&
3976 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
3977 		/* A non zero info.ctx_field_size indicates that this field is a
3978 		 * candidate for later verifier transformation to load the whole
3979 		 * field and then apply a mask when accessed with a narrower
3980 		 * access than actual ctx access size. A zero info.ctx_field_size
3981 		 * will only allow for whole field access and rejects any other
3982 		 * type of narrower access.
3983 		 */
3984 		*reg_type = info.reg_type;
3985 
3986 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
3987 			*btf = info.btf;
3988 			*btf_id = info.btf_id;
3989 		} else {
3990 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
3991 		}
3992 		/* remember the offset of last byte accessed in ctx */
3993 		if (env->prog->aux->max_ctx_offset < off + size)
3994 			env->prog->aux->max_ctx_offset = off + size;
3995 		return 0;
3996 	}
3997 
3998 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
3999 	return -EACCES;
4000 }
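
/* Example of a narrow ctx access that relies on info.ctx_field_size
 * (illustrative): loading a single byte of the 4-byte skb->mark field is
 * allowed here, and a later verifier pass rewrites it into a full 4-byte
 * load of the field followed by a shift/mask down to the requested byte.
 */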
4001 
4002 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
4003 				  int size)
4004 {
4005 	if (size < 0 || off < 0 ||
4006 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
4007 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
4008 			off, size);
4009 		return -EACCES;
4010 	}
4011 	return 0;
4012 }
4013 
4014 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
4015 			     u32 regno, int off, int size,
4016 			     enum bpf_access_type t)
4017 {
4018 	struct bpf_reg_state *regs = cur_regs(env);
4019 	struct bpf_reg_state *reg = &regs[regno];
4020 	struct bpf_insn_access_aux info = {};
4021 	bool valid;
4022 
4023 	if (reg->smin_value < 0) {
4024 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4025 			regno);
4026 		return -EACCES;
4027 	}
4028 
4029 	switch (reg->type) {
4030 	case PTR_TO_SOCK_COMMON:
4031 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4032 		break;
4033 	case PTR_TO_SOCKET:
4034 		valid = bpf_sock_is_valid_access(off, size, t, &info);
4035 		break;
4036 	case PTR_TO_TCP_SOCK:
4037 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4038 		break;
4039 	case PTR_TO_XDP_SOCK:
4040 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4041 		break;
4042 	default:
4043 		valid = false;
4044 	}
4045 
4046 
4047 	if (valid) {
4048 		env->insn_aux_data[insn_idx].ctx_field_size =
4049 			info.ctx_field_size;
4050 		return 0;
4051 	}
4052 
4053 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
4054 		regno, reg_type_str(env, reg->type), off, size);
4055 
4056 	return -EACCES;
4057 }
4058 
4059 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4060 {
4061 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4062 }
4063 
4064 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4065 {
4066 	const struct bpf_reg_state *reg = reg_state(env, regno);
4067 
4068 	return reg->type == PTR_TO_CTX;
4069 }
4070 
4071 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4072 {
4073 	const struct bpf_reg_state *reg = reg_state(env, regno);
4074 
4075 	return type_is_sk_pointer(reg->type);
4076 }
4077 
4078 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4079 {
4080 	const struct bpf_reg_state *reg = reg_state(env, regno);
4081 
4082 	return type_is_pkt_pointer(reg->type);
4083 }
4084 
4085 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4086 {
4087 	const struct bpf_reg_state *reg = reg_state(env, regno);
4088 
4089 	/* Separate from is_ctx_reg() since we still want to allow BPF_ST here. */
4090 	return reg->type == PTR_TO_FLOW_KEYS;
4091 }
4092 
4093 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4094 				   const struct bpf_reg_state *reg,
4095 				   int off, int size, bool strict)
4096 {
4097 	struct tnum reg_off;
4098 	int ip_align;
4099 
4100 	/* Byte size accesses are always allowed. */
4101 	if (!strict || size == 1)
4102 		return 0;
4103 
4104 	/* For platforms that do not have a Kconfig enabling
4105 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4106 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
4107 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4108 	 * to this code only in strict mode where we want to emulate
4109 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
4110 	 * unconditional IP align value of '2'.
4111 	 */
4112 	ip_align = 2;
4113 
4114 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4115 	if (!tnum_is_aligned(reg_off, size)) {
4116 		char tn_buf[48];
4117 
4118 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4119 		verbose(env,
4120 			"misaligned packet access off %d+%s+%d+%d size %d\n",
4121 			ip_align, tn_buf, reg->off, off, size);
4122 		return -EACCES;
4123 	}
4124 
4125 	return 0;
4126 }
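
/* Worked example under the emulated NET_IP_ALIGN == 2 (hypothetical
 * offsets): a 4-byte load at pkt + 14, i.e. just past the Ethernet header,
 * is accepted because 2 + 14 = 16 is 4-byte aligned, while the same load at
 * pkt + 13 is rejected as misaligned since 2 + 13 = 15 is not.
 */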
4127 
4128 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
4129 				       const struct bpf_reg_state *reg,
4130 				       const char *pointer_desc,
4131 				       int off, int size, bool strict)
4132 {
4133 	struct tnum reg_off;
4134 
4135 	/* Byte size accesses are always allowed. */
4136 	if (!strict || size == 1)
4137 		return 0;
4138 
4139 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
4140 	if (!tnum_is_aligned(reg_off, size)) {
4141 		char tn_buf[48];
4142 
4143 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4144 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
4145 			pointer_desc, tn_buf, reg->off, off, size);
4146 		return -EACCES;
4147 	}
4148 
4149 	return 0;
4150 }
4151 
4152 static int check_ptr_alignment(struct bpf_verifier_env *env,
4153 			       const struct bpf_reg_state *reg, int off,
4154 			       int size, bool strict_alignment_once)
4155 {
4156 	bool strict = env->strict_alignment || strict_alignment_once;
4157 	const char *pointer_desc = "";
4158 
4159 	switch (reg->type) {
4160 	case PTR_TO_PACKET:
4161 	case PTR_TO_PACKET_META:
4162 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
4163 		 * right in front, treat it the very same way.
4164 		 */
4165 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
4166 	case PTR_TO_FLOW_KEYS:
4167 		pointer_desc = "flow keys ";
4168 		break;
4169 	case PTR_TO_MAP_KEY:
4170 		pointer_desc = "key ";
4171 		break;
4172 	case PTR_TO_MAP_VALUE:
4173 		pointer_desc = "value ";
4174 		break;
4175 	case PTR_TO_CTX:
4176 		pointer_desc = "context ";
4177 		break;
4178 	case PTR_TO_STACK:
4179 		pointer_desc = "stack ";
4180 		/* The stack spill tracking logic in check_stack_write_fixed_off()
4181 		 * and check_stack_read_fixed_off() relies on stack accesses being
4182 		 * aligned.
4183 		 */
4184 		strict = true;
4185 		break;
4186 	case PTR_TO_SOCKET:
4187 		pointer_desc = "sock ";
4188 		break;
4189 	case PTR_TO_SOCK_COMMON:
4190 		pointer_desc = "sock_common ";
4191 		break;
4192 	case PTR_TO_TCP_SOCK:
4193 		pointer_desc = "tcp_sock ";
4194 		break;
4195 	case PTR_TO_XDP_SOCK:
4196 		pointer_desc = "xdp_sock ";
4197 		break;
4198 	default:
4199 		break;
4200 	}
4201 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
4202 					   strict);
4203 }
4204 
4205 static int update_stack_depth(struct bpf_verifier_env *env,
4206 			      const struct bpf_func_state *func,
4207 			      int off)
4208 {
4209 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
4210 
4211 	if (stack >= -off)
4212 		return 0;
4213 
4214 	/* update known max for given subprogram */
4215 	env->subprog_info[func->subprogno].stack_depth = -off;
4216 	return 0;
4217 }
4218 
4219 /* starting from main bpf function walk all instructions of the function
4220  * and recursively walk all callees that the given function can call.
4221  * Ignore jump and exit insns.
4222  * Since recursion is prevented by check_cfg() this algorithm
4223  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
4224  */
4225 static int check_max_stack_depth(struct bpf_verifier_env *env)
4226 {
4227 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
4228 	struct bpf_subprog_info *subprog = env->subprog_info;
4229 	struct bpf_insn *insn = env->prog->insnsi;
4230 	bool tail_call_reachable = false;
4231 	int ret_insn[MAX_CALL_FRAMES];
4232 	int ret_prog[MAX_CALL_FRAMES];
4233 	int j;
4234 
4235 process_func:
4236 	/* protect against potential stack overflow that might happen when
4237 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
4238 	 * depth for such case down to 256 so that the worst case scenario
4239 	 * would result in 8k stack size (32 which is tailcall limit * 256 =
4240 	 * 8k).
4241 	 *
4242 	 * To get the idea what might happen, see an example:
4243 	 * func1 -> sub rsp, 128
4244 	 *  subfunc1 -> sub rsp, 256
4245 	 *  tailcall1 -> add rsp, 256
4246 	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
4247 	 *   subfunc2 -> sub rsp, 64
4248 	 *   subfunc22 -> sub rsp, 128
4249 	 *   tailcall2 -> add rsp, 128
4250 	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
4251 	 *
4252 	 * tailcall will unwind the current stack frame but it will not get rid
4253 	 * of caller's stack as shown on the example above.
4254 	 */
4255 	if (idx && subprog[idx].has_tail_call && depth >= 256) {
4256 		verbose(env,
4257 			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
4258 			depth);
4259 		return -EACCES;
4260 	}
4261 	/* round up to 32 bytes, since this is the granularity
4262 	 * of the interpreter stack size
4263 	 */
4264 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4265 	if (depth > MAX_BPF_STACK) {
4266 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
4267 			frame + 1, depth);
4268 		return -EACCES;
4269 	}
4270 continue_func:
4271 	subprog_end = subprog[idx + 1].start;
4272 	for (; i < subprog_end; i++) {
4273 		int next_insn;
4274 
4275 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
4276 			continue;
4277 		/* remember insn and function to return to */
4278 		ret_insn[frame] = i + 1;
4279 		ret_prog[frame] = idx;
4280 
4281 		/* find the callee */
4282 		next_insn = i + insn[i].imm + 1;
4283 		idx = find_subprog(env, next_insn);
4284 		if (idx < 0) {
4285 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4286 				  next_insn);
4287 			return -EFAULT;
4288 		}
4289 		if (subprog[idx].is_async_cb) {
4290 			if (subprog[idx].has_tail_call) {
4291 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
4292 				return -EFAULT;
4293 			}
4294 			 /* async callbacks don't increase bpf prog stack size */
4295 			continue;
4296 		}
4297 		i = next_insn;
4298 
4299 		if (subprog[idx].has_tail_call)
4300 			tail_call_reachable = true;
4301 
4302 		frame++;
4303 		if (frame >= MAX_CALL_FRAMES) {
4304 			verbose(env, "the call stack of %d frames is too deep !\n",
4305 				frame);
4306 			return -E2BIG;
4307 		}
4308 		goto process_func;
4309 	}
4310 	/* if tail call got detected across bpf2bpf calls then mark each of the
4311 	 * currently present subprog frames as tail call reachable subprogs;
4312 	 * this info will be utilized by JIT so that we will be preserving the
4313 	 * tail call counter throughout bpf2bpf calls combined with tailcalls
4314 	 */
4315 	if (tail_call_reachable)
4316 		for (j = 0; j < frame; j++)
4317 			subprog[ret_prog[j]].tail_call_reachable = true;
4318 	if (subprog[0].tail_call_reachable)
4319 		env->prog->aux->tail_call_reachable = true;
4320 
4321 	/* end of for() loop means the last insn of the 'subprog'
4322 	 * was reached. Doesn't matter whether it was JA or EXIT
4323 	 */
4324 	if (frame == 0)
4325 		return 0;
4326 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
4327 	frame--;
4328 	i = ret_insn[frame];
4329 	idx = ret_prog[frame];
4330 	goto continue_func;
4331 }
4332 
4333 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
4334 static int get_callee_stack_depth(struct bpf_verifier_env *env,
4335 				  const struct bpf_insn *insn, int idx)
4336 {
4337 	int start = idx + insn->imm + 1, subprog;
4338 
4339 	subprog = find_subprog(env, start);
4340 	if (subprog < 0) {
4341 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4342 			  start);
4343 		return -EFAULT;
4344 	}
4345 	return env->subprog_info[subprog].stack_depth;
4346 }
4347 #endif
4348 
4349 static int __check_buffer_access(struct bpf_verifier_env *env,
4350 				 const char *buf_info,
4351 				 const struct bpf_reg_state *reg,
4352 				 int regno, int off, int size)
4353 {
4354 	if (off < 0) {
4355 		verbose(env,
4356 			"R%d invalid %s buffer access: off=%d, size=%d\n",
4357 			regno, buf_info, off, size);
4358 		return -EACCES;
4359 	}
4360 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4361 		char tn_buf[48];
4362 
4363 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4364 		verbose(env,
4365 			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
4366 			regno, off, tn_buf);
4367 		return -EACCES;
4368 	}
4369 
4370 	return 0;
4371 }
4372 
4373 static int check_tp_buffer_access(struct bpf_verifier_env *env,
4374 				  const struct bpf_reg_state *reg,
4375 				  int regno, int off, int size)
4376 {
4377 	int err;
4378 
4379 	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4380 	if (err)
4381 		return err;
4382 
4383 	if (off + size > env->prog->aux->max_tp_access)
4384 		env->prog->aux->max_tp_access = off + size;
4385 
4386 	return 0;
4387 }
4388 
4389 static int check_buffer_access(struct bpf_verifier_env *env,
4390 			       const struct bpf_reg_state *reg,
4391 			       int regno, int off, int size,
4392 			       bool zero_size_allowed,
4393 			       u32 *max_access)
4394 {
4395 	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
4396 	int err;
4397 
4398 	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4399 	if (err)
4400 		return err;
4401 
4402 	if (off + size > *max_access)
4403 		*max_access = off + size;
4404 
4405 	return 0;
4406 }
4407 
4408 /* BPF architecture zero extends alu32 ops into 64-bit registesr */
4409 /* BPF architecture zero-extends alu32 ops into 64-bit registers */
4410 {
4411 	reg->var_off = tnum_subreg(reg->var_off);
4412 	__reg_assign_32_into_64(reg);
4413 }
4414 
4415 /* truncate register to smaller size (in bytes)
4416  * must be called with size < BPF_REG_SIZE
4417  */
4418 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4419 {
4420 	u64 mask;
4421 
4422 	/* clear high bits in bit representation */
4423 	reg->var_off = tnum_cast(reg->var_off, size);
4424 
4425 	/* fix arithmetic bounds */
4426 	mask = ((u64)1 << (size * 8)) - 1;
4427 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4428 		reg->umin_value &= mask;
4429 		reg->umax_value &= mask;
4430 	} else {
4431 		reg->umin_value = 0;
4432 		reg->umax_value = mask;
4433 	}
4434 	reg->smin_value = reg->umin_value;
4435 	reg->smax_value = reg->umax_value;
4436 
4437 	/* If the size is smaller than the 32-bit register, the 32-bit register
4438 	 * values are also truncated, so we push the 64-bit bounds into the
4439 	 * 32-bit bounds. The bounds above were already truncated to < 32 bits.
4440 	 */
4441 	if (size >= 4)
4442 		return;
4443 	__reg_combine_64_into_32(reg);
4444 }
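
/* Example (illustrative numbers): coercing a register known to be in
 * [0, 0x1ffff] down to 2 bytes cannot keep the old bounds, since the upper
 * bits of umin and umax differ; the bounds collapse to [0, 0xffff]. A
 * register already within [0, 200] coerced to 1 byte keeps [0, 200].
 */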
4445 
4446 static bool bpf_map_is_rdonly(const struct bpf_map *map)
4447 {
4448 	/* A map is considered read-only if the following conditions are true:
4449 	 *
4450 	 * 1) The BPF program side cannot change any of the map content. The
4451 	 *    BPF_F_RDONLY_PROG flag was set at map creation time and stays in
4452 	 *    effect for the map's entire lifetime.
4453 	 * 2) The map value(s) have been initialized from user space by a
4454 	 *    loader and then "frozen", such that no new map update/delete
4455 	 *    operations from syscall side are possible for the rest of
4456 	 *    the map's lifetime from that point onwards.
4457 	 * 3) Any parallel/pending map update/delete operations from syscall
4458 	 *    side have been completed. Only after that point, it's safe to
4459 	 *    assume that map value(s) are immutable.
4460 	 */
4461 	return (map->map_flags & BPF_F_RDONLY_PROG) &&
4462 	       READ_ONCE(map->frozen) &&
4463 	       !bpf_map_write_active(map);
4464 }
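
/* User-space side of making a map qualify as read-only, sketched with
 * libbpf (names and sizes are hypothetical, error handling omitted):
 *
 *    LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_RDONLY_PROG);
 *    int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "cfg", 4, 8, 1, &opts);
 *
 *    bpf_map_update_elem(fd, &key, &val, BPF_ANY);  // populate once
 *    bpf_map_freeze(fd);               // no more syscall-side writes
 *
 * Once any in-flight syscall writers drain, bpf_map_write_active() returns
 * false and loads from the map value can be tracked as known scalars below.
 */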
4465 
4466 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4467 {
4468 	void *ptr;
4469 	u64 addr;
4470 	int err;
4471 
4472 	err = map->ops->map_direct_value_addr(map, &addr, off);
4473 	if (err)
4474 		return err;
4475 	ptr = (void *)(long)addr + off;
4476 
4477 	switch (size) {
4478 	case sizeof(u8):
4479 		*val = (u64)*(u8 *)ptr;
4480 		break;
4481 	case sizeof(u16):
4482 		*val = (u64)*(u16 *)ptr;
4483 		break;
4484 	case sizeof(u32):
4485 		*val = (u64)*(u32 *)ptr;
4486 		break;
4487 	case sizeof(u64):
4488 		*val = *(u64 *)ptr;
4489 		break;
4490 	default:
4491 		return -EINVAL;
4492 	}
4493 	return 0;
4494 }
4495 
4496 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4497 				   struct bpf_reg_state *regs,
4498 				   int regno, int off, int size,
4499 				   enum bpf_access_type atype,
4500 				   int value_regno)
4501 {
4502 	struct bpf_reg_state *reg = regs + regno;
4503 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4504 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
4505 	enum bpf_type_flag flag = 0;
4506 	u32 btf_id;
4507 	int ret;
4508 
4509 	if (off < 0) {
4510 		verbose(env,
4511 			"R%d is ptr_%s invalid negative access: off=%d\n",
4512 			regno, tname, off);
4513 		return -EACCES;
4514 	}
4515 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4516 		char tn_buf[48];
4517 
4518 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4519 		verbose(env,
4520 			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
4521 			regno, tname, off, tn_buf);
4522 		return -EACCES;
4523 	}
4524 
4525 	if (reg->type & MEM_USER) {
4526 		verbose(env,
4527 			"R%d is ptr_%s access user memory: off=%d\n",
4528 			regno, tname, off);
4529 		return -EACCES;
4530 	}
4531 
4532 	if (reg->type & MEM_PERCPU) {
4533 		verbose(env,
4534 			"R%d is ptr_%s access percpu memory: off=%d\n",
4535 			regno, tname, off);
4536 		return -EACCES;
4537 	}
4538 
4539 	if (env->ops->btf_struct_access) {
4540 		ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
4541 						  off, size, atype, &btf_id, &flag);
4542 	} else {
4543 		if (atype != BPF_READ) {
4544 			verbose(env, "only read is supported\n");
4545 			return -EACCES;
4546 		}
4547 
4548 		ret = btf_struct_access(&env->log, reg->btf, t, off, size,
4549 					atype, &btf_id, &flag);
4550 	}
4551 
4552 	if (ret < 0)
4553 		return ret;
4554 
4555 	/* If this is an untrusted pointer, all pointers formed by walking it
4556 	 * also inherit the untrusted flag.
4557 	 */
4558 	if (type_flag(reg->type) & PTR_UNTRUSTED)
4559 		flag |= PTR_UNTRUSTED;
4560 
4561 	if (atype == BPF_READ && value_regno >= 0)
4562 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
4563 
4564 	return 0;
4565 }
4566 
4567 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
4568 				   struct bpf_reg_state *regs,
4569 				   int regno, int off, int size,
4570 				   enum bpf_access_type atype,
4571 				   int value_regno)
4572 {
4573 	struct bpf_reg_state *reg = regs + regno;
4574 	struct bpf_map *map = reg->map_ptr;
4575 	enum bpf_type_flag flag = 0;
4576 	const struct btf_type *t;
4577 	const char *tname;
4578 	u32 btf_id;
4579 	int ret;
4580 
4581 	if (!btf_vmlinux) {
4582 		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
4583 		return -ENOTSUPP;
4584 	}
4585 
4586 	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
4587 		verbose(env, "map_ptr access not supported for map type %d\n",
4588 			map->map_type);
4589 		return -ENOTSUPP;
4590 	}
4591 
4592 	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
4593 	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
4594 
4595 	if (!env->allow_ptr_to_map_access) {
4596 		verbose(env,
4597 			"%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
4598 			tname);
4599 		return -EPERM;
4600 	}
4601 
4602 	if (off < 0) {
4603 		verbose(env, "R%d is %s invalid negative access: off=%d\n",
4604 			regno, tname, off);
4605 		return -EACCES;
4606 	}
4607 
4608 	if (atype != BPF_READ) {
4609 		verbose(env, "only read from %s is supported\n", tname);
4610 		return -EACCES;
4611 	}
4612 
4613 	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
4614 	if (ret < 0)
4615 		return ret;
4616 
4617 	if (value_regno >= 0)
4618 		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
4619 
4620 	return 0;
4621 }
4622 
4623 /* Check that the stack access at the given offset is within bounds. The
4624  * maximum valid offset is -1.
4625  *
4626  * The minimum valid offset is -MAX_BPF_STACK for writes, and
4627  * -state->allocated_stack for reads.
4628  */
4629 static int check_stack_slot_within_bounds(int off,
4630 					  struct bpf_func_state *state,
4631 					  enum bpf_access_type t)
4632 {
4633 	int min_valid_off;
4634 
4635 	if (t == BPF_WRITE)
4636 		min_valid_off = -MAX_BPF_STACK;
4637 	else
4638 		min_valid_off = -state->allocated_stack;
4639 
4640 	if (off < min_valid_off || off > -1)
4641 		return -EACCES;
4642 	return 0;
4643 }
4644 
4645 /* Check that the stack access at 'regno + off' falls within the maximum stack
4646  * bounds.
4647  *
4648  * 'off' includes `regno->offset`, but not its dynamic part (if any).
4649  */
4650 static int check_stack_access_within_bounds(
4651 		struct bpf_verifier_env *env,
4652 		int regno, int off, int access_size,
4653 		enum bpf_access_src src, enum bpf_access_type type)
4654 {
4655 	struct bpf_reg_state *regs = cur_regs(env);
4656 	struct bpf_reg_state *reg = regs + regno;
4657 	struct bpf_func_state *state = func(env, reg);
4658 	int min_off, max_off;
4659 	int err;
4660 	char *err_extra;
4661 
4662 	if (src == ACCESS_HELPER)
4663 		/* We don't know if helpers are reading or writing (or both). */
4664 		err_extra = " indirect access to";
4665 	else if (type == BPF_READ)
4666 		err_extra = " read from";
4667 	else
4668 		err_extra = " write to";
4669 
4670 	if (tnum_is_const(reg->var_off)) {
4671 		min_off = reg->var_off.value + off;
4672 		if (access_size > 0)
4673 			max_off = min_off + access_size - 1;
4674 		else
4675 			max_off = min_off;
4676 	} else {
4677 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
4678 		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
4679 			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
4680 				err_extra, regno);
4681 			return -EACCES;
4682 		}
4683 		min_off = reg->smin_value + off;
4684 		if (access_size > 0)
4685 			max_off = reg->smax_value + off + access_size - 1;
4686 		else
4687 			max_off = min_off;
4688 	}
4689 
4690 	err = check_stack_slot_within_bounds(min_off, state, type);
4691 	if (!err)
4692 		err = check_stack_slot_within_bounds(max_off, state, type);
4693 
4694 	if (err) {
4695 		if (tnum_is_const(reg->var_off)) {
4696 			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
4697 				err_extra, regno, off, access_size);
4698 		} else {
4699 			char tn_buf[48];
4700 
4701 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4702 			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
4703 				err_extra, regno, tn_buf, access_size);
4704 		}
4705 	}
4706 	return err;
4707 }
4708 
4709 /* check whether memory at (regno + off) is accessible for t = (read | write)
4710  * if t==write, value_regno is a register whose value is stored into memory
4711  * if t==read, value_regno is a register which will receive the value from memory
4712  * if t==write && value_regno==-1, some unknown value is stored into memory
4713  * if t==read && value_regno==-1, don't care what we read from memory
4714  */
4715 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
4716 			    int off, int bpf_size, enum bpf_access_type t,
4717 			    int value_regno, bool strict_alignment_once)
4718 {
4719 	struct bpf_reg_state *regs = cur_regs(env);
4720 	struct bpf_reg_state *reg = regs + regno;
4721 	struct bpf_func_state *state;
4722 	int size, err = 0;
4723 
4724 	size = bpf_size_to_bytes(bpf_size);
4725 	if (size < 0)
4726 		return size;
4727 
4728 	/* alignment checks will add in reg->off themselves */
4729 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
4730 	if (err)
4731 		return err;
4732 
4733 	/* for access checks, reg->off is just part of off */
4734 	off += reg->off;
4735 
4736 	if (reg->type == PTR_TO_MAP_KEY) {
4737 		if (t == BPF_WRITE) {
4738 			verbose(env, "write to change key R%d not allowed\n", regno);
4739 			return -EACCES;
4740 		}
4741 
4742 		err = check_mem_region_access(env, regno, off, size,
4743 					      reg->map_ptr->key_size, false);
4744 		if (err)
4745 			return err;
4746 		if (value_regno >= 0)
4747 			mark_reg_unknown(env, regs, value_regno);
4748 	} else if (reg->type == PTR_TO_MAP_VALUE) {
4749 		struct bpf_map_value_off_desc *kptr_off_desc = NULL;
4750 
4751 		if (t == BPF_WRITE && value_regno >= 0 &&
4752 		    is_pointer_value(env, value_regno)) {
4753 			verbose(env, "R%d leaks addr into map\n", value_regno);
4754 			return -EACCES;
4755 		}
4756 		err = check_map_access_type(env, regno, off, size, t);
4757 		if (err)
4758 			return err;
4759 		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
4760 		if (err)
4761 			return err;
4762 		if (tnum_is_const(reg->var_off))
4763 			kptr_off_desc = bpf_map_kptr_off_contains(reg->map_ptr,
4764 								  off + reg->var_off.value);
4765 		if (kptr_off_desc) {
4766 			err = check_map_kptr_access(env, regno, value_regno, insn_idx,
4767 						    kptr_off_desc);
4768 		} else if (t == BPF_READ && value_regno >= 0) {
4769 			struct bpf_map *map = reg->map_ptr;
4770 
4771 			/* if map is read-only, track its contents as scalars */
4772 			if (tnum_is_const(reg->var_off) &&
4773 			    bpf_map_is_rdonly(map) &&
4774 			    map->ops->map_direct_value_addr) {
4775 				int map_off = off + reg->var_off.value;
4776 				u64 val = 0;
4777 
4778 				err = bpf_map_direct_read(map, map_off, size,
4779 							  &val);
4780 				if (err)
4781 					return err;
4782 
4783 				regs[value_regno].type = SCALAR_VALUE;
4784 				__mark_reg_known(&regs[value_regno], val);
4785 			} else {
4786 				mark_reg_unknown(env, regs, value_regno);
4787 			}
4788 		}
4789 	} else if (base_type(reg->type) == PTR_TO_MEM) {
4790 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4791 
4792 		if (type_may_be_null(reg->type)) {
4793 			verbose(env, "R%d invalid mem access '%s'\n", regno,
4794 				reg_type_str(env, reg->type));
4795 			return -EACCES;
4796 		}
4797 
4798 		if (t == BPF_WRITE && rdonly_mem) {
4799 			verbose(env, "R%d cannot write into %s\n",
4800 				regno, reg_type_str(env, reg->type));
4801 			return -EACCES;
4802 		}
4803 
4804 		if (t == BPF_WRITE && value_regno >= 0 &&
4805 		    is_pointer_value(env, value_regno)) {
4806 			verbose(env, "R%d leaks addr into mem\n", value_regno);
4807 			return -EACCES;
4808 		}
4809 
4810 		err = check_mem_region_access(env, regno, off, size,
4811 					      reg->mem_size, false);
4812 		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
4813 			mark_reg_unknown(env, regs, value_regno);
4814 	} else if (reg->type == PTR_TO_CTX) {
4815 		enum bpf_reg_type reg_type = SCALAR_VALUE;
4816 		struct btf *btf = NULL;
4817 		u32 btf_id = 0;
4818 
4819 		if (t == BPF_WRITE && value_regno >= 0 &&
4820 		    is_pointer_value(env, value_regno)) {
4821 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
4822 			return -EACCES;
4823 		}
4824 
4825 		err = check_ptr_off_reg(env, reg, regno);
4826 		if (err < 0)
4827 			return err;
4828 
4829 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
4830 				       &btf_id);
4831 		if (err)
4832 			verbose_linfo(env, insn_idx, "; ");
4833 		if (!err && t == BPF_READ && value_regno >= 0) {
4834 			/* ctx access returns either a scalar, or a
4835 			 * PTR_TO_PACKET[_META,_END]. In the latter
4836 			 * case, we know the offset is zero.
4837 			 */
4838 			if (reg_type == SCALAR_VALUE) {
4839 				mark_reg_unknown(env, regs, value_regno);
4840 			} else {
4841 				mark_reg_known_zero(env, regs,
4842 						    value_regno);
4843 				if (type_may_be_null(reg_type))
4844 					regs[value_regno].id = ++env->id_gen;
4845 				/* A load of a ctx field could have an actual
4846 				 * load size different from the one encoded in
4847 				 * the insn. When the dst is PTR, it is for sure not
4848 				 * a sub-register.
4849 				 */
4850 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
4851 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
4852 					regs[value_regno].btf = btf;
4853 					regs[value_regno].btf_id = btf_id;
4854 				}
4855 			}
4856 			regs[value_regno].type = reg_type;
4857 		}
4858 
4859 	} else if (reg->type == PTR_TO_STACK) {
4860 		/* Basic bounds checks. */
4861 		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
4862 		if (err)
4863 			return err;
4864 
4865 		state = func(env, reg);
4866 		err = update_stack_depth(env, state, off);
4867 		if (err)
4868 			return err;
4869 
4870 		if (t == BPF_READ)
4871 			err = check_stack_read(env, regno, off, size,
4872 					       value_regno);
4873 		else
4874 			err = check_stack_write(env, regno, off, size,
4875 						value_regno, insn_idx);
4876 	} else if (reg_is_pkt_pointer(reg)) {
4877 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
4878 			verbose(env, "cannot write into packet\n");
4879 			return -EACCES;
4880 		}
4881 		if (t == BPF_WRITE && value_regno >= 0 &&
4882 		    is_pointer_value(env, value_regno)) {
4883 			verbose(env, "R%d leaks addr into packet\n",
4884 				value_regno);
4885 			return -EACCES;
4886 		}
4887 		err = check_packet_access(env, regno, off, size, false);
4888 		if (!err && t == BPF_READ && value_regno >= 0)
4889 			mark_reg_unknown(env, regs, value_regno);
4890 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
4891 		if (t == BPF_WRITE && value_regno >= 0 &&
4892 		    is_pointer_value(env, value_regno)) {
4893 			verbose(env, "R%d leaks addr into flow keys\n",
4894 				value_regno);
4895 			return -EACCES;
4896 		}
4897 
4898 		err = check_flow_keys_access(env, off, size);
4899 		if (!err && t == BPF_READ && value_regno >= 0)
4900 			mark_reg_unknown(env, regs, value_regno);
4901 	} else if (type_is_sk_pointer(reg->type)) {
4902 		if (t == BPF_WRITE) {
4903 			verbose(env, "R%d cannot write into %s\n",
4904 				regno, reg_type_str(env, reg->type));
4905 			return -EACCES;
4906 		}
4907 		err = check_sock_access(env, insn_idx, regno, off, size, t);
4908 		if (!err && value_regno >= 0)
4909 			mark_reg_unknown(env, regs, value_regno);
4910 	} else if (reg->type == PTR_TO_TP_BUFFER) {
4911 		err = check_tp_buffer_access(env, reg, regno, off, size);
4912 		if (!err && t == BPF_READ && value_regno >= 0)
4913 			mark_reg_unknown(env, regs, value_regno);
4914 	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
4915 		   !type_may_be_null(reg->type)) {
4916 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
4917 					      value_regno);
4918 	} else if (reg->type == CONST_PTR_TO_MAP) {
4919 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
4920 					      value_regno);
4921 	} else if (base_type(reg->type) == PTR_TO_BUF) {
4922 		bool rdonly_mem = type_is_rdonly_mem(reg->type);
4923 		u32 *max_access;
4924 
4925 		if (rdonly_mem) {
4926 			if (t == BPF_WRITE) {
4927 				verbose(env, "R%d cannot write into %s\n",
4928 					regno, reg_type_str(env, reg->type));
4929 				return -EACCES;
4930 			}
4931 			max_access = &env->prog->aux->max_rdonly_access;
4932 		} else {
4933 			max_access = &env->prog->aux->max_rdwr_access;
4934 		}
4935 
4936 		err = check_buffer_access(env, reg, regno, off, size, false,
4937 					  max_access);
4938 
4939 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
4940 			mark_reg_unknown(env, regs, value_regno);
4941 	} else {
4942 		verbose(env, "R%d invalid mem access '%s'\n", regno,
4943 			reg_type_str(env, reg->type));
4944 		return -EACCES;
4945 	}
4946 
4947 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
4948 	    regs[value_regno].type == SCALAR_VALUE) {
4949 		/* b/h/w load zero-extends, mark upper bits as known 0 */
4950 		coerce_reg_to_size(&regs[value_regno], size);
4951 	}
4952 	return err;
4953 }
4954 
4955 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
4956 {
4957 	int load_reg;
4958 	int err;
4959 
4960 	switch (insn->imm) {
4961 	case BPF_ADD:
4962 	case BPF_ADD | BPF_FETCH:
4963 	case BPF_AND:
4964 	case BPF_AND | BPF_FETCH:
4965 	case BPF_OR:
4966 	case BPF_OR | BPF_FETCH:
4967 	case BPF_XOR:
4968 	case BPF_XOR | BPF_FETCH:
4969 	case BPF_XCHG:
4970 	case BPF_CMPXCHG:
4971 		break;
4972 	default:
4973 		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
4974 		return -EINVAL;
4975 	}
4976 
4977 	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
4978 		verbose(env, "invalid atomic operand size\n");
4979 		return -EINVAL;
4980 	}
4981 
4982 	/* check src1 operand */
4983 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
4984 	if (err)
4985 		return err;
4986 
4987 	/* check src2 operand */
4988 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4989 	if (err)
4990 		return err;
4991 
4992 	if (insn->imm == BPF_CMPXCHG) {
4993 		/* Check comparison of R0 with memory location */
4994 		const u32 aux_reg = BPF_REG_0;
4995 
4996 		err = check_reg_arg(env, aux_reg, SRC_OP);
4997 		if (err)
4998 			return err;
4999 
5000 		if (is_pointer_value(env, aux_reg)) {
5001 			verbose(env, "R%d leaks addr into mem\n", aux_reg);
5002 			return -EACCES;
5003 		}
5004 	}
5005 
5006 	if (is_pointer_value(env, insn->src_reg)) {
5007 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
5008 		return -EACCES;
5009 	}
5010 
5011 	if (is_ctx_reg(env, insn->dst_reg) ||
5012 	    is_pkt_reg(env, insn->dst_reg) ||
5013 	    is_flow_key_reg(env, insn->dst_reg) ||
5014 	    is_sk_reg(env, insn->dst_reg)) {
5015 		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
5016 			insn->dst_reg,
5017 			reg_type_str(env, reg_state(env, insn->dst_reg)->type));
5018 		return -EACCES;
5019 	}
5020 
5021 	if (insn->imm & BPF_FETCH) {
5022 		if (insn->imm == BPF_CMPXCHG)
5023 			load_reg = BPF_REG_0;
5024 		else
5025 			load_reg = insn->src_reg;
5026 
5027 		/* check and record load of old value */
5028 		err = check_reg_arg(env, load_reg, DST_OP);
5029 		if (err)
5030 			return err;
5031 	} else {
5032 		/* This instruction accesses a memory location but doesn't
5033 		 * actually load it into a register.
5034 		 */
5035 		load_reg = -1;
5036 	}
5037 
5038 	/* Check whether we can read the memory, with a second call for the
5039 	 * fetch case to simulate the register fill.
5040 	 */
5041 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5042 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
5043 	if (!err && load_reg >= 0)
5044 		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5045 				       BPF_SIZE(insn->code), BPF_READ, load_reg,
5046 				       true);
5047 	if (err)
5048 		return err;
5049 
5050 	/* Check whether we can write into the same memory. */
5051 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5052 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
5053 	if (err)
5054 		return err;
5055 
5056 	return 0;
5057 }
5058 
5059 /* When register 'regno' is used to read the stack (either directly or through
5060  * a helper function) make sure that it's within stack boundary and, depending
5061  * on the access type, that all elements of the stack are initialized.
5062  *
5063  * 'off' includes 'regno->off', but not its dynamic part (if any).
5064  *
5065  * All registers that have been spilled on the stack in the slots within the
5066  * read offsets are marked as read.
5067  */
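/* Illustrative BPF-program sketch (added for exposition; the helper named
 * here is only an example of a raw_mode argument, not something this
 * function is tied to):
 *
 *	char buf[16];
 *
 *	// dst of bpf_probe_read_kernel() is ARG_PTR_TO_UNINIT_MEM (raw_mode),
 *	// so 'buf' may still be uninitialized at this point ...
 *	bpf_probe_read_kernel(buf, sizeof(buf), some_kernel_addr);
 *
 *	// ... whereas a helper that only reads its memory argument requires
 *	// every byte of the passed stack range to be initialized beforehand,
 *	// which is what the checks below enforce when raw_mode is not set.
 */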
5068 static int check_stack_range_initialized(
5069 		struct bpf_verifier_env *env, int regno, int off,
5070 		int access_size, bool zero_size_allowed,
5071 		enum bpf_access_src type, struct bpf_call_arg_meta *meta)
5072 {
5073 	struct bpf_reg_state *reg = reg_state(env, regno);
5074 	struct bpf_func_state *state = func(env, reg);
5075 	int err, min_off, max_off, i, j, slot, spi;
5076 	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
5077 	enum bpf_access_type bounds_check_type;
5078 	/* Some accesses can write anything into the stack, others are
5079 	 * read-only.
5080 	 */
5081 	bool clobber = false;
5082 
5083 	if (access_size == 0 && !zero_size_allowed) {
5084 		verbose(env, "invalid zero-sized read\n");
5085 		return -EACCES;
5086 	}
5087 
5088 	if (type == ACCESS_HELPER) {
5089 		/* The bounds checks for writes are more permissive than for
5090 		 * reads. However, if raw_mode is not set, we'll do extra
5091 		 * checks below.
5092 		 */
5093 		bounds_check_type = BPF_WRITE;
5094 		clobber = true;
5095 	} else {
5096 		bounds_check_type = BPF_READ;
5097 	}
5098 	err = check_stack_access_within_bounds(env, regno, off, access_size,
5099 					       type, bounds_check_type);
5100 	if (err)
5101 		return err;
5102 
5103 
5104 	if (tnum_is_const(reg->var_off)) {
5105 		min_off = max_off = reg->var_off.value + off;
5106 	} else {
5107 		/* Variable offset is prohibited for unprivileged mode for
5108 		 * simplicity since it requires corresponding support in
5109 		 * Spectre masking for stack ALU.
5110 		 * See also retrieve_ptr_limit().
5111 		 */
5112 		if (!env->bypass_spec_v1) {
5113 			char tn_buf[48];
5114 
5115 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5116 			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
5117 				regno, err_extra, tn_buf);
5118 			return -EACCES;
5119 		}
5120 		/* Only an initialized buffer on the stack is allowed to be
5121 		 * accessed with a variable offset. With an uninitialized buffer
5122 		 * it's hard to guarantee that the whole memory is marked as
5123 		 * initialized on helper return, since the specific bounds are
5124 		 * unknown, which may cause uninitialized stack memory to leak.
5125 		 */
5126 		if (meta && meta->raw_mode)
5127 			meta = NULL;
5128 
5129 		min_off = reg->smin_value + off;
5130 		max_off = reg->smax_value + off;
5131 	}
5132 
5133 	if (meta && meta->raw_mode) {
5134 		meta->access_size = access_size;
5135 		meta->regno = regno;
5136 		return 0;
5137 	}
5138 
5139 	for (i = min_off; i < max_off + access_size; i++) {
5140 		u8 *stype;
5141 
5142 		slot = -i - 1;
5143 		spi = slot / BPF_REG_SIZE;
5144 		if (state->allocated_stack <= slot)
5145 			goto err;
5146 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
5147 		if (*stype == STACK_MISC)
5148 			goto mark;
5149 		if (*stype == STACK_ZERO) {
5150 			if (clobber) {
5151 				/* helper can write anything into the stack */
5152 				*stype = STACK_MISC;
5153 			}
5154 			goto mark;
5155 		}
5156 
5157 		if (is_spilled_reg(&state->stack[spi]) &&
5158 		    base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID)
5159 			goto mark;
5160 
5161 		if (is_spilled_reg(&state->stack[spi]) &&
5162 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
5163 		     env->allow_ptr_leaks)) {
5164 			if (clobber) {
5165 				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
5166 				for (j = 0; j < BPF_REG_SIZE; j++)
5167 					scrub_spilled_slot(&state->stack[spi].slot_type[j]);
5168 			}
5169 			goto mark;
5170 		}
5171 
5172 err:
5173 		if (tnum_is_const(reg->var_off)) {
5174 			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
5175 				err_extra, regno, min_off, i - min_off, access_size);
5176 		} else {
5177 			char tn_buf[48];
5178 
5179 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5180 			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
5181 				err_extra, regno, tn_buf, i - min_off, access_size);
5182 		}
5183 		return -EACCES;
5184 mark:
5185 		/* reading any byte out of 8-byte 'spill_slot' will cause
5186 		 * the whole slot to be marked as 'read'
5187 		 */
5188 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
5189 			      state->stack[spi].spilled_ptr.parent,
5190 			      REG_LIVE_READ64);
5191 	}
5192 	return update_stack_depth(env, state, min_off);
5193 }
5194 
5195 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
5196 				   int access_size, bool zero_size_allowed,
5197 				   struct bpf_call_arg_meta *meta)
5198 {
5199 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5200 	u32 *max_access;
5201 
5202 	switch (base_type(reg->type)) {
5203 	case PTR_TO_PACKET:
5204 	case PTR_TO_PACKET_META:
5205 		return check_packet_access(env, regno, reg->off, access_size,
5206 					   zero_size_allowed);
5207 	case PTR_TO_MAP_KEY:
5208 		if (meta && meta->raw_mode) {
5209 			verbose(env, "R%d cannot write into %s\n", regno,
5210 				reg_type_str(env, reg->type));
5211 			return -EACCES;
5212 		}
5213 		return check_mem_region_access(env, regno, reg->off, access_size,
5214 					       reg->map_ptr->key_size, false);
5215 	case PTR_TO_MAP_VALUE:
5216 		if (check_map_access_type(env, regno, reg->off, access_size,
5217 					  meta && meta->raw_mode ? BPF_WRITE :
5218 					  BPF_READ))
5219 			return -EACCES;
5220 		return check_map_access(env, regno, reg->off, access_size,
5221 					zero_size_allowed, ACCESS_HELPER);
5222 	case PTR_TO_MEM:
5223 		if (type_is_rdonly_mem(reg->type)) {
5224 			if (meta && meta->raw_mode) {
5225 				verbose(env, "R%d cannot write into %s\n", regno,
5226 					reg_type_str(env, reg->type));
5227 				return -EACCES;
5228 			}
5229 		}
5230 		return check_mem_region_access(env, regno, reg->off,
5231 					       access_size, reg->mem_size,
5232 					       zero_size_allowed);
5233 	case PTR_TO_BUF:
5234 		if (type_is_rdonly_mem(reg->type)) {
5235 			if (meta && meta->raw_mode) {
5236 				verbose(env, "R%d cannot write into %s\n", regno,
5237 					reg_type_str(env, reg->type));
5238 				return -EACCES;
5239 			}
5240 
5241 			max_access = &env->prog->aux->max_rdonly_access;
5242 		} else {
5243 			max_access = &env->prog->aux->max_rdwr_access;
5244 		}
5245 		return check_buffer_access(env, reg, regno, reg->off,
5246 					   access_size, zero_size_allowed,
5247 					   max_access);
5248 	case PTR_TO_STACK:
5249 		return check_stack_range_initialized(
5250 				env,
5251 				regno, reg->off, access_size,
5252 				zero_size_allowed, ACCESS_HELPER, meta);
5253 	case PTR_TO_CTX:
5254 		/* in case the function doesn't know how to access the context
5255 		 * (because we are in a program of type SYSCALL, for example), we
5256 		 * cannot statically check its size.
5257 		 * Dynamically check it now.
5258 		 */
5259 		if (!env->ops->convert_ctx_access) {
5260 			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
5261 			int offset = access_size - 1;
5262 
5263 			/* Allow zero-byte read from PTR_TO_CTX */
5264 			if (access_size == 0)
5265 				return zero_size_allowed ? 0 : -EACCES;
5266 
5267 			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
5268 						atype, -1, false);
5269 		}
5270 
5271 		fallthrough;
5272 	default: /* scalar_value or invalid ptr */
5273 		/* Allow zero-byte read from NULL, regardless of pointer type */
5274 		if (zero_size_allowed && access_size == 0 &&
5275 		    register_is_null(reg))
5276 			return 0;
5277 
5278 		verbose(env, "R%d type=%s ", regno,
5279 			reg_type_str(env, reg->type));
5280 		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
5281 		return -EACCES;
5282 	}
5283 }
5284 
5285 static int check_mem_size_reg(struct bpf_verifier_env *env,
5286 			      struct bpf_reg_state *reg, u32 regno,
5287 			      bool zero_size_allowed,
5288 			      struct bpf_call_arg_meta *meta)
5289 {
5290 	int err;
5291 
5292 	/* This is used to refine r0 return value bounds for helpers
5293 	 * that enforce this value as an upper bound on return values.
5294 	 * See do_refine_retval_range() for helpers that can refine
5295 	 * the return value. The C type of the size argument is u32, so we
5296 	 * pull the register bound from umax_value; if the value may be
5297 	 * negative, the verifier errors out. Only upper bounds can be
5298 	 * learned because retval is an int type and negative retvals are allowed.
5299 	 */
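	/* Example (sketch): for a helper such as bpf_get_stack(), whose
	 * return value cannot exceed the size passed here,
	 * do_refine_retval_range() later uses msize_max_value to tighten
	 * the upper bound of r0.
	 */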
5300 	meta->msize_max_value = reg->umax_value;
5301 
5302 	/* The register is SCALAR_VALUE; the access check
5303 	 * happens using its boundaries.
5304 	 */
5305 	if (!tnum_is_const(reg->var_off))
5306 		/* For unprivileged variable accesses, disable raw
5307 		 * mode so that the program is required to
5308 		 * initialize all the memory that the helper could
5309 		 * just partially fill up.
5310 		 */
5311 		meta = NULL;
5312 
5313 	if (reg->smin_value < 0) {
5314 		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
5315 			regno);
5316 		return -EACCES;
5317 	}
5318 
5319 	if (reg->umin_value == 0) {
5320 		err = check_helper_mem_access(env, regno - 1, 0,
5321 					      zero_size_allowed,
5322 					      meta);
5323 		if (err)
5324 			return err;
5325 	}
5326 
5327 	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
5328 		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
5329 			regno);
5330 		return -EACCES;
5331 	}
5332 	err = check_helper_mem_access(env, regno - 1,
5333 				      reg->umax_value,
5334 				      zero_size_allowed, meta);
5335 	if (!err)
5336 		err = mark_chain_precision(env, regno);
5337 	return err;
5338 }
5339 
5340 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5341 		   u32 regno, u32 mem_size)
5342 {
5343 	bool may_be_null = type_may_be_null(reg->type);
5344 	struct bpf_reg_state saved_reg;
5345 	struct bpf_call_arg_meta meta;
5346 	int err;
5347 
5348 	if (register_is_null(reg))
5349 		return 0;
5350 
5351 	memset(&meta, 0, sizeof(meta));
5352 	/* Assuming that the register contains a value, check if the memory
5353 	 * access is safe. Temporarily save and restore the register's state as
5354 	 * the conversion shouldn't be visible to a caller.
5355 	 */
5356 	if (may_be_null) {
5357 		saved_reg = *reg;
5358 		mark_ptr_not_null_reg(reg);
5359 	}
5360 
5361 	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
5362 	/* Check access for BPF_WRITE */
5363 	meta.raw_mode = true;
5364 	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
5365 
5366 	if (may_be_null)
5367 		*reg = saved_reg;
5368 
5369 	return err;
5370 }
5371 
5372 int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5373 			     u32 regno)
5374 {
5375 	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
5376 	bool may_be_null = type_may_be_null(mem_reg->type);
5377 	struct bpf_reg_state saved_reg;
5378 	struct bpf_call_arg_meta meta;
5379 	int err;
5380 
5381 	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
5382 
5383 	memset(&meta, 0, sizeof(meta));
5384 
5385 	if (may_be_null) {
5386 		saved_reg = *mem_reg;
5387 		mark_ptr_not_null_reg(mem_reg);
5388 	}
5389 
5390 	err = check_mem_size_reg(env, reg, regno, true, &meta);
5391 	/* Check access for BPF_WRITE */
5392 	meta.raw_mode = true;
5393 	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
5394 
5395 	if (may_be_null)
5396 		*mem_reg = saved_reg;
5397 	return err;
5398 }
5399 
5400 /* Implementation details:
5401  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
5402  * Two bpf_map_lookups (even with the same key) will have different reg->id.
5403  * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
5404  * value_or_null->value transition, since the verifier only cares about
5405  * the range of access to valid map value pointer and doesn't care about actual
5406  * address of the map element.
5407  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
5408  * reg->id > 0 after value_or_null->value transition. By doing so
5409  * two bpf_map_lookups will be considered two different pointers that
5410  * point to different bpf_spin_locks.
5411  * The verifier allows taking only one bpf_spin_lock at a time to avoid
5412  * dead-locks.
5413  * Since only one bpf_spin_lock is allowed the checks are simpler than
5414  * reg_is_refcounted() logic. The verifier needs to remember only
5415  * one spin_lock instead of array of acquired_refs.
5416  * cur_state->active_spin_lock remembers which map value element got locked
5417  * and clears it after bpf_spin_unlock.
5418  */
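/* Illustrative sketch of the usage pattern being enforced (the map value
 * layout is hypothetical, added for exposition):
 *
 *	struct elem { struct bpf_spin_lock lock; int counter; };
 *	...
 *	struct elem *v = bpf_map_lookup_elem(&map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 *
 * Taking a second bpf_spin_lock before unlocking the first, or unlocking
 * a lock obtained from a different lookup (different reg->id), is rejected.
 */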
5419 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
5420 			     bool is_lock)
5421 {
5422 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5423 	struct bpf_verifier_state *cur = env->cur_state;
5424 	bool is_const = tnum_is_const(reg->var_off);
5425 	struct bpf_map *map = reg->map_ptr;
5426 	u64 val = reg->var_off.value;
5427 
5428 	if (!is_const) {
5429 		verbose(env,
5430 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
5431 			regno);
5432 		return -EINVAL;
5433 	}
5434 	if (!map->btf) {
5435 		verbose(env,
5436 			"map '%s' has to have BTF in order to use bpf_spin_lock\n",
5437 			map->name);
5438 		return -EINVAL;
5439 	}
5440 	if (!map_value_has_spin_lock(map)) {
5441 		if (map->spin_lock_off == -E2BIG)
5442 			verbose(env,
5443 				"map '%s' has more than one 'struct bpf_spin_lock'\n",
5444 				map->name);
5445 		else if (map->spin_lock_off == -ENOENT)
5446 			verbose(env,
5447 				"map '%s' doesn't have 'struct bpf_spin_lock'\n",
5448 				map->name);
5449 		else
5450 			verbose(env,
5451 				"map '%s' is not a struct type or bpf_spin_lock is mangled\n",
5452 				map->name);
5453 		return -EINVAL;
5454 	}
5455 	if (map->spin_lock_off != val + reg->off) {
5456 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
5457 			val + reg->off);
5458 		return -EINVAL;
5459 	}
5460 	if (is_lock) {
5461 		if (cur->active_spin_lock) {
5462 			verbose(env,
5463 				"Locking two bpf_spin_locks is not allowed\n");
5464 			return -EINVAL;
5465 		}
5466 		cur->active_spin_lock = reg->id;
5467 	} else {
5468 		if (!cur->active_spin_lock) {
5469 			verbose(env, "bpf_spin_unlock without taking a lock\n");
5470 			return -EINVAL;
5471 		}
5472 		if (cur->active_spin_lock != reg->id) {
5473 			verbose(env, "bpf_spin_unlock of different lock\n");
5474 			return -EINVAL;
5475 		}
5476 		cur->active_spin_lock = 0;
5477 	}
5478 	return 0;
5479 }
5480 
5481 static int process_timer_func(struct bpf_verifier_env *env, int regno,
5482 			      struct bpf_call_arg_meta *meta)
5483 {
5484 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5485 	bool is_const = tnum_is_const(reg->var_off);
5486 	struct bpf_map *map = reg->map_ptr;
5487 	u64 val = reg->var_off.value;
5488 
5489 	if (!is_const) {
5490 		verbose(env,
5491 			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
5492 			regno);
5493 		return -EINVAL;
5494 	}
5495 	if (!map->btf) {
5496 		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
5497 			map->name);
5498 		return -EINVAL;
5499 	}
5500 	if (!map_value_has_timer(map)) {
5501 		if (map->timer_off == -E2BIG)
5502 			verbose(env,
5503 				"map '%s' has more than one 'struct bpf_timer'\n",
5504 				map->name);
5505 		else if (map->timer_off == -ENOENT)
5506 			verbose(env,
5507 				"map '%s' doesn't have 'struct bpf_timer'\n",
5508 				map->name);
5509 		else
5510 			verbose(env,
5511 				"map '%s' is not a struct type or bpf_timer is mangled\n",
5512 				map->name);
5513 		return -EINVAL;
5514 	}
5515 	if (map->timer_off != val + reg->off) {
5516 		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
5517 			val + reg->off, map->timer_off);
5518 		return -EINVAL;
5519 	}
5520 	if (meta->map_ptr) {
5521 		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
5522 		return -EFAULT;
5523 	}
5524 	meta->map_uid = reg->map_uid;
5525 	meta->map_ptr = map;
5526 	return 0;
5527 }
5528 
5529 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
5530 			     struct bpf_call_arg_meta *meta)
5531 {
5532 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5533 	struct bpf_map_value_off_desc *off_desc;
5534 	struct bpf_map *map_ptr = reg->map_ptr;
5535 	u32 kptr_off;
5536 	int ret;
5537 
5538 	if (!tnum_is_const(reg->var_off)) {
5539 		verbose(env,
5540 			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
5541 			regno);
5542 		return -EINVAL;
5543 	}
5544 	if (!map_ptr->btf) {
5545 		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
5546 			map_ptr->name);
5547 		return -EINVAL;
5548 	}
5549 	if (!map_value_has_kptrs(map_ptr)) {
5550 		ret = PTR_ERR_OR_ZERO(map_ptr->kptr_off_tab);
5551 		if (ret == -E2BIG)
5552 			verbose(env, "map '%s' has more than %d kptr\n", map_ptr->name,
5553 				BPF_MAP_VALUE_OFF_MAX);
5554 		else if (ret == -EEXIST)
5555 			verbose(env, "map '%s' has repeating kptr BTF tags\n", map_ptr->name);
5556 		else
5557 			verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
5558 		return -EINVAL;
5559 	}
5560 
5561 	meta->map_ptr = map_ptr;
5562 	kptr_off = reg->off + reg->var_off.value;
5563 	off_desc = bpf_map_kptr_off_contains(map_ptr, kptr_off);
5564 	if (!off_desc) {
5565 		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
5566 		return -EACCES;
5567 	}
5568 	if (off_desc->type != BPF_KPTR_REF) {
5569 		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
5570 		return -EACCES;
5571 	}
5572 	meta->kptr_off_desc = off_desc;
5573 	return 0;
5574 }
5575 
5576 static bool arg_type_is_mem_size(enum bpf_arg_type type)
5577 {
5578 	return type == ARG_CONST_SIZE ||
5579 	       type == ARG_CONST_SIZE_OR_ZERO;
5580 }
5581 
5582 static bool arg_type_is_release(enum bpf_arg_type type)
5583 {
5584 	return type & OBJ_RELEASE;
5585 }
5586 
5587 static bool arg_type_is_dynptr(enum bpf_arg_type type)
5588 {
5589 	return base_type(type) == ARG_PTR_TO_DYNPTR;
5590 }
5591 
5592 static int int_ptr_type_to_size(enum bpf_arg_type type)
5593 {
5594 	if (type == ARG_PTR_TO_INT)
5595 		return sizeof(u32);
5596 	else if (type == ARG_PTR_TO_LONG)
5597 		return sizeof(u64);
5598 
5599 	return -EINVAL;
5600 }
5601 
5602 static int resolve_map_arg_type(struct bpf_verifier_env *env,
5603 				 const struct bpf_call_arg_meta *meta,
5604 				 enum bpf_arg_type *arg_type)
5605 {
5606 	if (!meta->map_ptr) {
5607 		/* kernel subsystem misconfigured verifier */
5608 		verbose(env, "invalid map_ptr to access map->type\n");
5609 		return -EACCES;
5610 	}
5611 
5612 	switch (meta->map_ptr->map_type) {
5613 	case BPF_MAP_TYPE_SOCKMAP:
5614 	case BPF_MAP_TYPE_SOCKHASH:
5615 		if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
5616 			*arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
5617 		} else {
5618 			verbose(env, "invalid arg_type for sockmap/sockhash\n");
5619 			return -EINVAL;
5620 		}
5621 		break;
5622 	case BPF_MAP_TYPE_BLOOM_FILTER:
5623 		if (meta->func_id == BPF_FUNC_map_peek_elem)
5624 			*arg_type = ARG_PTR_TO_MAP_VALUE;
5625 		break;
5626 	default:
5627 		break;
5628 	}
5629 	return 0;
5630 }
5631 
5632 struct bpf_reg_types {
5633 	const enum bpf_reg_type types[10];
5634 	u32 *btf_id;
5635 };
5636 
5637 static const struct bpf_reg_types map_key_value_types = {
5638 	.types = {
5639 		PTR_TO_STACK,
5640 		PTR_TO_PACKET,
5641 		PTR_TO_PACKET_META,
5642 		PTR_TO_MAP_KEY,
5643 		PTR_TO_MAP_VALUE,
5644 	},
5645 };
5646 
5647 static const struct bpf_reg_types sock_types = {
5648 	.types = {
5649 		PTR_TO_SOCK_COMMON,
5650 		PTR_TO_SOCKET,
5651 		PTR_TO_TCP_SOCK,
5652 		PTR_TO_XDP_SOCK,
5653 	},
5654 };
5655 
5656 #ifdef CONFIG_NET
5657 static const struct bpf_reg_types btf_id_sock_common_types = {
5658 	.types = {
5659 		PTR_TO_SOCK_COMMON,
5660 		PTR_TO_SOCKET,
5661 		PTR_TO_TCP_SOCK,
5662 		PTR_TO_XDP_SOCK,
5663 		PTR_TO_BTF_ID,
5664 	},
5665 	.btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5666 };
5667 #endif
5668 
5669 static const struct bpf_reg_types mem_types = {
5670 	.types = {
5671 		PTR_TO_STACK,
5672 		PTR_TO_PACKET,
5673 		PTR_TO_PACKET_META,
5674 		PTR_TO_MAP_KEY,
5675 		PTR_TO_MAP_VALUE,
5676 		PTR_TO_MEM,
5677 		PTR_TO_MEM | MEM_ALLOC,
5678 		PTR_TO_BUF,
5679 	},
5680 };
5681 
5682 static const struct bpf_reg_types int_ptr_types = {
5683 	.types = {
5684 		PTR_TO_STACK,
5685 		PTR_TO_PACKET,
5686 		PTR_TO_PACKET_META,
5687 		PTR_TO_MAP_KEY,
5688 		PTR_TO_MAP_VALUE,
5689 	},
5690 };
5691 
5692 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
5693 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
5694 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
5695 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } };
5696 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
5697 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
5698 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
5699 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } };
5700 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
5701 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
5702 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
5703 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
5704 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
5705 static const struct bpf_reg_types dynptr_types = {
5706 	.types = {
5707 		PTR_TO_STACK,
5708 		PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL,
5709 	}
5710 };
5711 
5712 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
5713 	[ARG_PTR_TO_MAP_KEY]		= &map_key_value_types,
5714 	[ARG_PTR_TO_MAP_VALUE]		= &map_key_value_types,
5715 	[ARG_CONST_SIZE]		= &scalar_types,
5716 	[ARG_CONST_SIZE_OR_ZERO]	= &scalar_types,
5717 	[ARG_CONST_ALLOC_SIZE_OR_ZERO]	= &scalar_types,
5718 	[ARG_CONST_MAP_PTR]		= &const_map_ptr_types,
5719 	[ARG_PTR_TO_CTX]		= &context_types,
5720 	[ARG_PTR_TO_SOCK_COMMON]	= &sock_types,
5721 #ifdef CONFIG_NET
5722 	[ARG_PTR_TO_BTF_ID_SOCK_COMMON]	= &btf_id_sock_common_types,
5723 #endif
5724 	[ARG_PTR_TO_SOCKET]		= &fullsock_types,
5725 	[ARG_PTR_TO_BTF_ID]		= &btf_ptr_types,
5726 	[ARG_PTR_TO_SPIN_LOCK]		= &spin_lock_types,
5727 	[ARG_PTR_TO_MEM]		= &mem_types,
5728 	[ARG_PTR_TO_ALLOC_MEM]		= &alloc_mem_types,
5729 	[ARG_PTR_TO_INT]		= &int_ptr_types,
5730 	[ARG_PTR_TO_LONG]		= &int_ptr_types,
5731 	[ARG_PTR_TO_PERCPU_BTF_ID]	= &percpu_btf_ptr_types,
5732 	[ARG_PTR_TO_FUNC]		= &func_ptr_types,
5733 	[ARG_PTR_TO_STACK]		= &stack_ptr_types,
5734 	[ARG_PTR_TO_CONST_STR]		= &const_str_ptr_types,
5735 	[ARG_PTR_TO_TIMER]		= &timer_types,
5736 	[ARG_PTR_TO_KPTR]		= &kptr_types,
5737 	[ARG_PTR_TO_DYNPTR]		= &dynptr_types,
5738 };
5739 
5740 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
5741 			  enum bpf_arg_type arg_type,
5742 			  const u32 *arg_btf_id,
5743 			  struct bpf_call_arg_meta *meta)
5744 {
5745 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5746 	enum bpf_reg_type expected, type = reg->type;
5747 	const struct bpf_reg_types *compatible;
5748 	int i, j;
5749 
5750 	compatible = compatible_reg_types[base_type(arg_type)];
5751 	if (!compatible) {
5752 		verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
5753 		return -EFAULT;
5754 	}
5755 
5756 	/* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
5757 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
5758 	 *
5759 	 * Same for MAYBE_NULL:
5760 	 *
5761 	 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
5762 	 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
5763 	 *
5764 	 * Therefore we fold these flags depending on the arg_type before comparison.
5765 	 */
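	/* For example (sketch): with arg_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	 * a reg of type PTR_TO_MEM | MEM_RDONLY is folded down to PTR_TO_MEM
	 * and accepted; with plain ARG_PTR_TO_MEM the reg keeps MEM_RDONLY
	 * and the comparison loop below rejects it.
	 */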
5766 	if (arg_type & MEM_RDONLY)
5767 		type &= ~MEM_RDONLY;
5768 	if (arg_type & PTR_MAYBE_NULL)
5769 		type &= ~PTR_MAYBE_NULL;
5770 
5771 	for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
5772 		expected = compatible->types[i];
5773 		if (expected == NOT_INIT)
5774 			break;
5775 
5776 		if (type == expected)
5777 			goto found;
5778 	}
5779 
5780 	verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
5781 	for (j = 0; j + 1 < i; j++)
5782 		verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
5783 	verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
5784 	return -EACCES;
5785 
5786 found:
5787 	if (reg->type == PTR_TO_BTF_ID) {
5788 		/* For bpf_sk_release, it needs to match against first member
5789 		 * 'struct sock_common', hence make an exception for it. This
5790 		 * allows bpf_sk_release to work for multiple socket types.
5791 		 */
5792 		bool strict_type_match = arg_type_is_release(arg_type) &&
5793 					 meta->func_id != BPF_FUNC_sk_release;
5794 
5795 		if (!arg_btf_id) {
5796 			if (!compatible->btf_id) {
5797 				verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
5798 				return -EFAULT;
5799 			}
5800 			arg_btf_id = compatible->btf_id;
5801 		}
5802 
5803 		if (meta->func_id == BPF_FUNC_kptr_xchg) {
5804 			if (map_kptr_match_type(env, meta->kptr_off_desc, reg, regno))
5805 				return -EACCES;
5806 		} else {
5807 			if (arg_btf_id == BPF_PTR_POISON) {
5808 				verbose(env, "verifier internal error:");
5809 				verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
5810 					regno);
5811 				return -EACCES;
5812 			}
5813 
5814 			if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
5815 						  btf_vmlinux, *arg_btf_id,
5816 						  strict_type_match)) {
5817 				verbose(env, "R%d is of type %s but %s is expected\n",
5818 					regno, kernel_type_name(reg->btf, reg->btf_id),
5819 					kernel_type_name(btf_vmlinux, *arg_btf_id));
5820 				return -EACCES;
5821 			}
5822 		}
5823 	}
5824 
5825 	return 0;
5826 }
5827 
5828 int check_func_arg_reg_off(struct bpf_verifier_env *env,
5829 			   const struct bpf_reg_state *reg, int regno,
5830 			   enum bpf_arg_type arg_type)
5831 {
5832 	enum bpf_reg_type type = reg->type;
5833 	bool fixed_off_ok = false;
5834 
5835 	switch ((u32)type) {
5836 	/* Pointer types where reg offset is explicitly allowed: */
5837 	case PTR_TO_STACK:
5838 		if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) {
5839 			verbose(env, "cannot pass in dynptr at an offset\n");
5840 			return -EINVAL;
5841 		}
5842 		fallthrough;
5843 	case PTR_TO_PACKET:
5844 	case PTR_TO_PACKET_META:
5845 	case PTR_TO_MAP_KEY:
5846 	case PTR_TO_MAP_VALUE:
5847 	case PTR_TO_MEM:
5848 	case PTR_TO_MEM | MEM_RDONLY:
5849 	case PTR_TO_MEM | MEM_ALLOC:
5850 	case PTR_TO_BUF:
5851 	case PTR_TO_BUF | MEM_RDONLY:
5852 	case SCALAR_VALUE:
5853 		/* Some of the argument types nevertheless require a
5854 		 * zero register offset.
5855 		 */
5856 		if (base_type(arg_type) != ARG_PTR_TO_ALLOC_MEM)
5857 			return 0;
5858 		break;
5859 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
5860 	 * fixed offset.
5861 	 */
5862 	case PTR_TO_BTF_ID:
5863 		/* When referenced PTR_TO_BTF_ID is passed to release function,
5864 		/* When a referenced PTR_TO_BTF_ID is passed to a release function,
5865 		 * its fixed offset must be 0. In the other cases, fixed offset
5866 		 */
5867 		if (arg_type_is_release(arg_type) && reg->off) {
5868 			verbose(env, "R%d must have zero offset when passed to release func\n",
5869 				regno);
5870 			return -EINVAL;
5871 		}
5872 		/* When the arg is a release pointer, fixed_off_ok must be false, but
5873 		 * we already checked and rejected reg->off != 0 above, so set
5874 		 * to true to allow fixed offset for all other cases.
5875 		 */
5876 		fixed_off_ok = true;
5877 		break;
5878 	default:
5879 		break;
5880 	}
5881 	return __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
5882 }
5883 
5884 static u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
5885 {
5886 	struct bpf_func_state *state = func(env, reg);
5887 	int spi = get_spi(reg->off);
5888 
5889 	return state->stack[spi].spilled_ptr.id;
5890 }
5891 
5892 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
5893 			  struct bpf_call_arg_meta *meta,
5894 			  const struct bpf_func_proto *fn)
5895 {
5896 	u32 regno = BPF_REG_1 + arg;
5897 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5898 	enum bpf_arg_type arg_type = fn->arg_type[arg];
5899 	enum bpf_reg_type type = reg->type;
5900 	u32 *arg_btf_id = NULL;
5901 	int err = 0;
5902 
5903 	if (arg_type == ARG_DONTCARE)
5904 		return 0;
5905 
5906 	err = check_reg_arg(env, regno, SRC_OP);
5907 	if (err)
5908 		return err;
5909 
5910 	if (arg_type == ARG_ANYTHING) {
5911 		if (is_pointer_value(env, regno)) {
5912 			verbose(env, "R%d leaks addr into helper function\n",
5913 				regno);
5914 			return -EACCES;
5915 		}
5916 		return 0;
5917 	}
5918 
5919 	if (type_is_pkt_pointer(type) &&
5920 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
5921 		verbose(env, "helper access to the packet is not allowed\n");
5922 		return -EACCES;
5923 	}
5924 
5925 	if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
5926 		err = resolve_map_arg_type(env, meta, &arg_type);
5927 		if (err)
5928 			return err;
5929 	}
5930 
5931 	if (register_is_null(reg) && type_may_be_null(arg_type))
5932 		/* A NULL register has a SCALAR_VALUE type, so skip
5933 		 * type checking.
5934 		 */
5935 		goto skip_type_check;
5936 
5937 	/* arg_btf_id and arg_size are in a union. */
5938 	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
5939 		arg_btf_id = fn->arg_btf_id[arg];
5940 
5941 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
5942 	if (err)
5943 		return err;
5944 
5945 	err = check_func_arg_reg_off(env, reg, regno, arg_type);
5946 	if (err)
5947 		return err;
5948 
5949 skip_type_check:
5950 	if (arg_type_is_release(arg_type)) {
5951 		if (arg_type_is_dynptr(arg_type)) {
5952 			struct bpf_func_state *state = func(env, reg);
5953 			int spi = get_spi(reg->off);
5954 
5955 			if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
5956 			    !state->stack[spi].spilled_ptr.id) {
5957 				verbose(env, "arg %d is an unacquired reference\n", regno);
5958 				return -EINVAL;
5959 			}
5960 		} else if (!reg->ref_obj_id && !register_is_null(reg)) {
5961 			verbose(env, "R%d must be referenced when passed to release function\n",
5962 				regno);
5963 			return -EINVAL;
5964 		}
5965 		if (meta->release_regno) {
5966 			verbose(env, "verifier internal error: more than one release argument\n");
5967 			return -EFAULT;
5968 		}
5969 		meta->release_regno = regno;
5970 	}
5971 
5972 	if (reg->ref_obj_id) {
5973 		if (meta->ref_obj_id) {
5974 			verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
5975 				regno, reg->ref_obj_id,
5976 				meta->ref_obj_id);
5977 			return -EFAULT;
5978 		}
5979 		meta->ref_obj_id = reg->ref_obj_id;
5980 	}
5981 
5982 	switch (base_type(arg_type)) {
5983 	case ARG_CONST_MAP_PTR:
5984 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
5985 		if (meta->map_ptr) {
5986 			/* Use map_uid (which is unique id of inner map) to reject:
5987 			 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
5988 			 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
5989 			 * if (inner_map1 && inner_map2) {
5990 			 *     timer = bpf_map_lookup_elem(inner_map1);
5991 			 *     if (timer)
5992 			 *         // mismatch would have been allowed
5993 			 *         bpf_timer_init(timer, inner_map2);
5994 			 * }
5995 			 *
5996 			 * Comparing map_ptr is enough to distinguish normal and outer maps.
5997 			 */
5998 			if (meta->map_ptr != reg->map_ptr ||
5999 			    meta->map_uid != reg->map_uid) {
6000 				verbose(env,
6001 					"timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
6002 					meta->map_uid, reg->map_uid);
6003 				return -EINVAL;
6004 			}
6005 		}
6006 		meta->map_ptr = reg->map_ptr;
6007 		meta->map_uid = reg->map_uid;
6008 		break;
6009 	case ARG_PTR_TO_MAP_KEY:
6010 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
6011 		 * check that [key, key + map->key_size) are within
6012 		 * stack limits and initialized
6013 		 */
6014 		if (!meta->map_ptr) {
6015 			/* in function declaration map_ptr must come before
6016 			 * map_key, so that it's verified and known before
6017 			 * we have to check map_key here. Otherwise it means
6018 			 * that the kernel subsystem misconfigured the verifier
6019 			 */
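			/* Sketch of the expected proto ordering, e.g. as in
			 * bpf_map_lookup_elem():
			 *	.arg1_type = ARG_CONST_MAP_PTR,
			 *	.arg2_type = ARG_PTR_TO_MAP_KEY,
			 */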
6020 			verbose(env, "invalid map_ptr to access map->key\n");
6021 			return -EACCES;
6022 		}
6023 		err = check_helper_mem_access(env, regno,
6024 					      meta->map_ptr->key_size, false,
6025 					      NULL);
6026 		break;
6027 	case ARG_PTR_TO_MAP_VALUE:
6028 		if (type_may_be_null(arg_type) && register_is_null(reg))
6029 			return 0;
6030 
6031 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
6032 		 * check [value, value + map->value_size) validity
6033 		 */
6034 		if (!meta->map_ptr) {
6035 			/* kernel subsystem misconfigured verifier */
6036 			verbose(env, "invalid map_ptr to access map->value\n");
6037 			return -EACCES;
6038 		}
6039 		meta->raw_mode = arg_type & MEM_UNINIT;
6040 		err = check_helper_mem_access(env, regno,
6041 					      meta->map_ptr->value_size, false,
6042 					      meta);
6043 		break;
6044 	case ARG_PTR_TO_PERCPU_BTF_ID:
6045 		if (!reg->btf_id) {
6046 			verbose(env, "Helper has invalid btf_id in R%d\n", regno);
6047 			return -EACCES;
6048 		}
6049 		meta->ret_btf = reg->btf;
6050 		meta->ret_btf_id = reg->btf_id;
6051 		break;
6052 	case ARG_PTR_TO_SPIN_LOCK:
6053 		if (meta->func_id == BPF_FUNC_spin_lock) {
6054 			if (process_spin_lock(env, regno, true))
6055 				return -EACCES;
6056 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
6057 			if (process_spin_lock(env, regno, false))
6058 				return -EACCES;
6059 		} else {
6060 			verbose(env, "verifier internal error\n");
6061 			return -EFAULT;
6062 		}
6063 		break;
6064 	case ARG_PTR_TO_TIMER:
6065 		if (process_timer_func(env, regno, meta))
6066 			return -EACCES;
6067 		break;
6068 	case ARG_PTR_TO_FUNC:
6069 		meta->subprogno = reg->subprogno;
6070 		break;
6071 	case ARG_PTR_TO_MEM:
6072 		/* The access to this pointer is only checked when we hit the
6073 		 * next is_mem_size argument below.
6074 		 */
6075 		meta->raw_mode = arg_type & MEM_UNINIT;
6076 		if (arg_type & MEM_FIXED_SIZE) {
6077 			err = check_helper_mem_access(env, regno,
6078 						      fn->arg_size[arg], false,
6079 						      meta);
6080 		}
6081 		break;
6082 	case ARG_CONST_SIZE:
6083 		err = check_mem_size_reg(env, reg, regno, false, meta);
6084 		break;
6085 	case ARG_CONST_SIZE_OR_ZERO:
6086 		err = check_mem_size_reg(env, reg, regno, true, meta);
6087 		break;
6088 	case ARG_PTR_TO_DYNPTR:
6089 		/* We only need to check for initialized / uninitialized helper
6090 		 * dynptr args if the dynptr is not PTR_TO_DYNPTR, as the
6091 		 * assumption is that if it is, a helper function
6092 		 * initialized the dynptr on behalf of the BPF program.
6093 		 */
6094 		if (base_type(reg->type) == PTR_TO_DYNPTR)
6095 			break;
6096 		if (arg_type & MEM_UNINIT) {
6097 			if (!is_dynptr_reg_valid_uninit(env, reg)) {
6098 				verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6099 				return -EINVAL;
6100 			}
6101 
6102 			/* We only support one dynptr being uninitialized at the moment,
6103 			 * which is sufficient for the helper functions we have right now.
6104 			 */
6105 			if (meta->uninit_dynptr_regno) {
6106 				verbose(env, "verifier internal error: multiple uninitialized dynptr args\n");
6107 				return -EFAULT;
6108 			}
6109 
6110 			meta->uninit_dynptr_regno = regno;
6111 		} else if (!is_dynptr_reg_valid_init(env, reg)) {
6112 			verbose(env,
6113 				"Expected an initialized dynptr as arg #%d\n",
6114 				arg + 1);
6115 			return -EINVAL;
6116 		} else if (!is_dynptr_type_expected(env, reg, arg_type)) {
6117 			const char *err_extra = "";
6118 
6119 			switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
6120 			case DYNPTR_TYPE_LOCAL:
6121 				err_extra = "local";
6122 				break;
6123 			case DYNPTR_TYPE_RINGBUF:
6124 				err_extra = "ringbuf";
6125 				break;
6126 			default:
6127 				err_extra = "<unknown>";
6128 				break;
6129 			}
6130 			verbose(env,
6131 				"Expected a dynptr of type %s as arg #%d\n",
6132 				err_extra, arg + 1);
6133 			return -EINVAL;
6134 		}
6135 		break;
6136 	case ARG_CONST_ALLOC_SIZE_OR_ZERO:
6137 		if (!tnum_is_const(reg->var_off)) {
6138 			verbose(env, "R%d is not a known constant\n",
6139 				regno);
6140 			return -EACCES;
6141 		}
6142 		meta->mem_size = reg->var_off.value;
6143 		err = mark_chain_precision(env, regno);
6144 		if (err)
6145 			return err;
6146 		break;
6147 	case ARG_PTR_TO_INT:
6148 	case ARG_PTR_TO_LONG:
6149 	{
6150 		int size = int_ptr_type_to_size(arg_type);
6151 
6152 		err = check_helper_mem_access(env, regno, size, false, meta);
6153 		if (err)
6154 			return err;
6155 		err = check_ptr_alignment(env, reg, 0, size, true);
6156 		break;
6157 	}
6158 	case ARG_PTR_TO_CONST_STR:
6159 	{
6160 		struct bpf_map *map = reg->map_ptr;
6161 		int map_off;
6162 		u64 map_addr;
6163 		char *str_ptr;
6164 
6165 		if (!bpf_map_is_rdonly(map)) {
6166 			verbose(env, "R%d does not point to a readonly map\n", regno);
6167 			return -EACCES;
6168 		}
6169 
6170 		if (!tnum_is_const(reg->var_off)) {
6171 			verbose(env, "R%d is not a constant address\n", regno);
6172 			return -EACCES;
6173 		}
6174 
6175 		if (!map->ops->map_direct_value_addr) {
6176 			verbose(env, "no direct value access support for this map type\n");
6177 			return -EACCES;
6178 		}
6179 
6180 		err = check_map_access(env, regno, reg->off,
6181 				       map->value_size - reg->off, false,
6182 				       ACCESS_HELPER);
6183 		if (err)
6184 			return err;
6185 
6186 		map_off = reg->off + reg->var_off.value;
6187 		err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
6188 		if (err) {
6189 			verbose(env, "direct value access on string failed\n");
6190 			return err;
6191 		}
6192 
6193 		str_ptr = (char *)(long)(map_addr);
6194 		if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
6195 			verbose(env, "string is not zero-terminated\n");
6196 			return -EINVAL;
6197 		}
6198 		break;
6199 	}
6200 	case ARG_PTR_TO_KPTR:
6201 		if (process_kptr_func(env, regno, meta))
6202 			return -EACCES;
6203 		break;
6204 	}
6205 
6206 	return err;
6207 }
6208 
6209 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
6210 {
6211 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
6212 	enum bpf_prog_type type = resolve_prog_type(env->prog);
6213 
6214 	if (func_id != BPF_FUNC_map_update_elem)
6215 		return false;
6216 
6217 	/* It's not possible to get access to a locked struct sock in these
6218 	 * contexts, so updating is safe.
6219 	 */
6220 	switch (type) {
6221 	case BPF_PROG_TYPE_TRACING:
6222 		if (eatype == BPF_TRACE_ITER)
6223 			return true;
6224 		break;
6225 	case BPF_PROG_TYPE_SOCKET_FILTER:
6226 	case BPF_PROG_TYPE_SCHED_CLS:
6227 	case BPF_PROG_TYPE_SCHED_ACT:
6228 	case BPF_PROG_TYPE_XDP:
6229 	case BPF_PROG_TYPE_SK_REUSEPORT:
6230 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
6231 	case BPF_PROG_TYPE_SK_LOOKUP:
6232 		return true;
6233 	default:
6234 		break;
6235 	}
6236 
6237 	verbose(env, "cannot update sockmap in this context\n");
6238 	return false;
6239 }
6240 
6241 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
6242 {
6243 	return env->prog->jit_requested &&
6244 	       bpf_jit_supports_subprog_tailcalls();
6245 }
6246 
6247 static int check_map_func_compatibility(struct bpf_verifier_env *env,
6248 					struct bpf_map *map, int func_id)
6249 {
6250 	if (!map)
6251 		return 0;
6252 
6253 	/* We need a two-way check, first from the map's perspective ... */
6254 	switch (map->map_type) {
6255 	case BPF_MAP_TYPE_PROG_ARRAY:
6256 		if (func_id != BPF_FUNC_tail_call)
6257 			goto error;
6258 		break;
6259 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
6260 		if (func_id != BPF_FUNC_perf_event_read &&
6261 		    func_id != BPF_FUNC_perf_event_output &&
6262 		    func_id != BPF_FUNC_skb_output &&
6263 		    func_id != BPF_FUNC_perf_event_read_value &&
6264 		    func_id != BPF_FUNC_xdp_output)
6265 			goto error;
6266 		break;
6267 	case BPF_MAP_TYPE_RINGBUF:
6268 		if (func_id != BPF_FUNC_ringbuf_output &&
6269 		    func_id != BPF_FUNC_ringbuf_reserve &&
6270 		    func_id != BPF_FUNC_ringbuf_query &&
6271 		    func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
6272 		    func_id != BPF_FUNC_ringbuf_submit_dynptr &&
6273 		    func_id != BPF_FUNC_ringbuf_discard_dynptr)
6274 			goto error;
6275 		break;
6276 	case BPF_MAP_TYPE_USER_RINGBUF:
6277 		if (func_id != BPF_FUNC_user_ringbuf_drain)
6278 			goto error;
6279 		break;
6280 	case BPF_MAP_TYPE_STACK_TRACE:
6281 		if (func_id != BPF_FUNC_get_stackid)
6282 			goto error;
6283 		break;
6284 	case BPF_MAP_TYPE_CGROUP_ARRAY:
6285 		if (func_id != BPF_FUNC_skb_under_cgroup &&
6286 		    func_id != BPF_FUNC_current_task_under_cgroup)
6287 			goto error;
6288 		break;
6289 	case BPF_MAP_TYPE_CGROUP_STORAGE:
6290 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
6291 		if (func_id != BPF_FUNC_get_local_storage)
6292 			goto error;
6293 		break;
6294 	case BPF_MAP_TYPE_DEVMAP:
6295 	case BPF_MAP_TYPE_DEVMAP_HASH:
6296 		if (func_id != BPF_FUNC_redirect_map &&
6297 		    func_id != BPF_FUNC_map_lookup_elem)
6298 			goto error;
6299 		break;
6300 	/* Restrict the bpf side of cpumap and xskmap; open them up when
6301 	 * use-cases appear.
6302 	 */
6303 	case BPF_MAP_TYPE_CPUMAP:
6304 		if (func_id != BPF_FUNC_redirect_map)
6305 			goto error;
6306 		break;
6307 	case BPF_MAP_TYPE_XSKMAP:
6308 		if (func_id != BPF_FUNC_redirect_map &&
6309 		    func_id != BPF_FUNC_map_lookup_elem)
6310 			goto error;
6311 		break;
6312 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
6313 	case BPF_MAP_TYPE_HASH_OF_MAPS:
6314 		if (func_id != BPF_FUNC_map_lookup_elem)
6315 			goto error;
6316 		break;
6317 	case BPF_MAP_TYPE_SOCKMAP:
6318 		if (func_id != BPF_FUNC_sk_redirect_map &&
6319 		    func_id != BPF_FUNC_sock_map_update &&
6320 		    func_id != BPF_FUNC_map_delete_elem &&
6321 		    func_id != BPF_FUNC_msg_redirect_map &&
6322 		    func_id != BPF_FUNC_sk_select_reuseport &&
6323 		    func_id != BPF_FUNC_map_lookup_elem &&
6324 		    !may_update_sockmap(env, func_id))
6325 			goto error;
6326 		break;
6327 	case BPF_MAP_TYPE_SOCKHASH:
6328 		if (func_id != BPF_FUNC_sk_redirect_hash &&
6329 		    func_id != BPF_FUNC_sock_hash_update &&
6330 		    func_id != BPF_FUNC_map_delete_elem &&
6331 		    func_id != BPF_FUNC_msg_redirect_hash &&
6332 		    func_id != BPF_FUNC_sk_select_reuseport &&
6333 		    func_id != BPF_FUNC_map_lookup_elem &&
6334 		    !may_update_sockmap(env, func_id))
6335 			goto error;
6336 		break;
6337 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
6338 		if (func_id != BPF_FUNC_sk_select_reuseport)
6339 			goto error;
6340 		break;
6341 	case BPF_MAP_TYPE_QUEUE:
6342 	case BPF_MAP_TYPE_STACK:
6343 		if (func_id != BPF_FUNC_map_peek_elem &&
6344 		    func_id != BPF_FUNC_map_pop_elem &&
6345 		    func_id != BPF_FUNC_map_push_elem)
6346 			goto error;
6347 		break;
6348 	case BPF_MAP_TYPE_SK_STORAGE:
6349 		if (func_id != BPF_FUNC_sk_storage_get &&
6350 		    func_id != BPF_FUNC_sk_storage_delete)
6351 			goto error;
6352 		break;
6353 	case BPF_MAP_TYPE_INODE_STORAGE:
6354 		if (func_id != BPF_FUNC_inode_storage_get &&
6355 		    func_id != BPF_FUNC_inode_storage_delete)
6356 			goto error;
6357 		break;
6358 	case BPF_MAP_TYPE_TASK_STORAGE:
6359 		if (func_id != BPF_FUNC_task_storage_get &&
6360 		    func_id != BPF_FUNC_task_storage_delete)
6361 			goto error;
6362 		break;
6363 	case BPF_MAP_TYPE_BLOOM_FILTER:
6364 		if (func_id != BPF_FUNC_map_peek_elem &&
6365 		    func_id != BPF_FUNC_map_push_elem)
6366 			goto error;
6367 		break;
6368 	default:
6369 		break;
6370 	}
6371 
6372 	/* ... and second from the function itself. */
6373 	switch (func_id) {
6374 	case BPF_FUNC_tail_call:
6375 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
6376 			goto error;
6377 		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
6378 			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
6379 			return -EINVAL;
6380 		}
6381 		break;
6382 	case BPF_FUNC_perf_event_read:
6383 	case BPF_FUNC_perf_event_output:
6384 	case BPF_FUNC_perf_event_read_value:
6385 	case BPF_FUNC_skb_output:
6386 	case BPF_FUNC_xdp_output:
6387 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
6388 			goto error;
6389 		break;
6390 	case BPF_FUNC_ringbuf_output:
6391 	case BPF_FUNC_ringbuf_reserve:
6392 	case BPF_FUNC_ringbuf_query:
6393 	case BPF_FUNC_ringbuf_reserve_dynptr:
6394 	case BPF_FUNC_ringbuf_submit_dynptr:
6395 	case BPF_FUNC_ringbuf_discard_dynptr:
6396 		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
6397 			goto error;
6398 		break;
6399 	case BPF_FUNC_user_ringbuf_drain:
6400 		if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
6401 			goto error;
6402 		break;
6403 	case BPF_FUNC_get_stackid:
6404 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
6405 			goto error;
6406 		break;
6407 	case BPF_FUNC_current_task_under_cgroup:
6408 	case BPF_FUNC_skb_under_cgroup:
6409 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
6410 			goto error;
6411 		break;
6412 	case BPF_FUNC_redirect_map:
6413 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6414 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
6415 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
6416 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
6417 			goto error;
6418 		break;
6419 	case BPF_FUNC_sk_redirect_map:
6420 	case BPF_FUNC_msg_redirect_map:
6421 	case BPF_FUNC_sock_map_update:
6422 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
6423 			goto error;
6424 		break;
6425 	case BPF_FUNC_sk_redirect_hash:
6426 	case BPF_FUNC_msg_redirect_hash:
6427 	case BPF_FUNC_sock_hash_update:
6428 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
6429 			goto error;
6430 		break;
6431 	case BPF_FUNC_get_local_storage:
6432 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
6433 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
6434 			goto error;
6435 		break;
6436 	case BPF_FUNC_sk_select_reuseport:
6437 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
6438 		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
6439 		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
6440 			goto error;
6441 		break;
6442 	case BPF_FUNC_map_pop_elem:
6443 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6444 		    map->map_type != BPF_MAP_TYPE_STACK)
6445 			goto error;
6446 		break;
6447 	case BPF_FUNC_map_peek_elem:
6448 	case BPF_FUNC_map_push_elem:
6449 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6450 		    map->map_type != BPF_MAP_TYPE_STACK &&
6451 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
6452 			goto error;
6453 		break;
6454 	case BPF_FUNC_map_lookup_percpu_elem:
6455 		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
6456 		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6457 		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
6458 			goto error;
6459 		break;
6460 	case BPF_FUNC_sk_storage_get:
6461 	case BPF_FUNC_sk_storage_delete:
6462 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
6463 			goto error;
6464 		break;
6465 	case BPF_FUNC_inode_storage_get:
6466 	case BPF_FUNC_inode_storage_delete:
6467 		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
6468 			goto error;
6469 		break;
6470 	case BPF_FUNC_task_storage_get:
6471 	case BPF_FUNC_task_storage_delete:
6472 		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
6473 			goto error;
6474 		break;
6475 	default:
6476 		break;
6477 	}
6478 
6479 	return 0;
6480 error:
6481 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
6482 		map->map_type, func_id_name(func_id), func_id);
6483 	return -EINVAL;
6484 }
6485 
6486 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
6487 {
6488 	int count = 0;
6489 
6490 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
6491 		count++;
6492 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
6493 		count++;
6494 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
6495 		count++;
6496 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
6497 		count++;
6498 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
6499 		count++;
6500 
6501 	/* We only support one arg being in raw mode at the moment,
6502 	 * which is sufficient for the helper functions we have
6503 	 * right now.
6504 	 */
6505 	return count <= 1;
6506 }
6507 
6508 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
6509 {
6510 	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
6511 	bool has_size = fn->arg_size[arg] != 0;
6512 	bool is_next_size = false;
6513 
6514 	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
6515 		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
6516 
6517 	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
6518 		return is_next_size;
6519 
6520 	return has_size == is_next_size || is_next_size == is_fixed;
6521 }
6522 
6523 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
6524 {
6525 	/* bpf_xxx(..., buf, len) call will access 'len'
6526 	 * bytes from memory 'buf'. Both arg types need
6527 	 * to be paired, so make sure there's no buggy
6528 	 * helper function specification.
6529 	 */
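	/* Sketch of a correctly paired declaration (field values illustrative):
	 *
	 *	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	 *	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	 *
	 * i.e. a mem arg immediately followed by its size, or alternatively a
	 * MEM_FIXED_SIZE mem arg that carries its own arg_size.
	 */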
6530 	if (arg_type_is_mem_size(fn->arg1_type) ||
6531 	    check_args_pair_invalid(fn, 0) ||
6532 	    check_args_pair_invalid(fn, 1) ||
6533 	    check_args_pair_invalid(fn, 2) ||
6534 	    check_args_pair_invalid(fn, 3) ||
6535 	    check_args_pair_invalid(fn, 4))
6536 		return false;
6537 
6538 	return true;
6539 }
6540 
6541 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
6542 {
6543 	int i;
6544 
6545 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
6546 		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
6547 			return false;
6548 
6549 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
6550 		    /* arg_btf_id and arg_size are in a union. */
6551 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
6552 		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
6553 			return false;
6554 	}
6555 
6556 	return true;
6557 }
6558 
6559 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
6560 {
6561 	return check_raw_mode_ok(fn) &&
6562 	       check_arg_pair_ok(fn) &&
6563 	       check_btf_id_ok(fn) ? 0 : -EINVAL;
6564 }
6565 
6566 /* Packet data might have moved, so any old PTR_TO_PACKET[_META,_END]
6567  * pointers are now invalid; turn them into unknown SCALAR_VALUE.
6568  */
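/* For example (sketch): after calling a helper that may move packet data,
 * such as bpf_skb_pull_data(), the program must re-read ctx->data and
 * ctx->data_end and redo the bounds comparison before dereferencing any
 * packet pointer again.
 */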
6569 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
6570 {
6571 	struct bpf_func_state *state;
6572 	struct bpf_reg_state *reg;
6573 
6574 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
6575 		if (reg_is_pkt_pointer_any(reg))
6576 			__mark_reg_unknown(env, reg);
6577 	}));
6578 }
6579 
6580 enum {
6581 	AT_PKT_END = -1,
6582 	BEYOND_PKT_END = -2,
6583 };
6584 
6585 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
6586 {
6587 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
6588 	struct bpf_reg_state *reg = &state->regs[regn];
6589 
6590 	if (reg->type != PTR_TO_PACKET)
6591 		/* PTR_TO_PACKET_META is not supported yet */
6592 		return;
6593 
6594 	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
6595 	 * How far beyond pkt_end it goes is unknown.
6596 	 * if (!range_open) it's the case of pkt >= pkt_end
6597 	 * if (range_open) it's the case of pkt > pkt_end
6598 	 * hence this pointer is at least 1 byte bigger than pkt_end
6599 	 */
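	/* Sketch of the BPF-program pattern that leads here:
	 *
	 *	if (data + 8 > data_end)
	 *		return XDP_DROP;
	 *
	 * On the branch where the comparison is true, the 'data + 8' register
	 * is the one marked below as AT_PKT_END or BEYOND_PKT_END.
	 */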
6600 	if (range_open)
6601 		reg->range = BEYOND_PKT_END;
6602 	else
6603 		reg->range = AT_PKT_END;
6604 }
6605 
6606 /* The pointer with the specified id has released its reference to kernel
6607  * resources. Identify all copies of the same pointer and clear the reference.
6608  */
6609 static int release_reference(struct bpf_verifier_env *env,
6610 			     int ref_obj_id)
6611 {
6612 	struct bpf_func_state *state;
6613 	struct bpf_reg_state *reg;
6614 	int err;
6615 
6616 	err = release_reference_state(cur_func(env), ref_obj_id);
6617 	if (err)
6618 		return err;
6619 
6620 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
6621 		if (reg->ref_obj_id == ref_obj_id)
6622 			__mark_reg_unknown(env, reg);
6623 	}));
6624 
6625 	return 0;
6626 }
6627 
6628 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
6629 				    struct bpf_reg_state *regs)
6630 {
6631 	int i;
6632 
6633 	/* after the call registers r0 - r5 were scratched */
6634 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6635 		mark_reg_not_init(env, regs, caller_saved[i]);
6636 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6637 	}
6638 }
6639 
6640 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
6641 				   struct bpf_func_state *caller,
6642 				   struct bpf_func_state *callee,
6643 				   int insn_idx);
6644 
6645 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6646 			     int *insn_idx, int subprog,
6647 			     set_callee_state_fn set_callee_state_cb)
6648 {
6649 	struct bpf_verifier_state *state = env->cur_state;
6650 	struct bpf_func_info_aux *func_info_aux;
6651 	struct bpf_func_state *caller, *callee;
6652 	int err;
6653 	bool is_global = false;
6654 
6655 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
6656 		verbose(env, "the call stack of %d frames is too deep\n",
6657 			state->curframe + 2);
6658 		return -E2BIG;
6659 	}
6660 
6661 	caller = state->frame[state->curframe];
6662 	if (state->frame[state->curframe + 1]) {
6663 		verbose(env, "verifier bug. Frame %d already allocated\n",
6664 			state->curframe + 1);
6665 		return -EFAULT;
6666 	}
6667 
6668 	func_info_aux = env->prog->aux->func_info_aux;
6669 	if (func_info_aux)
6670 		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
6671 	err = btf_check_subprog_call(env, subprog, caller->regs);
6672 	if (err == -EFAULT)
6673 		return err;
6674 	if (is_global) {
6675 		if (err) {
6676 			verbose(env, "Caller passes invalid args into func#%d\n",
6677 				subprog);
6678 			return err;
6679 		} else {
6680 			if (env->log.level & BPF_LOG_LEVEL)
6681 				verbose(env,
6682 					"Func#%d is global and valid. Skipping.\n",
6683 					subprog);
6684 			clear_caller_saved_regs(env, caller->regs);
6685 
6686 			/* All global functions return a 64-bit SCALAR_VALUE */
6687 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
6688 			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6689 
6690 			/* continue with next insn after call */
6691 			return 0;
6692 		}
6693 	}
6694 
6695 	if (insn->code == (BPF_JMP | BPF_CALL) &&
6696 	    insn->src_reg == 0 &&
6697 	    insn->imm == BPF_FUNC_timer_set_callback) {
6698 		struct bpf_verifier_state *async_cb;
6699 
6700 		/* there is no real recursion here. timer callbacks are async */
6701 		env->subprog_info[subprog].is_async_cb = true;
6702 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
6703 					 *insn_idx, subprog);
6704 		if (!async_cb)
6705 			return -EFAULT;
6706 		callee = async_cb->frame[0];
6707 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
6708 
6709 		/* Convert bpf_timer_set_callback() args into timer callback args */
6710 		err = set_callee_state_cb(env, caller, callee, *insn_idx);
6711 		if (err)
6712 			return err;
6713 
6714 		clear_caller_saved_regs(env, caller->regs);
6715 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
6716 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6717 		/* continue with next insn after call */
6718 		return 0;
6719 	}
6720 
6721 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
6722 	if (!callee)
6723 		return -ENOMEM;
6724 	state->frame[state->curframe + 1] = callee;
6725 
6726 	/* callee cannot access r0, r6 - r9 for reading and has to write
6727 	 * into its own stack before reading from it.
6728 	 * callee can read/write into caller's stack
6729 	 */
6730 	init_func_state(env, callee,
6731 			/* remember the callsite, it will be used by bpf_exit */
6732 			*insn_idx /* callsite */,
6733 			state->curframe + 1 /* frameno within this callchain */,
6734 			subprog /* subprog number within this prog */);
6735 
6736 	/* Transfer references to the callee */
6737 	err = copy_reference_state(callee, caller);
6738 	if (err)
6739 		return err;
6740 
6741 	err = set_callee_state_cb(env, caller, callee, *insn_idx);
6742 	if (err)
6743 		return err;
6744 
6745 	clear_caller_saved_regs(env, caller->regs);
6746 
6747 	/* only increment it after check_reg_arg() finished */
6748 	state->curframe++;
6749 
6750 	/* and go analyze first insn of the callee */
6751 	*insn_idx = env->subprog_info[subprog].start - 1;
6752 
6753 	if (env->log.level & BPF_LOG_LEVEL) {
6754 		verbose(env, "caller:\n");
6755 		print_verifier_state(env, caller, true);
6756 		verbose(env, "callee:\n");
6757 		print_verifier_state(env, callee, true);
6758 	}
6759 	return 0;
6760 }
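
/* A rough sketch of the two call flavours handled above; the subprogram
 * names and bodies are illustrative only:
 *
 *	static __noinline int sub(int x)	static subprog: the verifier descends
 *	{					into the body with the caller's argument
 *		return x + 1;			state, pushing a new frame per call site.
 *	}
 *
 *	int glob(int x)				non-static subprog (BTF_FUNC_GLOBAL): the
 *	{					body is verified once against its BTF
 *		return x + 1;			signature; at the call site r1-r5 are
 *	}					scratched and r0 is an unknown 64-bit scalar.
 */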
6761 
6762 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
6763 				   struct bpf_func_state *caller,
6764 				   struct bpf_func_state *callee)
6765 {
6766 	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
6767 	 *      void *callback_ctx, u64 flags);
6768 	 * callback_fn(struct bpf_map *map, void *key, void *value,
6769 	 *      void *callback_ctx);
6770 	 */
6771 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6772 
6773 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6774 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6775 	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6776 
6777 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6778 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6779 	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6780 
6781 	/* pointer to stack or null */
6782 	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
6783 
6784 	/* unused */
6785 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6786 	return 0;
6787 }
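
/* An illustrative BPF-side sketch of the calling convention set up above;
 * the map 'my_hash' and callback 'count_elem' are hypothetical:
 *
 *	static long count_elem(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		(*(long *)ctx)++;
 *		return 0;	0 = continue iterating, 1 = stop (range enforced above)
 *	}
 *
 *	long n = 0;
 *	bpf_for_each_map_elem(&my_hash, count_elem, &n, 0);
 */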
6788 
6789 static int set_callee_state(struct bpf_verifier_env *env,
6790 			    struct bpf_func_state *caller,
6791 			    struct bpf_func_state *callee, int insn_idx)
6792 {
6793 	int i;
6794 
6795 	/* copy r1 - r5 args that callee can access.  The copy includes parent
6796 	 * pointers, which connect us to the liveness chain
6797 	 */
6798 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
6799 		callee->regs[i] = caller->regs[i];
6800 	return 0;
6801 }
6802 
6803 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6804 			   int *insn_idx)
6805 {
6806 	int subprog, target_insn;
6807 
6808 	target_insn = *insn_idx + insn->imm + 1;
6809 	subprog = find_subprog(env, target_insn);
6810 	if (subprog < 0) {
6811 		verbose(env, "verifier bug. No program starts at insn %d\n",
6812 			target_insn);
6813 		return -EFAULT;
6814 	}
6815 
6816 	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
6817 }
6818 
6819 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
6820 				       struct bpf_func_state *caller,
6821 				       struct bpf_func_state *callee,
6822 				       int insn_idx)
6823 {
6824 	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
6825 	struct bpf_map *map;
6826 	int err;
6827 
6828 	if (bpf_map_ptr_poisoned(insn_aux)) {
6829 		verbose(env, "tail_call abusing map_ptr\n");
6830 		return -EINVAL;
6831 	}
6832 
6833 	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
6834 	if (!map->ops->map_set_for_each_callback_args ||
6835 	    !map->ops->map_for_each_callback) {
6836 		verbose(env, "callback function not allowed for map\n");
6837 		return -ENOTSUPP;
6838 	}
6839 
6840 	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
6841 	if (err)
6842 		return err;
6843 
6844 	callee->in_callback_fn = true;
6845 	callee->callback_ret_range = tnum_range(0, 1);
6846 	return 0;
6847 }
6848 
6849 static int set_loop_callback_state(struct bpf_verifier_env *env,
6850 				   struct bpf_func_state *caller,
6851 				   struct bpf_func_state *callee,
6852 				   int insn_idx)
6853 {
6854 	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
6855 	 *	    u64 flags);
6856 	 * callback_fn(u32 index, void *callback_ctx);
6857 	 */
6858 	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
6859 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
6860 
6861 	/* unused */
6862 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
6863 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6864 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6865 
6866 	callee->in_callback_fn = true;
6867 	callee->callback_ret_range = tnum_range(0, 1);
6868 	return 0;
6869 }
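
/* An illustrative BPF-side sketch matching the register setup above;
 * 'do_step' is a hypothetical callback:
 *
 *	static long do_step(__u32 index, void *ctx)
 *	{
 *		*(long *)ctx += index;
 *		return 0;	return 1 to break out of the loop early
 *	}
 *
 *	long sum = 0;
 *	bpf_loop(100, do_step, &sum, 0);	in the callback, r1 is the scalar
 *						index and r2 is the callback_ctx
 *						copied from the caller's r3
 */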
6870 
6871 static int set_timer_callback_state(struct bpf_verifier_env *env,
6872 				    struct bpf_func_state *caller,
6873 				    struct bpf_func_state *callee,
6874 				    int insn_idx)
6875 {
6876 	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
6877 
6878 	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
6879 	 * callback_fn(struct bpf_map *map, void *key, void *value);
6880 	 */
6881 	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
6882 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
6883 	callee->regs[BPF_REG_1].map_ptr = map_ptr;
6884 
6885 	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6886 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6887 	callee->regs[BPF_REG_2].map_ptr = map_ptr;
6888 
6889 	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6890 	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6891 	callee->regs[BPF_REG_3].map_ptr = map_ptr;
6892 
6893 	/* unused */
6894 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6895 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6896 	callee->in_async_callback_fn = true;
6897 	callee->callback_ret_range = tnum_range(0, 1);
6898 	return 0;
6899 }
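
/* An illustrative sketch of the callback shape expected above; the element
 * type and all names are hypothetical:
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	runs asynchronously with r1-r3 set up as above
 *	}
 *	...
 *	bpf_timer_init(&val->t, &timer_map, 0);
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, 0, 0);
 */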
6900 
6901 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
6902 				       struct bpf_func_state *caller,
6903 				       struct bpf_func_state *callee,
6904 				       int insn_idx)
6905 {
6906 	/* bpf_find_vma(struct task_struct *task, u64 addr,
6907 	 *               void *callback_fn, void *callback_ctx, u64 flags)
6908 	 * (callback_fn)(struct task_struct *task,
6909 	 *               struct vm_area_struct *vma, void *callback_ctx);
6910 	 */
6911 	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6912 
6913 	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
6914 	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6915 	callee->regs[BPF_REG_2].btf =  btf_vmlinux;
6916 	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA],
6917 
6918 	/* pointer to stack or null */
6919 	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
6920 
6921 	/* unused */
6922 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6923 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6924 	callee->in_callback_fn = true;
6925 	callee->callback_ret_range = tnum_range(0, 1);
6926 	return 0;
6927 }
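
/* An illustrative BPF-side sketch matching the register setup above;
 * 'vma_cb' and 'addr' are hypothetical:
 *
 *	static long vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *ctx)
 *	{
 *		*(unsigned long *)ctx = vma->vm_start;	r2 is a PTR_TO_BTF_ID vma
 *		return 0;
 *	}
 *
 *	unsigned long start = 0;
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	bpf_find_vma(task, addr, vma_cb, &start, 0);
 */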
6928 
6929 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
6930 					   struct bpf_func_state *caller,
6931 					   struct bpf_func_state *callee,
6932 					   int insn_idx)
6933 {
6934 	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn,
6935 	 *			  void *callback_ctx, u64 flags);
6936 	 * callback_fn(struct bpf_dynptr *dynptr, void *callback_ctx);
6937 	 */
6938 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
6939 	callee->regs[BPF_REG_1].type = PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL;
6940 	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
6941 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
6942 
6943 	/* unused */
6944 	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
6945 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6946 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6947 
6948 	callee->in_callback_fn = true;
6949 	callee->callback_ret_range = tnum_range(0, 1);
6950 	return 0;
6951 }
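
/* An illustrative sketch of a matching drain call; 'user_rb' and
 * 'handle_sample' are hypothetical:
 *
 *	static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		__u32 val;
 *		bpf_dynptr_read(&val, sizeof(val), dynptr, 0, 0);
 *		return 0;	0 = keep draining, 1 = stop
 *	}
 *
 *	bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);
 */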
6952 
6953 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
6954 {
6955 	struct bpf_verifier_state *state = env->cur_state;
6956 	struct bpf_func_state *caller, *callee;
6957 	struct bpf_reg_state *r0;
6958 	int err;
6959 
6960 	callee = state->frame[state->curframe];
6961 	r0 = &callee->regs[BPF_REG_0];
6962 	if (r0->type == PTR_TO_STACK) {
6963 		/* technically it's ok to return the caller's stack pointer
6964 		 * (or the caller's caller's pointer) back to the caller,
6965 		 * since these pointers are valid. Only the current frame's
6966 		 * stack pointer becomes invalid as soon as the function exits,
6967 		 * but let's be conservative
6968 		 */
6969 		verbose(env, "cannot return stack pointer to the caller\n");
6970 		return -EINVAL;
6971 	}
6972 
6973 	state->curframe--;
6974 	caller = state->frame[state->curframe];
6975 	if (callee->in_callback_fn) {
6976 		/* enforce R0 return value range [0, 1]. */
6977 		struct tnum range = callee->callback_ret_range;
6978 
6979 		if (r0->type != SCALAR_VALUE) {
6980 			verbose(env, "R0 not a scalar value\n");
6981 			return -EACCES;
6982 		}
6983 		if (!tnum_in(range, r0->var_off)) {
6984 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
6985 			return -EINVAL;
6986 		}
6987 	} else {
6988 		/* return to the caller whatever r0 had in the callee */
6989 		caller->regs[BPF_REG_0] = *r0;
6990 	}
6991 
6992 	/* callback_fn frame should have released its own additions to parent's
6993 	 * reference state at this point, or check_reference_leak would
6994 	 * complain, hence it must be the same as the caller's. There is no need
6995 	 * to copy it back.
6996 	 */
6997 	if (!callee->in_callback_fn) {
6998 		/* Transfer references to the caller */
6999 		err = copy_reference_state(caller, callee);
7000 		if (err)
7001 			return err;
7002 	}
7003 
7004 	*insn_idx = callee->callsite + 1;
7005 	if (env->log.level & BPF_LOG_LEVEL) {
7006 		verbose(env, "returning from callee:\n");
7007 		print_verifier_state(env, callee, true);
7008 		verbose(env, "to caller at %d:\n", *insn_idx);
7009 		print_verifier_state(env, caller, true);
7010 	}
7011 	/* clear everything in the callee */
7012 	free_func_state(callee);
7013 	state->frame[state->curframe + 1] = NULL;
7014 	return 0;
7015 }
7016 
7017 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
7018 				   int func_id,
7019 				   struct bpf_call_arg_meta *meta)
7020 {
7021 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
7022 
7023 	if (ret_type != RET_INTEGER ||
7024 	    (func_id != BPF_FUNC_get_stack &&
7025 	     func_id != BPF_FUNC_get_task_stack &&
7026 	     func_id != BPF_FUNC_probe_read_str &&
7027 	     func_id != BPF_FUNC_probe_read_kernel_str &&
7028 	     func_id != BPF_FUNC_probe_read_user_str))
7029 		return;
7030 
7031 	ret_reg->smax_value = meta->msize_max_value;
7032 	ret_reg->s32_max_value = meta->msize_max_value;
7033 	ret_reg->smin_value = -MAX_ERRNO;
7034 	ret_reg->s32_min_value = -MAX_ERRNO;
7035 	reg_bounds_sync(ret_reg);
7036 }
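
/* An illustrative effect of the refinement above (the buffer size and the
 * 'events' map are assumptions):
 *
 *	char buf[64];
 *	long n = bpf_probe_read_kernel_str(buf, sizeof(buf), src);
 *	after the call the verifier knows -MAX_ERRNO <= n <= 64, so
 *	if (n > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, buf, n);
 *	can be proven to stay within buf without a further upper-bound check.
 */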
7037 
7038 static int
7039 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7040 		int func_id, int insn_idx)
7041 {
7042 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7043 	struct bpf_map *map = meta->map_ptr;
7044 
7045 	if (func_id != BPF_FUNC_tail_call &&
7046 	    func_id != BPF_FUNC_map_lookup_elem &&
7047 	    func_id != BPF_FUNC_map_update_elem &&
7048 	    func_id != BPF_FUNC_map_delete_elem &&
7049 	    func_id != BPF_FUNC_map_push_elem &&
7050 	    func_id != BPF_FUNC_map_pop_elem &&
7051 	    func_id != BPF_FUNC_map_peek_elem &&
7052 	    func_id != BPF_FUNC_for_each_map_elem &&
7053 	    func_id != BPF_FUNC_redirect_map &&
7054 	    func_id != BPF_FUNC_map_lookup_percpu_elem)
7055 		return 0;
7056 
7057 	if (map == NULL) {
7058 		verbose(env, "kernel subsystem misconfigured verifier\n");
7059 		return -EINVAL;
7060 	}
7061 
7062 	/* In the case of a read-only map, some additional restrictions
7063 	 * need to be applied in order to prevent the program from
7064 	 * altering the state of the map.
7065 	 */
7066 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
7067 	    (func_id == BPF_FUNC_map_delete_elem ||
7068 	     func_id == BPF_FUNC_map_update_elem ||
7069 	     func_id == BPF_FUNC_map_push_elem ||
7070 	     func_id == BPF_FUNC_map_pop_elem)) {
7071 		verbose(env, "write into map forbidden\n");
7072 		return -EACCES;
7073 	}
7074 
7075 	if (!BPF_MAP_PTR(aux->map_ptr_state))
7076 		bpf_map_ptr_store(aux, meta->map_ptr,
7077 				  !meta->map_ptr->bypass_spec_v1);
7078 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
7079 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
7080 				  !meta->map_ptr->bypass_spec_v1);
7081 	return 0;
7082 }
7083 
7084 static int
7085 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
7086 		int func_id, int insn_idx)
7087 {
7088 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7089 	struct bpf_reg_state *regs = cur_regs(env), *reg;
7090 	struct bpf_map *map = meta->map_ptr;
7091 	u64 val, max;
7092 	int err;
7093 
7094 	if (func_id != BPF_FUNC_tail_call)
7095 		return 0;
7096 	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
7097 		verbose(env, "kernel subsystem misconfigured verifier\n");
7098 		return -EINVAL;
7099 	}
7100 
7101 	reg = &regs[BPF_REG_3];
7102 	val = reg->var_off.value;
7103 	max = map->max_entries;
7104 
7105 	if (!(register_is_const(reg) && val < max)) {
7106 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7107 		return 0;
7108 	}
7109 
7110 	err = mark_chain_precision(env, BPF_REG_3);
7111 	if (err)
7112 		return err;
7113 	if (bpf_map_key_unseen(aux))
7114 		bpf_map_key_store(aux, val);
7115 	else if (!bpf_map_key_poisoned(aux) &&
7116 		  bpf_map_key_immediate(aux) != val)
7117 		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7118 	return 0;
7119 }
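
/* An illustrative sketch of what gets recorded above; 'jmp_table' (a
 * BPF_MAP_TYPE_PROG_ARRAY) and 'get_index' are hypothetical:
 *
 *	bpf_tail_call(skb, &jmp_table, 3);	constant, in-range key: recorded so
 *						later fixup passes may emit a direct jump
 *
 *	idx = get_index();
 *	bpf_tail_call(skb, &jmp_table, idx);	non-constant key: BPF_MAP_KEY_POISON,
 *						keeps the generic bounds-checked path
 */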
7120 
7121 static int check_reference_leak(struct bpf_verifier_env *env)
7122 {
7123 	struct bpf_func_state *state = cur_func(env);
7124 	bool refs_lingering = false;
7125 	int i;
7126 
7127 	if (state->frameno && !state->in_callback_fn)
7128 		return 0;
7129 
7130 	for (i = 0; i < state->acquired_refs; i++) {
7131 		if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
7132 			continue;
7133 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
7134 			state->refs[i].id, state->refs[i].insn_idx);
7135 		refs_lingering = true;
7136 	}
7137 	return refs_lingering ? -EINVAL : 0;
7138 }
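
/* An illustrative leak this check catches (tuple setup elided, names
 * hypothetical):
 *
 *	struct bpf_sock *sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), -1, 0);
 *	bpf_tail_call(skb, &jmp_table, 0);	rejected: if the tail call succeeds,
 *						the reference held in 'sk' would
 *						never be released
 */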
7139 
7140 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
7141 				   struct bpf_reg_state *regs)
7142 {
7143 	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
7144 	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
7145 	struct bpf_map *fmt_map = fmt_reg->map_ptr;
7146 	int err, fmt_map_off, num_args;
7147 	u64 fmt_addr;
7148 	char *fmt;
7149 
7150 	/* data must be an array of u64 */
7151 	if (data_len_reg->var_off.value % 8)
7152 		return -EINVAL;
7153 	num_args = data_len_reg->var_off.value / 8;
7154 
7155 	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
7156 	 * and map_direct_value_addr is set.
7157 	 */
7158 	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
7159 	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
7160 						  fmt_map_off);
7161 	if (err) {
7162 		verbose(env, "verifier bug\n");
7163 		return -EFAULT;
7164 	}
7165 	fmt = (char *)(long)fmt_addr + fmt_map_off;
7166 
7167 	/* We are also guaranteed that fmt+fmt_map_off is NUL-terminated, so we
7168 	 * can focus on validating the format specifiers.
7169 	 */
7170 	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
7171 	if (err < 0)
7172 		verbose(env, "Invalid format string\n");
7173 
7174 	return err;
7175 }
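
/* An illustrative BPF-side call satisfying the checks above; the buffer size
 * is an assumption, and the format string must be a constant so that it lives
 * in a read-only map:
 *
 *	static const char fmt[] = "pid=%d comm=%s";
 *	__u64 args[] = { pid, (__u64)(long)comm };
 *	char buf[64];
 *
 *	bpf_snprintf(buf, sizeof(buf), fmt, args, sizeof(args));
 *		sizeof(args) == 16, a multiple of 8, so num_args == 2,
 *		which must match the number of format specifiers
 */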
7176 
7177 static int check_get_func_ip(struct bpf_verifier_env *env)
7178 {
7179 	enum bpf_prog_type type = resolve_prog_type(env->prog);
7180 	int func_id = BPF_FUNC_get_func_ip;
7181 
7182 	if (type == BPF_PROG_TYPE_TRACING) {
7183 		if (!bpf_prog_has_trampoline(env->prog)) {
7184 			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
7185 				func_id_name(func_id), func_id);
7186 			return -ENOTSUPP;
7187 		}
7188 		return 0;
7189 	} else if (type == BPF_PROG_TYPE_KPROBE) {
7190 		return 0;
7191 	}
7192 
7193 	verbose(env, "func %s#%d not supported for program type %d\n",
7194 		func_id_name(func_id), func_id, type);
7195 	return -ENOTSUPP;
7196 }
7197 
7198 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
7199 {
7200 	return &env->insn_aux_data[env->insn_idx];
7201 }
7202 
7203 static bool loop_flag_is_zero(struct bpf_verifier_env *env)
7204 {
7205 	struct bpf_reg_state *regs = cur_regs(env);
7206 	struct bpf_reg_state *reg = &regs[BPF_REG_4];
7207 	bool reg_is_null = register_is_null(reg);
7208 
7209 	if (reg_is_null)
7210 		mark_chain_precision(env, BPF_REG_4);
7211 
7212 	return reg_is_null;
7213 }
7214 
7215 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
7216 {
7217 	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
7218 
7219 	if (!state->initialized) {
7220 		state->initialized = 1;
7221 		state->fit_for_inline = loop_flag_is_zero(env);
7222 		state->callback_subprogno = subprogno;
7223 		return;
7224 	}
7225 
7226 	if (!state->fit_for_inline)
7227 		return;
7228 
7229 	state->fit_for_inline = (loop_flag_is_zero(env) &&
7230 				 state->callback_subprogno == subprogno);
7231 }
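
/* Illustrative conditions for the bpf_loop() inlining decision tracked above;
 * 'do_step' and 'flags' are hypothetical:
 *
 *	bpf_loop(16, do_step, &sum, 0);		flags is a known zero and the same
 *						callback subprog is seen on every
 *						path: fit_for_inline stays true
 *	bpf_loop(16, do_step, &sum, flags);	flags not provably zero:
 *						not fit for inlining
 */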
7232 
7233 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7234 			     int *insn_idx_p)
7235 {
7236 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
7237 	const struct bpf_func_proto *fn = NULL;
7238 	enum bpf_return_type ret_type;
7239 	enum bpf_type_flag ret_flag;
7240 	struct bpf_reg_state *regs;
7241 	struct bpf_call_arg_meta meta;
7242 	int insn_idx = *insn_idx_p;
7243 	bool changes_data;
7244 	int i, err, func_id;
7245 
7246 	/* find function prototype */
7247 	func_id = insn->imm;
7248 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
7249 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
7250 			func_id);
7251 		return -EINVAL;
7252 	}
7253 
7254 	if (env->ops->get_func_proto)
7255 		fn = env->ops->get_func_proto(func_id, env->prog);
7256 	if (!fn) {
7257 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
7258 			func_id);
7259 		return -EINVAL;
7260 	}
7261 
7262 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
7263 	if (!env->prog->gpl_compatible && fn->gpl_only) {
7264 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
7265 		return -EINVAL;
7266 	}
7267 
7268 	if (fn->allowed && !fn->allowed(env->prog)) {
7269 		verbose(env, "helper call is not allowed in probe\n");
7270 		return -EINVAL;
7271 	}
7272 
7273 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
7274 	changes_data = bpf_helper_changes_pkt_data(fn->func);
7275 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
7276 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
7277 			func_id_name(func_id), func_id);
7278 		return -EINVAL;
7279 	}
7280 
7281 	memset(&meta, 0, sizeof(meta));
7282 	meta.pkt_access = fn->pkt_access;
7283 
7284 	err = check_func_proto(fn, func_id);
7285 	if (err) {
7286 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
7287 			func_id_name(func_id), func_id);
7288 		return err;
7289 	}
7290 
7291 	meta.func_id = func_id;
7292 	/* check args */
7293 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7294 		err = check_func_arg(env, i, &meta, fn);
7295 		if (err)
7296 			return err;
7297 	}
7298 
7299 	err = record_func_map(env, &meta, func_id, insn_idx);
7300 	if (err)
7301 		return err;
7302 
7303 	err = record_func_key(env, &meta, func_id, insn_idx);
7304 	if (err)
7305 		return err;
7306 
7307 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
7308 	 * is inferred from register state.
7309 	 */
7310 	for (i = 0; i < meta.access_size; i++) {
7311 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
7312 				       BPF_WRITE, -1, false);
7313 		if (err)
7314 			return err;
7315 	}
7316 
7317 	regs = cur_regs(env);
7318 
7319 	if (meta.uninit_dynptr_regno) {
7320 		/* we write 8 bytes (BPF_DW) at a time */
7321 		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7322 			err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
7323 					       i, BPF_DW, BPF_WRITE, -1, false);
7324 			if (err)
7325 				return err;
7326 		}
7327 
7328 		err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
7329 					      fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
7330 					      insn_idx);
7331 		if (err)
7332 			return err;
7333 	}
7334 
7335 	if (meta.release_regno) {
7336 		err = -EINVAL;
7337 		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
7338 			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
7339 		else if (meta.ref_obj_id)
7340 			err = release_reference(env, meta.ref_obj_id);
7341 		/* meta.ref_obj_id can only be 0 if the register that is meant to be
7342 		 * released is NULL, which must be > R0.
7343 		 */
7344 		else if (register_is_null(&regs[meta.release_regno]))
7345 			err = 0;
7346 		if (err) {
7347 			verbose(env, "func %s#%d reference has not been acquired before\n",
7348 				func_id_name(func_id), func_id);
7349 			return err;
7350 		}
7351 	}
7352 
7353 	switch (func_id) {
7354 	case BPF_FUNC_tail_call:
7355 		err = check_reference_leak(env);
7356 		if (err) {
7357 			verbose(env, "tail_call would lead to reference leak\n");
7358 			return err;
7359 		}
7360 		break;
7361 	case BPF_FUNC_get_local_storage:
7362 		/* check that the flags argument in get_local_storage(map, flags) is 0;
7363 		 * this is required because get_local_storage() can't return an error.
7364 		 */
7365 		if (!register_is_null(&regs[BPF_REG_2])) {
7366 			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
7367 			return -EINVAL;
7368 		}
7369 		break;
7370 	case BPF_FUNC_for_each_map_elem:
7371 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7372 					set_map_elem_callback_state);
7373 		break;
7374 	case BPF_FUNC_timer_set_callback:
7375 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7376 					set_timer_callback_state);
7377 		break;
7378 	case BPF_FUNC_find_vma:
7379 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7380 					set_find_vma_callback_state);
7381 		break;
7382 	case BPF_FUNC_snprintf:
7383 		err = check_bpf_snprintf_call(env, regs);
7384 		break;
7385 	case BPF_FUNC_loop:
7386 		update_loop_inline_state(env, meta.subprogno);
7387 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7388 					set_loop_callback_state);
7389 		break;
7390 	case BPF_FUNC_dynptr_from_mem:
7391 		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
7392 			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
7393 				reg_type_str(env, regs[BPF_REG_1].type));
7394 			return -EACCES;
7395 		}
7396 		break;
7397 	case BPF_FUNC_set_retval:
7398 		if (prog_type == BPF_PROG_TYPE_LSM &&
7399 		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
7400 			if (!env->prog->aux->attach_func_proto->type) {
7401 				/* Make sure programs that attach to void
7402 				 * hooks don't try to modify return value.
7403 				 */
7404 				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
7405 				return -EINVAL;
7406 			}
7407 		}
7408 		break;
7409 	case BPF_FUNC_dynptr_data:
7410 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7411 			if (arg_type_is_dynptr(fn->arg_type[i])) {
7412 				struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
7413 
7414 				if (meta.ref_obj_id) {
7415 					verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
7416 					return -EFAULT;
7417 				}
7418 
7419 				if (base_type(reg->type) != PTR_TO_DYNPTR)
7420 					/* Find the id of the dynptr we're
7421 					 * tracking the reference of
7422 					 */
7423 					meta.ref_obj_id = stack_slot_get_id(env, reg);
7424 				break;
7425 			}
7426 		}
7427 		if (i == MAX_BPF_FUNC_REG_ARGS) {
7428 			verbose(env, "verifier internal error: no dynptr in bpf_dynptr_data()\n");
7429 			return -EFAULT;
7430 		}
7431 		break;
7432 	case BPF_FUNC_user_ringbuf_drain:
7433 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7434 					set_user_ringbuf_callback_state);
7435 		break;
7436 	}
7437 
7438 	if (err)
7439 		return err;
7440 
7441 	/* reset caller saved regs */
7442 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
7443 		mark_reg_not_init(env, regs, caller_saved[i]);
7444 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7445 	}
7446 
7447 	/* helper call returns 64-bit value. */
7448 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7449 
7450 	/* update return register (already marked as written above) */
7451 	ret_type = fn->ret_type;
7452 	ret_flag = type_flag(ret_type);
7453 
7454 	switch (base_type(ret_type)) {
7455 	case RET_INTEGER:
7456 		/* sets type to SCALAR_VALUE */
7457 		mark_reg_unknown(env, regs, BPF_REG_0);
7458 		break;
7459 	case RET_VOID:
7460 		regs[BPF_REG_0].type = NOT_INIT;
7461 		break;
7462 	case RET_PTR_TO_MAP_VALUE:
7463 		/* There is no offset yet applied, variable or fixed */
7464 		mark_reg_known_zero(env, regs, BPF_REG_0);
7465 		/* remember map_ptr, so that check_map_access()
7466 		 * can check 'value_size' boundary of memory access
7467 		 * to map element returned from bpf_map_lookup_elem()
7468 		 */
7469 		if (meta.map_ptr == NULL) {
7470 			verbose(env,
7471 				"kernel subsystem misconfigured verifier\n");
7472 			return -EINVAL;
7473 		}
7474 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
7475 		regs[BPF_REG_0].map_uid = meta.map_uid;
7476 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
7477 		if (!type_may_be_null(ret_type) &&
7478 		    map_value_has_spin_lock(meta.map_ptr)) {
7479 			regs[BPF_REG_0].id = ++env->id_gen;
7480 		}
7481 		break;
7482 	case RET_PTR_TO_SOCKET:
7483 		mark_reg_known_zero(env, regs, BPF_REG_0);
7484 		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
7485 		break;
7486 	case RET_PTR_TO_SOCK_COMMON:
7487 		mark_reg_known_zero(env, regs, BPF_REG_0);
7488 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
7489 		break;
7490 	case RET_PTR_TO_TCP_SOCK:
7491 		mark_reg_known_zero(env, regs, BPF_REG_0);
7492 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
7493 		break;
7494 	case RET_PTR_TO_ALLOC_MEM:
7495 		mark_reg_known_zero(env, regs, BPF_REG_0);
7496 		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7497 		regs[BPF_REG_0].mem_size = meta.mem_size;
7498 		break;
7499 	case RET_PTR_TO_MEM_OR_BTF_ID:
7500 	{
7501 		const struct btf_type *t;
7502 
7503 		mark_reg_known_zero(env, regs, BPF_REG_0);
7504 		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
7505 		if (!btf_type_is_struct(t)) {
7506 			u32 tsize;
7507 			const struct btf_type *ret;
7508 			const char *tname;
7509 
7510 			/* resolve the type size of ksym. */
7511 			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
7512 			if (IS_ERR(ret)) {
7513 				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
7514 				verbose(env, "unable to resolve the size of type '%s': %ld\n",
7515 					tname, PTR_ERR(ret));
7516 				return -EINVAL;
7517 			}
7518 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7519 			regs[BPF_REG_0].mem_size = tsize;
7520 		} else {
7521 			/* MEM_RDONLY may be carried from ret_flag, but it
7522 			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
7523 			 * it will confuse the check of PTR_TO_BTF_ID in
7524 			 * check_mem_access().
7525 			 */
7526 			ret_flag &= ~MEM_RDONLY;
7527 
7528 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7529 			regs[BPF_REG_0].btf = meta.ret_btf;
7530 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
7531 		}
7532 		break;
7533 	}
7534 	case RET_PTR_TO_BTF_ID:
7535 	{
7536 		struct btf *ret_btf;
7537 		int ret_btf_id;
7538 
7539 		mark_reg_known_zero(env, regs, BPF_REG_0);
7540 		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7541 		if (func_id == BPF_FUNC_kptr_xchg) {
7542 			ret_btf = meta.kptr_off_desc->kptr.btf;
7543 			ret_btf_id = meta.kptr_off_desc->kptr.btf_id;
7544 		} else {
7545 			if (fn->ret_btf_id == BPF_PTR_POISON) {
7546 				verbose(env, "verifier internal error:");
7547 				verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
7548 					func_id_name(func_id));
7549 				return -EINVAL;
7550 			}
7551 			ret_btf = btf_vmlinux;
7552 			ret_btf_id = *fn->ret_btf_id;
7553 		}
7554 		if (ret_btf_id == 0) {
7555 			verbose(env, "invalid return type %u of func %s#%d\n",
7556 				base_type(ret_type), func_id_name(func_id),
7557 				func_id);
7558 			return -EINVAL;
7559 		}
7560 		regs[BPF_REG_0].btf = ret_btf;
7561 		regs[BPF_REG_0].btf_id = ret_btf_id;
7562 		break;
7563 	}
7564 	default:
7565 		verbose(env, "unknown return type %u of func %s#%d\n",
7566 			base_type(ret_type), func_id_name(func_id), func_id);
7567 		return -EINVAL;
7568 	}
7569 
7570 	if (type_may_be_null(regs[BPF_REG_0].type))
7571 		regs[BPF_REG_0].id = ++env->id_gen;
7572 
7573 	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
7574 		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
7575 			func_id_name(func_id), func_id);
7576 		return -EFAULT;
7577 	}
7578 
7579 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
7580 		/* For release_reference() */
7581 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
7582 	} else if (is_acquire_function(func_id, meta.map_ptr)) {
7583 		int id = acquire_reference_state(env, insn_idx);
7584 
7585 		if (id < 0)
7586 			return id;
7587 		/* For mark_ptr_or_null_reg() */
7588 		regs[BPF_REG_0].id = id;
7589 		/* For release_reference() */
7590 		regs[BPF_REG_0].ref_obj_id = id;
7591 	}
7592 
7593 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
7594 
7595 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
7596 	if (err)
7597 		return err;
7598 
7599 	if ((func_id == BPF_FUNC_get_stack ||
7600 	     func_id == BPF_FUNC_get_task_stack) &&
7601 	    !env->prog->has_callchain_buf) {
7602 		const char *err_str;
7603 
7604 #ifdef CONFIG_PERF_EVENTS
7605 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
7606 		err_str = "cannot get callchain buffer for func %s#%d\n";
7607 #else
7608 		err = -ENOTSUPP;
7609 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
7610 #endif
7611 		if (err) {
7612 			verbose(env, err_str, func_id_name(func_id), func_id);
7613 			return err;
7614 		}
7615 
7616 		env->prog->has_callchain_buf = true;
7617 	}
7618 
7619 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
7620 		env->prog->call_get_stack = true;
7621 
7622 	if (func_id == BPF_FUNC_get_func_ip) {
7623 		if (check_get_func_ip(env))
7624 			return -ENOTSUPP;
7625 		env->prog->call_get_func_ip = true;
7626 	}
7627 
7628 	if (changes_data)
7629 		clear_all_pkt_pointers(env);
7630 	return 0;
7631 }
7632 
7633 /* mark_btf_func_reg_size() is used when the reg size is determined by
7634  * the BTF func_proto's return value and argument sizes.
7635  */
7636 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
7637 				   size_t reg_size)
7638 {
7639 	struct bpf_reg_state *reg = &cur_regs(env)[regno];
7640 
7641 	if (regno == BPF_REG_0) {
7642 		/* Function return value */
7643 		reg->live |= REG_LIVE_WRITTEN;
7644 		reg->subreg_def = reg_size == sizeof(u64) ?
7645 			DEF_NOT_SUBREG : env->insn_idx + 1;
7646 	} else {
7647 		/* Function argument */
7648 		if (reg_size == sizeof(u64)) {
7649 			mark_insn_zext(env, reg);
7650 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
7651 		} else {
7652 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
7653 		}
7654 	}
7655 }
7656 
7657 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7658 			    int *insn_idx_p)
7659 {
7660 	const struct btf_type *t, *func, *func_proto, *ptr_type;
7661 	struct bpf_reg_state *regs = cur_regs(env);
7662 	struct bpf_kfunc_arg_meta meta = { 0 };
7663 	const char *func_name, *ptr_type_name;
7664 	u32 i, nargs, func_id, ptr_type_id;
7665 	int err, insn_idx = *insn_idx_p;
7666 	const struct btf_param *args;
7667 	struct btf *desc_btf;
7668 	u32 *kfunc_flags;
7669 	bool acq;
7670 
7671 	/* skip for now, but return error when we find this in fixup_kfunc_call */
7672 	if (!insn->imm)
7673 		return 0;
7674 
7675 	desc_btf = find_kfunc_desc_btf(env, insn->off);
7676 	if (IS_ERR(desc_btf))
7677 		return PTR_ERR(desc_btf);
7678 
7679 	func_id = insn->imm;
7680 	func = btf_type_by_id(desc_btf, func_id);
7681 	func_name = btf_name_by_offset(desc_btf, func->name_off);
7682 	func_proto = btf_type_by_id(desc_btf, func->type);
7683 
7684 	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
7685 	if (!kfunc_flags) {
7686 		verbose(env, "calling kernel function %s is not allowed\n",
7687 			func_name);
7688 		return -EACCES;
7689 	}
7690 	if (*kfunc_flags & KF_DESTRUCTIVE && !capable(CAP_SYS_BOOT)) {
7691 		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capabilities\n");
7692 		return -EACCES;
7693 	}
7694 
7695 	acq = *kfunc_flags & KF_ACQUIRE;
7696 
7697 	meta.flags = *kfunc_flags;
7698 
7699 	/* Check the arguments */
7700 	err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, &meta);
7701 	if (err < 0)
7702 		return err;
7703 	/* In the case of a release function, we get the register number of the
7704 	 * refcounted PTR_TO_BTF_ID back from btf_check_kfunc_arg_match(); do the release now.
7705 	 */
7706 	if (err) {
7707 		err = release_reference(env, regs[err].ref_obj_id);
7708 		if (err) {
7709 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
7710 				func_name, func_id);
7711 			return err;
7712 		}
7713 	}
7714 
7715 	for (i = 0; i < CALLER_SAVED_REGS; i++)
7716 		mark_reg_not_init(env, regs, caller_saved[i]);
7717 
7718 	/* Check return type */
7719 	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
7720 
7721 	if (acq && !btf_type_is_struct_ptr(desc_btf, t)) {
7722 		verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
7723 		return -EINVAL;
7724 	}
7725 
7726 	if (btf_type_is_scalar(t)) {
7727 		mark_reg_unknown(env, regs, BPF_REG_0);
7728 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
7729 	} else if (btf_type_is_ptr(t)) {
7730 		ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
7731 						   &ptr_type_id);
7732 		if (!btf_type_is_struct(ptr_type)) {
7733 			if (!meta.r0_size) {
7734 				ptr_type_name = btf_name_by_offset(desc_btf,
7735 								   ptr_type->name_off);
7736 				verbose(env,
7737 					"kernel function %s returns pointer type %s %s is not supported\n",
7738 					func_name,
7739 					btf_type_str(ptr_type),
7740 					ptr_type_name);
7741 				return -EINVAL;
7742 			}
7743 
7744 			mark_reg_known_zero(env, regs, BPF_REG_0);
7745 			regs[BPF_REG_0].type = PTR_TO_MEM;
7746 			regs[BPF_REG_0].mem_size = meta.r0_size;
7747 
7748 			if (meta.r0_rdonly)
7749 				regs[BPF_REG_0].type |= MEM_RDONLY;
7750 
7751 			/* Ensures we don't access the memory after a release_reference() */
7752 			if (meta.ref_obj_id)
7753 				regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
7754 		} else {
7755 			mark_reg_known_zero(env, regs, BPF_REG_0);
7756 			regs[BPF_REG_0].btf = desc_btf;
7757 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
7758 			regs[BPF_REG_0].btf_id = ptr_type_id;
7759 		}
7760 		if (*kfunc_flags & KF_RET_NULL) {
7761 			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
7762 			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
7763 			regs[BPF_REG_0].id = ++env->id_gen;
7764 		}
7765 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
7766 		if (acq) {
7767 			int id = acquire_reference_state(env, insn_idx);
7768 
7769 			if (id < 0)
7770 				return id;
7771 			regs[BPF_REG_0].id = id;
7772 			regs[BPF_REG_0].ref_obj_id = id;
7773 		}
7774 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
7775 
7776 	nargs = btf_type_vlen(func_proto);
7777 	args = (const struct btf_param *)(func_proto + 1);
7778 	for (i = 0; i < nargs; i++) {
7779 		u32 regno = i + 1;
7780 
7781 		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
7782 		if (btf_type_is_ptr(t))
7783 			mark_btf_func_reg_size(env, regno, sizeof(void *));
7784 		else
7785 			/* scalar. ensured by btf_check_kfunc_arg_match() */
7786 			mark_btf_func_reg_size(env, regno, t->size);
7787 	}
7788 
7789 	return 0;
7790 }
7791 
7792 static bool signed_add_overflows(s64 a, s64 b)
7793 {
7794 	/* Do the add in u64, where overflow is well-defined */
7795 	s64 res = (s64)((u64)a + (u64)b);
7796 
7797 	if (b < 0)
7798 		return res > a;
7799 	return res < a;
7800 }
7801 
7802 static bool signed_add32_overflows(s32 a, s32 b)
7803 {
7804 	/* Do the add in u32, where overflow is well-defined */
7805 	s32 res = (s32)((u32)a + (u32)b);
7806 
7807 	if (b < 0)
7808 		return res > a;
7809 	return res < a;
7810 }
7811 
7812 static bool signed_sub_overflows(s64 a, s64 b)
7813 {
7814 	/* Do the sub in u64, where overflow is well-defined */
7815 	s64 res = (s64)((u64)a - (u64)b);
7816 
7817 	if (b < 0)
7818 		return res < a;
7819 	return res > a;
7820 }
7821 
7822 static bool signed_sub32_overflows(s32 a, s32 b)
7823 {
7824 	/* Do the sub in u32, where overflow is well-defined */
7825 	s32 res = (s32)((u32)a - (u32)b);
7826 
7827 	if (b < 0)
7828 		return res < a;
7829 	return res > a;
7830 }
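
/* A worked instance of the checks above: signed_add_overflows(S64_MAX, 1)
 * wraps in u64 to (u64)S64_MIN, so res == S64_MIN < a with b >= 0 and
 * overflow is reported.  Likewise signed_sub_overflows(S64_MIN, 1) wraps to
 * S64_MAX, so res > a with b >= 0 and is flagged as well.
 */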
7831 
7832 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
7833 				  const struct bpf_reg_state *reg,
7834 				  enum bpf_reg_type type)
7835 {
7836 	bool known = tnum_is_const(reg->var_off);
7837 	s64 val = reg->var_off.value;
7838 	s64 smin = reg->smin_value;
7839 
7840 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
7841 		verbose(env, "math between %s pointer and %lld is not allowed\n",
7842 			reg_type_str(env, type), val);
7843 		return false;
7844 	}
7845 
7846 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
7847 		verbose(env, "%s pointer offset %d is not allowed\n",
7848 			reg_type_str(env, type), reg->off);
7849 		return false;
7850 	}
7851 
7852 	if (smin == S64_MIN) {
7853 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
7854 			reg_type_str(env, type));
7855 		return false;
7856 	}
7857 
7858 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
7859 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
7860 			smin, reg_type_str(env, type));
7861 		return false;
7862 	}
7863 
7864 	return true;
7865 }
7866 
7867 enum {
7868 	REASON_BOUNDS	= -1,
7869 	REASON_TYPE	= -2,
7870 	REASON_PATHS	= -3,
7871 	REASON_LIMIT	= -4,
7872 	REASON_STACK	= -5,
7873 };
7874 
7875 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
7876 			      u32 *alu_limit, bool mask_to_left)
7877 {
7878 	u32 max = 0, ptr_limit = 0;
7879 
7880 	switch (ptr_reg->type) {
7881 	case PTR_TO_STACK:
7882 		/* Offset 0 is out-of-bounds, but an acceptable start for the
7883 		 * left direction, see BPF_REG_FP. Also, an unknown scalar
7884 		 * offset where we would need to deal with min/max bounds is
7885 		 * currently prohibited for unprivileged programs.
7886 		 */
7887 		max = MAX_BPF_STACK + mask_to_left;
7888 		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
7889 		break;
7890 	case PTR_TO_MAP_VALUE:
7891 		max = ptr_reg->map_ptr->value_size;
7892 		ptr_limit = (mask_to_left ?
7893 			     ptr_reg->smin_value :
7894 			     ptr_reg->umax_value) + ptr_reg->off;
7895 		break;
7896 	default:
7897 		return REASON_TYPE;
7898 	}
7899 
7900 	if (ptr_limit >= max)
7901 		return REASON_LIMIT;
7902 	*alu_limit = ptr_limit;
7903 	return 0;
7904 }
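
/* A worked instance of the limit computation above: for a PTR_TO_STACK
 * register pointing at fp-16 (off == -16, var_off.value == 0), ptr_limit
 * becomes 16, i.e. a sanitized ALU op may move the pointer by at most 16
 * bytes in the chosen direction.  For a PTR_TO_MAP_VALUE with
 * value_size == 64, off == 0 and umax_value == 32, masking to the right is
 * capped at 32.
 */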
7905 
7906 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
7907 				    const struct bpf_insn *insn)
7908 {
7909 	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
7910 }
7911 
7912 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
7913 				       u32 alu_state, u32 alu_limit)
7914 {
7915 	/* If we arrived here from different branches with different
7916 	 * state or limits to sanitize, then this won't work.
7917 	 */
7918 	if (aux->alu_state &&
7919 	    (aux->alu_state != alu_state ||
7920 	     aux->alu_limit != alu_limit))
7921 		return REASON_PATHS;
7922 
7923 	/* Corresponding fixup done in do_misc_fixups(). */
7924 	aux->alu_state = alu_state;
7925 	aux->alu_limit = alu_limit;
7926 	return 0;
7927 }
7928 
7929 static int sanitize_val_alu(struct bpf_verifier_env *env,
7930 			    struct bpf_insn *insn)
7931 {
7932 	struct bpf_insn_aux_data *aux = cur_aux(env);
7933 
7934 	if (can_skip_alu_sanitation(env, insn))
7935 		return 0;
7936 
7937 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
7938 }
7939 
7940 static bool sanitize_needed(u8 opcode)
7941 {
7942 	return opcode == BPF_ADD || opcode == BPF_SUB;
7943 }
7944 
7945 struct bpf_sanitize_info {
7946 	struct bpf_insn_aux_data aux;
7947 	bool mask_to_left;
7948 };
7949 
7950 static struct bpf_verifier_state *
7951 sanitize_speculative_path(struct bpf_verifier_env *env,
7952 			  const struct bpf_insn *insn,
7953 			  u32 next_idx, u32 curr_idx)
7954 {
7955 	struct bpf_verifier_state *branch;
7956 	struct bpf_reg_state *regs;
7957 
7958 	branch = push_stack(env, next_idx, curr_idx, true);
7959 	if (branch && insn) {
7960 		regs = branch->frame[branch->curframe]->regs;
7961 		if (BPF_SRC(insn->code) == BPF_K) {
7962 			mark_reg_unknown(env, regs, insn->dst_reg);
7963 		} else if (BPF_SRC(insn->code) == BPF_X) {
7964 			mark_reg_unknown(env, regs, insn->dst_reg);
7965 			mark_reg_unknown(env, regs, insn->src_reg);
7966 		}
7967 	}
7968 	return branch;
7969 }
7970 
7971 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
7972 			    struct bpf_insn *insn,
7973 			    const struct bpf_reg_state *ptr_reg,
7974 			    const struct bpf_reg_state *off_reg,
7975 			    struct bpf_reg_state *dst_reg,
7976 			    struct bpf_sanitize_info *info,
7977 			    const bool commit_window)
7978 {
7979 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
7980 	struct bpf_verifier_state *vstate = env->cur_state;
7981 	bool off_is_imm = tnum_is_const(off_reg->var_off);
7982 	bool off_is_neg = off_reg->smin_value < 0;
7983 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
7984 	u8 opcode = BPF_OP(insn->code);
7985 	u32 alu_state, alu_limit;
7986 	struct bpf_reg_state tmp;
7987 	bool ret;
7988 	int err;
7989 
7990 	if (can_skip_alu_sanitation(env, insn))
7991 		return 0;
7992 
7993 	/* We already marked aux for masking from non-speculative
7994 	 * paths, thus we got here in the first place. We only care
7995 	 * to explore bad access from here.
7996 	 */
7997 	if (vstate->speculative)
7998 		goto do_sim;
7999 
8000 	if (!commit_window) {
8001 		if (!tnum_is_const(off_reg->var_off) &&
8002 		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
8003 			return REASON_BOUNDS;
8004 
8005 		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
8006 				     (opcode == BPF_SUB && !off_is_neg);
8007 	}
8008 
8009 	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
8010 	if (err < 0)
8011 		return err;
8012 
8013 	if (commit_window) {
8014 		/* In commit phase we narrow the masking window based on
8015 		 * the observed pointer move after the simulated operation.
8016 		 */
8017 		alu_state = info->aux.alu_state;
8018 		alu_limit = abs(info->aux.alu_limit - alu_limit);
8019 	} else {
8020 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
8021 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
8022 		alu_state |= ptr_is_dst_reg ?
8023 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
8024 
8025 		/* Limit pruning on unknown scalars to enable deep search for
8026 		 * potential masking differences from other program paths.
8027 		 */
8028 		if (!off_is_imm)
8029 			env->explore_alu_limits = true;
8030 	}
8031 
8032 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
8033 	if (err < 0)
8034 		return err;
8035 do_sim:
8036 	/* If we're in the commit phase, we're done here given we already
8037 	 * pushed the truncated dst_reg into the speculative verification
8038 	 * stack.
8039 	 *
8040 	 * Also, when the register is a known constant, we rewrite the register-based
8041 	 * operation into an immediate-based one, and thus do not need masking (and as
8042 	 * a consequence, do not need to simulate the zero-truncation either).
8043 	 */
8044 	if (commit_window || off_is_imm)
8045 		return 0;
8046 
8047 	/* Simulate and find potential out-of-bounds access under
8048 	 * speculative execution from truncation as a result of
8049 	 * masking when off was not within expected range. If off
8050 	 * sits in dst, then we temporarily need to move ptr there
8051 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
8052 	 * for cases where we use K-based arithmetic in one direction
8053 	 * and truncated reg-based in the other in order to explore
8054 	 * bad access.
8055 	 */
8056 	if (!ptr_is_dst_reg) {
8057 		tmp = *dst_reg;
8058 		*dst_reg = *ptr_reg;
8059 	}
8060 	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
8061 					env->insn_idx);
8062 	if (!ptr_is_dst_reg && ret)
8063 		*dst_reg = tmp;
8064 	return !ret ? REASON_STACK : 0;
8065 }
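
/* Conceptually (a rough sketch of the rewrite later emitted from the recorded
 * alu_state/alu_limit; the exact patched instruction sequence is omitted
 * here), the runtime masking computes, in the auxiliary register ax:
 *
 *	ax  = alu_limit - off	off is the offset value (pre-negated if negative)
 *	ax |= off		sign bit ends up set iff off exceeded alu_limit
 *	ax  = -ax
 *	ax  s>>= 63		all-ones if off was within the limit, else zero
 *	ax &= off		the pointer ALU then uses ax, so an out-of-range
 *				offset degenerates to 0 under speculation
 */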
8066 
8067 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
8068 {
8069 	struct bpf_verifier_state *vstate = env->cur_state;
8070 
8071 	/* If we simulate paths under speculation, we don't update the
8072 	 * insn as 'seen' such that when we verify unreachable paths in
8073 	 * the non-speculative domain, sanitize_dead_code() can still
8074 	 * rewrite/sanitize them.
8075 	 */
8076 	if (!vstate->speculative)
8077 		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
8078 }
8079 
8080 static int sanitize_err(struct bpf_verifier_env *env,
8081 			const struct bpf_insn *insn, int reason,
8082 			const struct bpf_reg_state *off_reg,
8083 			const struct bpf_reg_state *dst_reg)
8084 {
8085 	static const char *err = "pointer arithmetic with it prohibited for !root";
8086 	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
8087 	u32 dst = insn->dst_reg, src = insn->src_reg;
8088 
8089 	switch (reason) {
8090 	case REASON_BOUNDS:
8091 		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
8092 			off_reg == dst_reg ? dst : src, err);
8093 		break;
8094 	case REASON_TYPE:
8095 		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
8096 			off_reg == dst_reg ? src : dst, err);
8097 		break;
8098 	case REASON_PATHS:
8099 		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
8100 			dst, op, err);
8101 		break;
8102 	case REASON_LIMIT:
8103 		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
8104 			dst, op, err);
8105 		break;
8106 	case REASON_STACK:
8107 		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
8108 			dst, err);
8109 		break;
8110 	default:
8111 		verbose(env, "verifier internal error: unknown reason (%d)\n",
8112 			reason);
8113 		break;
8114 	}
8115 
8116 	return -EACCES;
8117 }
8118 
8119 /* check that stack access falls within stack limits and that 'reg' doesn't
8120  * have a variable offset.
8121  *
8122  * Variable offset is prohibited for unprivileged mode for simplicity since it
8123  * requires corresponding support in Spectre masking for stack ALU.  See also
8124  * retrieve_ptr_limit().
8125  *
8126  *
8127  * 'off' includes 'reg->off'.
8128  */
8129 static int check_stack_access_for_ptr_arithmetic(
8130 				struct bpf_verifier_env *env,
8131 				int regno,
8132 				const struct bpf_reg_state *reg,
8133 				int off)
8134 {
8135 	if (!tnum_is_const(reg->var_off)) {
8136 		char tn_buf[48];
8137 
8138 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
8139 		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
8140 			regno, tn_buf, off);
8141 		return -EACCES;
8142 	}
8143 
8144 	if (off >= 0 || off < -MAX_BPF_STACK) {
8145 		verbose(env, "R%d stack pointer arithmetic goes out of range, "
8146 			"prohibited for !root; off=%d\n", regno, off);
8147 		return -EACCES;
8148 	}
8149 
8150 	return 0;
8151 }
8152 
8153 static int sanitize_check_bounds(struct bpf_verifier_env *env,
8154 				 const struct bpf_insn *insn,
8155 				 const struct bpf_reg_state *dst_reg)
8156 {
8157 	u32 dst = insn->dst_reg;
8158 
8159 	/* For unprivileged programs we require that the resulting offset be in bounds
8160 	 * in order to be able to sanitize the access later on.
8161 	 */
8162 	if (env->bypass_spec_v1)
8163 		return 0;
8164 
8165 	switch (dst_reg->type) {
8166 	case PTR_TO_STACK:
8167 		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
8168 					dst_reg->off + dst_reg->var_off.value))
8169 			return -EACCES;
8170 		break;
8171 	case PTR_TO_MAP_VALUE:
8172 		if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
8173 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
8174 				"prohibited for !root\n", dst);
8175 			return -EACCES;
8176 		}
8177 		break;
8178 	default:
8179 		break;
8180 	}
8181 
8182 	return 0;
8183 }
8184 
8185 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
8186  * Caller should also handle BPF_MOV case separately.
8187  * If we return -EACCES, caller may want to try again treating pointer as a
8188  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
8189  */
8190 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
8191 				   struct bpf_insn *insn,
8192 				   const struct bpf_reg_state *ptr_reg,
8193 				   const struct bpf_reg_state *off_reg)
8194 {
8195 	struct bpf_verifier_state *vstate = env->cur_state;
8196 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
8197 	struct bpf_reg_state *regs = state->regs, *dst_reg;
8198 	bool known = tnum_is_const(off_reg->var_off);
8199 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
8200 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
8201 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
8202 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
8203 	struct bpf_sanitize_info info = {};
8204 	u8 opcode = BPF_OP(insn->code);
8205 	u32 dst = insn->dst_reg;
8206 	int ret;
8207 
8208 	dst_reg = &regs[dst];
8209 
8210 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
8211 	    smin_val > smax_val || umin_val > umax_val) {
8212 		/* Taint dst register if offset had invalid bounds derived from
8213 		 * e.g. dead branches.
8214 		 */
8215 		__mark_reg_unknown(env, dst_reg);
8216 		return 0;
8217 	}
8218 
8219 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
8220 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
8221 		if (opcode == BPF_SUB && env->allow_ptr_leaks) {
8222 			__mark_reg_unknown(env, dst_reg);
8223 			return 0;
8224 		}
8225 
8226 		verbose(env,
8227 			"R%d 32-bit pointer arithmetic prohibited\n",
8228 			dst);
8229 		return -EACCES;
8230 	}
8231 
8232 	if (ptr_reg->type & PTR_MAYBE_NULL) {
8233 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
8234 			dst, reg_type_str(env, ptr_reg->type));
8235 		return -EACCES;
8236 	}
8237 
8238 	switch (base_type(ptr_reg->type)) {
8239 	case CONST_PTR_TO_MAP:
8240 		/* smin_val represents the known value */
8241 		if (known && smin_val == 0 && opcode == BPF_ADD)
8242 			break;
8243 		fallthrough;
8244 	case PTR_TO_PACKET_END:
8245 	case PTR_TO_SOCKET:
8246 	case PTR_TO_SOCK_COMMON:
8247 	case PTR_TO_TCP_SOCK:
8248 	case PTR_TO_XDP_SOCK:
8249 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
8250 			dst, reg_type_str(env, ptr_reg->type));
8251 		return -EACCES;
8252 	default:
8253 		break;
8254 	}
8255 
8256 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
8257 	 * The id may be overwritten later if we create a new variable offset.
8258 	 */
8259 	dst_reg->type = ptr_reg->type;
8260 	dst_reg->id = ptr_reg->id;
8261 
8262 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
8263 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
8264 		return -EINVAL;
8265 
8266 	/* pointer types do not carry 32-bit bounds at the moment. */
8267 	__mark_reg32_unbounded(dst_reg);
8268 
8269 	if (sanitize_needed(opcode)) {
8270 		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
8271 				       &info, false);
8272 		if (ret < 0)
8273 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
8274 	}
8275 
8276 	switch (opcode) {
8277 	case BPF_ADD:
8278 		/* We can take a fixed offset as long as it doesn't overflow
8279 		 * the s32 'off' field
8280 		 */
8281 		if (known && (ptr_reg->off + smin_val ==
8282 			      (s64)(s32)(ptr_reg->off + smin_val))) {
8283 			/* pointer += K.  Accumulate it into fixed offset */
8284 			dst_reg->smin_value = smin_ptr;
8285 			dst_reg->smax_value = smax_ptr;
8286 			dst_reg->umin_value = umin_ptr;
8287 			dst_reg->umax_value = umax_ptr;
8288 			dst_reg->var_off = ptr_reg->var_off;
8289 			dst_reg->off = ptr_reg->off + smin_val;
8290 			dst_reg->raw = ptr_reg->raw;
8291 			break;
8292 		}
8293 		/* A new variable offset is created.  Note that off_reg->off
8294 		 * == 0, since it's a scalar.
8295 		 * dst_reg gets the pointer type and, since some positive
8296 		 * integer value was added to the pointer, give it a new 'id'
8297 		 * if it's a PTR_TO_PACKET.
8298 		 * This creates a new 'base' pointer: off_reg (the variable part) gets
8299 		 * added into the variable offset, and we copy the fixed offset
8300 		 * from ptr_reg.
8301 		 */
8302 		if (signed_add_overflows(smin_ptr, smin_val) ||
8303 		    signed_add_overflows(smax_ptr, smax_val)) {
8304 			dst_reg->smin_value = S64_MIN;
8305 			dst_reg->smax_value = S64_MAX;
8306 		} else {
8307 			dst_reg->smin_value = smin_ptr + smin_val;
8308 			dst_reg->smax_value = smax_ptr + smax_val;
8309 		}
8310 		if (umin_ptr + umin_val < umin_ptr ||
8311 		    umax_ptr + umax_val < umax_ptr) {
8312 			dst_reg->umin_value = 0;
8313 			dst_reg->umax_value = U64_MAX;
8314 		} else {
8315 			dst_reg->umin_value = umin_ptr + umin_val;
8316 			dst_reg->umax_value = umax_ptr + umax_val;
8317 		}
8318 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
8319 		dst_reg->off = ptr_reg->off;
8320 		dst_reg->raw = ptr_reg->raw;
8321 		if (reg_is_pkt_pointer(ptr_reg)) {
8322 			dst_reg->id = ++env->id_gen;
8323 			/* something was added to pkt_ptr, set range to zero */
8324 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
8325 		}
8326 		break;
8327 	case BPF_SUB:
8328 		if (dst_reg == off_reg) {
8329 			/* scalar -= pointer.  Creates an unknown scalar */
8330 			verbose(env, "R%d tried to subtract pointer from scalar\n",
8331 				dst);
8332 			return -EACCES;
8333 		}
8334 		/* We don't allow subtraction from FP, because (according to the
8335 		 * test_verifier.c test "invalid fp arithmetic") JITs might not
8336 		 * be able to deal with it.
8337 		 */
8338 		if (ptr_reg->type == PTR_TO_STACK) {
8339 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
8340 				dst);
8341 			return -EACCES;
8342 		}
8343 		if (known && (ptr_reg->off - smin_val ==
8344 			      (s64)(s32)(ptr_reg->off - smin_val))) {
8345 			/* pointer -= K.  Subtract it from fixed offset */
8346 			dst_reg->smin_value = smin_ptr;
8347 			dst_reg->smax_value = smax_ptr;
8348 			dst_reg->umin_value = umin_ptr;
8349 			dst_reg->umax_value = umax_ptr;
8350 			dst_reg->var_off = ptr_reg->var_off;
8351 			dst_reg->id = ptr_reg->id;
8352 			dst_reg->off = ptr_reg->off - smin_val;
8353 			dst_reg->raw = ptr_reg->raw;
8354 			break;
8355 		}
8356 		/* A new variable offset is created.  If the subtrahend is known
8357 		 * nonnegative, then any reg->range we had before is still good.
8358 		 */
8359 		if (signed_sub_overflows(smin_ptr, smax_val) ||
8360 		    signed_sub_overflows(smax_ptr, smin_val)) {
8361 			/* Overflow possible, we know nothing */
8362 			dst_reg->smin_value = S64_MIN;
8363 			dst_reg->smax_value = S64_MAX;
8364 		} else {
8365 			dst_reg->smin_value = smin_ptr - smax_val;
8366 			dst_reg->smax_value = smax_ptr - smin_val;
8367 		}
8368 		if (umin_ptr < umax_val) {
8369 			/* Overflow possible, we know nothing */
8370 			dst_reg->umin_value = 0;
8371 			dst_reg->umax_value = U64_MAX;
8372 		} else {
8373 			/* Cannot overflow (as long as bounds are consistent) */
8374 			dst_reg->umin_value = umin_ptr - umax_val;
8375 			dst_reg->umax_value = umax_ptr - umin_val;
8376 		}
8377 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
8378 		dst_reg->off = ptr_reg->off;
8379 		dst_reg->raw = ptr_reg->raw;
8380 		if (reg_is_pkt_pointer(ptr_reg)) {
8381 			dst_reg->id = ++env->id_gen;
8382 			/* something may have been added to pkt_ptr, set range to zero */
8383 			if (smin_val < 0)
8384 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
8385 		}
8386 		break;
8387 	case BPF_AND:
8388 	case BPF_OR:
8389 	case BPF_XOR:
8390 		/* bitwise ops on pointers are troublesome, prohibit. */
8391 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
8392 			dst, bpf_alu_string[opcode >> 4]);
8393 		return -EACCES;
8394 	default:
8395 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
8396 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
8397 			dst, bpf_alu_string[opcode >> 4]);
8398 		return -EACCES;
8399 	}
8400 
8401 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
8402 		return -EINVAL;
8403 	reg_bounds_sync(dst_reg);
8404 	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
8405 		return -EACCES;
8406 	if (sanitize_needed(opcode)) {
8407 		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
8408 				       &info, true);
8409 		if (ret < 0)
8410 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
8411 	}
8412 
8413 	return 0;
8414 }
8415 
8416 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
8417 				 struct bpf_reg_state *src_reg)
8418 {
8419 	s32 smin_val = src_reg->s32_min_value;
8420 	s32 smax_val = src_reg->s32_max_value;
8421 	u32 umin_val = src_reg->u32_min_value;
8422 	u32 umax_val = src_reg->u32_max_value;
8423 
8424 	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
8425 	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
8426 		dst_reg->s32_min_value = S32_MIN;
8427 		dst_reg->s32_max_value = S32_MAX;
8428 	} else {
8429 		dst_reg->s32_min_value += smin_val;
8430 		dst_reg->s32_max_value += smax_val;
8431 	}
8432 	if (dst_reg->u32_min_value + umin_val < umin_val ||
8433 	    dst_reg->u32_max_value + umax_val < umax_val) {
8434 		dst_reg->u32_min_value = 0;
8435 		dst_reg->u32_max_value = U32_MAX;
8436 	} else {
8437 		dst_reg->u32_min_value += umin_val;
8438 		dst_reg->u32_max_value += umax_val;
8439 	}
8440 }
8441 
8442 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
8443 			       struct bpf_reg_state *src_reg)
8444 {
8445 	s64 smin_val = src_reg->smin_value;
8446 	s64 smax_val = src_reg->smax_value;
8447 	u64 umin_val = src_reg->umin_value;
8448 	u64 umax_val = src_reg->umax_value;
8449 
8450 	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
8451 	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
8452 		dst_reg->smin_value = S64_MIN;
8453 		dst_reg->smax_value = S64_MAX;
8454 	} else {
8455 		dst_reg->smin_value += smin_val;
8456 		dst_reg->smax_value += smax_val;
8457 	}
8458 	if (dst_reg->umin_value + umin_val < umin_val ||
8459 	    dst_reg->umax_value + umax_val < umax_val) {
8460 		dst_reg->umin_value = 0;
8461 		dst_reg->umax_value = U64_MAX;
8462 	} else {
8463 		dst_reg->umin_value += umin_val;
8464 		dst_reg->umax_value += umax_val;
8465 	}
8466 }
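
/* Worked example for the add helpers above (hypothetical values): adding a
 * scalar in [1, 5] to a dst_reg in [2, 10] yields [3, 15] for both the
 * signed and unsigned bounds.  If instead dst_reg->umax_value were
 * U64_MAX - 1 and the scalar's umax_value were 4, the unsigned sum wraps
 * (it becomes 2, which is < 4), so the unsigned bounds are conservatively
 * reset to [0, U64_MAX].
 */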
8467 
8468 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
8469 				 struct bpf_reg_state *src_reg)
8470 {
8471 	s32 smin_val = src_reg->s32_min_value;
8472 	s32 smax_val = src_reg->s32_max_value;
8473 	u32 umin_val = src_reg->u32_min_value;
8474 	u32 umax_val = src_reg->u32_max_value;
8475 
8476 	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
8477 	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
8478 		/* Overflow possible, we know nothing */
8479 		dst_reg->s32_min_value = S32_MIN;
8480 		dst_reg->s32_max_value = S32_MAX;
8481 	} else {
8482 		dst_reg->s32_min_value -= smax_val;
8483 		dst_reg->s32_max_value -= smin_val;
8484 	}
8485 	if (dst_reg->u32_min_value < umax_val) {
8486 		/* Overflow possible, we know nothing */
8487 		dst_reg->u32_min_value = 0;
8488 		dst_reg->u32_max_value = U32_MAX;
8489 	} else {
8490 		/* Cannot overflow (as long as bounds are consistent) */
8491 		dst_reg->u32_min_value -= umax_val;
8492 		dst_reg->u32_max_value -= umin_val;
8493 	}
8494 }
8495 
8496 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
8497 			       struct bpf_reg_state *src_reg)
8498 {
8499 	s64 smin_val = src_reg->smin_value;
8500 	s64 smax_val = src_reg->smax_value;
8501 	u64 umin_val = src_reg->umin_value;
8502 	u64 umax_val = src_reg->umax_value;
8503 
8504 	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
8505 	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
8506 		/* Overflow possible, we know nothing */
8507 		dst_reg->smin_value = S64_MIN;
8508 		dst_reg->smax_value = S64_MAX;
8509 	} else {
8510 		dst_reg->smin_value -= smax_val;
8511 		dst_reg->smax_value -= smin_val;
8512 	}
8513 	if (dst_reg->umin_value < umax_val) {
8514 		/* Overflow possible, we know nothing */
8515 		dst_reg->umin_value = 0;
8516 		dst_reg->umax_value = U64_MAX;
8517 	} else {
8518 		/* Cannot overflow (as long as bounds are consistent) */
8519 		dst_reg->umin_value -= umax_val;
8520 		dst_reg->umax_value -= umin_val;
8521 	}
8522 }
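
/* Worked example for the sub helpers above (hypothetical values): with
 * dst_reg in [100, 200] and a subtrahend in [10, 50], the result is
 * [100 - 50, 200 - 10] = [50, 190].  If dst_reg->umin_value were only 20,
 * i.e. smaller than the subtrahend's umax_value of 50, the subtraction
 * could wrap below zero, so the unsigned bounds are reset to [0, U64_MAX].
 */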
8523 
8524 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
8525 				 struct bpf_reg_state *src_reg)
8526 {
8527 	s32 smin_val = src_reg->s32_min_value;
8528 	u32 umin_val = src_reg->u32_min_value;
8529 	u32 umax_val = src_reg->u32_max_value;
8530 
8531 	if (smin_val < 0 || dst_reg->s32_min_value < 0) {
8532 		/* Ain't nobody got time to multiply that sign */
8533 		__mark_reg32_unbounded(dst_reg);
8534 		return;
8535 	}
8536 	/* Both values are positive, so we can work with unsigned and
8537 	 * copy the result to signed (unless it exceeds S32_MAX).
8538 	 */
8539 	if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
8540 		/* Potential overflow, we know nothing */
8541 		__mark_reg32_unbounded(dst_reg);
8542 		return;
8543 	}
8544 	dst_reg->u32_min_value *= umin_val;
8545 	dst_reg->u32_max_value *= umax_val;
8546 	if (dst_reg->u32_max_value > S32_MAX) {
8547 		/* Overflow possible, we know nothing */
8548 		dst_reg->s32_min_value = S32_MIN;
8549 		dst_reg->s32_max_value = S32_MAX;
8550 	} else {
8551 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8552 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8553 	}
8554 }
8555 
8556 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
8557 			       struct bpf_reg_state *src_reg)
8558 {
8559 	s64 smin_val = src_reg->smin_value;
8560 	u64 umin_val = src_reg->umin_value;
8561 	u64 umax_val = src_reg->umax_value;
8562 
8563 	if (smin_val < 0 || dst_reg->smin_value < 0) {
8564 		/* Ain't nobody got time to multiply that sign */
8565 		__mark_reg64_unbounded(dst_reg);
8566 		return;
8567 	}
8568 	/* Both values are positive, so we can work with unsigned and
8569 	 * copy the result to signed (unless it exceeds S64_MAX).
8570 	 */
8571 	if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
8572 		/* Potential overflow, we know nothing */
8573 		__mark_reg64_unbounded(dst_reg);
8574 		return;
8575 	}
8576 	dst_reg->umin_value *= umin_val;
8577 	dst_reg->umax_value *= umax_val;
8578 	if (dst_reg->umax_value > S64_MAX) {
8579 		/* Overflow possible, we know nothing */
8580 		dst_reg->smin_value = S64_MIN;
8581 		dst_reg->smax_value = S64_MAX;
8582 	} else {
8583 		dst_reg->smin_value = dst_reg->umin_value;
8584 		dst_reg->smax_value = dst_reg->umax_value;
8585 	}
8586 }
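
/* Note on the U32_MAX guard above: when both operands' umax_value fit in
 * 32 bits, the product is at most (2^32 - 1)^2 < 2^64, so the u64
 * multiplications of the bounds cannot wrap.  E.g. (hypothetical values)
 * [2, 1000] * [3, 5] gives [6, 5000]; if either factor may exceed U32_MAX
 * the result is conservatively marked unbounded instead.
 */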
8587 
8588 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
8589 				 struct bpf_reg_state *src_reg)
8590 {
8591 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
8592 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
8593 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
8594 	s32 smin_val = src_reg->s32_min_value;
8595 	u32 umax_val = src_reg->u32_max_value;
8596 
8597 	if (src_known && dst_known) {
8598 		__mark_reg32_known(dst_reg, var32_off.value);
8599 		return;
8600 	}
8601 
8602 	/* We get our minimum from the var_off, since that's inherently
8603 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
8604 	 */
8605 	dst_reg->u32_min_value = var32_off.value;
8606 	dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
8607 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
8608 		/* Lose signed bounds when ANDing negative numbers,
8609 		 * ain't nobody got time for that.
8610 		 */
8611 		dst_reg->s32_min_value = S32_MIN;
8612 		dst_reg->s32_max_value = S32_MAX;
8613 	} else {
8614 		/* ANDing two positives gives a positive, so safe to
8615 		 * cast result into s32.
8616 		 */
8617 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8618 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8619 	}
8620 }
8621 
8622 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
8623 			       struct bpf_reg_state *src_reg)
8624 {
8625 	bool src_known = tnum_is_const(src_reg->var_off);
8626 	bool dst_known = tnum_is_const(dst_reg->var_off);
8627 	s64 smin_val = src_reg->smin_value;
8628 	u64 umax_val = src_reg->umax_value;
8629 
8630 	if (src_known && dst_known) {
8631 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
8632 		return;
8633 	}
8634 
8635 	/* We get our minimum from the var_off, since that's inherently
8636 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
8637 	 */
8638 	dst_reg->umin_value = dst_reg->var_off.value;
8639 	dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
8640 	if (dst_reg->smin_value < 0 || smin_val < 0) {
8641 		/* Lose signed bounds when ANDing negative numbers,
8642 		 * ain't nobody got time for that.
8643 		 */
8644 		dst_reg->smin_value = S64_MIN;
8645 		dst_reg->smax_value = S64_MAX;
8646 	} else {
8647 		/* ANDing two positives gives a positive, so safe to
8648 		 * cast result into s64.
8649 		 */
8650 		dst_reg->smin_value = dst_reg->umin_value;
8651 		dst_reg->smax_value = dst_reg->umax_value;
8652 	}
8653 	/* We may learn something more from the var_off */
8654 	__update_reg_bounds(dst_reg);
8655 }
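
/* Worked example for the AND helpers above (hypothetical values): if
 * dst_reg is completely unknown and the source is the constant 0xff, the
 * caller's tnum_and() leaves var_off = {value=0, mask=0xff}, so here
 * umin_value becomes var_off.value (0) and umax_value becomes
 * min(old umax_value, 0xff) = 0xff, i.e. the usual "mask down to a small
 * range" pattern that later bounds checks rely on.
 */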
8656 
8657 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
8658 				struct bpf_reg_state *src_reg)
8659 {
8660 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
8661 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
8662 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
8663 	s32 smin_val = src_reg->s32_min_value;
8664 	u32 umin_val = src_reg->u32_min_value;
8665 
8666 	if (src_known && dst_known) {
8667 		__mark_reg32_known(dst_reg, var32_off.value);
8668 		return;
8669 	}
8670 
8671 	/* We get our maximum from the var_off, and our minimum is the
8672 	 * maximum of the operands' minima
8673 	 */
8674 	dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
8675 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
8676 	if (dst_reg->s32_min_value < 0 || smin_val < 0) {
8677 		/* Lose signed bounds when ORing negative numbers,
8678 		 * ain't nobody got time for that.
8679 		 */
8680 		dst_reg->s32_min_value = S32_MIN;
8681 		dst_reg->s32_max_value = S32_MAX;
8682 	} else {
8683 		/* ORing two positives gives a positive, so safe to
8684 		 * cast result into s32.
8685 		 */
8686 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8687 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8688 	}
8689 }
8690 
8691 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
8692 			      struct bpf_reg_state *src_reg)
8693 {
8694 	bool src_known = tnum_is_const(src_reg->var_off);
8695 	bool dst_known = tnum_is_const(dst_reg->var_off);
8696 	s64 smin_val = src_reg->smin_value;
8697 	u64 umin_val = src_reg->umin_value;
8698 
8699 	if (src_known && dst_known) {
8700 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
8701 		return;
8702 	}
8703 
8704 	/* We get our maximum from the var_off, and our minimum is the
8705 	 * maximum of the operands' minima
8706 	 */
8707 	dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
8708 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
8709 	if (dst_reg->smin_value < 0 || smin_val < 0) {
8710 		/* Lose signed bounds when ORing negative numbers,
8711 		 * ain't nobody got time for that.
8712 		 */
8713 		dst_reg->smin_value = S64_MIN;
8714 		dst_reg->smax_value = S64_MAX;
8715 	} else {
8716 		/* ORing two positives gives a positive, so safe to
8717 		 * cast result into s64.
8718 		 */
8719 		dst_reg->smin_value = dst_reg->umin_value;
8720 		dst_reg->smax_value = dst_reg->umax_value;
8721 	}
8722 	/* We may learn something more from the var_off */
8723 	__update_reg_bounds(dst_reg);
8724 }
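
/* Worked example for the OR helpers above (hypothetical values): if
 * dst_reg is in [0, 7] with var_off = {value=0, mask=0x7} and the source
 * is the constant 0x10, the caller's tnum_or() yields
 * {value=0x10, mask=0x7}, so umin_value becomes max(0, 0x10) = 0x10 and
 * umax_value becomes value | mask = 0x17.
 */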
8725 
8726 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
8727 				 struct bpf_reg_state *src_reg)
8728 {
8729 	bool src_known = tnum_subreg_is_const(src_reg->var_off);
8730 	bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
8731 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
8732 	s32 smin_val = src_reg->s32_min_value;
8733 
8734 	if (src_known && dst_known) {
8735 		__mark_reg32_known(dst_reg, var32_off.value);
8736 		return;
8737 	}
8738 
8739 	/* We get both minimum and maximum from the var32_off. */
8740 	dst_reg->u32_min_value = var32_off.value;
8741 	dst_reg->u32_max_value = var32_off.value | var32_off.mask;
8742 
8743 	if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
8744 		/* XORing two positive sign numbers gives a positive,
8745 		 * so safe to cast u32 result into s32.
8746 		 */
8747 		dst_reg->s32_min_value = dst_reg->u32_min_value;
8748 		dst_reg->s32_max_value = dst_reg->u32_max_value;
8749 	} else {
8750 		dst_reg->s32_min_value = S32_MIN;
8751 		dst_reg->s32_max_value = S32_MAX;
8752 	}
8753 }
8754 
8755 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
8756 			       struct bpf_reg_state *src_reg)
8757 {
8758 	bool src_known = tnum_is_const(src_reg->var_off);
8759 	bool dst_known = tnum_is_const(dst_reg->var_off);
8760 	s64 smin_val = src_reg->smin_value;
8761 
8762 	if (src_known && dst_known) {
8763 		/* dst_reg->var_off.value has been updated earlier */
8764 		__mark_reg_known(dst_reg, dst_reg->var_off.value);
8765 		return;
8766 	}
8767 
8768 	/* We get both minimum and maximum from the var_off. */
8769 	dst_reg->umin_value = dst_reg->var_off.value;
8770 	dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
8771 
8772 	if (dst_reg->smin_value >= 0 && smin_val >= 0) {
8773 		/* XORing two positive sign numbers gives a positive,
8774 		 * so safe to cast u64 result into s64.
8775 		 */
8776 		dst_reg->smin_value = dst_reg->umin_value;
8777 		dst_reg->smax_value = dst_reg->umax_value;
8778 	} else {
8779 		dst_reg->smin_value = S64_MIN;
8780 		dst_reg->smax_value = S64_MAX;
8781 	}
8782 
8783 	__update_reg_bounds(dst_reg);
8784 }
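
/* Worked example for the XOR helpers above (hypothetical values): if
 * dst_reg has var_off = {value=0xc, mask=0x3} (i.e. one of 12..15) and
 * the source is the constant 0x4, the caller's tnum_xor() yields
 * {value=0x8, mask=0x3}, so the bounds derived here are umin_value = 0x8
 * and umax_value = 0x8 | 0x3 = 0xb, matching the fact that flipping bit 2
 * maps 12..15 onto 8..11.
 */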
8785 
8786 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8787 				   u64 umin_val, u64 umax_val)
8788 {
8789 	/* We lose all sign bit information (except what we can pick
8790 	 * up from var_off)
8791 	 */
8792 	dst_reg->s32_min_value = S32_MIN;
8793 	dst_reg->s32_max_value = S32_MAX;
8794 	/* If we might shift our top bit out, then we know nothing */
8795 	if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
8796 		dst_reg->u32_min_value = 0;
8797 		dst_reg->u32_max_value = U32_MAX;
8798 	} else {
8799 		dst_reg->u32_min_value <<= umin_val;
8800 		dst_reg->u32_max_value <<= umax_val;
8801 	}
8802 }
8803 
8804 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
8805 				 struct bpf_reg_state *src_reg)
8806 {
8807 	u32 umax_val = src_reg->u32_max_value;
8808 	u32 umin_val = src_reg->u32_min_value;
8809 	/* u32 alu operation will zext upper bits */
8810 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8811 
8812 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8813 	dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
8814 	/* Not strictly required, but to be careful mark the reg64 bounds as
8815 	 * unknown so that we are forced to pick them up from the tnum and
8816 	 * zext later; if some path skips this step we are still safe.
8817 	 */
8818 	__mark_reg64_unbounded(dst_reg);
8819 	__update_reg32_bounds(dst_reg);
8820 }
8821 
8822 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
8823 				   u64 umin_val, u64 umax_val)
8824 {
8825 	/* Special case <<32 because it is a common compiler pattern to sign
8826 	 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
8827 	 * positive we know this shift will also be positive so we can track
8828 	 * bounds correctly. Otherwise we lose all sign bit information except
8829 	 * what we can pick up from var_off. Perhaps we can generalize this
8830 	 * later to shifts of any length.
8831 	 */
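	/* Worked example (hypothetical bounds): compilers commonly emit
	 * "r1 <<= 32; r1 s>>= 32" to sign extend the lower half of r1.  If
	 * the subreg is known to be in [0, 100], the shift by exactly 32
	 * below yields smin_value = 0 and smax_value = 100ULL << 32, which
	 * the subsequent arithmetic right shift maps back to [0, 100]
	 * instead of losing the bounds entirely.
	 */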
8832 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
8833 		dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
8834 	else
8835 		dst_reg->smax_value = S64_MAX;
8836 
8837 	if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
8838 		dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
8839 	else
8840 		dst_reg->smin_value = S64_MIN;
8841 
8842 	/* If we might shift our top bit out, then we know nothing */
8843 	if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
8844 		dst_reg->umin_value = 0;
8845 		dst_reg->umax_value = U64_MAX;
8846 	} else {
8847 		dst_reg->umin_value <<= umin_val;
8848 		dst_reg->umax_value <<= umax_val;
8849 	}
8850 }
8851 
8852 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
8853 			       struct bpf_reg_state *src_reg)
8854 {
8855 	u64 umax_val = src_reg->umax_value;
8856 	u64 umin_val = src_reg->umin_value;
8857 
8858 	/* scalar64 calc uses 32bit unshifted bounds so must be called first */
8859 	__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
8860 	__scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
8861 
8862 	dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
8863 	/* We may learn something more from the var_off */
8864 	__update_reg_bounds(dst_reg);
8865 }
8866 
8867 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
8868 				 struct bpf_reg_state *src_reg)
8869 {
8870 	struct tnum subreg = tnum_subreg(dst_reg->var_off);
8871 	u32 umax_val = src_reg->u32_max_value;
8872 	u32 umin_val = src_reg->u32_min_value;
8873 
8874 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8875 	 * be negative, then either:
8876 	 * 1) src_reg might be zero, so the sign bit of the result is
8877 	 *    unknown, so we lose our signed bounds
8878 	 * 2) it's known negative, thus the unsigned bounds capture the
8879 	 *    signed bounds
8880 	 * 3) the signed bounds cross zero, so they tell us nothing
8881 	 *    about the result
8882 	 * If the value in dst_reg is known nonnegative, then again the
8883 	 * unsigned bounds capture the signed bounds.
8884 	 * Thus, in all cases it suffices to blow away our signed bounds
8885 	 * and rely on inferring new ones from the unsigned bounds and
8886 	 * var_off of the result.
8887 	 */
8888 	dst_reg->s32_min_value = S32_MIN;
8889 	dst_reg->s32_max_value = S32_MAX;
8890 
8891 	dst_reg->var_off = tnum_rshift(subreg, umin_val);
8892 	dst_reg->u32_min_value >>= umax_val;
8893 	dst_reg->u32_max_value >>= umin_val;
8894 
8895 	__mark_reg64_unbounded(dst_reg);
8896 	__update_reg32_bounds(dst_reg);
8897 }
8898 
8899 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
8900 			       struct bpf_reg_state *src_reg)
8901 {
8902 	u64 umax_val = src_reg->umax_value;
8903 	u64 umin_val = src_reg->umin_value;
8904 
8905 	/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
8906 	 * be negative, then either:
8907 	 * 1) src_reg might be zero, so the sign bit of the result is
8908 	 *    unknown, so we lose our signed bounds
8909 	 * 2) it's known negative, thus the unsigned bounds capture the
8910 	 *    signed bounds
8911 	 * 3) the signed bounds cross zero, so they tell us nothing
8912 	 *    about the result
8913 	 * If the value in dst_reg is known nonnegative, then again the
8914 	 * unsigned bounds capture the signed bounds.
8915 	 * Thus, in all cases it suffices to blow away our signed bounds
8916 	 * and rely on inferring new ones from the unsigned bounds and
8917 	 * var_off of the result.
8918 	 */
8919 	dst_reg->smin_value = S64_MIN;
8920 	dst_reg->smax_value = S64_MAX;
8921 	dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
8922 	dst_reg->umin_value >>= umax_val;
8923 	dst_reg->umax_value >>= umin_val;
8924 
8925 	/* It's not easy to operate on alu32 bounds here because it depends
8926 	 * on bits being shifted in. Take the easy way out and mark unbounded
8927 	 * so we can recalculate later from the tnum.
8928 	 */
8929 	__mark_reg32_unbounded(dst_reg);
8930 	__update_reg_bounds(dst_reg);
8931 }
8932 
8933 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
8934 				  struct bpf_reg_state *src_reg)
8935 {
8936 	u64 umin_val = src_reg->u32_min_value;
8937 
8938 	/* Upon reaching here, src_known is true and
8939 	 * umax_val is equal to umin_val.
8940 	 */
8941 	dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
8942 	dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
8943 
8944 	dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
8945 
8946 	/* blow away the dst_reg umin_value/umax_value and rely on
8947 	 * dst_reg var_off to refine the result.
8948 	 */
8949 	dst_reg->u32_min_value = 0;
8950 	dst_reg->u32_max_value = U32_MAX;
8951 
8952 	__mark_reg64_unbounded(dst_reg);
8953 	__update_reg32_bounds(dst_reg);
8954 }
8955 
8956 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
8957 				struct bpf_reg_state *src_reg)
8958 {
8959 	u64 umin_val = src_reg->umin_value;
8960 
8961 	/* Upon reaching here, src_known is true and umax_val is equal
8962 	 * to umin_val.
8963 	 */
8964 	dst_reg->smin_value >>= umin_val;
8965 	dst_reg->smax_value >>= umin_val;
8966 
8967 	dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
8968 
8969 	/* blow away the dst_reg umin_value/umax_value and rely on
8970 	 * dst_reg var_off to refine the result.
8971 	 */
8972 	dst_reg->umin_value = 0;
8973 	dst_reg->umax_value = U64_MAX;
8974 
8975 	/* It's not easy to operate on alu32 bounds here because it depends
8976 	 * on bits being shifted in from the upper 32 bits. Take the easy way
8977 	 * out and mark unbounded so we can recalculate later from the tnum.
8978 	 */
8979 	__mark_reg32_unbounded(dst_reg);
8980 	__update_reg_bounds(dst_reg);
8981 }
8982 
8983 /* WARNING: This function does calculations on 64-bit values, but the actual
8984  * execution may occur on 32-bit values. Therefore, things like bitshifts
8985  * need extra checks in the 32-bit case.
8986  */
8987 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
8988 				      struct bpf_insn *insn,
8989 				      struct bpf_reg_state *dst_reg,
8990 				      struct bpf_reg_state src_reg)
8991 {
8992 	struct bpf_reg_state *regs = cur_regs(env);
8993 	u8 opcode = BPF_OP(insn->code);
8994 	bool src_known;
8995 	s64 smin_val, smax_val;
8996 	u64 umin_val, umax_val;
8997 	s32 s32_min_val, s32_max_val;
8998 	u32 u32_min_val, u32_max_val;
8999 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
9000 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
9001 	int ret;
9002 
9003 	smin_val = src_reg.smin_value;
9004 	smax_val = src_reg.smax_value;
9005 	umin_val = src_reg.umin_value;
9006 	umax_val = src_reg.umax_value;
9007 
9008 	s32_min_val = src_reg.s32_min_value;
9009 	s32_max_val = src_reg.s32_max_value;
9010 	u32_min_val = src_reg.u32_min_value;
9011 	u32_max_val = src_reg.u32_max_value;
9012 
9013 	if (alu32) {
9014 		src_known = tnum_subreg_is_const(src_reg.var_off);
9015 		if ((src_known &&
9016 		     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
9017 		    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
9018 			/* Taint dst register if offset had invalid bounds
9019 			 * derived from e.g. dead branches.
9020 			 */
9021 			__mark_reg_unknown(env, dst_reg);
9022 			return 0;
9023 		}
9024 	} else {
9025 		src_known = tnum_is_const(src_reg.var_off);
9026 		if ((src_known &&
9027 		     (smin_val != smax_val || umin_val != umax_val)) ||
9028 		    smin_val > smax_val || umin_val > umax_val) {
9029 			/* Taint dst register if offset had invalid bounds
9030 			 * derived from e.g. dead branches.
9031 			 */
9032 			__mark_reg_unknown(env, dst_reg);
9033 			return 0;
9034 		}
9035 	}
9036 
9037 	if (!src_known &&
9038 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
9039 		__mark_reg_unknown(env, dst_reg);
9040 		return 0;
9041 	}
9042 
9043 	if (sanitize_needed(opcode)) {
9044 		ret = sanitize_val_alu(env, insn);
9045 		if (ret < 0)
9046 			return sanitize_err(env, insn, ret, NULL, NULL);
9047 	}
9048 
9049 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
9050 	 * There are two classes of instructions: for the first class we track
9051 	 * both alu32 and alu64 sign/unsigned bounds independently; this gives
9052 	 * the greatest precision when alu operations are mixed with jmp32
9053 	 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
9054 	 * BPF_OR, and BPF_XOR. This is possible because these ops have fairly
9055 	 * easy to understand and calculate behavior in both 32-bit and 64-bit
9056 	 * alu ops. See the alu32 verifier tests for examples. The second
9057 	 * class of operations, BPF_LSH, BPF_RSH, and BPF_ARSH, is not so easy
9058 	 * to track sign/unsigned bounds for, because the bits may cross
9059 	 * subreg boundaries in the alu64 case. When this happens we mark the
9060 	 * reg unbounded in the subreg bound space and use the resulting tnum
9061 	 * to calculate an approximation of the sign/unsigned bounds.
9062 	 */
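	/* For illustration (hypothetical instructions): for "w1 += 4" the
	 * 32-bit bounds are updated directly by scalar32_min_max_add(), so a
	 * following jmp32 test such as "if w1 < 100 goto ..." can refine them
	 * precisely.  A 64-bit shift such as "r1 >>= 5" is in the second
	 * class: bits from the upper half shift into the subreg, so the
	 * 32-bit bounds are marked unbounded and later re-derived from the
	 * resulting tnum.
	 */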
9063 	switch (opcode) {
9064 	case BPF_ADD:
9065 		scalar32_min_max_add(dst_reg, &src_reg);
9066 		scalar_min_max_add(dst_reg, &src_reg);
9067 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
9068 		break;
9069 	case BPF_SUB:
9070 		scalar32_min_max_sub(dst_reg, &src_reg);
9071 		scalar_min_max_sub(dst_reg, &src_reg);
9072 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
9073 		break;
9074 	case BPF_MUL:
9075 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
9076 		scalar32_min_max_mul(dst_reg, &src_reg);
9077 		scalar_min_max_mul(dst_reg, &src_reg);
9078 		break;
9079 	case BPF_AND:
9080 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
9081 		scalar32_min_max_and(dst_reg, &src_reg);
9082 		scalar_min_max_and(dst_reg, &src_reg);
9083 		break;
9084 	case BPF_OR:
9085 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
9086 		scalar32_min_max_or(dst_reg, &src_reg);
9087 		scalar_min_max_or(dst_reg, &src_reg);
9088 		break;
9089 	case BPF_XOR:
9090 		dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
9091 		scalar32_min_max_xor(dst_reg, &src_reg);
9092 		scalar_min_max_xor(dst_reg, &src_reg);
9093 		break;
9094 	case BPF_LSH:
9095 		if (umax_val >= insn_bitness) {
9096 			/* Shifts greater than 31 or 63 are undefined.
9097 			 * This includes shifts by a negative number.
9098 			 */
9099 			mark_reg_unknown(env, regs, insn->dst_reg);
9100 			break;
9101 		}
9102 		if (alu32)
9103 			scalar32_min_max_lsh(dst_reg, &src_reg);
9104 		else
9105 			scalar_min_max_lsh(dst_reg, &src_reg);
9106 		break;
9107 	case BPF_RSH:
9108 		if (umax_val >= insn_bitness) {
9109 			/* Shifts greater than 31 or 63 are undefined.
9110 			 * This includes shifts by a negative number.
9111 			 */
9112 			mark_reg_unknown(env, regs, insn->dst_reg);
9113 			break;
9114 		}
9115 		if (alu32)
9116 			scalar32_min_max_rsh(dst_reg, &src_reg);
9117 		else
9118 			scalar_min_max_rsh(dst_reg, &src_reg);
9119 		break;
9120 	case BPF_ARSH:
9121 		if (umax_val >= insn_bitness) {
9122 			/* Shifts greater than 31 or 63 are undefined.
9123 			 * This includes shifts by a negative number.
9124 			 */
9125 			mark_reg_unknown(env, regs, insn->dst_reg);
9126 			break;
9127 		}
9128 		if (alu32)
9129 			scalar32_min_max_arsh(dst_reg, &src_reg);
9130 		else
9131 			scalar_min_max_arsh(dst_reg, &src_reg);
9132 		break;
9133 	default:
9134 		mark_reg_unknown(env, regs, insn->dst_reg);
9135 		break;
9136 	}
9137 
9138 	/* ALU32 ops are zero extended into 64bit register */
9139 	if (alu32)
9140 		zext_32_to_64(dst_reg);
9141 	reg_bounds_sync(dst_reg);
9142 	return 0;
9143 }
9144 
9145 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
9146  * and var_off.
9147  */
9148 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
9149 				   struct bpf_insn *insn)
9150 {
9151 	struct bpf_verifier_state *vstate = env->cur_state;
9152 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9153 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
9154 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
9155 	u8 opcode = BPF_OP(insn->code);
9156 	int err;
9157 
9158 	dst_reg = &regs[insn->dst_reg];
9159 	src_reg = NULL;
9160 	if (dst_reg->type != SCALAR_VALUE)
9161 		ptr_reg = dst_reg;
9162 	else
9163 		/* Make sure ID is cleared otherwise dst_reg min/max could be
9164 		 * incorrectly propagated into other registers by find_equal_scalars()
9165 		 */
9166 		dst_reg->id = 0;
9167 	if (BPF_SRC(insn->code) == BPF_X) {
9168 		src_reg = &regs[insn->src_reg];
9169 		if (src_reg->type != SCALAR_VALUE) {
9170 			if (dst_reg->type != SCALAR_VALUE) {
9171 				/* Combining two pointers by any ALU op yields
9172 				 * an arbitrary scalar. Disallow all math except
9173 				 * pointer subtraction
9174 				 */
9175 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
9176 					mark_reg_unknown(env, regs, insn->dst_reg);
9177 					return 0;
9178 				}
9179 				verbose(env, "R%d pointer %s pointer prohibited\n",
9180 					insn->dst_reg,
9181 					bpf_alu_string[opcode >> 4]);
9182 				return -EACCES;
9183 			} else {
9184 				/* scalar += pointer
9185 				 * This is legal, but we have to reverse our
9186 				 * src/dest handling in computing the range
9187 				 */
9188 				err = mark_chain_precision(env, insn->dst_reg);
9189 				if (err)
9190 					return err;
9191 				return adjust_ptr_min_max_vals(env, insn,
9192 							       src_reg, dst_reg);
9193 			}
9194 		} else if (ptr_reg) {
9195 			/* pointer += scalar */
9196 			err = mark_chain_precision(env, insn->src_reg);
9197 			if (err)
9198 				return err;
9199 			return adjust_ptr_min_max_vals(env, insn,
9200 						       dst_reg, src_reg);
9201 		}
9202 	} else {
9203 		/* Pretend the src is a reg with a known value, since we only
9204 		 * need to be able to read from this state.
9205 		 */
9206 		off_reg.type = SCALAR_VALUE;
9207 		__mark_reg_known(&off_reg, insn->imm);
9208 		src_reg = &off_reg;
9209 		if (ptr_reg) /* pointer += K */
9210 			return adjust_ptr_min_max_vals(env, insn,
9211 						       ptr_reg, src_reg);
9212 	}
9213 
9214 	/* Got here implies adding two SCALAR_VALUEs */
9215 	if (WARN_ON_ONCE(ptr_reg)) {
9216 		print_verifier_state(env, state, true);
9217 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
9218 		return -EINVAL;
9219 	}
9220 	if (WARN_ON(!src_reg)) {
9221 		print_verifier_state(env, state, true);
9222 		verbose(env, "verifier internal error: no src_reg\n");
9223 		return -EINVAL;
9224 	}
9225 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
9226 }
9227 
9228 /* check validity of 32-bit and 64-bit arithmetic operations */
9229 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
9230 {
9231 	struct bpf_reg_state *regs = cur_regs(env);
9232 	u8 opcode = BPF_OP(insn->code);
9233 	int err;
9234 
9235 	if (opcode == BPF_END || opcode == BPF_NEG) {
9236 		if (opcode == BPF_NEG) {
9237 			if (BPF_SRC(insn->code) != BPF_K ||
9238 			    insn->src_reg != BPF_REG_0 ||
9239 			    insn->off != 0 || insn->imm != 0) {
9240 				verbose(env, "BPF_NEG uses reserved fields\n");
9241 				return -EINVAL;
9242 			}
9243 		} else {
9244 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
9245 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
9246 			    BPF_CLASS(insn->code) == BPF_ALU64) {
9247 				verbose(env, "BPF_END uses reserved fields\n");
9248 				return -EINVAL;
9249 			}
9250 		}
9251 
9252 		/* check src operand */
9253 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9254 		if (err)
9255 			return err;
9256 
9257 		if (is_pointer_value(env, insn->dst_reg)) {
9258 			verbose(env, "R%d pointer arithmetic prohibited\n",
9259 				insn->dst_reg);
9260 			return -EACCES;
9261 		}
9262 
9263 		/* check dest operand */
9264 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
9265 		if (err)
9266 			return err;
9267 
9268 	} else if (opcode == BPF_MOV) {
9269 
9270 		if (BPF_SRC(insn->code) == BPF_X) {
9271 			if (insn->imm != 0 || insn->off != 0) {
9272 				verbose(env, "BPF_MOV uses reserved fields\n");
9273 				return -EINVAL;
9274 			}
9275 
9276 			/* check src operand */
9277 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
9278 			if (err)
9279 				return err;
9280 		} else {
9281 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
9282 				verbose(env, "BPF_MOV uses reserved fields\n");
9283 				return -EINVAL;
9284 			}
9285 		}
9286 
9287 		/* check dest operand, mark as required later */
9288 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
9289 		if (err)
9290 			return err;
9291 
9292 		if (BPF_SRC(insn->code) == BPF_X) {
9293 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
9294 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
9295 
9296 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
9297 				/* case: R1 = R2
9298 				 * copy register state to dest reg
9299 				 */
9300 				if (src_reg->type == SCALAR_VALUE && !src_reg->id)
9301 					/* Assign src and dst registers the same ID
9302 					 * that will be used by find_equal_scalars()
9303 					 * to propagate min/max range.
9304 					 */
9305 					src_reg->id = ++env->id_gen;
9306 				*dst_reg = *src_reg;
9307 				dst_reg->live |= REG_LIVE_WRITTEN;
9308 				dst_reg->subreg_def = DEF_NOT_SUBREG;
9309 			} else {
9310 				/* R1 = (u32) R2 */
9311 				if (is_pointer_value(env, insn->src_reg)) {
9312 					verbose(env,
9313 						"R%d partial copy of pointer\n",
9314 						insn->src_reg);
9315 					return -EACCES;
9316 				} else if (src_reg->type == SCALAR_VALUE) {
9317 					*dst_reg = *src_reg;
9318 					/* Make sure ID is cleared otherwise
9319 					 * dst_reg min/max could be incorrectly
9320 					 * propagated into src_reg by find_equal_scalars()
9321 					 */
9322 					dst_reg->id = 0;
9323 					dst_reg->live |= REG_LIVE_WRITTEN;
9324 					dst_reg->subreg_def = env->insn_idx + 1;
9325 				} else {
9326 					mark_reg_unknown(env, regs,
9327 							 insn->dst_reg);
9328 				}
9329 				zext_32_to_64(dst_reg);
9330 				reg_bounds_sync(dst_reg);
9331 			}
9332 		} else {
9333 			/* case: R = imm
9334 			 * remember the value we stored into this reg
9335 			 */
9336 			/* clear any state __mark_reg_known doesn't set */
9337 			mark_reg_unknown(env, regs, insn->dst_reg);
9338 			regs[insn->dst_reg].type = SCALAR_VALUE;
9339 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
9340 				__mark_reg_known(regs + insn->dst_reg,
9341 						 insn->imm);
9342 			} else {
9343 				__mark_reg_known(regs + insn->dst_reg,
9344 						 (u32)insn->imm);
9345 			}
9346 		}
9347 
9348 	} else if (opcode > BPF_END) {
9349 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
9350 		return -EINVAL;
9351 
9352 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
9353 
9354 		if (BPF_SRC(insn->code) == BPF_X) {
9355 			if (insn->imm != 0 || insn->off != 0) {
9356 				verbose(env, "BPF_ALU uses reserved fields\n");
9357 				return -EINVAL;
9358 			}
9359 			/* check src1 operand */
9360 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
9361 			if (err)
9362 				return err;
9363 		} else {
9364 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
9365 				verbose(env, "BPF_ALU uses reserved fields\n");
9366 				return -EINVAL;
9367 			}
9368 		}
9369 
9370 		/* check src2 operand */
9371 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
9372 		if (err)
9373 			return err;
9374 
9375 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
9376 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
9377 			verbose(env, "div by zero\n");
9378 			return -EINVAL;
9379 		}
9380 
9381 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
9382 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
9383 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
9384 
9385 			if (insn->imm < 0 || insn->imm >= size) {
9386 				verbose(env, "invalid shift %d\n", insn->imm);
9387 				return -EINVAL;
9388 			}
9389 		}
9390 
9391 		/* check dest operand */
9392 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
9393 		if (err)
9394 			return err;
9395 
9396 		return adjust_reg_min_max_vals(env, insn);
9397 	}
9398 
9399 	return 0;
9400 }
9401 
9402 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
9403 				   struct bpf_reg_state *dst_reg,
9404 				   enum bpf_reg_type type,
9405 				   bool range_right_open)
9406 {
9407 	struct bpf_func_state *state;
9408 	struct bpf_reg_state *reg;
9409 	int new_range;
9410 
9411 	if (dst_reg->off < 0 ||
9412 	    (dst_reg->off == 0 && range_right_open))
9413 		/* This doesn't give us any range */
9414 		return;
9415 
9416 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
9417 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
9418 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
9419 		 * than pkt_end, but that's because it's also less than pkt.
9420 		 */
9421 		return;
9422 
9423 	new_range = dst_reg->off;
9424 	if (range_right_open)
9425 		new_range++;
9426 
9427 	/* Examples for register markings:
9428 	 *
9429 	 * pkt_data in dst register:
9430 	 *
9431 	 *   r2 = r3;
9432 	 *   r2 += 8;
9433 	 *   if (r2 > pkt_end) goto <handle exception>
9434 	 *   <access okay>
9435 	 *
9436 	 *   r2 = r3;
9437 	 *   r2 += 8;
9438 	 *   if (r2 < pkt_end) goto <access okay>
9439 	 *   <handle exception>
9440 	 *
9441 	 *   Where:
9442 	 *     r2 == dst_reg, pkt_end == src_reg
9443 	 *     r2=pkt(id=n,off=8,r=0)
9444 	 *     r3=pkt(id=n,off=0,r=0)
9445 	 *
9446 	 * pkt_data in src register:
9447 	 *
9448 	 *   r2 = r3;
9449 	 *   r2 += 8;
9450 	 *   if (pkt_end >= r2) goto <access okay>
9451 	 *   <handle exception>
9452 	 *
9453 	 *   r2 = r3;
9454 	 *   r2 += 8;
9455 	 *   if (pkt_end <= r2) goto <handle exception>
9456 	 *   <access okay>
9457 	 *
9458 	 *   Where:
9459 	 *     pkt_end == dst_reg, r2 == src_reg
9460 	 *     r2=pkt(id=n,off=8,r=0)
9461 	 *     r3=pkt(id=n,off=0,r=0)
9462 	 *
9463 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
9464 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
9465 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
9466 	 * the check.
9467 	 */
9468 
9469 	/* If our ids match, then we must have the same max_value.  And we
9470 	 * don't care about the other reg's fixed offset, since if it's too big
9471 	 * the range won't allow anything.
9472 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
9473 	 */
9474 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
9475 		if (reg->type == type && reg->id == dst_reg->id)
9476 			/* keep the maximum range already checked */
9477 			reg->range = max(reg->range, new_range);
9478 	}));
9479 }
9480 
9481 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
9482 {
9483 	struct tnum subreg = tnum_subreg(reg->var_off);
9484 	s32 sval = (s32)val;
9485 
9486 	switch (opcode) {
9487 	case BPF_JEQ:
9488 		if (tnum_is_const(subreg))
9489 			return !!tnum_equals_const(subreg, val);
9490 		break;
9491 	case BPF_JNE:
9492 		if (tnum_is_const(subreg))
9493 			return !tnum_equals_const(subreg, val);
9494 		break;
9495 	case BPF_JSET:
9496 		if ((~subreg.mask & subreg.value) & val)
9497 			return 1;
9498 		if (!((subreg.mask | subreg.value) & val))
9499 			return 0;
9500 		break;
9501 	case BPF_JGT:
9502 		if (reg->u32_min_value > val)
9503 			return 1;
9504 		else if (reg->u32_max_value <= val)
9505 			return 0;
9506 		break;
9507 	case BPF_JSGT:
9508 		if (reg->s32_min_value > sval)
9509 			return 1;
9510 		else if (reg->s32_max_value <= sval)
9511 			return 0;
9512 		break;
9513 	case BPF_JLT:
9514 		if (reg->u32_max_value < val)
9515 			return 1;
9516 		else if (reg->u32_min_value >= val)
9517 			return 0;
9518 		break;
9519 	case BPF_JSLT:
9520 		if (reg->s32_max_value < sval)
9521 			return 1;
9522 		else if (reg->s32_min_value >= sval)
9523 			return 0;
9524 		break;
9525 	case BPF_JGE:
9526 		if (reg->u32_min_value >= val)
9527 			return 1;
9528 		else if (reg->u32_max_value < val)
9529 			return 0;
9530 		break;
9531 	case BPF_JSGE:
9532 		if (reg->s32_min_value >= sval)
9533 			return 1;
9534 		else if (reg->s32_max_value < sval)
9535 			return 0;
9536 		break;
9537 	case BPF_JLE:
9538 		if (reg->u32_max_value <= val)
9539 			return 1;
9540 		else if (reg->u32_min_value > val)
9541 			return 0;
9542 		break;
9543 	case BPF_JSLE:
9544 		if (reg->s32_max_value <= sval)
9545 			return 1;
9546 		else if (reg->s32_min_value > sval)
9547 			return 0;
9548 		break;
9549 	}
9550 
9551 	return -1;
9552 }
9553 
9554 
9555 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
9556 {
9557 	s64 sval = (s64)val;
9558 
9559 	switch (opcode) {
9560 	case BPF_JEQ:
9561 		if (tnum_is_const(reg->var_off))
9562 			return !!tnum_equals_const(reg->var_off, val);
9563 		break;
9564 	case BPF_JNE:
9565 		if (tnum_is_const(reg->var_off))
9566 			return !tnum_equals_const(reg->var_off, val);
9567 		break;
9568 	case BPF_JSET:
9569 		if ((~reg->var_off.mask & reg->var_off.value) & val)
9570 			return 1;
9571 		if (!((reg->var_off.mask | reg->var_off.value) & val))
9572 			return 0;
9573 		break;
9574 	case BPF_JGT:
9575 		if (reg->umin_value > val)
9576 			return 1;
9577 		else if (reg->umax_value <= val)
9578 			return 0;
9579 		break;
9580 	case BPF_JSGT:
9581 		if (reg->smin_value > sval)
9582 			return 1;
9583 		else if (reg->smax_value <= sval)
9584 			return 0;
9585 		break;
9586 	case BPF_JLT:
9587 		if (reg->umax_value < val)
9588 			return 1;
9589 		else if (reg->umin_value >= val)
9590 			return 0;
9591 		break;
9592 	case BPF_JSLT:
9593 		if (reg->smax_value < sval)
9594 			return 1;
9595 		else if (reg->smin_value >= sval)
9596 			return 0;
9597 		break;
9598 	case BPF_JGE:
9599 		if (reg->umin_value >= val)
9600 			return 1;
9601 		else if (reg->umax_value < val)
9602 			return 0;
9603 		break;
9604 	case BPF_JSGE:
9605 		if (reg->smin_value >= sval)
9606 			return 1;
9607 		else if (reg->smax_value < sval)
9608 			return 0;
9609 		break;
9610 	case BPF_JLE:
9611 		if (reg->umax_value <= val)
9612 			return 1;
9613 		else if (reg->umin_value > val)
9614 			return 0;
9615 		break;
9616 	case BPF_JSLE:
9617 		if (reg->smax_value <= sval)
9618 			return 1;
9619 		else if (reg->smin_value > sval)
9620 			return 0;
9621 		break;
9622 	}
9623 
9624 	return -1;
9625 }
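
/* Worked example for the branch helpers above (hypothetical bounds): for
 * "if r1 > 7" with r1 known to be in [10, 20], umin_value (10) > 7, so the
 * branch is always taken and 1 is returned; with r1 in [0, 5] it is never
 * taken and 0 is returned; with r1 in [0, 10] neither bound decides the
 * comparison, -1 is returned, and both paths are explored.
 */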
9626 
9627 /* compute branch direction of the expression "if (reg opcode val) goto target;"
9628  * and return:
9629  *  1 - branch will be taken and "goto target" will be executed
9630  *  0 - branch will not be taken and fall-through to next insn
9631  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
9632  *      value range is [0,10]
9633  */
9634 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
9635 			   bool is_jmp32)
9636 {
9637 	if (__is_pointer_value(false, reg)) {
9638 		if (!reg_type_not_null(reg->type))
9639 			return -1;
9640 
9641 		/* If the pointer is valid, tests against zero will fail, so we
9642 		 * can use this to decide the branch direction.
9643 		 */
9644 		if (val != 0)
9645 			return -1;
9646 
9647 		switch (opcode) {
9648 		case BPF_JEQ:
9649 			return 0;
9650 		case BPF_JNE:
9651 			return 1;
9652 		default:
9653 			return -1;
9654 		}
9655 	}
9656 
9657 	if (is_jmp32)
9658 		return is_branch32_taken(reg, val, opcode);
9659 	return is_branch64_taken(reg, val, opcode);
9660 }
9661 
9662 static int flip_opcode(u32 opcode)
9663 {
9664 	/* How can we transform "a <op> b" into "b <op> a"? */
9665 	static const u8 opcode_flip[16] = {
9666 		/* these stay the same */
9667 		[BPF_JEQ  >> 4] = BPF_JEQ,
9668 		[BPF_JNE  >> 4] = BPF_JNE,
9669 		[BPF_JSET >> 4] = BPF_JSET,
9670 		/* these swap "lesser" and "greater" (L and G in the opcodes) */
9671 		[BPF_JGE  >> 4] = BPF_JLE,
9672 		[BPF_JGT  >> 4] = BPF_JLT,
9673 		[BPF_JLE  >> 4] = BPF_JGE,
9674 		[BPF_JLT  >> 4] = BPF_JGT,
9675 		[BPF_JSGE >> 4] = BPF_JSLE,
9676 		[BPF_JSGT >> 4] = BPF_JSLT,
9677 		[BPF_JSLE >> 4] = BPF_JSGE,
9678 		[BPF_JSLT >> 4] = BPF_JSGT
9679 	};
9680 	return opcode_flip[opcode >> 4];
9681 }
9682 
9683 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
9684 				   struct bpf_reg_state *src_reg,
9685 				   u8 opcode)
9686 {
9687 	struct bpf_reg_state *pkt;
9688 
9689 	if (src_reg->type == PTR_TO_PACKET_END) {
9690 		pkt = dst_reg;
9691 	} else if (dst_reg->type == PTR_TO_PACKET_END) {
9692 		pkt = src_reg;
9693 		opcode = flip_opcode(opcode);
9694 	} else {
9695 		return -1;
9696 	}
9697 
9698 	if (pkt->range >= 0)
9699 		return -1;
9700 
9701 	switch (opcode) {
9702 	case BPF_JLE:
9703 		/* pkt <= pkt_end */
9704 		fallthrough;
9705 	case BPF_JGT:
9706 		/* pkt > pkt_end */
9707 		if (pkt->range == BEYOND_PKT_END)
9708 			/* pkt has at least one extra byte beyond pkt_end */
9709 			return opcode == BPF_JGT;
9710 		break;
9711 	case BPF_JLT:
9712 		/* pkt < pkt_end */
9713 		fallthrough;
9714 	case BPF_JGE:
9715 		/* pkt >= pkt_end */
9716 		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
9717 			return opcode == BPF_JGE;
9718 		break;
9719 	}
9720 	return -1;
9721 }
9722 
9723 /* Adjusts the register min/max values in the case that the dst_reg is the
9724  * variable register that we are working on, and src_reg is a constant or we're
9725  * simply doing a BPF_K check.
9726  * In JEQ/JNE cases we also adjust the var_off values.
9727  */
9728 static void reg_set_min_max(struct bpf_reg_state *true_reg,
9729 			    struct bpf_reg_state *false_reg,
9730 			    u64 val, u32 val32,
9731 			    u8 opcode, bool is_jmp32)
9732 {
9733 	struct tnum false_32off = tnum_subreg(false_reg->var_off);
9734 	struct tnum false_64off = false_reg->var_off;
9735 	struct tnum true_32off = tnum_subreg(true_reg->var_off);
9736 	struct tnum true_64off = true_reg->var_off;
9737 	s64 sval = (s64)val;
9738 	s32 sval32 = (s32)val32;
9739 
9740 	/* If the dst_reg is a pointer, we can't learn anything about its
9741 	 * variable offset from the compare (unless src_reg were a pointer into
9742 	 * the same object, but we don't bother with that).
9743 	 * Since false_reg and true_reg have the same type by construction, we
9744 	 * only need to check one of them for pointerness.
9745 	 */
9746 	if (__is_pointer_value(false, false_reg))
9747 		return;
9748 
9749 	switch (opcode) {
9750 	/* JEQ/JNE comparison doesn't change the register equivalence.
9751 	 *
9752 	 * r1 = r2;
9753 	 * if (r1 == 42) goto label;
9754 	 * ...
9755 	 * label: // here both r1 and r2 are known to be 42.
9756 	 *
9757 	 * Hence when marking a register as known, preserve its ID.
9758 	 */
9759 	case BPF_JEQ:
9760 		if (is_jmp32) {
9761 			__mark_reg32_known(true_reg, val32);
9762 			true_32off = tnum_subreg(true_reg->var_off);
9763 		} else {
9764 			___mark_reg_known(true_reg, val);
9765 			true_64off = true_reg->var_off;
9766 		}
9767 		break;
9768 	case BPF_JNE:
9769 		if (is_jmp32) {
9770 			__mark_reg32_known(false_reg, val32);
9771 			false_32off = tnum_subreg(false_reg->var_off);
9772 		} else {
9773 			___mark_reg_known(false_reg, val);
9774 			false_64off = false_reg->var_off;
9775 		}
9776 		break;
9777 	case BPF_JSET:
9778 		if (is_jmp32) {
9779 			false_32off = tnum_and(false_32off, tnum_const(~val32));
9780 			if (is_power_of_2(val32))
9781 				true_32off = tnum_or(true_32off,
9782 						     tnum_const(val32));
9783 		} else {
9784 			false_64off = tnum_and(false_64off, tnum_const(~val));
9785 			if (is_power_of_2(val))
9786 				true_64off = tnum_or(true_64off,
9787 						     tnum_const(val));
9788 		}
9789 		break;
9790 	case BPF_JGE:
9791 	case BPF_JGT:
9792 	{
9793 		if (is_jmp32) {
9794 			u32 false_umax = opcode == BPF_JGT ? val32  : val32 - 1;
9795 			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
9796 
9797 			false_reg->u32_max_value = min(false_reg->u32_max_value,
9798 						       false_umax);
9799 			true_reg->u32_min_value = max(true_reg->u32_min_value,
9800 						      true_umin);
9801 		} else {
9802 			u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
9803 			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
9804 
9805 			false_reg->umax_value = min(false_reg->umax_value, false_umax);
9806 			true_reg->umin_value = max(true_reg->umin_value, true_umin);
9807 		}
9808 		break;
9809 	}
9810 	case BPF_JSGE:
9811 	case BPF_JSGT:
9812 	{
9813 		if (is_jmp32) {
9814 			s32 false_smax = opcode == BPF_JSGT ? sval32    : sval32 - 1;
9815 			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
9816 
9817 			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
9818 			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
9819 		} else {
9820 			s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
9821 			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
9822 
9823 			false_reg->smax_value = min(false_reg->smax_value, false_smax);
9824 			true_reg->smin_value = max(true_reg->smin_value, true_smin);
9825 		}
9826 		break;
9827 	}
9828 	case BPF_JLE:
9829 	case BPF_JLT:
9830 	{
9831 		if (is_jmp32) {
9832 			u32 false_umin = opcode == BPF_JLT ? val32  : val32 + 1;
9833 			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
9834 
9835 			false_reg->u32_min_value = max(false_reg->u32_min_value,
9836 						       false_umin);
9837 			true_reg->u32_max_value = min(true_reg->u32_max_value,
9838 						      true_umax);
9839 		} else {
9840 			u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
9841 			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
9842 
9843 			false_reg->umin_value = max(false_reg->umin_value, false_umin);
9844 			true_reg->umax_value = min(true_reg->umax_value, true_umax);
9845 		}
9846 		break;
9847 	}
9848 	case BPF_JSLE:
9849 	case BPF_JSLT:
9850 	{
9851 		if (is_jmp32) {
9852 			s32 false_smin = opcode == BPF_JSLT ? sval32    : sval32 + 1;
9853 			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
9854 
9855 			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
9856 			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
9857 		} else {
9858 			s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
9859 			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
9860 
9861 			false_reg->smin_value = max(false_reg->smin_value, false_smin);
9862 			true_reg->smax_value = min(true_reg->smax_value, true_smax);
9863 		}
9864 		break;
9865 	}
9866 	default:
9867 		return;
9868 	}
9869 
9870 	if (is_jmp32) {
9871 		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
9872 					     tnum_subreg(false_32off));
9873 		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
9874 					    tnum_subreg(true_32off));
9875 		__reg_combine_32_into_64(false_reg);
9876 		__reg_combine_32_into_64(true_reg);
9877 	} else {
9878 		false_reg->var_off = false_64off;
9879 		true_reg->var_off = true_64off;
9880 		__reg_combine_64_into_32(false_reg);
9881 		__reg_combine_64_into_32(true_reg);
9882 	}
9883 }
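
/* Worked example for reg_set_min_max() (hypothetical bounds): for
 * "if r1 > 7 goto ..." with r1 initially in [0, 100], the BPF_JGT case
 * above sets true_reg->umin_value = max(0, 7 + 1) = 8 and
 * false_reg->umax_value = min(100, 7) = 7, so the taken path continues
 * with r1 in [8, 100] and the fall-through path with r1 in [0, 7].
 */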
9884 
9885 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
9886  * the variable reg.
9887  */
9888 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
9889 				struct bpf_reg_state *false_reg,
9890 				u64 val, u32 val32,
9891 				u8 opcode, bool is_jmp32)
9892 {
9893 	opcode = flip_opcode(opcode);
9894 	/* This uses zero as "not present in table"; luckily the zero opcode,
9895 	 * BPF_JA, can't get here.
9896 	 */
9897 	if (opcode)
9898 		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
9899 }
9900 
9901 /* Regs are known to be equal, so intersect their min/max/var_off */
9902 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
9903 				  struct bpf_reg_state *dst_reg)
9904 {
9905 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
9906 							dst_reg->umin_value);
9907 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
9908 							dst_reg->umax_value);
9909 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
9910 							dst_reg->smin_value);
9911 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
9912 							dst_reg->smax_value);
9913 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
9914 							     dst_reg->var_off);
9915 	reg_bounds_sync(src_reg);
9916 	reg_bounds_sync(dst_reg);
9917 }
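
/* Worked example for __reg_combine_min_max() (hypothetical bounds): when
 * the true branch of "if r1 == r2" is taken with r1 in [0, 100] and r2 in
 * [50, 200], both registers end up with the intersection
 * [max(0, 50), min(100, 200)] = [50, 100], and their var_offs are
 * narrowed with tnum_intersect() as well.
 */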
9918 
9919 static void reg_combine_min_max(struct bpf_reg_state *true_src,
9920 				struct bpf_reg_state *true_dst,
9921 				struct bpf_reg_state *false_src,
9922 				struct bpf_reg_state *false_dst,
9923 				u8 opcode)
9924 {
9925 	switch (opcode) {
9926 	case BPF_JEQ:
9927 		__reg_combine_min_max(true_src, true_dst);
9928 		break;
9929 	case BPF_JNE:
9930 		__reg_combine_min_max(false_src, false_dst);
9931 		break;
9932 	}
9933 }
9934 
9935 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
9936 				 struct bpf_reg_state *reg, u32 id,
9937 				 bool is_null)
9938 {
9939 	if (type_may_be_null(reg->type) && reg->id == id &&
9940 	    !WARN_ON_ONCE(!reg->id)) {
9941 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
9942 				 !tnum_equals_const(reg->var_off, 0) ||
9943 				 reg->off)) {
9944 			/* Old offset (both fixed and variable parts) should
9945 			 * have been known-zero, because we don't allow pointer
9946 			 * arithmetic on pointers that might be NULL. If we
9947 			 * see this happening, don't convert the register.
9948 			 */
9949 			return;
9950 		}
9951 		if (is_null) {
9952 			reg->type = SCALAR_VALUE;
9953 			/* We don't need id and ref_obj_id from this point
9954 			 * onwards anymore, thus we should better reset it,
9955 			 * so that state pruning has chances to take effect.
9956 			 */
9957 			reg->id = 0;
9958 			reg->ref_obj_id = 0;
9959 
9960 			return;
9961 		}
9962 
9963 		mark_ptr_not_null_reg(reg);
9964 
9965 		if (!reg_may_point_to_spin_lock(reg)) {
9966 			/* For not-NULL ptr, reg->ref_obj_id will be reset
9967 			 * in release_reference().
9968 			 *
9969 			 * reg->id is still used by spin_lock ptr. Other
9970 			 * than spin_lock ptr type, reg->id can be reset.
9971 			 */
9972 			reg->id = 0;
9973 		}
9974 	}
9975 }
9976 
9977 /* The logic is similar to find_good_pkt_pointers(), both could eventually
9978  * be folded together at some point.
9979  */
9980 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
9981 				  bool is_null)
9982 {
9983 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
9984 	struct bpf_reg_state *regs = state->regs, *reg;
9985 	u32 ref_obj_id = regs[regno].ref_obj_id;
9986 	u32 id = regs[regno].id;
9987 
9988 	if (ref_obj_id && ref_obj_id == id && is_null)
9989 		/* regs[regno] is in the " == NULL" branch.
9990 		 * No one could have freed the reference state before
9991 		 * doing the NULL check.
9992 		 */
9993 		WARN_ON_ONCE(release_reference_state(state, id));
9994 
9995 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
9996 		mark_ptr_or_null_reg(state, reg, id, is_null);
9997 	}));
9998 }
9999 
10000 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
10001 				   struct bpf_reg_state *dst_reg,
10002 				   struct bpf_reg_state *src_reg,
10003 				   struct bpf_verifier_state *this_branch,
10004 				   struct bpf_verifier_state *other_branch)
10005 {
10006 	if (BPF_SRC(insn->code) != BPF_X)
10007 		return false;
10008 
10009 	/* Pointers are always 64-bit. */
10010 	if (BPF_CLASS(insn->code) == BPF_JMP32)
10011 		return false;
10012 
10013 	switch (BPF_OP(insn->code)) {
10014 	case BPF_JGT:
10015 		if ((dst_reg->type == PTR_TO_PACKET &&
10016 		     src_reg->type == PTR_TO_PACKET_END) ||
10017 		    (dst_reg->type == PTR_TO_PACKET_META &&
10018 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
10019 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
10020 			find_good_pkt_pointers(this_branch, dst_reg,
10021 					       dst_reg->type, false);
10022 			mark_pkt_end(other_branch, insn->dst_reg, true);
10023 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
10024 			    src_reg->type == PTR_TO_PACKET) ||
10025 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
10026 			    src_reg->type == PTR_TO_PACKET_META)) {
10027 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
10028 			find_good_pkt_pointers(other_branch, src_reg,
10029 					       src_reg->type, true);
10030 			mark_pkt_end(this_branch, insn->src_reg, false);
10031 		} else {
10032 			return false;
10033 		}
10034 		break;
10035 	case BPF_JLT:
10036 		if ((dst_reg->type == PTR_TO_PACKET &&
10037 		     src_reg->type == PTR_TO_PACKET_END) ||
10038 		    (dst_reg->type == PTR_TO_PACKET_META &&
10039 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
10040 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
10041 			find_good_pkt_pointers(other_branch, dst_reg,
10042 					       dst_reg->type, true);
10043 			mark_pkt_end(this_branch, insn->dst_reg, false);
10044 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
10045 			    src_reg->type == PTR_TO_PACKET) ||
10046 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
10047 			    src_reg->type == PTR_TO_PACKET_META)) {
10048 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
10049 			find_good_pkt_pointers(this_branch, src_reg,
10050 					       src_reg->type, false);
10051 			mark_pkt_end(other_branch, insn->src_reg, true);
10052 		} else {
10053 			return false;
10054 		}
10055 		break;
10056 	case BPF_JGE:
10057 		if ((dst_reg->type == PTR_TO_PACKET &&
10058 		     src_reg->type == PTR_TO_PACKET_END) ||
10059 		    (dst_reg->type == PTR_TO_PACKET_META &&
10060 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
10061 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
10062 			find_good_pkt_pointers(this_branch, dst_reg,
10063 					       dst_reg->type, true);
10064 			mark_pkt_end(other_branch, insn->dst_reg, false);
10065 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
10066 			    src_reg->type == PTR_TO_PACKET) ||
10067 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
10068 			    src_reg->type == PTR_TO_PACKET_META)) {
10069 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
10070 			find_good_pkt_pointers(other_branch, src_reg,
10071 					       src_reg->type, false);
10072 			mark_pkt_end(this_branch, insn->src_reg, true);
10073 		} else {
10074 			return false;
10075 		}
10076 		break;
10077 	case BPF_JLE:
10078 		if ((dst_reg->type == PTR_TO_PACKET &&
10079 		     src_reg->type == PTR_TO_PACKET_END) ||
10080 		    (dst_reg->type == PTR_TO_PACKET_META &&
10081 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
10082 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
10083 			find_good_pkt_pointers(other_branch, dst_reg,
10084 					       dst_reg->type, false);
10085 			mark_pkt_end(this_branch, insn->dst_reg, true);
10086 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
10087 			    src_reg->type == PTR_TO_PACKET) ||
10088 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
10089 			    src_reg->type == PTR_TO_PACKET_META)) {
10090 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
10091 			find_good_pkt_pointers(this_branch, src_reg,
10092 					       src_reg->type, true);
10093 			mark_pkt_end(other_branch, insn->src_reg, false);
10094 		} else {
10095 			return false;
10096 		}
10097 		break;
10098 	default:
10099 		return false;
10100 	}
10101 
10102 	return true;
10103 }
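
/* For example, with r2 holding pkt_data + 8 and r3 holding pkt_end, a
 * "if r2 > r3 goto drop" check means the fall-through path satisfies
 * r2 <= pkt_end, so find_good_pkt_pointers() grants an 8-byte range from
 * pkt_data on this_branch, while mark_pkt_end() records on the taken path
 * that r2 points beyond pkt_end.
 */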
10104 
10105 static void find_equal_scalars(struct bpf_verifier_state *vstate,
10106 			       struct bpf_reg_state *known_reg)
10107 {
10108 	struct bpf_func_state *state;
10109 	struct bpf_reg_state *reg;
10110 
10111 	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
10112 		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
10113 			*reg = *known_reg;
10114 	}));
10115 }
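
/* find_equal_scalars() above propagates newly learned bounds to every scalar
 * register sharing the same id. E.g. after "r2 = r1" the two registers
 * typically share an id, so once a branch such as "if r1 > 7 goto ..."
 * narrows r1, copying the known register state makes r2 just as bounded on
 * that path.
 */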
10116 
10117 static int check_cond_jmp_op(struct bpf_verifier_env *env,
10118 			     struct bpf_insn *insn, int *insn_idx)
10119 {
10120 	struct bpf_verifier_state *this_branch = env->cur_state;
10121 	struct bpf_verifier_state *other_branch;
10122 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
10123 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
10124 	u8 opcode = BPF_OP(insn->code);
10125 	bool is_jmp32;
10126 	int pred = -1;
10127 	int err;
10128 
10129 	/* Only conditional jumps are expected to reach here. */
10130 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
10131 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
10132 		return -EINVAL;
10133 	}
10134 
10135 	if (BPF_SRC(insn->code) == BPF_X) {
10136 		if (insn->imm != 0) {
10137 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
10138 			return -EINVAL;
10139 		}
10140 
10141 		/* check src1 operand */
10142 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
10143 		if (err)
10144 			return err;
10145 
10146 		if (is_pointer_value(env, insn->src_reg)) {
10147 			verbose(env, "R%d pointer comparison prohibited\n",
10148 				insn->src_reg);
10149 			return -EACCES;
10150 		}
10151 		src_reg = &regs[insn->src_reg];
10152 	} else {
10153 		if (insn->src_reg != BPF_REG_0) {
10154 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
10155 			return -EINVAL;
10156 		}
10157 	}
10158 
10159 	/* check src2 operand */
10160 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
10161 	if (err)
10162 		return err;
10163 
10164 	dst_reg = &regs[insn->dst_reg];
10165 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
10166 
10167 	if (BPF_SRC(insn->code) == BPF_K) {
10168 		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
10169 	} else if (src_reg->type == SCALAR_VALUE &&
10170 		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
10171 		pred = is_branch_taken(dst_reg,
10172 				       tnum_subreg(src_reg->var_off).value,
10173 				       opcode,
10174 				       is_jmp32);
10175 	} else if (src_reg->type == SCALAR_VALUE &&
10176 		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
10177 		pred = is_branch_taken(dst_reg,
10178 				       src_reg->var_off.value,
10179 				       opcode,
10180 				       is_jmp32);
10181 	} else if (reg_is_pkt_pointer_any(dst_reg) &&
10182 		   reg_is_pkt_pointer_any(src_reg) &&
10183 		   !is_jmp32) {
10184 		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
10185 	}
10186 
10187 	if (pred >= 0) {
10188 		/* If we get here with a dst_reg pointer type it is because
10189 		 * above is_branch_taken() special cased the 0 comparison.
10190 		 */
10191 		if (!__is_pointer_value(false, dst_reg))
10192 			err = mark_chain_precision(env, insn->dst_reg);
10193 		if (BPF_SRC(insn->code) == BPF_X && !err &&
10194 		    !__is_pointer_value(false, src_reg))
10195 			err = mark_chain_precision(env, insn->src_reg);
10196 		if (err)
10197 			return err;
10198 	}
10199 
10200 	if (pred == 1) {
10201 		/* Only follow the goto, ignore fall-through. If needed, push
10202 		 * the fall-through branch for simulation under speculative
10203 		 * execution.
10204 		 */
10205 		if (!env->bypass_spec_v1 &&
10206 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
10207 					       *insn_idx))
10208 			return -EFAULT;
10209 		*insn_idx += insn->off;
10210 		return 0;
10211 	} else if (pred == 0) {
10212 		/* Only follow the fall-through branch, since that's where the
10213 		 * program will go. If needed, push the goto branch for
10214 		 * simulation under speculative execution.
10215 		 */
10216 		if (!env->bypass_spec_v1 &&
10217 		    !sanitize_speculative_path(env, insn,
10218 					       *insn_idx + insn->off + 1,
10219 					       *insn_idx))
10220 			return -EFAULT;
10221 		return 0;
10222 	}
10223 
10224 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
10225 				  false);
10226 	if (!other_branch)
10227 		return -EFAULT;
10228 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
10229 
10230 	/* detect if we are comparing against a constant value so we can adjust
10231 	 * our min/max values for our dst register.
10232 	 * this is only legit if both are scalars (or pointers to the same
10233 	 * object, I suppose, but we don't support that right now), because
10234 	 * otherwise the different base pointers mean the offsets aren't
10235 	 * comparable.
10236 	 */
10237 	if (BPF_SRC(insn->code) == BPF_X) {
10238 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
10239 
10240 		if (dst_reg->type == SCALAR_VALUE &&
10241 		    src_reg->type == SCALAR_VALUE) {
10242 			if (tnum_is_const(src_reg->var_off) ||
10243 			    (is_jmp32 &&
10244 			     tnum_is_const(tnum_subreg(src_reg->var_off))))
10245 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
10246 						dst_reg,
10247 						src_reg->var_off.value,
10248 						tnum_subreg(src_reg->var_off).value,
10249 						opcode, is_jmp32);
10250 			else if (tnum_is_const(dst_reg->var_off) ||
10251 				 (is_jmp32 &&
10252 				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
10253 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
10254 						    src_reg,
10255 						    dst_reg->var_off.value,
10256 						    tnum_subreg(dst_reg->var_off).value,
10257 						    opcode, is_jmp32);
10258 			else if (!is_jmp32 &&
10259 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
10260 				/* Comparing for equality, we can combine knowledge */
10261 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
10262 						    &other_branch_regs[insn->dst_reg],
10263 						    src_reg, dst_reg, opcode);
10264 			if (src_reg->id &&
10265 			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
10266 				find_equal_scalars(this_branch, src_reg);
10267 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
10268 			}
10269 
10270 		}
10271 	} else if (dst_reg->type == SCALAR_VALUE) {
10272 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
10273 					dst_reg, insn->imm, (u32)insn->imm,
10274 					opcode, is_jmp32);
10275 	}
10276 
10277 	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
10278 	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
10279 		find_equal_scalars(this_branch, dst_reg);
10280 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
10281 	}
10282 
10283 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
10284 	 * NOTE: these optimizations below are related to pointer comparison
10285 	 *       which will never be JMP32.
10286 	 */
10287 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
10288 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
10289 	    type_may_be_null(dst_reg->type)) {
10290 		/* Mark all identical registers in each branch as either
10291 		 * safe or unknown depending on the R == 0 or R != 0 conditional.
10292 		 */
10293 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
10294 				      opcode == BPF_JNE);
10295 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
10296 				      opcode == BPF_JEQ);
10297 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
10298 					   this_branch, other_branch) &&
10299 		   is_pointer_value(env, insn->dst_reg)) {
10300 		verbose(env, "R%d pointer comparison prohibited\n",
10301 			insn->dst_reg);
10302 		return -EACCES;
10303 	}
10304 	if (env->log.level & BPF_LOG_LEVEL)
10305 		print_insn_state(env, this_branch->frame[this_branch->curframe]);
10306 	return 0;
10307 }
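
/* A small worked example for the pred handling above: if r1 is a known
 * constant 3, then "if r1 > 7 goto +N" yields pred == 0, so only the
 * fall-through path is analyzed; unless bypass_spec_v1 is set, the dead
 * taken path is still pushed via sanitize_speculative_path() and verified
 * as a speculative path for Spectre v1 hardening.
 */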
10308 
10309 /* verify BPF_LD_IMM64 instruction */
10310 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
10311 {
10312 	struct bpf_insn_aux_data *aux = cur_aux(env);
10313 	struct bpf_reg_state *regs = cur_regs(env);
10314 	struct bpf_reg_state *dst_reg;
10315 	struct bpf_map *map;
10316 	int err;
10317 
10318 	if (BPF_SIZE(insn->code) != BPF_DW) {
10319 		verbose(env, "invalid BPF_LD_IMM insn\n");
10320 		return -EINVAL;
10321 	}
10322 	if (insn->off != 0) {
10323 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
10324 		return -EINVAL;
10325 	}
10326 
10327 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
10328 	if (err)
10329 		return err;
10330 
10331 	dst_reg = &regs[insn->dst_reg];
10332 	if (insn->src_reg == 0) {
10333 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
10334 
10335 		dst_reg->type = SCALAR_VALUE;
10336 		__mark_reg_known(&regs[insn->dst_reg], imm);
10337 		return 0;
10338 	}
10339 
10340 	/* All special src_reg cases are listed below. From this point onwards
10341 	 * we either succeed and assign a corresponding dst_reg->type after
10342 	 * zeroing the offset, or fail and reject the program.
10343 	 */
10344 	mark_reg_known_zero(env, regs, insn->dst_reg);
10345 
10346 	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
10347 		dst_reg->type = aux->btf_var.reg_type;
10348 		switch (base_type(dst_reg->type)) {
10349 		case PTR_TO_MEM:
10350 			dst_reg->mem_size = aux->btf_var.mem_size;
10351 			break;
10352 		case PTR_TO_BTF_ID:
10353 			dst_reg->btf = aux->btf_var.btf;
10354 			dst_reg->btf_id = aux->btf_var.btf_id;
10355 			break;
10356 		default:
10357 			verbose(env, "bpf verifier is misconfigured\n");
10358 			return -EFAULT;
10359 		}
10360 		return 0;
10361 	}
10362 
10363 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
10364 		struct bpf_prog_aux *aux = env->prog->aux;
10365 		u32 subprogno = find_subprog(env,
10366 					     env->insn_idx + insn->imm + 1);
10367 
10368 		if (!aux->func_info) {
10369 			verbose(env, "missing btf func_info\n");
10370 			return -EINVAL;
10371 		}
10372 		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
10373 			verbose(env, "callback function not static\n");
10374 			return -EINVAL;
10375 		}
10376 
10377 		dst_reg->type = PTR_TO_FUNC;
10378 		dst_reg->subprogno = subprogno;
10379 		return 0;
10380 	}
10381 
10382 	map = env->used_maps[aux->map_index];
10383 	dst_reg->map_ptr = map;
10384 
10385 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
10386 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
10387 		dst_reg->type = PTR_TO_MAP_VALUE;
10388 		dst_reg->off = aux->map_off;
10389 		if (map_value_has_spin_lock(map))
10390 			dst_reg->id = ++env->id_gen;
10391 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
10392 		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
10393 		dst_reg->type = CONST_PTR_TO_MAP;
10394 	} else {
10395 		verbose(env, "bpf verifier is misconfigured\n");
10396 		return -EINVAL;
10397 	}
10398 
10399 	return 0;
10400 }
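
/* For the plain src_reg == 0 case above, the 64-bit immediate is split
 * across two instructions: the low 32 bits live in insn->imm and the high
 * 32 bits in (insn + 1)->imm. E.g.
 * BPF_LD_IMM64(BPF_REG_1, 0x1234567890abcdefULL) encodes imm = 0x90abcdef
 * in the first half and 0x12345678 in the second, and R1 becomes a fully
 * known SCALAR_VALUE.
 */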
10401 
10402 static bool may_access_skb(enum bpf_prog_type type)
10403 {
10404 	switch (type) {
10405 	case BPF_PROG_TYPE_SOCKET_FILTER:
10406 	case BPF_PROG_TYPE_SCHED_CLS:
10407 	case BPF_PROG_TYPE_SCHED_ACT:
10408 		return true;
10409 	default:
10410 		return false;
10411 	}
10412 }
10413 
10414 /* verify safety of LD_ABS|LD_IND instructions:
10415  * - they can only appear in the programs where ctx == skb
10416  * - since they are wrappers of function calls, they scratch R1-R5 registers,
10417  *   preserve R6-R9, and store return value into R0
10418  *
10419  * Implicit input:
10420  *   ctx == skb == R6 == CTX
10421  *
10422  * Explicit input:
10423  *   SRC == any register
10424  *   IMM == 32-bit immediate
10425  *
10426  * Output:
10427  *   R0 - 8/16/32-bit skb data converted to cpu endianness
10428  */
10429 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
10430 {
10431 	struct bpf_reg_state *regs = cur_regs(env);
10432 	static const int ctx_reg = BPF_REG_6;
10433 	u8 mode = BPF_MODE(insn->code);
10434 	int i, err;
10435 
10436 	if (!may_access_skb(resolve_prog_type(env->prog))) {
10437 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
10438 		return -EINVAL;
10439 	}
10440 
10441 	if (!env->ops->gen_ld_abs) {
10442 		verbose(env, "bpf verifier is misconfigured\n");
10443 		return -EINVAL;
10444 	}
10445 
10446 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
10447 	    BPF_SIZE(insn->code) == BPF_DW ||
10448 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
10449 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
10450 		return -EINVAL;
10451 	}
10452 
10453 	/* check whether implicit source operand (register R6) is readable */
10454 	err = check_reg_arg(env, ctx_reg, SRC_OP);
10455 	if (err)
10456 		return err;
10457 
10458 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
10459 	 * gen_ld_abs() may terminate the program at runtime, leading to
10460 	 * reference leak.
10461 	 */
10462 	err = check_reference_leak(env);
10463 	if (err) {
10464 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
10465 		return err;
10466 	}
10467 
10468 	if (env->cur_state->active_spin_lock) {
10469 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
10470 		return -EINVAL;
10471 	}
10472 
10473 	if (regs[ctx_reg].type != PTR_TO_CTX) {
10474 		verbose(env,
10475 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
10476 		return -EINVAL;
10477 	}
10478 
10479 	if (mode == BPF_IND) {
10480 		/* check explicit source operand */
10481 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
10482 		if (err)
10483 			return err;
10484 	}
10485 
10486 	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
10487 	if (err < 0)
10488 		return err;
10489 
10490 	/* reset caller saved regs to unreadable */
10491 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
10492 		mark_reg_not_init(env, regs, caller_saved[i]);
10493 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
10494 	}
10495 
10496 	/* mark destination R0 register as readable, since it contains
10497 	 * the value fetched from the packet.
10498 	 * Already marked as written above.
10499 	 */
10500 	mark_reg_unknown(env, regs, BPF_REG_0);
10501 	/* ld_abs load up to 32-bit skb data. */
10502 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
10503 	return 0;
10504 }
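
/* As an illustration, a socket filter may do
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
 *   BPF_LD_ABS(BPF_H, 12),
 * to fetch a 16-bit value at offset 12 of the packet into R0 (e.g. the
 * EtherType when the data starts at the Ethernet header); R1-R5 are
 * clobbered by the call, exactly as modelled above.
 */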
10505 
10506 static int check_return_code(struct bpf_verifier_env *env)
10507 {
10508 	struct tnum enforce_attach_type_range = tnum_unknown;
10509 	const struct bpf_prog *prog = env->prog;
10510 	struct bpf_reg_state *reg;
10511 	struct tnum range = tnum_range(0, 1);
10512 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
10513 	int err;
10514 	struct bpf_func_state *frame = env->cur_state->frame[0];
10515 	const bool is_subprog = frame->subprogno;
10516 
10517 	/* LSM and struct_ops func-ptr's return type could be "void" */
10518 	if (!is_subprog) {
10519 		switch (prog_type) {
10520 		case BPF_PROG_TYPE_LSM:
10521 			if (prog->expected_attach_type == BPF_LSM_CGROUP)
10522 				/* See below, can be 1 or 0-1 depending on hook. */
10523 				break;
10524 			fallthrough;
10525 		case BPF_PROG_TYPE_STRUCT_OPS:
10526 			if (!prog->aux->attach_func_proto->type)
10527 				return 0;
10528 			break;
10529 		default:
10530 			break;
10531 		}
10532 	}
10533 
10534 	/* The eBPF calling convention is such that R0 is used
10535 	 * to return the value from the eBPF program.
10536 	 * Make sure that it's readable at this point,
10537 	 * i.e. at bpf_exit, which means that the program wrote
10538 	 * something into it earlier.
10539 	 */
10540 	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
10541 	if (err)
10542 		return err;
10543 
10544 	if (is_pointer_value(env, BPF_REG_0)) {
10545 		verbose(env, "R0 leaks addr as return value\n");
10546 		return -EACCES;
10547 	}
10548 
10549 	reg = cur_regs(env) + BPF_REG_0;
10550 
10551 	if (frame->in_async_callback_fn) {
10552 		/* enforce return zero from async callbacks like timer */
10553 		if (reg->type != SCALAR_VALUE) {
10554 			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
10555 				reg_type_str(env, reg->type));
10556 			return -EINVAL;
10557 		}
10558 
10559 		if (!tnum_in(tnum_const(0), reg->var_off)) {
10560 			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
10561 			return -EINVAL;
10562 		}
10563 		return 0;
10564 	}
10565 
10566 	if (is_subprog) {
10567 		if (reg->type != SCALAR_VALUE) {
10568 			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
10569 				reg_type_str(env, reg->type));
10570 			return -EINVAL;
10571 		}
10572 		return 0;
10573 	}
10574 
10575 	switch (prog_type) {
10576 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
10577 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
10578 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
10579 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
10580 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
10581 		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
10582 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
10583 			range = tnum_range(1, 1);
10584 		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
10585 		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
10586 			range = tnum_range(0, 3);
10587 		break;
10588 	case BPF_PROG_TYPE_CGROUP_SKB:
10589 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
10590 			range = tnum_range(0, 3);
10591 			enforce_attach_type_range = tnum_range(2, 3);
10592 		}
10593 		break;
10594 	case BPF_PROG_TYPE_CGROUP_SOCK:
10595 	case BPF_PROG_TYPE_SOCK_OPS:
10596 	case BPF_PROG_TYPE_CGROUP_DEVICE:
10597 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
10598 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
10599 		break;
10600 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
10601 		if (!env->prog->aux->attach_btf_id)
10602 			return 0;
10603 		range = tnum_const(0);
10604 		break;
10605 	case BPF_PROG_TYPE_TRACING:
10606 		switch (env->prog->expected_attach_type) {
10607 		case BPF_TRACE_FENTRY:
10608 		case BPF_TRACE_FEXIT:
10609 			range = tnum_const(0);
10610 			break;
10611 		case BPF_TRACE_RAW_TP:
10612 		case BPF_MODIFY_RETURN:
10613 			return 0;
10614 		case BPF_TRACE_ITER:
10615 			break;
10616 		default:
10617 			return -ENOTSUPP;
10618 		}
10619 		break;
10620 	case BPF_PROG_TYPE_SK_LOOKUP:
10621 		range = tnum_range(SK_DROP, SK_PASS);
10622 		break;
10623 
10624 	case BPF_PROG_TYPE_LSM:
10625 		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
10626 			/* Regular BPF_PROG_TYPE_LSM programs can return
10627 			 * any value.
10628 			 */
10629 			return 0;
10630 		}
10631 		if (!env->prog->aux->attach_func_proto->type) {
10632 			/* Make sure programs that attach to void
10633 			 * hooks don't try to modify return value.
10634 			 */
10635 			range = tnum_range(1, 1);
10636 		}
10637 		break;
10638 
10639 	case BPF_PROG_TYPE_EXT:
10640 		/* freplace program can return anything as its return value
10641 		 * depends on the to-be-replaced kernel func or bpf program.
10642 		 */
10643 	default:
10644 		return 0;
10645 	}
10646 
10647 	if (reg->type != SCALAR_VALUE) {
10648 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
10649 			reg_type_str(env, reg->type));
10650 		return -EINVAL;
10651 	}
10652 
10653 	if (!tnum_in(range, reg->var_off)) {
10654 		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
10655 		if (prog->expected_attach_type == BPF_LSM_CGROUP &&
10656 		    prog_type == BPF_PROG_TYPE_LSM &&
10657 		    !prog->aux->attach_func_proto->type)
10658 			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
10659 		return -EINVAL;
10660 	}
10661 
10662 	if (!tnum_is_unknown(enforce_attach_type_range) &&
10663 	    tnum_in(enforce_attach_type_range, reg->var_off))
10664 		env->prog->enforce_expected_attach_type = 1;
10665 	return 0;
10666 }
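
/* For instance, a BPF_PROG_TYPE_CGROUP_SKB program attached to
 * BPF_CGROUP_INET_EGRESS may return values in [0, 3]; if the program ends
 * with "r0 = 5; exit" the range check above rejects it, and if the tracked
 * return value is known to lie within [2, 3], enforce_expected_attach_type
 * is set so the program can only be attached with the expected attach type.
 */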
10667 
10668 /* non-recursive DFS pseudo code
10669  * 1  procedure DFS-iterative(G,v):
10670  * 2      label v as discovered
10671  * 3      let S be a stack
10672  * 4      S.push(v)
10673  * 5      while S is not empty
10674  * 6            t <- S.pop()
10675  * 7            if t is what we're looking for:
10676  * 8                return t
10677  * 9            for all edges e in G.adjacentEdges(t) do
10678  * 10               if edge e is already labelled
10679  * 11                   continue with the next edge
10680  * 12               w <- G.adjacentVertex(t,e)
10681  * 13               if vertex w is not discovered and not explored
10682  * 14                   label e as tree-edge
10683  * 15                   label w as discovered
10684  * 16                   S.push(w)
10685  * 17                   continue at 5
10686  * 18               else if vertex w is discovered
10687  * 19                   label e as back-edge
10688  * 20               else
10689  * 21                   // vertex w is explored
10690  * 22                   label e as forward- or cross-edge
10691  * 23           label t as explored
10692  * 24           S.pop()
10693  *
10694  * convention:
10695  * 0x10 - discovered
10696  * 0x11 - discovered and fall-through edge labelled
10697  * 0x12 - discovered and fall-through and branch edges labelled
10698  * 0x20 - explored
10699  */
10700 
10701 enum {
10702 	DISCOVERED = 0x10,
10703 	EXPLORED = 0x20,
10704 	FALLTHROUGH = 1,
10705 	BRANCH = 2,
10706 };
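
/* For a conditional jump insn t with both edges, insn_state[t] thus moves
 * from 0 to DISCOVERED (0x10) when t is first pushed, to 0x11 once its
 * fall-through edge has been followed, to 0x12 once the branch edge has
 * been followed as well, and finally to EXPLORED (0x20) when t is popped
 * with nothing left to do.
 */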
10707 
10708 static u32 state_htab_size(struct bpf_verifier_env *env)
10709 {
10710 	return env->prog->len;
10711 }
10712 
10713 static struct bpf_verifier_state_list **explored_state(
10714 					struct bpf_verifier_env *env,
10715 					int idx)
10716 {
10717 	struct bpf_verifier_state *cur = env->cur_state;
10718 	struct bpf_func_state *state = cur->frame[cur->curframe];
10719 
10720 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
10721 }
10722 
10723 static void init_explored_state(struct bpf_verifier_env *env, int idx)
10724 {
10725 	env->insn_aux_data[idx].prune_point = true;
10726 }
10727 
10728 enum {
10729 	DONE_EXPLORING = 0,
10730 	KEEP_EXPLORING = 1,
10731 };
10732 
10733 /* t, w, e - match pseudo-code above:
10734  * t - index of current instruction
10735  * w - next instruction
10736  * e - edge
10737  */
10738 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
10739 		     bool loop_ok)
10740 {
10741 	int *insn_stack = env->cfg.insn_stack;
10742 	int *insn_state = env->cfg.insn_state;
10743 
10744 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
10745 		return DONE_EXPLORING;
10746 
10747 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
10748 		return DONE_EXPLORING;
10749 
10750 	if (w < 0 || w >= env->prog->len) {
10751 		verbose_linfo(env, t, "%d: ", t);
10752 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
10753 		return -EINVAL;
10754 	}
10755 
10756 	if (e == BRANCH)
10757 		/* mark branch target for state pruning */
10758 		init_explored_state(env, w);
10759 
10760 	if (insn_state[w] == 0) {
10761 		/* tree-edge */
10762 		insn_state[t] = DISCOVERED | e;
10763 		insn_state[w] = DISCOVERED;
10764 		if (env->cfg.cur_stack >= env->prog->len)
10765 			return -E2BIG;
10766 		insn_stack[env->cfg.cur_stack++] = w;
10767 		return KEEP_EXPLORING;
10768 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
10769 		if (loop_ok && env->bpf_capable)
10770 			return DONE_EXPLORING;
10771 		verbose_linfo(env, t, "%d: ", t);
10772 		verbose_linfo(env, w, "%d: ", w);
10773 		verbose(env, "back-edge from insn %d to %d\n", t, w);
10774 		return -EINVAL;
10775 	} else if (insn_state[w] == EXPLORED) {
10776 		/* forward- or cross-edge */
10777 		insn_state[t] = DISCOVERED | e;
10778 	} else {
10779 		verbose(env, "insn state internal bug\n");
10780 		return -EFAULT;
10781 	}
10782 	return DONE_EXPLORING;
10783 }
10784 
10785 static int visit_func_call_insn(int t, int insn_cnt,
10786 				struct bpf_insn *insns,
10787 				struct bpf_verifier_env *env,
10788 				bool visit_callee)
10789 {
10790 	int ret;
10791 
10792 	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
10793 	if (ret)
10794 		return ret;
10795 
10796 	if (t + 1 < insn_cnt)
10797 		init_explored_state(env, t + 1);
10798 	if (visit_callee) {
10799 		init_explored_state(env, t);
10800 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
10801 				/* It's ok to allow recursion from CFG point of
10802 				 * view. __check_func_call() will do the actual
10803 				 * check.
10804 				 */
10805 				bpf_pseudo_func(insns + t));
10806 	}
10807 	return ret;
10808 }
10809 
10810 /* Visits the instruction at index t and returns one of the following:
10811  *  < 0 - an error occurred
10812  *  DONE_EXPLORING - the instruction was fully explored
10813  *  KEEP_EXPLORING - there is still work to be done before it is fully explored
10814  */
10815 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
10816 {
10817 	struct bpf_insn *insns = env->prog->insnsi;
10818 	int ret;
10819 
10820 	if (bpf_pseudo_func(insns + t))
10821 		return visit_func_call_insn(t, insn_cnt, insns, env, true);
10822 
10823 	/* All non-branch instructions have a single fall-through edge. */
10824 	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
10825 	    BPF_CLASS(insns[t].code) != BPF_JMP32)
10826 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
10827 
10828 	switch (BPF_OP(insns[t].code)) {
10829 	case BPF_EXIT:
10830 		return DONE_EXPLORING;
10831 
10832 	case BPF_CALL:
10833 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
10834 			/* Mark this call insn to trigger is_state_visited() check
10835 			 * before call itself is processed by __check_func_call().
10836 			 * Otherwise new async state will be pushed for further
10837 			 * exploration.
10838 			 */
10839 			init_explored_state(env, t);
10840 		return visit_func_call_insn(t, insn_cnt, insns, env,
10841 					    insns[t].src_reg == BPF_PSEUDO_CALL);
10842 
10843 	case BPF_JA:
10844 		if (BPF_SRC(insns[t].code) != BPF_K)
10845 			return -EINVAL;
10846 
10847 		/* unconditional jump with single edge */
10848 		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
10849 				true);
10850 		if (ret)
10851 			return ret;
10852 
10853 		/* unconditional jmp is not a good pruning point,
10854 		 * but it's marked, since backtracking needs
10855 		 * to record jmp history in is_state_visited().
10856 		 */
10857 		init_explored_state(env, t + insns[t].off + 1);
10858 		/* tell verifier to check for equivalent states
10859 		 * after every call and jump
10860 		 */
10861 		if (t + 1 < insn_cnt)
10862 			init_explored_state(env, t + 1);
10863 
10864 		return ret;
10865 
10866 	default:
10867 		/* conditional jump with two edges */
10868 		init_explored_state(env, t);
10869 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
10870 		if (ret)
10871 			return ret;
10872 
10873 		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
10874 	}
10875 }
10876 
10877 /* non-recursive depth-first-search to detect loops in BPF program
10878  * loop == back-edge in directed graph
10879  */
10880 static int check_cfg(struct bpf_verifier_env *env)
10881 {
10882 	int insn_cnt = env->prog->len;
10883 	int *insn_stack, *insn_state;
10884 	int ret = 0;
10885 	int i;
10886 
10887 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10888 	if (!insn_state)
10889 		return -ENOMEM;
10890 
10891 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
10892 	if (!insn_stack) {
10893 		kvfree(insn_state);
10894 		return -ENOMEM;
10895 	}
10896 
10897 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
10898 	insn_stack[0] = 0; /* 0 is the first instruction */
10899 	env->cfg.cur_stack = 1;
10900 
10901 	while (env->cfg.cur_stack > 0) {
10902 		int t = insn_stack[env->cfg.cur_stack - 1];
10903 
10904 		ret = visit_insn(t, insn_cnt, env);
10905 		switch (ret) {
10906 		case DONE_EXPLORING:
10907 			insn_state[t] = EXPLORED;
10908 			env->cfg.cur_stack--;
10909 			break;
10910 		case KEEP_EXPLORING:
10911 			break;
10912 		default:
10913 			if (ret > 0) {
10914 				verbose(env, "visit_insn internal bug\n");
10915 				ret = -EFAULT;
10916 			}
10917 			goto err_free;
10918 		}
10919 	}
10920 
10921 	if (env->cfg.cur_stack < 0) {
10922 		verbose(env, "pop stack internal bug\n");
10923 		ret = -EFAULT;
10924 		goto err_free;
10925 	}
10926 
10927 	for (i = 0; i < insn_cnt; i++) {
10928 		if (insn_state[i] != EXPLORED) {
10929 			verbose(env, "unreachable insn %d\n", i);
10930 			ret = -EINVAL;
10931 			goto err_free;
10932 		}
10933 	}
10934 	ret = 0; /* cfg looks good */
10935 
10936 err_free:
10937 	kvfree(insn_state);
10938 	kvfree(insn_stack);
10939 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
10940 	return ret;
10941 }
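
/* Example outcomes of the walk above: a back-edge such as "2: goto -2"
 * jumping back to an already-discovered insn is rejected with
 * "back-edge from insn 2 to 1" unless the loader is bpf_capable (bounded
 * loops are then checked later during path exploration), and any insn left
 * in a state other than EXPLORED is reported as "unreachable insn %d".
 */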
10942 
10943 static int check_abnormal_return(struct bpf_verifier_env *env)
10944 {
10945 	int i;
10946 
10947 	for (i = 1; i < env->subprog_cnt; i++) {
10948 		if (env->subprog_info[i].has_ld_abs) {
10949 			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
10950 			return -EINVAL;
10951 		}
10952 		if (env->subprog_info[i].has_tail_call) {
10953 			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
10954 			return -EINVAL;
10955 		}
10956 	}
10957 	return 0;
10958 }
10959 
10960 /* The minimum supported BTF func info size */
10961 #define MIN_BPF_FUNCINFO_SIZE	8
10962 #define MAX_FUNCINFO_REC_SIZE	252
10963 
10964 static int check_btf_func(struct bpf_verifier_env *env,
10965 			  const union bpf_attr *attr,
10966 			  bpfptr_t uattr)
10967 {
10968 	const struct btf_type *type, *func_proto, *ret_type;
10969 	u32 i, nfuncs, urec_size, min_size;
10970 	u32 krec_size = sizeof(struct bpf_func_info);
10971 	struct bpf_func_info *krecord;
10972 	struct bpf_func_info_aux *info_aux = NULL;
10973 	struct bpf_prog *prog;
10974 	const struct btf *btf;
10975 	bpfptr_t urecord;
10976 	u32 prev_offset = 0;
10977 	bool scalar_return;
10978 	int ret = -ENOMEM;
10979 
10980 	nfuncs = attr->func_info_cnt;
10981 	if (!nfuncs) {
10982 		if (check_abnormal_return(env))
10983 			return -EINVAL;
10984 		return 0;
10985 	}
10986 
10987 	if (nfuncs != env->subprog_cnt) {
10988 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
10989 		return -EINVAL;
10990 	}
10991 
10992 	urec_size = attr->func_info_rec_size;
10993 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
10994 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
10995 	    urec_size % sizeof(u32)) {
10996 		verbose(env, "invalid func info rec size %u\n", urec_size);
10997 		return -EINVAL;
10998 	}
10999 
11000 	prog = env->prog;
11001 	btf = prog->aux->btf;
11002 
11003 	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
11004 	min_size = min_t(u32, krec_size, urec_size);
11005 
11006 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
11007 	if (!krecord)
11008 		return -ENOMEM;
11009 	info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
11010 	if (!info_aux)
11011 		goto err_free;
11012 
11013 	for (i = 0; i < nfuncs; i++) {
11014 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
11015 		if (ret) {
11016 			if (ret == -E2BIG) {
11017 				verbose(env, "nonzero trailing record in func info");
11018 				/* set the size the kernel expects so the loader can zero
11019 				 * out the rest of the record.
11020 				 */
11021 				if (copy_to_bpfptr_offset(uattr,
11022 							  offsetof(union bpf_attr, func_info_rec_size),
11023 							  &min_size, sizeof(min_size)))
11024 					ret = -EFAULT;
11025 			}
11026 			goto err_free;
11027 		}
11028 
11029 		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
11030 			ret = -EFAULT;
11031 			goto err_free;
11032 		}
11033 
11034 		/* check insn_off */
11035 		ret = -EINVAL;
11036 		if (i == 0) {
11037 			if (krecord[i].insn_off) {
11038 				verbose(env,
11039 					"nonzero insn_off %u for the first func info record",
11040 					krecord[i].insn_off);
11041 				goto err_free;
11042 			}
11043 		} else if (krecord[i].insn_off <= prev_offset) {
11044 			verbose(env,
11045 				"same or smaller insn offset (%u) than previous func info record (%u)",
11046 				krecord[i].insn_off, prev_offset);
11047 			goto err_free;
11048 		}
11049 
11050 		if (env->subprog_info[i].start != krecord[i].insn_off) {
11051 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
11052 			goto err_free;
11053 		}
11054 
11055 		/* check type_id */
11056 		type = btf_type_by_id(btf, krecord[i].type_id);
11057 		if (!type || !btf_type_is_func(type)) {
11058 			verbose(env, "invalid type id %d in func info",
11059 				krecord[i].type_id);
11060 			goto err_free;
11061 		}
11062 		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
11063 
11064 		func_proto = btf_type_by_id(btf, type->type);
11065 		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
11066 			/* btf_func_check() already verified it during BTF load */
11067 			goto err_free;
11068 		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
11069 		scalar_return =
11070 			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
11071 		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
11072 			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
11073 			goto err_free;
11074 		}
11075 		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
11076 			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
11077 			goto err_free;
11078 		}
11079 
11080 		prev_offset = krecord[i].insn_off;
11081 		bpfptr_add(&urecord, urec_size);
11082 	}
11083 
11084 	prog->aux->func_info = krecord;
11085 	prog->aux->func_info_cnt = nfuncs;
11086 	prog->aux->func_info_aux = info_aux;
11087 	return 0;
11088 
11089 err_free:
11090 	kvfree(krecord);
11091 	kfree(info_aux);
11092 	return ret;
11093 }
11094 
11095 static void adjust_btf_func(struct bpf_verifier_env *env)
11096 {
11097 	struct bpf_prog_aux *aux = env->prog->aux;
11098 	int i;
11099 
11100 	if (!aux->func_info)
11101 		return;
11102 
11103 	for (i = 0; i < env->subprog_cnt; i++)
11104 		aux->func_info[i].insn_off = env->subprog_info[i].start;
11105 }
11106 
11107 #define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
11108 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
11109 
11110 static int check_btf_line(struct bpf_verifier_env *env,
11111 			  const union bpf_attr *attr,
11112 			  bpfptr_t uattr)
11113 {
11114 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
11115 	struct bpf_subprog_info *sub;
11116 	struct bpf_line_info *linfo;
11117 	struct bpf_prog *prog;
11118 	const struct btf *btf;
11119 	bpfptr_t ulinfo;
11120 	int err;
11121 
11122 	nr_linfo = attr->line_info_cnt;
11123 	if (!nr_linfo)
11124 		return 0;
11125 	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
11126 		return -EINVAL;
11127 
11128 	rec_size = attr->line_info_rec_size;
11129 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
11130 	    rec_size > MAX_LINEINFO_REC_SIZE ||
11131 	    rec_size & (sizeof(u32) - 1))
11132 		return -EINVAL;
11133 
11134 	/* Need to zero it in case userspace passes in
11135 	 * a smaller bpf_line_info object.
11136 	 */
11137 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
11138 			 GFP_KERNEL | __GFP_NOWARN);
11139 	if (!linfo)
11140 		return -ENOMEM;
11141 
11142 	prog = env->prog;
11143 	btf = prog->aux->btf;
11144 
11145 	s = 0;
11146 	sub = env->subprog_info;
11147 	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
11148 	expected_size = sizeof(struct bpf_line_info);
11149 	ncopy = min_t(u32, expected_size, rec_size);
11150 	for (i = 0; i < nr_linfo; i++) {
11151 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
11152 		if (err) {
11153 			if (err == -E2BIG) {
11154 				verbose(env, "nonzero trailing record in line_info");
11155 				if (copy_to_bpfptr_offset(uattr,
11156 							  offsetof(union bpf_attr, line_info_rec_size),
11157 							  &expected_size, sizeof(expected_size)))
11158 					err = -EFAULT;
11159 			}
11160 			goto err_free;
11161 		}
11162 
11163 		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
11164 			err = -EFAULT;
11165 			goto err_free;
11166 		}
11167 
11168 		/*
11169 		 * Check insn_off to ensure
11170 		 * 1) strictly increasing AND
11171 		 * 2) bounded by prog->len
11172 		 *
11173 		 * The linfo[0].insn_off == 0 check logically falls into
11174 		 * the later "missing bpf_line_info for func..." case
11175 		 * because the first linfo[0].insn_off must also belong to
11176 		 * the first subprog, and the first subprog must have
11177 		 * subprog_info[0].start == 0.
11178 		 */
11179 		if ((i && linfo[i].insn_off <= prev_offset) ||
11180 		    linfo[i].insn_off >= prog->len) {
11181 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
11182 				i, linfo[i].insn_off, prev_offset,
11183 				prog->len);
11184 			err = -EINVAL;
11185 			goto err_free;
11186 		}
11187 
11188 		if (!prog->insnsi[linfo[i].insn_off].code) {
11189 			verbose(env,
11190 				"Invalid insn code at line_info[%u].insn_off\n",
11191 				i);
11192 			err = -EINVAL;
11193 			goto err_free;
11194 		}
11195 
11196 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
11197 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
11198 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
11199 			err = -EINVAL;
11200 			goto err_free;
11201 		}
11202 
11203 		if (s != env->subprog_cnt) {
11204 			if (linfo[i].insn_off == sub[s].start) {
11205 				sub[s].linfo_idx = i;
11206 				s++;
11207 			} else if (sub[s].start < linfo[i].insn_off) {
11208 				verbose(env, "missing bpf_line_info for func#%u\n", s);
11209 				err = -EINVAL;
11210 				goto err_free;
11211 			}
11212 		}
11213 
11214 		prev_offset = linfo[i].insn_off;
11215 		bpfptr_add(&ulinfo, rec_size);
11216 	}
11217 
11218 	if (s != env->subprog_cnt) {
11219 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
11220 			env->subprog_cnt - s, s);
11221 		err = -EINVAL;
11222 		goto err_free;
11223 	}
11224 
11225 	prog->aux->linfo = linfo;
11226 	prog->aux->nr_linfo = nr_linfo;
11227 
11228 	return 0;
11229 
11230 err_free:
11231 	kvfree(linfo);
11232 	return err;
11233 }
11234 
11235 #define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
11236 #define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE
11237 
11238 static int check_core_relo(struct bpf_verifier_env *env,
11239 			   const union bpf_attr *attr,
11240 			   bpfptr_t uattr)
11241 {
11242 	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
11243 	struct bpf_core_relo core_relo = {};
11244 	struct bpf_prog *prog = env->prog;
11245 	const struct btf *btf = prog->aux->btf;
11246 	struct bpf_core_ctx ctx = {
11247 		.log = &env->log,
11248 		.btf = btf,
11249 	};
11250 	bpfptr_t u_core_relo;
11251 	int err;
11252 
11253 	nr_core_relo = attr->core_relo_cnt;
11254 	if (!nr_core_relo)
11255 		return 0;
11256 	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
11257 		return -EINVAL;
11258 
11259 	rec_size = attr->core_relo_rec_size;
11260 	if (rec_size < MIN_CORE_RELO_SIZE ||
11261 	    rec_size > MAX_CORE_RELO_SIZE ||
11262 	    rec_size % sizeof(u32))
11263 		return -EINVAL;
11264 
11265 	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
11266 	expected_size = sizeof(struct bpf_core_relo);
11267 	ncopy = min_t(u32, expected_size, rec_size);
11268 
11269 	/* Unlike func_info and line_info, copy and apply each CO-RE
11270 	 * relocation record one at a time.
11271 	 */
11272 	for (i = 0; i < nr_core_relo; i++) {
11273 		/* future proofing when sizeof(bpf_core_relo) changes */
11274 		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
11275 		if (err) {
11276 			if (err == -E2BIG) {
11277 				verbose(env, "nonzero trailing record in core_relo");
11278 				if (copy_to_bpfptr_offset(uattr,
11279 							  offsetof(union bpf_attr, core_relo_rec_size),
11280 							  &expected_size, sizeof(expected_size)))
11281 					err = -EFAULT;
11282 			}
11283 			break;
11284 		}
11285 
11286 		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
11287 			err = -EFAULT;
11288 			break;
11289 		}
11290 
11291 		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
11292 			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
11293 				i, core_relo.insn_off, prog->len);
11294 			err = -EINVAL;
11295 			break;
11296 		}
11297 
11298 		err = bpf_core_apply(&ctx, &core_relo, i,
11299 				     &prog->insnsi[core_relo.insn_off / 8]);
11300 		if (err)
11301 			break;
11302 		bpfptr_add(&u_core_relo, rec_size);
11303 	}
11304 	return err;
11305 }
11306 
11307 static int check_btf_info(struct bpf_verifier_env *env,
11308 			  const union bpf_attr *attr,
11309 			  bpfptr_t uattr)
11310 {
11311 	struct btf *btf;
11312 	int err;
11313 
11314 	if (!attr->func_info_cnt && !attr->line_info_cnt) {
11315 		if (check_abnormal_return(env))
11316 			return -EINVAL;
11317 		return 0;
11318 	}
11319 
11320 	btf = btf_get_by_fd(attr->prog_btf_fd);
11321 	if (IS_ERR(btf))
11322 		return PTR_ERR(btf);
11323 	if (btf_is_kernel(btf)) {
11324 		btf_put(btf);
11325 		return -EACCES;
11326 	}
11327 	env->prog->aux->btf = btf;
11328 
11329 	err = check_btf_func(env, attr, uattr);
11330 	if (err)
11331 		return err;
11332 
11333 	err = check_btf_line(env, attr, uattr);
11334 	if (err)
11335 		return err;
11336 
11337 	err = check_core_relo(env, attr, uattr);
11338 	if (err)
11339 		return err;
11340 
11341 	return 0;
11342 }
11343 
11344 /* check %cur's range satisfies %old's */
11345 static bool range_within(struct bpf_reg_state *old,
11346 			 struct bpf_reg_state *cur)
11347 {
11348 	return old->umin_value <= cur->umin_value &&
11349 	       old->umax_value >= cur->umax_value &&
11350 	       old->smin_value <= cur->smin_value &&
11351 	       old->smax_value >= cur->smax_value &&
11352 	       old->u32_min_value <= cur->u32_min_value &&
11353 	       old->u32_max_value >= cur->u32_max_value &&
11354 	       old->s32_min_value <= cur->s32_min_value &&
11355 	       old->s32_max_value >= cur->s32_max_value;
11356 }
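
/* E.g. if the already-verified (old) register was tracked as [0, 100] and
 * the current one is [10, 20], the current state is within the old one and
 * may be considered covered; a current range of [10, 200] would not be,
 * since the old proof does not cover values above 100. The 32-bit bounds
 * are checked the same way.
 */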
11357 
11358 /* If in the old state two registers had the same id, then they need to have
11359  * the same id in the new state as well.  But that id could be different from
11360  * the old state, so we need to track the mapping from old to new ids.
11361  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
11362  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
11363  * regs with a different old id could still have new id 9, we don't care about
11364  * regs with a different old id could still have new id 9; we don't care about
11365  * So we look through our idmap to see if this old id has been seen before.  If
11366  * so, we require the new id to match; otherwise, we add the id pair to the map.
11367  */
11368 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
11369 {
11370 	unsigned int i;
11371 
11372 	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
11373 		if (!idmap[i].old) {
11374 			/* Reached an empty slot; haven't seen this id before */
11375 			idmap[i].old = old_id;
11376 			idmap[i].cur = cur_id;
11377 			return true;
11378 		}
11379 		if (idmap[i].old == old_id)
11380 			return idmap[i].cur == cur_id;
11381 	}
11382 	/* We ran out of idmap slots, which should be impossible */
11383 	WARN_ON_ONCE(1);
11384 	return false;
11385 }
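
/* For example, if in the old state r1 and r2 both carried id 5 and in the
 * current state r1 carries id 9, the first check_ids(5, 9, idmap) call
 * records the 5 -> 9 mapping and succeeds; a later check_ids(5, 7, idmap)
 * for r2 then fails, because all old-id-5 registers must map to the same
 * current id.
 */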
11386 
11387 static void clean_func_state(struct bpf_verifier_env *env,
11388 			     struct bpf_func_state *st)
11389 {
11390 	enum bpf_reg_liveness live;
11391 	int i, j;
11392 
11393 	for (i = 0; i < BPF_REG_FP; i++) {
11394 		live = st->regs[i].live;
11395 		/* liveness must not touch this register anymore */
11396 		st->regs[i].live |= REG_LIVE_DONE;
11397 		if (!(live & REG_LIVE_READ))
11398 			/* since the register is unused, clear its state
11399 			 * to make further comparison simpler
11400 			 */
11401 			__mark_reg_not_init(env, &st->regs[i]);
11402 	}
11403 
11404 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
11405 		live = st->stack[i].spilled_ptr.live;
11406 		/* liveness must not touch this stack slot anymore */
11407 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
11408 		if (!(live & REG_LIVE_READ)) {
11409 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
11410 			for (j = 0; j < BPF_REG_SIZE; j++)
11411 				st->stack[i].slot_type[j] = STACK_INVALID;
11412 		}
11413 	}
11414 }
11415 
11416 static void clean_verifier_state(struct bpf_verifier_env *env,
11417 				 struct bpf_verifier_state *st)
11418 {
11419 	int i;
11420 
11421 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
11422 		/* all regs in this state in all frames were already marked */
11423 		return;
11424 
11425 	for (i = 0; i <= st->curframe; i++)
11426 		clean_func_state(env, st->frame[i]);
11427 }
11428 
11429 /* the parentage chains form a tree.
11430  * the verifier states are added to state lists at given insn and
11431  * pushed into state stack for future exploration.
11432  * when the verifier reaches the bpf_exit insn, some of the verifier states
11433  * stored in the state lists have their final liveness state already,
11434  * but a lot of states will get revised from liveness point of view when
11435  * the verifier explores other branches.
11436  * Example:
11437  * 1: r0 = 1
11438  * 2: if r1 == 100 goto pc+1
11439  * 3: r0 = 2
11440  * 4: exit
11441  * when the verifier reaches exit insn the register r0 in the state list of
11442  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
11443  * of insn 2 and goes exploring further. At the insn 4 it will walk the
11444  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
11445  *
11446  * Since the verifier pushes the branch states as it sees them while exploring
11447  * the program, walking the same branch instruction for the second
11448  * time means that all states below this branch were already explored and
11449  * their final liveness marks are already propagated.
11450  * Hence when the verifier completes the search of state list in is_state_visited()
11451  * we can call this clean_live_states() function to mark all liveness states
11452  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
11453  * will not be used.
11454  * This function also clears the registers and stack for states that !READ
11455  * to simplify state merging.
11456  *
11457  * An important note here is that walking the same branch instruction in the
11458  * callee doesn't mean that the states are DONE. The verifier has to compare
11459  * the callsites as well.
11460  */
11461 static void clean_live_states(struct bpf_verifier_env *env, int insn,
11462 			      struct bpf_verifier_state *cur)
11463 {
11464 	struct bpf_verifier_state_list *sl;
11465 	int i;
11466 
11467 	sl = *explored_state(env, insn);
11468 	while (sl) {
11469 		if (sl->state.branches)
11470 			goto next;
11471 		if (sl->state.insn_idx != insn ||
11472 		    sl->state.curframe != cur->curframe)
11473 			goto next;
11474 		for (i = 0; i <= cur->curframe; i++)
11475 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
11476 				goto next;
11477 		clean_verifier_state(env, &sl->state);
11478 next:
11479 		sl = sl->next;
11480 	}
11481 }
11482 
11483 /* Returns true if (rold safe implies rcur safe) */
11484 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
11485 		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
11486 {
11487 	bool equal;
11488 
11489 	if (!(rold->live & REG_LIVE_READ))
11490 		/* explored state didn't use this */
11491 		return true;
11492 
11493 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
11494 
11495 	if (rold->type == PTR_TO_STACK)
11496 		/* two stack pointers are equal only if they're pointing to
11497 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
11498 		 */
11499 		return equal && rold->frameno == rcur->frameno;
11500 
11501 	if (equal)
11502 		return true;
11503 
11504 	if (rold->type == NOT_INIT)
11505 		/* explored state can't have used this */
11506 		return true;
11507 	if (rcur->type == NOT_INIT)
11508 		return false;
11509 	switch (base_type(rold->type)) {
11510 	case SCALAR_VALUE:
11511 		if (env->explore_alu_limits)
11512 			return false;
11513 		if (rcur->type == SCALAR_VALUE) {
11514 			if (!rold->precise && !rcur->precise)
11515 				return true;
11516 			/* new val must satisfy old val knowledge */
11517 			return range_within(rold, rcur) &&
11518 			       tnum_in(rold->var_off, rcur->var_off);
11519 		} else {
11520 			/* We're trying to use a pointer in place of a scalar.
11521 			 * Even if the scalar was unbounded, this could lead to
11522 			 * pointer leaks because scalars are allowed to leak
11523 			 * while pointers are not. We could make this safe in
11524 			 * special cases if root is calling us, but it's
11525 			 * probably not worth the hassle.
11526 			 */
11527 			return false;
11528 		}
11529 	case PTR_TO_MAP_KEY:
11530 	case PTR_TO_MAP_VALUE:
11531 		/* a PTR_TO_MAP_VALUE could be safe to use as a
11532 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
11533 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
11534 		 * checked, doing so could have affected others with the same
11535 		 * id, and we can't check for that because we lost the id when
11536 		 * we converted to a PTR_TO_MAP_VALUE.
11537 		 */
11538 		if (type_may_be_null(rold->type)) {
11539 			if (!type_may_be_null(rcur->type))
11540 				return false;
11541 			if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
11542 				return false;
11543 			/* Check our ids match any regs they're supposed to */
11544 			return check_ids(rold->id, rcur->id, idmap);
11545 		}
11546 
11547 		/* If the new min/max/var_off satisfy the old ones and
11548 		 * everything else matches, we are OK.
11549 		 * 'id' is not compared, since it's only used for maps with
11550 		 * bpf_spin_lock inside map element and in such cases if
11551 		 * the rest of the prog is valid for one map element then
11552 		 * it's valid for all map elements regardless of the key
11553 		 * used in bpf_map_lookup()
11554 		 */
11555 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
11556 		       range_within(rold, rcur) &&
11557 		       tnum_in(rold->var_off, rcur->var_off);
11558 	case PTR_TO_PACKET_META:
11559 	case PTR_TO_PACKET:
11560 		if (rcur->type != rold->type)
11561 			return false;
11562 		/* We must have at least as much range as the old ptr
11563 		 * did, so that any accesses which were safe before are
11564 		 * still safe.  This is true even if old range < old off,
11565 		 * since someone could have accessed through (ptr - k), or
11566 		 * even done ptr -= k in a register, to get a safe access.
11567 		 */
11568 		if (rold->range > rcur->range)
11569 			return false;
11570 		/* If the offsets don't match, we can't trust our alignment;
11571 		 * nor can we be sure that we won't fall out of range.
11572 		 */
11573 		if (rold->off != rcur->off)
11574 			return false;
11575 		/* id relations must be preserved */
11576 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
11577 			return false;
11578 		/* new val must satisfy old val knowledge */
11579 		return range_within(rold, rcur) &&
11580 		       tnum_in(rold->var_off, rcur->var_off);
11581 	case PTR_TO_CTX:
11582 	case CONST_PTR_TO_MAP:
11583 	case PTR_TO_PACKET_END:
11584 	case PTR_TO_FLOW_KEYS:
11585 	case PTR_TO_SOCKET:
11586 	case PTR_TO_SOCK_COMMON:
11587 	case PTR_TO_TCP_SOCK:
11588 	case PTR_TO_XDP_SOCK:
11589 		/* Only valid matches are exact, which memcmp() above
11590 		 * would have accepted
11591 		 */
11592 	default:
11593 		/* Don't know what's going on, just say it's not safe */
11594 		return false;
11595 	}
11596 
11597 	/* Shouldn't get here; if we do, say it's not safe */
11598 	WARN_ON_ONCE(1);
11599 	return false;
11600 }
11601 
11602 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
11603 		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
11604 {
11605 	int i, spi;
11606 
11607 	/* walk slots of the explored stack and ignore any additional
11608 	 * slots in the current stack, since explored(safe) state
11609 	 * didn't use them
11610 	 */
11611 	for (i = 0; i < old->allocated_stack; i++) {
11612 		spi = i / BPF_REG_SIZE;
11613 
11614 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
11615 			i += BPF_REG_SIZE - 1;
11616 			/* explored state didn't use this */
11617 			continue;
11618 		}
11619 
11620 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
11621 			continue;
11622 
11623 		/* explored stack has more populated slots than current stack
11624 		 * and these slots were used
11625 		 */
11626 		if (i >= cur->allocated_stack)
11627 			return false;
11628 
11629 		/* if the old state was safe with misc data in the stack,
11630 		 * it will also be safe with a zero-initialized stack.
11631 		 * The opposite is not true
11632 		 */
11633 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
11634 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
11635 			continue;
11636 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
11637 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
11638 			/* Ex: old explored (safe) state has STACK_SPILL in
11639 			 * this stack slot, but current has STACK_MISC ->
11640 			 * these verifier states are not equivalent;
11641 			 * return false to continue verification of this path
11642 			 */
11643 			return false;
11644 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
11645 			continue;
11646 		if (!is_spilled_reg(&old->stack[spi]))
11647 			continue;
11648 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
11649 			     &cur->stack[spi].spilled_ptr, idmap))
11650 			/* when the explored and current stack slots both store
11651 			 * spilled registers, check that the stored pointer types
11652 			 * are the same as well.
11653 			 * Ex: explored safe path could have stored
11654 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
11655 			 * but current path has stored:
11656 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
11657 			 * such verifier states are not equivalent.
11658 			 * return false to continue verification of this path
11659 			 */
11660 			return false;
11661 	}
11662 	return true;
11663 }
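
/* An example of the slot comparison done in stacksafe() (illustrative): if
 * the explored state recorded fp-8 as STACK_MISC while the current state has
 * it as STACK_ZERO, the current state is at least as constrained, so the
 * slot is accepted; the reverse (old STACK_ZERO vs. cur STACK_MISC) makes
 * the states non-equivalent and verification of this path continues.
 */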
11664 
11665 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
11666 {
11667 	if (old->acquired_refs != cur->acquired_refs)
11668 		return false;
11669 	return !memcmp(old->refs, cur->refs,
11670 		       sizeof(*old->refs) * old->acquired_refs);
11671 }
11672 
11673 /* compare two verifier states
11674  *
11675  * all states stored in state_list are known to be valid, since
11676  * verifier reached 'bpf_exit' instruction through them
11677  *
11678  * this function is called when the verifier explores different branches of
11679  * execution popped from the state stack. If it sees an old state that has a
11680  * more strict register state and a more strict stack state, then this execution
11681  * branch doesn't need to be explored further, since the verifier already
11682  * concluded that the more strict state leads to a valid finish.
11683  *
11684  * Therefore two states are equivalent if register state is more conservative
11685  * and explored stack state is more conservative than the current one.
11686  * Example:
11687  *       explored                   current
11688  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
11689  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
11690  *
11691  * In other words, if the current stack state (the one being explored) has more
11692  * valid slots than the old one that already passed validation, it means
11693  * the verifier can stop exploring and conclude that the current state is valid too
11694  *
11695  * Similarly with registers. If explored state has register type as invalid
11696  * whereas register type in current state is meaningful, it means that
11697  * the current state will reach 'bpf_exit' instruction safely
11698  */
11699 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
11700 			      struct bpf_func_state *cur)
11701 {
11702 	int i;
11703 
11704 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
11705 	for (i = 0; i < MAX_BPF_REG; i++)
11706 		if (!regsafe(env, &old->regs[i], &cur->regs[i],
11707 			     env->idmap_scratch))
11708 			return false;
11709 
11710 	if (!stacksafe(env, old, cur, env->idmap_scratch))
11711 		return false;
11712 
11713 	if (!refsafe(old, cur))
11714 		return false;
11715 
11716 	return true;
11717 }
11718 
11719 static bool states_equal(struct bpf_verifier_env *env,
11720 			 struct bpf_verifier_state *old,
11721 			 struct bpf_verifier_state *cur)
11722 {
11723 	int i;
11724 
11725 	if (old->curframe != cur->curframe)
11726 		return false;
11727 
11728 	/* Verification state from speculative execution simulation
11729 	 * must never prune a non-speculative execution one.
11730 	 */
11731 	if (old->speculative && !cur->speculative)
11732 		return false;
11733 
11734 	if (old->active_spin_lock != cur->active_spin_lock)
11735 		return false;
11736 
11737 	/* for states to be equal callsites have to be the same
11738 	 * and all frame states need to be equivalent
11739 	 */
11740 	for (i = 0; i <= old->curframe; i++) {
11741 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
11742 			return false;
11743 		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
11744 			return false;
11745 	}
11746 	return true;
11747 }
11748 
11749 /* Return 0 if no propagation happened. Return negative error code if error
11750  * happened. Otherwise, return the propagated bit.
11751  */
11752 static int propagate_liveness_reg(struct bpf_verifier_env *env,
11753 				  struct bpf_reg_state *reg,
11754 				  struct bpf_reg_state *parent_reg)
11755 {
11756 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
11757 	u8 flag = reg->live & REG_LIVE_READ;
11758 	int err;
11759 
11760 	/* When we get here, the read flags of PARENT_REG or REG could be any of
11761 	 * REG_LIVE_READ64, REG_LIVE_READ32 or REG_LIVE_NONE. There is no need
11762 	 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
11763 	 */
11764 	if (parent_flag == REG_LIVE_READ64 ||
11765 	    /* Or if there is no read flag from REG. */
11766 	    !flag ||
11767 	    /* Or if the read flag from REG is the same as PARENT_REG. */
11768 	    parent_flag == flag)
11769 		return 0;
11770 
11771 	err = mark_reg_read(env, reg, parent_reg, flag);
11772 	if (err)
11773 		return err;
11774 
11775 	return flag;
11776 }
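
/* Example of the rule implemented above (illustrative): if the parent
 * already carries REG_LIVE_READ64, a READ32 seen in the child adds no new
 * information and nothing is propagated; if the parent only has READ32 and
 * the child saw READ64, the stronger 64-bit read mark is pushed up via
 * mark_reg_read() and reported back to the caller.
 */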
11777 
11778 /* A write screens off any subsequent reads; but write marks come from the
11779  * straight-line code between a state and its parent.  When we arrive at an
11780  * equivalent state (jump target or such) we didn't arrive by the straight-line
11781  * code, so read marks in the state must propagate to the parent regardless
11782  * of the state's write marks. That's what 'parent == state->parent' comparison
11783  * in mark_reg_read() is for.
11784  */
11785 static int propagate_liveness(struct bpf_verifier_env *env,
11786 			      const struct bpf_verifier_state *vstate,
11787 			      struct bpf_verifier_state *vparent)
11788 {
11789 	struct bpf_reg_state *state_reg, *parent_reg;
11790 	struct bpf_func_state *state, *parent;
11791 	int i, frame, err = 0;
11792 
11793 	if (vparent->curframe != vstate->curframe) {
11794 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
11795 		     vparent->curframe, vstate->curframe);
11796 		return -EFAULT;
11797 	}
11798 	/* Propagate read liveness of registers... */
11799 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
11800 	for (frame = 0; frame <= vstate->curframe; frame++) {
11801 		parent = vparent->frame[frame];
11802 		state = vstate->frame[frame];
11803 		parent_reg = parent->regs;
11804 		state_reg = state->regs;
11805 		/* We don't need to worry about FP liveness, it's read-only */
11806 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
11807 			err = propagate_liveness_reg(env, &state_reg[i],
11808 						     &parent_reg[i]);
11809 			if (err < 0)
11810 				return err;
11811 			if (err == REG_LIVE_READ64)
11812 				mark_insn_zext(env, &parent_reg[i]);
11813 		}
11814 
11815 		/* Propagate stack slots. */
11816 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
11817 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
11818 			parent_reg = &parent->stack[i].spilled_ptr;
11819 			state_reg = &state->stack[i].spilled_ptr;
11820 			err = propagate_liveness_reg(env, state_reg,
11821 						     parent_reg);
11822 			if (err < 0)
11823 				return err;
11824 		}
11825 	}
11826 	return 0;
11827 }
11828 
11829 /* find precise scalars in the previous equivalent state and
11830  * propagate them into the current state
11831  */
11832 static int propagate_precision(struct bpf_verifier_env *env,
11833 			       const struct bpf_verifier_state *old)
11834 {
11835 	struct bpf_reg_state *state_reg;
11836 	struct bpf_func_state *state;
11837 	int i, err = 0;
11838 
11839 	state = old->frame[old->curframe];
11840 	state_reg = state->regs;
11841 	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
11842 		if (state_reg->type != SCALAR_VALUE ||
11843 		    !state_reg->precise)
11844 			continue;
11845 		if (env->log.level & BPF_LOG_LEVEL2)
11846 			verbose(env, "propagating r%d\n", i);
11847 		err = mark_chain_precision(env, i);
11848 		if (err < 0)
11849 			return err;
11850 	}
11851 
11852 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
11853 		if (!is_spilled_reg(&state->stack[i]))
11854 			continue;
11855 		state_reg = &state->stack[i].spilled_ptr;
11856 		if (state_reg->type != SCALAR_VALUE ||
11857 		    !state_reg->precise)
11858 			continue;
11859 		if (env->log.level & BPF_LOG_LEVEL2)
11860 			verbose(env, "propagating fp%d\n",
11861 				(-i - 1) * BPF_REG_SIZE);
11862 		err = mark_chain_precision_stack(env, i);
11863 		if (err < 0)
11864 			return err;
11865 	}
11866 	return 0;
11867 }
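
/* Illustration of the precision propagation above (hypothetical program):
 * if the explored, already-verified state needed r2 to be a precise scalar
 * (say, exactly 8) to pass a bounds check, then when an equivalent current
 * state is about to be pruned, mark_chain_precision() is replayed here so
 * that r2 becomes precise in the current state's history as well, keeping
 * later pruning decisions sound.
 */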
11868 
11869 static bool states_maybe_looping(struct bpf_verifier_state *old,
11870 				 struct bpf_verifier_state *cur)
11871 {
11872 	struct bpf_func_state *fold, *fcur;
11873 	int i, fr = cur->curframe;
11874 
11875 	if (old->curframe != fr)
11876 		return false;
11877 
11878 	fold = old->frame[fr];
11879 	fcur = cur->frame[fr];
11880 	for (i = 0; i < MAX_BPF_REG; i++)
11881 		if (memcmp(&fold->regs[i], &fcur->regs[i],
11882 			   offsetof(struct bpf_reg_state, parent)))
11883 			return false;
11884 	return true;
11885 }
11886 
11887 
11888 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
11889 {
11890 	struct bpf_verifier_state_list *new_sl;
11891 	struct bpf_verifier_state_list *sl, **pprev;
11892 	struct bpf_verifier_state *cur = env->cur_state, *new;
11893 	int i, j, err, states_cnt = 0;
11894 	bool add_new_state = env->test_state_freq ? true : false;
11895 
11896 	cur->last_insn_idx = env->prev_insn_idx;
11897 	if (!env->insn_aux_data[insn_idx].prune_point)
11898 		/* this 'insn_idx' instruction wasn't marked, so we will not
11899 		 * be doing state search here
11900 		 */
11901 		return 0;
11902 
11903 	/* bpf progs typically have a pruning point every 4 instructions
11904 	 * http://vger.kernel.org/bpfconf2019.html#session-1
11905 	 * Do not add a new state for future pruning if the verifier hasn't seen
11906 	 * at least 2 jumps and at least 8 instructions.
11907 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
11908 	 * In tests that amounts to up to a 50% reduction in total verifier
11909 	 * memory consumption and a 20% verifier time speedup.
11910 	 */
11911 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
11912 	    env->insn_processed - env->prev_insn_processed >= 8)
11913 		add_new_state = true;
11914 
11915 	pprev = explored_state(env, insn_idx);
11916 	sl = *pprev;
11917 
11918 	clean_live_states(env, insn_idx, cur);
11919 
11920 	while (sl) {
11921 		states_cnt++;
11922 		if (sl->state.insn_idx != insn_idx)
11923 			goto next;
11924 
11925 		if (sl->state.branches) {
11926 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
11927 
11928 			if (frame->in_async_callback_fn &&
11929 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
11930 				/* Different async_entry_cnt means that the verifier is
11931 				 * processing another entry into the async callback.
11932 				 * Seeing the same state is not an indication of an infinite
11933 				 * loop or infinite recursion.
11934 				 * But finding the same state doesn't mean that it's safe
11935 				 * to stop processing the current state. The previous state
11936 				 * hasn't yet reached bpf_exit, since state.branches > 0.
11937 				 * Checking in_async_callback_fn alone is not enough either,
11938 				 * since the verifier still needs to catch infinite loops
11939 				 * inside async callbacks.
11940 				 */
11941 			} else if (states_maybe_looping(&sl->state, cur) &&
11942 				   states_equal(env, &sl->state, cur)) {
11943 				verbose_linfo(env, insn_idx, "; ");
11944 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
11945 				return -EINVAL;
11946 			}
11947 			/* if the verifier is processing a loop, avoid adding a new state
11948 			 * too often, since different loop iterations have distinct
11949 			 * states and may not help future pruning.
11950 			 * This threshold shouldn't be too low, to make sure that
11951 			 * a loop with a large bound is rejected quickly.
11952 			 * The most abusive loop will be:
11953 			 * r1 += 1
11954 			 * if r1 < 1000000 goto pc-2
11955 			 * 1M insn_processed limit / 100 == 10k peak states.
11956 			 * This threshold shouldn't be too high either, since states
11957 			 * at the end of the loop are likely to be useful in pruning.
11958 			 */
11959 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
11960 			    env->insn_processed - env->prev_insn_processed < 100)
11961 				add_new_state = false;
11962 			goto miss;
11963 		}
11964 		if (states_equal(env, &sl->state, cur)) {
11965 			sl->hit_cnt++;
11966 			/* reached equivalent register/stack state,
11967 			 * prune the search.
11968 			 * Registers read by the continuation are read by us.
11969 			 * If we have any write marks in env->cur_state, they
11970 			 * will prevent corresponding reads in the continuation
11971 			 * from reaching our parent (an explored_state).  Our
11972 			 * own state will get the read marks recorded, but
11973 			 * they'll be immediately forgotten as we're pruning
11974 			 * this state and will pop a new one.
11975 			 */
11976 			err = propagate_liveness(env, &sl->state, cur);
11977 
11978 			/* if the previous state reached the exit with precision and
11979 			 * the current state is equivalent to it (except for precision marks),
11980 			 * the precision needs to be propagated back into
11981 			 * the current state.
11982 			 */
11983 			err = err ? : push_jmp_history(env, cur);
11984 			err = err ? : propagate_precision(env, &sl->state);
11985 			if (err)
11986 				return err;
11987 			return 1;
11988 		}
11989 miss:
11990 		/* when a new state is not going to be added, do not increase the miss count.
11991 		 * Otherwise several loop iterations will remove the state
11992 		 * recorded earlier. The goal of these heuristics is to keep
11993 		 * states from some iterations of the loop (some at the beginning
11994 		 * and some at the end) to help pruning.
11995 		 */
11996 		if (add_new_state)
11997 			sl->miss_cnt++;
11998 		/* heuristic to determine whether this state is beneficial
11999 		 * to keep checking from state equivalence point of view.
12000 		 * Higher numbers increase max_states_per_insn and verification time,
12001 		 * but do not meaningfully decrease insn_processed.
12002 		 */
12003 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
12004 			/* the state is unlikely to be useful. Remove it to
12005 			 * speed up verification
12006 			 */
12007 			*pprev = sl->next;
12008 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
12009 				u32 br = sl->state.branches;
12010 
12011 				WARN_ONCE(br,
12012 					  "BUG live_done but branches_to_explore %d\n",
12013 					  br);
12014 				free_verifier_state(&sl->state, false);
12015 				kfree(sl);
12016 				env->peak_states--;
12017 			} else {
12018 				/* cannot free this state, since the parentage chain may
12019 				 * walk it later. Add it to the free_list instead, to
12020 				 * be freed at the end of verification
12021 				 */
12022 				sl->next = env->free_list;
12023 				env->free_list = sl;
12024 			}
12025 			sl = *pprev;
12026 			continue;
12027 		}
12028 next:
12029 		pprev = &sl->next;
12030 		sl = *pprev;
12031 	}
12032 
12033 	if (env->max_states_per_insn < states_cnt)
12034 		env->max_states_per_insn = states_cnt;
12035 
12036 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
12037 		return push_jmp_history(env, cur);
12038 
12039 	if (!add_new_state)
12040 		return push_jmp_history(env, cur);
12041 
12042 	/* There were no equivalent states, remember the current one.
12043 	 * Technically the current state is not proven to be safe yet,
12044 	 * but it will either reach the outermost bpf_exit (which means it's safe)
12045 	 * or it will be rejected. When there are no loops the verifier won't be
12046 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
12047 	 * again on the way to bpf_exit.
12048 	 * When looping the sl->state.branches will be > 0 and this state
12049 	 * will not be considered for equivalence until branches == 0.
12050 	 */
12051 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
12052 	if (!new_sl)
12053 		return -ENOMEM;
12054 	env->total_states++;
12055 	env->peak_states++;
12056 	env->prev_jmps_processed = env->jmps_processed;
12057 	env->prev_insn_processed = env->insn_processed;
12058 
12059 	/* add new state to the head of linked list */
12060 	new = &new_sl->state;
12061 	err = copy_verifier_state(new, cur);
12062 	if (err) {
12063 		free_verifier_state(new, false);
12064 		kfree(new_sl);
12065 		return err;
12066 	}
12067 	new->insn_idx = insn_idx;
12068 	WARN_ONCE(new->branches != 1,
12069 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
12070 
12071 	cur->parent = new;
12072 	cur->first_insn_idx = insn_idx;
12073 	clear_jmp_history(cur);
12074 	new_sl->next = *explored_state(env, insn_idx);
12075 	*explored_state(env, insn_idx) = new_sl;
12076 	/* connect new state to parentage chain. Current frame needs all
12077 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
12078 	 * to the stack implicitly by JITs) so in callers' frames connect just
12079 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
12080 	 * the state of the call instruction (with WRITTEN set), and r0 comes
12081 	 * from callee with its full parentage chain, anyway.
12082 	 */
12083 	/* clear write marks in current state: the writes we did are not writes
12084 	 * our child did, so they don't screen off its reads from us.
12085 	 * (There are no read marks in current state, because reads always mark
12086 	 * their parent and current state never has children yet.  Only
12087 	 * explored_states can get read marks.)
12088 	 */
12089 	for (j = 0; j <= cur->curframe; j++) {
12090 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
12091 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
12092 		for (i = 0; i < BPF_REG_FP; i++)
12093 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
12094 	}
12095 
12096 	/* all stack frames are accessible from callee, clear them all */
12097 	for (j = 0; j <= cur->curframe; j++) {
12098 		struct bpf_func_state *frame = cur->frame[j];
12099 		struct bpf_func_state *newframe = new->frame[j];
12100 
12101 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
12102 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
12103 			frame->stack[i].spilled_ptr.parent =
12104 						&newframe->stack[i].spilled_ptr;
12105 		}
12106 	}
12107 	return 0;
12108 }
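
/* Return values of is_state_visited() as consumed by do_check() below:
 * a negative value is an error, 1 means an equivalent explored state was
 * found and the current path can be pruned, and 0 means verification of
 * this path continues (possibly with a new state recorded at this insn).
 */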
12109 
12110 /* Return true if it's OK to have the same insn return a different type. */
12111 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
12112 {
12113 	switch (base_type(type)) {
12114 	case PTR_TO_CTX:
12115 	case PTR_TO_SOCKET:
12116 	case PTR_TO_SOCK_COMMON:
12117 	case PTR_TO_TCP_SOCK:
12118 	case PTR_TO_XDP_SOCK:
12119 	case PTR_TO_BTF_ID:
12120 		return false;
12121 	default:
12122 		return true;
12123 	}
12124 }
12125 
12126 /* If an instruction was previously used with particular pointer types, then we
12127  * need to be careful to avoid cases such as the one below, where it may be ok
12128  * for one branch to access the pointer, but not ok for the other branch:
12129  *
12130  * R1 = sock_ptr
12131  * goto X;
12132  * ...
12133  * R1 = some_other_valid_ptr;
12134  * goto X;
12135  * ...
12136  * R2 = *(u32 *)(R1 + 0);
12137  */
12138 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
12139 {
12140 	return src != prev && (!reg_type_mismatch_ok(src) ||
12141 			       !reg_type_mismatch_ok(prev));
12142 }
12143 
12144 static int do_check(struct bpf_verifier_env *env)
12145 {
12146 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
12147 	struct bpf_verifier_state *state = env->cur_state;
12148 	struct bpf_insn *insns = env->prog->insnsi;
12149 	struct bpf_reg_state *regs;
12150 	int insn_cnt = env->prog->len;
12151 	bool do_print_state = false;
12152 	int prev_insn_idx = -1;
12153 
12154 	for (;;) {
12155 		struct bpf_insn *insn;
12156 		u8 class;
12157 		int err;
12158 
12159 		env->prev_insn_idx = prev_insn_idx;
12160 		if (env->insn_idx >= insn_cnt) {
12161 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
12162 				env->insn_idx, insn_cnt);
12163 			return -EFAULT;
12164 		}
12165 
12166 		insn = &insns[env->insn_idx];
12167 		class = BPF_CLASS(insn->code);
12168 
12169 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
12170 			verbose(env,
12171 				"BPF program is too large. Processed %d insn\n",
12172 				env->insn_processed);
12173 			return -E2BIG;
12174 		}
12175 
12176 		err = is_state_visited(env, env->insn_idx);
12177 		if (err < 0)
12178 			return err;
12179 		if (err == 1) {
12180 			/* found equivalent state, can prune the search */
12181 			if (env->log.level & BPF_LOG_LEVEL) {
12182 				if (do_print_state)
12183 					verbose(env, "\nfrom %d to %d%s: safe\n",
12184 						env->prev_insn_idx, env->insn_idx,
12185 						env->cur_state->speculative ?
12186 						" (speculative execution)" : "");
12187 				else
12188 					verbose(env, "%d: safe\n", env->insn_idx);
12189 			}
12190 			goto process_bpf_exit;
12191 		}
12192 
12193 		if (signal_pending(current))
12194 			return -EAGAIN;
12195 
12196 		if (need_resched())
12197 			cond_resched();
12198 
12199 		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
12200 			verbose(env, "\nfrom %d to %d%s:",
12201 				env->prev_insn_idx, env->insn_idx,
12202 				env->cur_state->speculative ?
12203 				" (speculative execution)" : "");
12204 			print_verifier_state(env, state->frame[state->curframe], true);
12205 			do_print_state = false;
12206 		}
12207 
12208 		if (env->log.level & BPF_LOG_LEVEL) {
12209 			const struct bpf_insn_cbs cbs = {
12210 				.cb_call	= disasm_kfunc_name,
12211 				.cb_print	= verbose,
12212 				.private_data	= env,
12213 			};
12214 
12215 			if (verifier_state_scratched(env))
12216 				print_insn_state(env, state->frame[state->curframe]);
12217 
12218 			verbose_linfo(env, env->insn_idx, "; ");
12219 			env->prev_log_len = env->log.len_used;
12220 			verbose(env, "%d: ", env->insn_idx);
12221 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
12222 			env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
12223 			env->prev_log_len = env->log.len_used;
12224 		}
12225 
12226 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
12227 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
12228 							   env->prev_insn_idx);
12229 			if (err)
12230 				return err;
12231 		}
12232 
12233 		regs = cur_regs(env);
12234 		sanitize_mark_insn_seen(env);
12235 		prev_insn_idx = env->insn_idx;
12236 
12237 		if (class == BPF_ALU || class == BPF_ALU64) {
12238 			err = check_alu_op(env, insn);
12239 			if (err)
12240 				return err;
12241 
12242 		} else if (class == BPF_LDX) {
12243 			enum bpf_reg_type *prev_src_type, src_reg_type;
12244 
12245 			/* check for reserved fields is already done */
12246 
12247 			/* check src operand */
12248 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
12249 			if (err)
12250 				return err;
12251 
12252 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
12253 			if (err)
12254 				return err;
12255 
12256 			src_reg_type = regs[insn->src_reg].type;
12257 
12258 			/* check that memory (src_reg + off) is readable,
12259 			 * the state of dst_reg will be updated by this func
12260 			 */
12261 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
12262 					       insn->off, BPF_SIZE(insn->code),
12263 					       BPF_READ, insn->dst_reg, false);
12264 			if (err)
12265 				return err;
12266 
12267 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
12268 
12269 			if (*prev_src_type == NOT_INIT) {
12270 				/* saw a valid insn
12271 				 * dst_reg = *(u32 *)(src_reg + off)
12272 				 * save type to validate intersecting paths
12273 				 */
12274 				*prev_src_type = src_reg_type;
12275 
12276 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
12277 				/* An abuser program is trying to use the same insn
12278 				 * dst_reg = *(u32 *)(src_reg + off)
12279 				 * with different pointer types:
12280 				 * src_reg == ctx in one branch and
12281 				 * src_reg == stack|map in some other branch.
12282 				 * Reject it.
12283 				 */
12284 				verbose(env, "same insn cannot be used with different pointers\n");
12285 				return -EINVAL;
12286 			}
12287 
12288 		} else if (class == BPF_STX) {
12289 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
12290 
12291 			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
12292 				err = check_atomic(env, env->insn_idx, insn);
12293 				if (err)
12294 					return err;
12295 				env->insn_idx++;
12296 				continue;
12297 			}
12298 
12299 			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
12300 				verbose(env, "BPF_STX uses reserved fields\n");
12301 				return -EINVAL;
12302 			}
12303 
12304 			/* check src1 operand */
12305 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
12306 			if (err)
12307 				return err;
12308 			/* check src2 operand */
12309 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12310 			if (err)
12311 				return err;
12312 
12313 			dst_reg_type = regs[insn->dst_reg].type;
12314 
12315 			/* check that memory (dst_reg + off) is writeable */
12316 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
12317 					       insn->off, BPF_SIZE(insn->code),
12318 					       BPF_WRITE, insn->src_reg, false);
12319 			if (err)
12320 				return err;
12321 
12322 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
12323 
12324 			if (*prev_dst_type == NOT_INIT) {
12325 				*prev_dst_type = dst_reg_type;
12326 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
12327 				verbose(env, "same insn cannot be used with different pointers\n");
12328 				return -EINVAL;
12329 			}
12330 
12331 		} else if (class == BPF_ST) {
12332 			if (BPF_MODE(insn->code) != BPF_MEM ||
12333 			    insn->src_reg != BPF_REG_0) {
12334 				verbose(env, "BPF_ST uses reserved fields\n");
12335 				return -EINVAL;
12336 			}
12337 			/* check src operand */
12338 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
12339 			if (err)
12340 				return err;
12341 
12342 			if (is_ctx_reg(env, insn->dst_reg)) {
12343 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
12344 					insn->dst_reg,
12345 					reg_type_str(env, reg_state(env, insn->dst_reg)->type));
12346 				return -EACCES;
12347 			}
12348 
12349 			/* check that memory (dst_reg + off) is writeable */
12350 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
12351 					       insn->off, BPF_SIZE(insn->code),
12352 					       BPF_WRITE, -1, false);
12353 			if (err)
12354 				return err;
12355 
12356 		} else if (class == BPF_JMP || class == BPF_JMP32) {
12357 			u8 opcode = BPF_OP(insn->code);
12358 
12359 			env->jmps_processed++;
12360 			if (opcode == BPF_CALL) {
12361 				if (BPF_SRC(insn->code) != BPF_K ||
12362 				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
12363 				     && insn->off != 0) ||
12364 				    (insn->src_reg != BPF_REG_0 &&
12365 				     insn->src_reg != BPF_PSEUDO_CALL &&
12366 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
12367 				    insn->dst_reg != BPF_REG_0 ||
12368 				    class == BPF_JMP32) {
12369 					verbose(env, "BPF_CALL uses reserved fields\n");
12370 					return -EINVAL;
12371 				}
12372 
12373 				if (env->cur_state->active_spin_lock &&
12374 				    (insn->src_reg == BPF_PSEUDO_CALL ||
12375 				     insn->imm != BPF_FUNC_spin_unlock)) {
12376 					verbose(env, "function calls are not allowed while holding a lock\n");
12377 					return -EINVAL;
12378 				}
12379 				if (insn->src_reg == BPF_PSEUDO_CALL)
12380 					err = check_func_call(env, insn, &env->insn_idx);
12381 				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
12382 					err = check_kfunc_call(env, insn, &env->insn_idx);
12383 				else
12384 					err = check_helper_call(env, insn, &env->insn_idx);
12385 				if (err)
12386 					return err;
12387 			} else if (opcode == BPF_JA) {
12388 				if (BPF_SRC(insn->code) != BPF_K ||
12389 				    insn->imm != 0 ||
12390 				    insn->src_reg != BPF_REG_0 ||
12391 				    insn->dst_reg != BPF_REG_0 ||
12392 				    class == BPF_JMP32) {
12393 					verbose(env, "BPF_JA uses reserved fields\n");
12394 					return -EINVAL;
12395 				}
12396 
12397 				env->insn_idx += insn->off + 1;
12398 				continue;
12399 
12400 			} else if (opcode == BPF_EXIT) {
12401 				if (BPF_SRC(insn->code) != BPF_K ||
12402 				    insn->imm != 0 ||
12403 				    insn->src_reg != BPF_REG_0 ||
12404 				    insn->dst_reg != BPF_REG_0 ||
12405 				    class == BPF_JMP32) {
12406 					verbose(env, "BPF_EXIT uses reserved fields\n");
12407 					return -EINVAL;
12408 				}
12409 
12410 				if (env->cur_state->active_spin_lock) {
12411 					verbose(env, "bpf_spin_unlock is missing\n");
12412 					return -EINVAL;
12413 				}
12414 
12415 				/* We must do check_reference_leak here before
12416 				 * prepare_func_exit to handle the case when
12417 				 * state->curframe > 0: it may be a callback
12418 				 * function, whose reference_state must
12419 				 * match the caller's reference state when it exits.
12420 				 */
12421 				err = check_reference_leak(env);
12422 				if (err)
12423 					return err;
12424 
12425 				if (state->curframe) {
12426 					/* exit from nested function */
12427 					err = prepare_func_exit(env, &env->insn_idx);
12428 					if (err)
12429 						return err;
12430 					do_print_state = true;
12431 					continue;
12432 				}
12433 
12434 				err = check_return_code(env);
12435 				if (err)
12436 					return err;
12437 process_bpf_exit:
12438 				mark_verifier_state_scratched(env);
12439 				update_branch_counts(env, env->cur_state);
12440 				err = pop_stack(env, &prev_insn_idx,
12441 						&env->insn_idx, pop_log);
12442 				if (err < 0) {
12443 					if (err != -ENOENT)
12444 						return err;
12445 					break;
12446 				} else {
12447 					do_print_state = true;
12448 					continue;
12449 				}
12450 			} else {
12451 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
12452 				if (err)
12453 					return err;
12454 			}
12455 		} else if (class == BPF_LD) {
12456 			u8 mode = BPF_MODE(insn->code);
12457 
12458 			if (mode == BPF_ABS || mode == BPF_IND) {
12459 				err = check_ld_abs(env, insn);
12460 				if (err)
12461 					return err;
12462 
12463 			} else if (mode == BPF_IMM) {
12464 				err = check_ld_imm(env, insn);
12465 				if (err)
12466 					return err;
12467 
12468 				env->insn_idx++;
12469 				sanitize_mark_insn_seen(env);
12470 			} else {
12471 				verbose(env, "invalid BPF_LD mode\n");
12472 				return -EINVAL;
12473 			}
12474 		} else {
12475 			verbose(env, "unknown insn class %d\n", class);
12476 			return -EINVAL;
12477 		}
12478 
12479 		env->insn_idx++;
12480 	}
12481 
12482 	return 0;
12483 }
12484 
12485 static int find_btf_percpu_datasec(struct btf *btf)
12486 {
12487 	const struct btf_type *t;
12488 	const char *tname;
12489 	int i, n;
12490 
12491 	/*
12492 	 * Both vmlinux and each module have their own ".data..percpu"
12493 	 * DATASEC in BTF. So in the module case, we need to skip the vmlinux
12494 	 * BTF types and look only at the module's own BTF types.
12495 	 */
12496 	n = btf_nr_types(btf);
12497 	if (btf_is_module(btf))
12498 		i = btf_nr_types(btf_vmlinux);
12499 	else
12500 		i = 1;
12501 
12502 	for(; i < n; i++) {
12503 		t = btf_type_by_id(btf, i);
12504 		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
12505 			continue;
12506 
12507 		tname = btf_name_by_offset(btf, t->name_off);
12508 		if (!strcmp(tname, ".data..percpu"))
12509 			return i;
12510 	}
12511 
12512 	return -ENOENT;
12513 }
12514 
12515 /* replace pseudo btf_id with kernel symbol address */
12516 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
12517 			       struct bpf_insn *insn,
12518 			       struct bpf_insn_aux_data *aux)
12519 {
12520 	const struct btf_var_secinfo *vsi;
12521 	const struct btf_type *datasec;
12522 	struct btf_mod_pair *btf_mod;
12523 	const struct btf_type *t;
12524 	const char *sym_name;
12525 	bool percpu = false;
12526 	u32 type, id = insn->imm;
12527 	struct btf *btf;
12528 	s32 datasec_id;
12529 	u64 addr;
12530 	int i, btf_fd, err;
12531 
12532 	btf_fd = insn[1].imm;
12533 	if (btf_fd) {
12534 		btf = btf_get_by_fd(btf_fd);
12535 		if (IS_ERR(btf)) {
12536 			verbose(env, "invalid module BTF object FD specified.\n");
12537 			return -EINVAL;
12538 		}
12539 	} else {
12540 		if (!btf_vmlinux) {
12541 			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
12542 			return -EINVAL;
12543 		}
12544 		btf = btf_vmlinux;
12545 		btf_get(btf);
12546 	}
12547 
12548 	t = btf_type_by_id(btf, id);
12549 	if (!t) {
12550 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
12551 		err = -ENOENT;
12552 		goto err_put;
12553 	}
12554 
12555 	if (!btf_type_is_var(t)) {
12556 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
12557 		err = -EINVAL;
12558 		goto err_put;
12559 	}
12560 
12561 	sym_name = btf_name_by_offset(btf, t->name_off);
12562 	addr = kallsyms_lookup_name(sym_name);
12563 	if (!addr) {
12564 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
12565 			sym_name);
12566 		err = -ENOENT;
12567 		goto err_put;
12568 	}
12569 
12570 	datasec_id = find_btf_percpu_datasec(btf);
12571 	if (datasec_id > 0) {
12572 		datasec = btf_type_by_id(btf, datasec_id);
12573 		for_each_vsi(i, datasec, vsi) {
12574 			if (vsi->type == id) {
12575 				percpu = true;
12576 				break;
12577 			}
12578 		}
12579 	}
12580 
12581 	insn[0].imm = (u32)addr;
12582 	insn[1].imm = addr >> 32;
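	/* Note: a BPF_LD | BPF_IMM | BPF_DW insn occupies two insn slots; the
	 * low 32 bits of the 64-bit immediate live in insn[0].imm and the
	 * high 32 bits in insn[1].imm, which is why the resolved kernel
	 * address is split across the pair above.
	 */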
12583 
12584 	type = t->type;
12585 	t = btf_type_skip_modifiers(btf, type, NULL);
12586 	if (percpu) {
12587 		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
12588 		aux->btf_var.btf = btf;
12589 		aux->btf_var.btf_id = type;
12590 	} else if (!btf_type_is_struct(t)) {
12591 		const struct btf_type *ret;
12592 		const char *tname;
12593 		u32 tsize;
12594 
12595 		/* resolve the type size of ksym. */
12596 		ret = btf_resolve_size(btf, t, &tsize);
12597 		if (IS_ERR(ret)) {
12598 			tname = btf_name_by_offset(btf, t->name_off);
12599 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
12600 				tname, PTR_ERR(ret));
12601 			err = -EINVAL;
12602 			goto err_put;
12603 		}
12604 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
12605 		aux->btf_var.mem_size = tsize;
12606 	} else {
12607 		aux->btf_var.reg_type = PTR_TO_BTF_ID;
12608 		aux->btf_var.btf = btf;
12609 		aux->btf_var.btf_id = type;
12610 	}
12611 
12612 	/* check whether we recorded this BTF (and maybe module) already */
12613 	for (i = 0; i < env->used_btf_cnt; i++) {
12614 		if (env->used_btfs[i].btf == btf) {
12615 			btf_put(btf);
12616 			return 0;
12617 		}
12618 	}
12619 
12620 	if (env->used_btf_cnt >= MAX_USED_BTFS) {
12621 		err = -E2BIG;
12622 		goto err_put;
12623 	}
12624 
12625 	btf_mod = &env->used_btfs[env->used_btf_cnt];
12626 	btf_mod->btf = btf;
12627 	btf_mod->module = NULL;
12628 
12629 	/* if we reference variables from a kernel module, bump its refcount */
12630 	if (btf_is_module(btf)) {
12631 		btf_mod->module = btf_try_get_module(btf);
12632 		if (!btf_mod->module) {
12633 			err = -ENXIO;
12634 			goto err_put;
12635 		}
12636 	}
12637 
12638 	env->used_btf_cnt++;
12639 
12640 	return 0;
12641 err_put:
12642 	btf_put(btf);
12643 	return err;
12644 }
12645 
12646 static bool is_tracing_prog_type(enum bpf_prog_type type)
12647 {
12648 	switch (type) {
12649 	case BPF_PROG_TYPE_KPROBE:
12650 	case BPF_PROG_TYPE_TRACEPOINT:
12651 	case BPF_PROG_TYPE_PERF_EVENT:
12652 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
12653 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
12654 		return true;
12655 	default:
12656 		return false;
12657 	}
12658 }
12659 
12660 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
12661 					struct bpf_map *map,
12662 					struct bpf_prog *prog)
12663 
12664 {
12665 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
12666 
12667 	if (map_value_has_spin_lock(map)) {
12668 		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
12669 			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
12670 			return -EINVAL;
12671 		}
12672 
12673 		if (is_tracing_prog_type(prog_type)) {
12674 			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
12675 			return -EINVAL;
12676 		}
12677 
12678 		if (prog->aux->sleepable) {
12679 			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
12680 			return -EINVAL;
12681 		}
12682 	}
12683 
12684 	if (map_value_has_timer(map)) {
12685 		if (is_tracing_prog_type(prog_type)) {
12686 			verbose(env, "tracing progs cannot use bpf_timer yet\n");
12687 			return -EINVAL;
12688 		}
12689 	}
12690 
12691 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
12692 	    !bpf_offload_prog_map_match(prog, map)) {
12693 		verbose(env, "offload device mismatch between prog and map\n");
12694 		return -EINVAL;
12695 	}
12696 
12697 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
12698 		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
12699 		return -EINVAL;
12700 	}
12701 
12702 	if (prog->aux->sleepable)
12703 		switch (map->map_type) {
12704 		case BPF_MAP_TYPE_HASH:
12705 		case BPF_MAP_TYPE_LRU_HASH:
12706 		case BPF_MAP_TYPE_ARRAY:
12707 		case BPF_MAP_TYPE_PERCPU_HASH:
12708 		case BPF_MAP_TYPE_PERCPU_ARRAY:
12709 		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
12710 		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
12711 		case BPF_MAP_TYPE_HASH_OF_MAPS:
12712 		case BPF_MAP_TYPE_RINGBUF:
12713 		case BPF_MAP_TYPE_USER_RINGBUF:
12714 		case BPF_MAP_TYPE_INODE_STORAGE:
12715 		case BPF_MAP_TYPE_SK_STORAGE:
12716 		case BPF_MAP_TYPE_TASK_STORAGE:
12717 			break;
12718 		default:
12719 			verbose(env,
12720 				"Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
12721 			return -EINVAL;
12722 		}
12723 
12724 	return 0;
12725 }
12726 
12727 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
12728 {
12729 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
12730 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
12731 }
12732 
12733 /* find and rewrite pseudo imm in ld_imm64 instructions:
12734  *
12735  * 1. if it accesses a map FD, replace it with the actual map pointer.
12736  * 2. if it accesses the btf_id of a VAR, replace it with a pointer to the var.
12737  *
12738  * NOTE: btf_vmlinux is required for converting pseudo btf_id.
12739  */
12740 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
12741 {
12742 	struct bpf_insn *insn = env->prog->insnsi;
12743 	int insn_cnt = env->prog->len;
12744 	int i, j, err;
12745 
12746 	err = bpf_prog_calc_tag(env->prog);
12747 	if (err)
12748 		return err;
12749 
12750 	for (i = 0; i < insn_cnt; i++, insn++) {
12751 		if (BPF_CLASS(insn->code) == BPF_LDX &&
12752 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
12753 			verbose(env, "BPF_LDX uses reserved fields\n");
12754 			return -EINVAL;
12755 		}
12756 
12757 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
12758 			struct bpf_insn_aux_data *aux;
12759 			struct bpf_map *map;
12760 			struct fd f;
12761 			u64 addr;
12762 			u32 fd;
12763 
12764 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
12765 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
12766 			    insn[1].off != 0) {
12767 				verbose(env, "invalid bpf_ld_imm64 insn\n");
12768 				return -EINVAL;
12769 			}
12770 
12771 			if (insn[0].src_reg == 0)
12772 				/* valid generic load 64-bit imm */
12773 				goto next_insn;
12774 
12775 			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
12776 				aux = &env->insn_aux_data[i];
12777 				err = check_pseudo_btf_id(env, insn, aux);
12778 				if (err)
12779 					return err;
12780 				goto next_insn;
12781 			}
12782 
12783 			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
12784 				aux = &env->insn_aux_data[i];
12785 				aux->ptr_type = PTR_TO_FUNC;
12786 				goto next_insn;
12787 			}
12788 
12789 			/* In the final convert_pseudo_ld_imm64() step, this is
12790 			 * converted into a regular 64-bit imm load insn.
12791 			 */
12792 			switch (insn[0].src_reg) {
12793 			case BPF_PSEUDO_MAP_VALUE:
12794 			case BPF_PSEUDO_MAP_IDX_VALUE:
12795 				break;
12796 			case BPF_PSEUDO_MAP_FD:
12797 			case BPF_PSEUDO_MAP_IDX:
12798 				if (insn[1].imm == 0)
12799 					break;
12800 				fallthrough;
12801 			default:
12802 				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
12803 				return -EINVAL;
12804 			}
12805 
12806 			switch (insn[0].src_reg) {
12807 			case BPF_PSEUDO_MAP_IDX_VALUE:
12808 			case BPF_PSEUDO_MAP_IDX:
12809 				if (bpfptr_is_null(env->fd_array)) {
12810 					verbose(env, "fd_idx without fd_array is invalid\n");
12811 					return -EPROTO;
12812 				}
12813 				if (copy_from_bpfptr_offset(&fd, env->fd_array,
12814 							    insn[0].imm * sizeof(fd),
12815 							    sizeof(fd)))
12816 					return -EFAULT;
12817 				break;
12818 			default:
12819 				fd = insn[0].imm;
12820 				break;
12821 			}
12822 
12823 			f = fdget(fd);
12824 			map = __bpf_map_get(f);
12825 			if (IS_ERR(map)) {
12826 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
12827 					insn[0].imm);
12828 				return PTR_ERR(map);
12829 			}
12830 
12831 			err = check_map_prog_compatibility(env, map, env->prog);
12832 			if (err) {
12833 				fdput(f);
12834 				return err;
12835 			}
12836 
12837 			aux = &env->insn_aux_data[i];
12838 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
12839 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
12840 				addr = (unsigned long)map;
12841 			} else {
12842 				u32 off = insn[1].imm;
12843 
12844 				if (off >= BPF_MAX_VAR_OFF) {
12845 					verbose(env, "direct value offset of %u is not allowed\n", off);
12846 					fdput(f);
12847 					return -EINVAL;
12848 				}
12849 
12850 				if (!map->ops->map_direct_value_addr) {
12851 					verbose(env, "no direct value access support for this map type\n");
12852 					fdput(f);
12853 					return -EINVAL;
12854 				}
12855 
12856 				err = map->ops->map_direct_value_addr(map, &addr, off);
12857 				if (err) {
12858 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
12859 						map->value_size, off);
12860 					fdput(f);
12861 					return err;
12862 				}
12863 
12864 				aux->map_off = off;
12865 				addr += off;
12866 			}
12867 
12868 			insn[0].imm = (u32)addr;
12869 			insn[1].imm = addr >> 32;
12870 
12871 			/* check whether we recorded this map already */
12872 			for (j = 0; j < env->used_map_cnt; j++) {
12873 				if (env->used_maps[j] == map) {
12874 					aux->map_index = j;
12875 					fdput(f);
12876 					goto next_insn;
12877 				}
12878 			}
12879 
12880 			if (env->used_map_cnt >= MAX_USED_MAPS) {
12881 				fdput(f);
12882 				return -E2BIG;
12883 			}
12884 
12885 			/* hold the map. If the program is rejected by the verifier,
12886 			 * the map will be released by release_maps() or it
12887 			 * will be used by the valid program until it's unloaded
12888 			 * and all maps are released in free_used_maps()
12889 			 */
12890 			bpf_map_inc(map);
12891 
12892 			aux->map_index = env->used_map_cnt;
12893 			env->used_maps[env->used_map_cnt++] = map;
12894 
12895 			if (bpf_map_is_cgroup_storage(map) &&
12896 			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
12897 				verbose(env, "only one cgroup storage of each type is allowed\n");
12898 				fdput(f);
12899 				return -EBUSY;
12900 			}
12901 
12902 			fdput(f);
12903 next_insn:
12904 			insn++;
12905 			i++;
12906 			continue;
12907 		}
12908 
12909 		/* Basic sanity check before we invest more work here. */
12910 		if (!bpf_opcode_in_insntable(insn->code)) {
12911 			verbose(env, "unknown opcode %02x\n", insn->code);
12912 			return -EINVAL;
12913 		}
12914 	}
12915 
12916 	/* now all pseudo BPF_LD_IMM64 instructions load a valid
12917 	 * 'struct bpf_map *' into a register instead of a user map_fd.
12918 	 * These pointers will be used later by the verifier to validate map access.
12919 	 */
12920 	return 0;
12921 }
12922 
12923 /* drop refcnt of maps used by the rejected program */
12924 static void release_maps(struct bpf_verifier_env *env)
12925 {
12926 	__bpf_free_used_maps(env->prog->aux, env->used_maps,
12927 			     env->used_map_cnt);
12928 }
12929 
12930 /* drop refcnt of btfs used by the rejected program */
12931 static void release_btfs(struct bpf_verifier_env *env)
12932 {
12933 	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
12934 			     env->used_btf_cnt);
12935 }
12936 
12937 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
12938 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
12939 {
12940 	struct bpf_insn *insn = env->prog->insnsi;
12941 	int insn_cnt = env->prog->len;
12942 	int i;
12943 
12944 	for (i = 0; i < insn_cnt; i++, insn++) {
12945 		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
12946 			continue;
12947 		if (insn->src_reg == BPF_PSEUDO_FUNC)
12948 			continue;
12949 		insn->src_reg = 0;
12950 	}
12951 }
12952 
12953 /* a single env->prog->insnsi[off] instruction was replaced with the range
12954  * insnsi[off, off + cnt).  Adjust the corresponding insn_aux_data by copying
12955  * [0, off) and [off, end) to new locations, so the patched range stays zeroed
12956  */
12957 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
12958 				 struct bpf_insn_aux_data *new_data,
12959 				 struct bpf_prog *new_prog, u32 off, u32 cnt)
12960 {
12961 	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
12962 	struct bpf_insn *insn = new_prog->insnsi;
12963 	u32 old_seen = old_data[off].seen;
12964 	u32 prog_len;
12965 	int i;
12966 
12967 	/* aux info at OFF always needs adjustment, no matter whether the fast
12968 	 * path (cnt == 1) is taken or not. There is no guarantee that the insn
12969 	 * at OFF is the original insn of the old prog.
12970 	 */
12971 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
12972 
12973 	if (cnt == 1)
12974 		return;
12975 	prog_len = new_prog->len;
12976 
12977 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
12978 	memcpy(new_data + off + cnt - 1, old_data + off,
12979 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
12980 	for (i = off; i < off + cnt - 1; i++) {
12981 		/* Expand insnsi[off]'s seen count to the patched range. */
12982 		new_data[i].seen = old_seen;
12983 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
12984 	}
12985 	env->insn_aux_data = new_data;
12986 	vfree(old_data);
12987 }
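
/* Net effect of adjust_insn_aux_data() above: aux data for insns before the
 * patched region stays in place, the aux entry of the original insn at OFF
 * ends up attached to the last insn of the patch (new index off + cnt - 1),
 * and the freshly created entries in [off, off + cnt - 1) carry only the
 * propagated 'seen' mark and a recomputed zext_dst.
 */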
12988 
12989 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
12990 {
12991 	int i;
12992 
12993 	if (len == 1)
12994 		return;
12995 	/* NOTE: fake 'exit' subprog should be updated as well. */
12996 	for (i = 0; i <= env->subprog_cnt; i++) {
12997 		if (env->subprog_info[i].start <= off)
12998 			continue;
12999 		env->subprog_info[i].start += len - 1;
13000 	}
13001 }
13002 
13003 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
13004 {
13005 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
13006 	int i, sz = prog->aux->size_poke_tab;
13007 	struct bpf_jit_poke_descriptor *desc;
13008 
13009 	for (i = 0; i < sz; i++) {
13010 		desc = &tab[i];
13011 		if (desc->insn_idx <= off)
13012 			continue;
13013 		desc->insn_idx += len - 1;
13014 	}
13015 }
13016 
13017 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
13018 					    const struct bpf_insn *patch, u32 len)
13019 {
13020 	struct bpf_prog *new_prog;
13021 	struct bpf_insn_aux_data *new_data = NULL;
13022 
13023 	if (len > 1) {
13024 		new_data = vzalloc(array_size(env->prog->len + len - 1,
13025 					      sizeof(struct bpf_insn_aux_data)));
13026 		if (!new_data)
13027 			return NULL;
13028 	}
13029 
13030 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
13031 	if (IS_ERR(new_prog)) {
13032 		if (PTR_ERR(new_prog) == -ERANGE)
13033 			verbose(env,
13034 				"insn %d cannot be patched due to 16-bit range\n",
13035 				env->insn_aux_data[off].orig_idx);
13036 		vfree(new_data);
13037 		return NULL;
13038 	}
13039 	adjust_insn_aux_data(env, new_data, new_prog, off, len);
13040 	adjust_subprog_starts(env, off, len);
13041 	adjust_poke_descs(new_prog, off, len);
13042 	return new_prog;
13043 }
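
/* bpf_patch_insn_data() is the common entry point for the rewrite passes
 * below: it grows insn_aux_data when the patch is longer than one insn,
 * performs the actual patching via bpf_patch_insn_single(), and then fixes
 * up subprog starts and jit poke descriptors so they keep pointing at the
 * same logical instructions after the program has grown.
 */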
13044 
13045 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
13046 					      u32 off, u32 cnt)
13047 {
13048 	int i, j;
13049 
13050 	/* find first prog starting at or after off (first to remove) */
13051 	for (i = 0; i < env->subprog_cnt; i++)
13052 		if (env->subprog_info[i].start >= off)
13053 			break;
13054 	/* find first prog starting at or after off + cnt (first to stay) */
13055 	for (j = i; j < env->subprog_cnt; j++)
13056 		if (env->subprog_info[j].start >= off + cnt)
13057 			break;
13058 	/* if j doesn't start exactly at off + cnt, we are just removing
13059 	 * the front of previous prog
13060 	 */
13061 	if (env->subprog_info[j].start != off + cnt)
13062 		j--;
13063 
13064 	if (j > i) {
13065 		struct bpf_prog_aux *aux = env->prog->aux;
13066 		int move;
13067 
13068 		/* move fake 'exit' subprog as well */
13069 		move = env->subprog_cnt + 1 - j;
13070 
13071 		memmove(env->subprog_info + i,
13072 			env->subprog_info + j,
13073 			sizeof(*env->subprog_info) * move);
13074 		env->subprog_cnt -= j - i;
13075 
13076 		/* remove func_info */
13077 		if (aux->func_info) {
13078 			move = aux->func_info_cnt - j;
13079 
13080 			memmove(aux->func_info + i,
13081 				aux->func_info + j,
13082 				sizeof(*aux->func_info) * move);
13083 			aux->func_info_cnt -= j - i;
13084 			/* func_info->insn_off is set after all code rewrites,
13085 			 * in adjust_btf_func() - no need to adjust
13086 			 */
13087 		}
13088 	} else {
13089 		/* convert i from "first prog to remove" to "first to adjust" */
13090 		if (env->subprog_info[i].start == off)
13091 			i++;
13092 	}
13093 
13094 	/* update fake 'exit' subprog as well */
13095 	for (; i <= env->subprog_cnt; i++)
13096 		env->subprog_info[i].start -= cnt;
13097 
13098 	return 0;
13099 }
13100 
13101 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
13102 				      u32 cnt)
13103 {
13104 	struct bpf_prog *prog = env->prog;
13105 	u32 i, l_off, l_cnt, nr_linfo;
13106 	struct bpf_line_info *linfo;
13107 
13108 	nr_linfo = prog->aux->nr_linfo;
13109 	if (!nr_linfo)
13110 		return 0;
13111 
13112 	linfo = prog->aux->linfo;
13113 
13114 	/* find first line info to remove, count lines to be removed */
13115 	for (i = 0; i < nr_linfo; i++)
13116 		if (linfo[i].insn_off >= off)
13117 			break;
13118 
13119 	l_off = i;
13120 	l_cnt = 0;
13121 	for (; i < nr_linfo; i++)
13122 		if (linfo[i].insn_off < off + cnt)
13123 			l_cnt++;
13124 		else
13125 			break;
13126 
13127 	/* If the first live insn doesn't match the first live linfo, it needs
13128 	 * to "inherit" the last removed linfo.  prog is already modified, so
13129 	 * prog->len == off means no live instructions after it (the tail was removed).
13130 	 */
13131 	if (prog->len != off && l_cnt &&
13132 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
13133 		l_cnt--;
13134 		linfo[--i].insn_off = off + cnt;
13135 	}
13136 
13137 	/* remove the line info entries which refer to the removed instructions */
13138 	if (l_cnt) {
13139 		memmove(linfo + l_off, linfo + i,
13140 			sizeof(*linfo) * (nr_linfo - i));
13141 
13142 		prog->aux->nr_linfo -= l_cnt;
13143 		nr_linfo = prog->aux->nr_linfo;
13144 	}
13145 
13146 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
13147 	for (i = l_off; i < nr_linfo; i++)
13148 		linfo[i].insn_off -= cnt;
13149 
13150 	/* fix up all subprogs (incl. 'exit') which start >= off */
13151 	for (i = 0; i <= env->subprog_cnt; i++)
13152 		if (env->subprog_info[i].linfo_idx > l_off) {
13153 			/* program may have started in the removed region but
13154 			 * may not be fully removed
13155 			 */
13156 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
13157 				env->subprog_info[i].linfo_idx -= l_cnt;
13158 			else
13159 				env->subprog_info[i].linfo_idx = l_off;
13160 		}
13161 
13162 	return 0;
13163 }
13164 
13165 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
13166 {
13167 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13168 	unsigned int orig_prog_len = env->prog->len;
13169 	int err;
13170 
13171 	if (bpf_prog_is_dev_bound(env->prog->aux))
13172 		bpf_prog_offload_remove_insns(env, off, cnt);
13173 
13174 	err = bpf_remove_insns(env->prog, off, cnt);
13175 	if (err)
13176 		return err;
13177 
13178 	err = adjust_subprog_starts_after_remove(env, off, cnt);
13179 	if (err)
13180 		return err;
13181 
13182 	err = bpf_adj_linfo_after_remove(env, off, cnt);
13183 	if (err)
13184 		return err;
13185 
13186 	memmove(aux_data + off,	aux_data + off + cnt,
13187 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
13188 
13189 	return 0;
13190 }
13191 
13192 /* The verifier does more data flow analysis than llvm and will not
13193  * explore branches that are dead at run time. Malicious programs can
13194  * have dead code too. Therefore replace all dead at-run-time code
13195  * with 'ja -1'.
13196  *
13197  * Plain nops would not be optimal: e.g. if they sat at the end of the
13198  * program and, through another bug, we managed to jump there, then
13199  * we'd execute beyond program memory. Returning an exception
13200  * code also wouldn't work, since we can have subprogs where the dead
13201  * code could be located.
13202  */
13203 static void sanitize_dead_code(struct bpf_verifier_env *env)
13204 {
13205 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13206 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
13207 	struct bpf_insn *insn = env->prog->insnsi;
13208 	const int insn_cnt = env->prog->len;
13209 	int i;
13210 
13211 	for (i = 0; i < insn_cnt; i++) {
13212 		if (aux_data[i].seen)
13213 			continue;
13214 		memcpy(insn + i, &trap, sizeof(trap));
13215 		aux_data[i].zext_dst = false;
13216 	}
13217 }
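
/* With the rewrite above, every unreachable insn becomes "ja -1", i.e. an
 * unconditional jump back to itself (BPF advances pc by off + 1).  If such
 * an insn were ever reached, which would itself indicate a bug, the program
 * would spin in place rather than run off the end of the image.
 */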
13218 
13219 static bool insn_is_cond_jump(u8 code)
13220 {
13221 	u8 op;
13222 
13223 	if (BPF_CLASS(code) == BPF_JMP32)
13224 		return true;
13225 
13226 	if (BPF_CLASS(code) != BPF_JMP)
13227 		return false;
13228 
13229 	op = BPF_OP(code);
13230 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
13231 }
13232 
13233 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
13234 {
13235 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13236 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
13237 	struct bpf_insn *insn = env->prog->insnsi;
13238 	const int insn_cnt = env->prog->len;
13239 	int i;
13240 
13241 	for (i = 0; i < insn_cnt; i++, insn++) {
13242 		if (!insn_is_cond_jump(insn->code))
13243 			continue;
13244 
13245 		if (!aux_data[i + 1].seen)
13246 			ja.off = insn->off;
13247 		else if (!aux_data[i + 1 + insn->off].seen)
13248 			ja.off = 0;
13249 		else
13250 			continue;
13251 
13252 		if (bpf_prog_is_dev_bound(env->prog->aux))
13253 			bpf_prog_offload_replace_insn(env, i, &ja);
13254 
13255 		memcpy(insn, &ja, sizeof(ja));
13256 	}
13257 }
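
/* In the pass above, a conditional jump whose fall-through insn was never
 * seen is hard-wired into an unconditional jump to its (live) target
 * (ja.off = insn->off), while one whose jump target is dead becomes a plain
 * fall-through (ja.off = 0); either way only one successor remains.
 */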
13258 
13259 static int opt_remove_dead_code(struct bpf_verifier_env *env)
13260 {
13261 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13262 	int insn_cnt = env->prog->len;
13263 	int i, err;
13264 
13265 	for (i = 0; i < insn_cnt; i++) {
13266 		int j;
13267 
13268 		j = 0;
13269 		while (i + j < insn_cnt && !aux_data[i + j].seen)
13270 			j++;
13271 		if (!j)
13272 			continue;
13273 
13274 		err = verifier_remove_insns(env, i, j);
13275 		if (err)
13276 			return err;
13277 		insn_cnt = env->prog->len;
13278 	}
13279 
13280 	return 0;
13281 }
13282 
13283 static int opt_remove_nops(struct bpf_verifier_env *env)
13284 {
13285 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
13286 	struct bpf_insn *insn = env->prog->insnsi;
13287 	int insn_cnt = env->prog->len;
13288 	int i, err;
13289 
13290 	for (i = 0; i < insn_cnt; i++) {
13291 		if (memcmp(&insn[i], &ja, sizeof(ja)))
13292 			continue;
13293 
13294 		err = verifier_remove_insns(env, i, 1);
13295 		if (err)
13296 			return err;
13297 		insn_cnt--;
13298 		i--;
13299 	}
13300 
13301 	return 0;
13302 }
13303 
13304 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
13305 					 const union bpf_attr *attr)
13306 {
13307 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
13308 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
13309 	int i, patch_len, delta = 0, len = env->prog->len;
13310 	struct bpf_insn *insns = env->prog->insnsi;
13311 	struct bpf_prog *new_prog;
13312 	bool rnd_hi32;
13313 
13314 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
13315 	zext_patch[1] = BPF_ZEXT_REG(0);
13316 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
13317 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
13318 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
13319 	for (i = 0; i < len; i++) {
13320 		int adj_idx = i + delta;
13321 		struct bpf_insn insn;
13322 		int load_reg;
13323 
13324 		insn = insns[adj_idx];
13325 		load_reg = insn_def_regno(&insn);
13326 		if (!aux[adj_idx].zext_dst) {
13327 			u8 code, class;
13328 			u32 imm_rnd;
13329 
13330 			if (!rnd_hi32)
13331 				continue;
13332 
13333 			code = insn.code;
13334 			class = BPF_CLASS(code);
13335 			if (load_reg == -1)
13336 				continue;
13337 
13338 			/* NOTE: arg "reg" (the fourth one) is only used for
13339 			 *       BPF_STX + SRC_OP, so it is safe to pass NULL
13340 			 *       here.
13341 			 */
13342 			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
13343 				if (class == BPF_LD &&
13344 				    BPF_MODE(code) == BPF_IMM)
13345 					i++;
13346 				continue;
13347 			}
13348 
13349 			/* ctx load could be transformed into wider load. */
13350 			if (class == BPF_LDX &&
13351 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
13352 				continue;
13353 
13354 			imm_rnd = get_random_u32();
13355 			rnd_hi32_patch[0] = insn;
13356 			rnd_hi32_patch[1].imm = imm_rnd;
13357 			rnd_hi32_patch[3].dst_reg = load_reg;
13358 			patch = rnd_hi32_patch;
13359 			patch_len = 4;
13360 			goto apply_patch_buffer;
13361 		}
13362 
13363 		/* Add in a zero-extend instruction if a) the JIT has requested
13364 		 * it or b) it's a CMPXCHG.
13365 		 *
13366 		 * The latter is because: BPF_CMPXCHG always loads a value into
13367 		 * R0, therefore always zero-extends. However some archs'
13368 		 * equivalent instruction only does this load when the
13369 		 * comparison is successful. This detail of CMPXCHG is
13370 		 * orthogonal to the general zero-extension behaviour of the
13371 		 * CPU, so it's treated independently of bpf_jit_needs_zext.
13372 		 */
13373 		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
13374 			continue;
13375 
13376 		if (WARN_ON(load_reg == -1)) {
13377 			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
13378 			return -EFAULT;
13379 		}
13380 
13381 		zext_patch[0] = insn;
13382 		zext_patch[1].dst_reg = load_reg;
13383 		zext_patch[1].src_reg = load_reg;
13384 		patch = zext_patch;
13385 		patch_len = 2;
13386 apply_patch_buffer:
13387 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
13388 		if (!new_prog)
13389 			return -ENOMEM;
13390 		env->prog = new_prog;
13391 		insns = new_prog->insnsi;
13392 		aux = env->insn_aux_data;
13393 		delta += patch_len - 1;
13394 	}
13395 
13396 	return 0;
13397 }
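/* Editor's note (illustrative sketch): when an insn writing a 32-bit
 * subregister needs an explicit zero-extension (the JIT asked for it via
 * bpf_jit_needs_zext(), or the insn is BPF_CMPXCHG), it is followed by a
 * BPF_ZEXT_REG() of its destination, conceptually
 *
 *     w2 = w3            ->    w2 = w3
 *                               r2 = (u32)r2   // inserted zext
 *
 * With BPF_F_TEST_RND_HI32, insns whose upper 32 bits are provably not
 * relied upon instead get code that fills the high half with a random
 * value, to flush out readers that wrongly assume it is zero.
 */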
13398 
13399 /* convert load instructions that access fields of a context type into a
13400  * sequence of instructions that access fields of the underlying structure:
13401  *     struct __sk_buff    -> struct sk_buff
13402  *     struct bpf_sock_ops -> struct sock
13403  */
13404 static int convert_ctx_accesses(struct bpf_verifier_env *env)
13405 {
13406 	const struct bpf_verifier_ops *ops = env->ops;
13407 	int i, cnt, size, ctx_field_size, delta = 0;
13408 	const int insn_cnt = env->prog->len;
13409 	struct bpf_insn insn_buf[16], *insn;
13410 	u32 target_size, size_default, off;
13411 	struct bpf_prog *new_prog;
13412 	enum bpf_access_type type;
13413 	bool is_narrower_load;
13414 
13415 	if (ops->gen_prologue || env->seen_direct_write) {
13416 		if (!ops->gen_prologue) {
13417 			verbose(env, "bpf verifier is misconfigured\n");
13418 			return -EINVAL;
13419 		}
13420 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
13421 					env->prog);
13422 		if (cnt >= ARRAY_SIZE(insn_buf)) {
13423 			verbose(env, "bpf verifier is misconfigured\n");
13424 			return -EINVAL;
13425 		} else if (cnt) {
13426 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
13427 			if (!new_prog)
13428 				return -ENOMEM;
13429 
13430 			env->prog = new_prog;
13431 			delta += cnt - 1;
13432 		}
13433 	}
13434 
13435 	if (bpf_prog_is_dev_bound(env->prog->aux))
13436 		return 0;
13437 
13438 	insn = env->prog->insnsi + delta;
13439 
13440 	for (i = 0; i < insn_cnt; i++, insn++) {
13441 		bpf_convert_ctx_access_t convert_ctx_access;
13442 		bool ctx_access;
13443 
13444 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
13445 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
13446 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
13447 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
13448 			type = BPF_READ;
13449 			ctx_access = true;
13450 		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
13451 			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
13452 			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
13453 			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
13454 			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
13455 			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
13456 			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
13457 			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
13458 			type = BPF_WRITE;
13459 			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
13460 		} else {
13461 			continue;
13462 		}
13463 
13464 		if (type == BPF_WRITE &&
13465 		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
13466 			struct bpf_insn patch[] = {
13467 				*insn,
13468 				BPF_ST_NOSPEC(),
13469 			};
13470 
13471 			cnt = ARRAY_SIZE(patch);
13472 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
13473 			if (!new_prog)
13474 				return -ENOMEM;
13475 
13476 			delta    += cnt - 1;
13477 			env->prog = new_prog;
13478 			insn      = new_prog->insnsi + i + delta;
13479 			continue;
13480 		}
13481 
13482 		if (!ctx_access)
13483 			continue;
13484 
13485 		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
13486 		case PTR_TO_CTX:
13487 			if (!ops->convert_ctx_access)
13488 				continue;
13489 			convert_ctx_access = ops->convert_ctx_access;
13490 			break;
13491 		case PTR_TO_SOCKET:
13492 		case PTR_TO_SOCK_COMMON:
13493 			convert_ctx_access = bpf_sock_convert_ctx_access;
13494 			break;
13495 		case PTR_TO_TCP_SOCK:
13496 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
13497 			break;
13498 		case PTR_TO_XDP_SOCK:
13499 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
13500 			break;
13501 		case PTR_TO_BTF_ID:
13502 		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
13503 			if (type == BPF_READ) {
13504 				insn->code = BPF_LDX | BPF_PROBE_MEM |
13505 					BPF_SIZE((insn)->code);
13506 				env->prog->aux->num_exentries++;
13507 			}
13508 			continue;
13509 		default:
13510 			continue;
13511 		}
13512 
13513 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
13514 		size = BPF_LDST_BYTES(insn);
13515 
13516 		/* If the read access is a narrower load of the field,
13517 		 * convert it to a 4/8-byte load to minimize program type specific
13518 		 * convert_ctx_access changes. If the conversion is successful,
13519 		 * we will apply the proper mask to the result.
13520 		 */
13521 		is_narrower_load = size < ctx_field_size;
13522 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
13523 		off = insn->off;
13524 		if (is_narrower_load) {
13525 			u8 size_code;
13526 
13527 			if (type == BPF_WRITE) {
13528 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
13529 				return -EINVAL;
13530 			}
13531 
13532 			size_code = BPF_H;
13533 			if (ctx_field_size == 4)
13534 				size_code = BPF_W;
13535 			else if (ctx_field_size == 8)
13536 				size_code = BPF_DW;
13537 
13538 			insn->off = off & ~(size_default - 1);
13539 			insn->code = BPF_LDX | BPF_MEM | size_code;
13540 		}
13541 
13542 		target_size = 0;
13543 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
13544 					 &target_size);
13545 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
13546 		    (ctx_field_size && !target_size)) {
13547 			verbose(env, "bpf verifier is misconfigured\n");
13548 			return -EINVAL;
13549 		}
13550 
13551 		if (is_narrower_load && size < target_size) {
13552 			u8 shift = bpf_ctx_narrow_access_offset(
13553 				off, size, size_default) * 8;
13554 			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
13555 				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
13556 				return -EINVAL;
13557 			}
13558 			if (ctx_field_size <= 4) {
13559 				if (shift)
13560 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
13561 									insn->dst_reg,
13562 									shift);
13563 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
13564 								(1 << size * 8) - 1);
13565 			} else {
13566 				if (shift)
13567 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
13568 									insn->dst_reg,
13569 									shift);
13570 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
13571 								(1ULL << size * 8) - 1);
13572 			}
13573 		}
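		/* Editor's note (illustrative, little-endian case): a 1-byte
		 * read at offset 2 of a 4-byte ctx field is turned above into
		 * a 4-byte load of the field start followed by
		 *
		 *     w_dst >>= 16       // shift = 2 * 8
		 *     w_dst &= 0xff      // mask back to the requested byte
		 *
		 * so the program still observes only the byte it asked for.
		 */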
13574 
13575 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13576 		if (!new_prog)
13577 			return -ENOMEM;
13578 
13579 		delta += cnt - 1;
13580 
13581 		/* keep walking new program and skip insns we just inserted */
13582 		env->prog = new_prog;
13583 		insn      = new_prog->insnsi + i + delta;
13584 	}
13585 
13586 	return 0;
13587 }
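/* Editor's note (illustrative sketch): e.g. a program's 4-byte read of
 * __sk_buff->len is rewritten by the program type's convert_ctx_access
 * callback into a load from the corresponding field of the real
 * struct sk_buff, so no separate "mirror" context structure needs to
 * exist at run time.
 */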
13588 
13589 static int jit_subprogs(struct bpf_verifier_env *env)
13590 {
13591 	struct bpf_prog *prog = env->prog, **func, *tmp;
13592 	int i, j, subprog_start, subprog_end = 0, len, subprog;
13593 	struct bpf_map *map_ptr;
13594 	struct bpf_insn *insn;
13595 	void *old_bpf_func;
13596 	int err, num_exentries;
13597 
13598 	if (env->subprog_cnt <= 1)
13599 		return 0;
13600 
13601 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13602 		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
13603 			continue;
13604 
13605 		/* Upon error here we cannot fall back to interpreter but
13606 		 * need a hard reject of the program. Thus -EFAULT is
13607 		 * propagated in any case.
13608 		 */
13609 		subprog = find_subprog(env, i + insn->imm + 1);
13610 		if (subprog < 0) {
13611 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
13612 				  i + insn->imm + 1);
13613 			return -EFAULT;
13614 		}
13615 		/* temporarily remember subprog id inside insn instead of
13616 		 * aux_data, since next loop will split up all insns into funcs
13617 		 */
13618 		insn->off = subprog;
13619 		/* remember original imm in case JIT fails and fallback
13620 		 * to interpreter will be needed
13621 		 */
13622 		env->insn_aux_data[i].call_imm = insn->imm;
13623 		/* point imm to __bpf_call_base+1 from JITs point of view */
13624 		insn->imm = 1;
13625 		if (bpf_pseudo_func(insn))
13626 			/* jit (e.g. x86_64) may emit fewer instructions
13627 			 * if it learns a u32 imm is the same as a u64 imm.
13628 			 * Force a non-zero value here.
13629 			 */
13630 			insn[1].imm = 1;
13631 	}
13632 
13633 	err = bpf_prog_alloc_jited_linfo(prog);
13634 	if (err)
13635 		goto out_undo_insn;
13636 
13637 	err = -ENOMEM;
13638 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
13639 	if (!func)
13640 		goto out_undo_insn;
13641 
13642 	for (i = 0; i < env->subprog_cnt; i++) {
13643 		subprog_start = subprog_end;
13644 		subprog_end = env->subprog_info[i + 1].start;
13645 
13646 		len = subprog_end - subprog_start;
13647 		/* bpf_prog_run() doesn't call subprogs directly,
13648 		 * hence main prog stats include the runtime of subprogs.
13649 		 * subprogs don't have IDs and are not reachable via prog_get_next_id,
13650 		 * so func[i]->stats will never be accessed and stays NULL
13651 		 */
13652 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
13653 		if (!func[i])
13654 			goto out_free;
13655 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
13656 		       len * sizeof(struct bpf_insn));
13657 		func[i]->type = prog->type;
13658 		func[i]->len = len;
13659 		if (bpf_prog_calc_tag(func[i]))
13660 			goto out_free;
13661 		func[i]->is_func = 1;
13662 		func[i]->aux->func_idx = i;
13663 		/* Below members will be freed only at prog->aux */
13664 		func[i]->aux->btf = prog->aux->btf;
13665 		func[i]->aux->func_info = prog->aux->func_info;
13666 		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
13667 		func[i]->aux->poke_tab = prog->aux->poke_tab;
13668 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
13669 
13670 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
13671 			struct bpf_jit_poke_descriptor *poke;
13672 
13673 			poke = &prog->aux->poke_tab[j];
13674 			if (poke->insn_idx < subprog_end &&
13675 			    poke->insn_idx >= subprog_start)
13676 				poke->aux = func[i]->aux;
13677 		}
13678 
13679 		func[i]->aux->name[0] = 'F';
13680 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
13681 		func[i]->jit_requested = 1;
13682 		func[i]->blinding_requested = prog->blinding_requested;
13683 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
13684 		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
13685 		func[i]->aux->linfo = prog->aux->linfo;
13686 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
13687 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
13688 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
13689 		num_exentries = 0;
13690 		insn = func[i]->insnsi;
13691 		for (j = 0; j < func[i]->len; j++, insn++) {
13692 			if (BPF_CLASS(insn->code) == BPF_LDX &&
13693 			    BPF_MODE(insn->code) == BPF_PROBE_MEM)
13694 				num_exentries++;
13695 		}
13696 		func[i]->aux->num_exentries = num_exentries;
13697 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
13698 		func[i] = bpf_int_jit_compile(func[i]);
13699 		if (!func[i]->jited) {
13700 			err = -ENOTSUPP;
13701 			goto out_free;
13702 		}
13703 		cond_resched();
13704 	}
13705 
13706 	/* at this point all bpf functions were successfully JITed
13707 	 * now populate all bpf_calls with correct addresses and
13708 	 * run last pass of JIT
13709 	 */
13710 	for (i = 0; i < env->subprog_cnt; i++) {
13711 		insn = func[i]->insnsi;
13712 		for (j = 0; j < func[i]->len; j++, insn++) {
13713 			if (bpf_pseudo_func(insn)) {
13714 				subprog = insn->off;
13715 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
13716 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
13717 				continue;
13718 			}
13719 			if (!bpf_pseudo_call(insn))
13720 				continue;
13721 			subprog = insn->off;
13722 			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
13723 		}
13724 
13725 		/* we use the aux data to keep a list of the start addresses
13726 		 * of the JITed images for each function in the program
13727 		 *
13728 		 * for some architectures, such as powerpc64, the imm field
13729 		 * might not be large enough to hold the offset of the start
13730 		 * address of the callee's JITed image from __bpf_call_base
13731 		 *
13732 		 * in such cases, we can lookup the start address of a callee
13733 		 * by using its subprog id, available from the off field of
13734 		 * the call instruction, as an index for this list
13735 		 */
13736 		func[i]->aux->func = func;
13737 		func[i]->aux->func_cnt = env->subprog_cnt;
13738 	}
13739 	for (i = 0; i < env->subprog_cnt; i++) {
13740 		old_bpf_func = func[i]->bpf_func;
13741 		tmp = bpf_int_jit_compile(func[i]);
13742 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
13743 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
13744 			err = -ENOTSUPP;
13745 			goto out_free;
13746 		}
13747 		cond_resched();
13748 	}
13749 
13750 	/* finally lock prog and jit images for all functions and
13751 	 * populate kallsyms
13752 	 */
13753 	for (i = 0; i < env->subprog_cnt; i++) {
13754 		bpf_prog_lock_ro(func[i]);
13755 		bpf_prog_kallsyms_add(func[i]);
13756 	}
13757 
13758 	/* Last step: make the now unused interpreter insns from the main
13759 	 * prog consistent for later dump requests, so they look the same
13760 	 * as if they had only ever been interpreted.
13761 	 */
13762 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13763 		if (bpf_pseudo_func(insn)) {
13764 			insn[0].imm = env->insn_aux_data[i].call_imm;
13765 			insn[1].imm = insn->off;
13766 			insn->off = 0;
13767 			continue;
13768 		}
13769 		if (!bpf_pseudo_call(insn))
13770 			continue;
13771 		insn->off = env->insn_aux_data[i].call_imm;
13772 		subprog = find_subprog(env, i + insn->off + 1);
13773 		insn->imm = subprog;
13774 	}
13775 
13776 	prog->jited = 1;
13777 	prog->bpf_func = func[0]->bpf_func;
13778 	prog->jited_len = func[0]->jited_len;
13779 	prog->aux->func = func;
13780 	prog->aux->func_cnt = env->subprog_cnt;
13781 	bpf_prog_jit_attempt_done(prog);
13782 	return 0;
13783 out_free:
13784 	/* We failed JIT'ing, so at this point we need to unregister poke
13785 	 * descriptors from subprogs, so that the kernel does not attempt to
13786 	 * patch them anymore as we're freeing the subprog JIT memory.
13787 	 */
13788 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
13789 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
13790 		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
13791 	}
13792 	/* At this point we're guaranteed that poke descriptors are not
13793 	 * live anymore. We can just unlink the descriptor table, as it's
13794 	 * released with the main prog.
13795 	 */
13796 	for (i = 0; i < env->subprog_cnt; i++) {
13797 		if (!func[i])
13798 			continue;
13799 		func[i]->aux->poke_tab = NULL;
13800 		bpf_jit_free(func[i]);
13801 	}
13802 	kfree(func);
13803 out_undo_insn:
13804 	/* cleanup main prog to be interpreted */
13805 	prog->jit_requested = 0;
13806 	prog->blinding_requested = 0;
13807 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
13808 		if (!bpf_pseudo_call(insn))
13809 			continue;
13810 		insn->off = 0;
13811 		insn->imm = env->insn_aux_data[i].call_imm;
13812 	}
13813 	bpf_prog_jit_attempt_done(prog);
13814 	return err;
13815 }
13816 
13817 static int fixup_call_args(struct bpf_verifier_env *env)
13818 {
13819 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13820 	struct bpf_prog *prog = env->prog;
13821 	struct bpf_insn *insn = prog->insnsi;
13822 	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
13823 	int i, depth;
13824 #endif
13825 	int err = 0;
13826 
13827 	if (env->prog->jit_requested &&
13828 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
13829 		err = jit_subprogs(env);
13830 		if (err == 0)
13831 			return 0;
13832 		if (err == -EFAULT)
13833 			return err;
13834 	}
13835 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
13836 	if (has_kfunc_call) {
13837 		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
13838 		return -EINVAL;
13839 	}
13840 	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
13841 		/* When the JIT fails, progs with bpf2bpf calls and tail_calls
13842 		 * have to be rejected, since the interpreter doesn't support them yet.
13843 		 */
13844 		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
13845 		return -EINVAL;
13846 	}
13847 	for (i = 0; i < prog->len; i++, insn++) {
13848 		if (bpf_pseudo_func(insn)) {
13849 			/* When the JIT fails, progs with callback calls
13850 			 * have to be rejected, since the interpreter doesn't support them yet.
13851 			 */
13852 			verbose(env, "callbacks are not allowed in non-JITed programs\n");
13853 			return -EINVAL;
13854 		}
13855 
13856 		if (!bpf_pseudo_call(insn))
13857 			continue;
13858 		depth = get_callee_stack_depth(env, insn, i);
13859 		if (depth < 0)
13860 			return depth;
13861 		bpf_patch_call_args(insn, depth);
13862 	}
13863 	err = 0;
13864 #endif
13865 	return err;
13866 }
13867 
13868 static int fixup_kfunc_call(struct bpf_verifier_env *env,
13869 			    struct bpf_insn *insn)
13870 {
13871 	const struct bpf_kfunc_desc *desc;
13872 
13873 	if (!insn->imm) {
13874 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
13875 		return -EINVAL;
13876 	}
13877 
13878 	/* insn->imm has the btf func_id. Replace it with
13879 	 * an address (relative to __bpf_call_base).
13880 	 */
13881 	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
13882 	if (!desc) {
13883 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
13884 			insn->imm);
13885 		return -EFAULT;
13886 	}
13887 
13888 	insn->imm = desc->imm;
13889 
13890 	return 0;
13891 }
13892 
13893 /* Do various post-verification rewrites in a single program pass.
13894  * These rewrites simplify JIT and interpreter implementations.
13895  */
13896 static int do_misc_fixups(struct bpf_verifier_env *env)
13897 {
13898 	struct bpf_prog *prog = env->prog;
13899 	enum bpf_attach_type eatype = prog->expected_attach_type;
13900 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
13901 	struct bpf_insn *insn = prog->insnsi;
13902 	const struct bpf_func_proto *fn;
13903 	const int insn_cnt = prog->len;
13904 	const struct bpf_map_ops *ops;
13905 	struct bpf_insn_aux_data *aux;
13906 	struct bpf_insn insn_buf[16];
13907 	struct bpf_prog *new_prog;
13908 	struct bpf_map *map_ptr;
13909 	int i, ret, cnt, delta = 0;
13910 
13911 	for (i = 0; i < insn_cnt; i++, insn++) {
13912 		/* Make divide-by-zero exceptions impossible. */
13913 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
13914 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
13915 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
13916 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
13917 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
13918 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
13919 			struct bpf_insn *patchlet;
13920 			struct bpf_insn chk_and_div[] = {
13921 				/* [R,W]x div 0 -> 0 */
13922 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13923 					     BPF_JNE | BPF_K, insn->src_reg,
13924 					     0, 2, 0),
13925 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
13926 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13927 				*insn,
13928 			};
13929 			struct bpf_insn chk_and_mod[] = {
13930 				/* [R,W]x mod 0 -> [R,W]x */
13931 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13932 					     BPF_JEQ | BPF_K, insn->src_reg,
13933 					     0, 1 + (is64 ? 0 : 1), 0),
13934 				*insn,
13935 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13936 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
13937 			};
13938 
13939 			patchlet = isdiv ? chk_and_div : chk_and_mod;
13940 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
13941 				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
13942 
13943 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
13944 			if (!new_prog)
13945 				return -ENOMEM;
13946 
13947 			delta    += cnt - 1;
13948 			env->prog = prog = new_prog;
13949 			insn      = new_prog->insnsi + i + delta;
13950 			continue;
13951 		}
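		/* Editor's note (illustrative): e.g. 'r0 /= r1' becomes
		 *
		 *     if r1 != 0 goto +2
		 *     w0 ^= w0           // dst = 0: div by zero yields 0
		 *     goto +1
		 *     r0 /= r1
		 *
		 * and the mod variant leaves the dividend unchanged when the
		 * divisor is 0, matching BPF semantics without a CPU trap.
		 */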
13952 
13953 		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
13954 		if (BPF_CLASS(insn->code) == BPF_LD &&
13955 		    (BPF_MODE(insn->code) == BPF_ABS ||
13956 		     BPF_MODE(insn->code) == BPF_IND)) {
13957 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
13958 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
13959 				verbose(env, "bpf verifier is misconfigured\n");
13960 				return -EINVAL;
13961 			}
13962 
13963 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13964 			if (!new_prog)
13965 				return -ENOMEM;
13966 
13967 			delta    += cnt - 1;
13968 			env->prog = prog = new_prog;
13969 			insn      = new_prog->insnsi + i + delta;
13970 			continue;
13971 		}
13972 
13973 		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
13974 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
13975 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
13976 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
13977 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
13978 			struct bpf_insn *patch = &insn_buf[0];
13979 			bool issrc, isneg, isimm;
13980 			u32 off_reg;
13981 
13982 			aux = &env->insn_aux_data[i + delta];
13983 			if (!aux->alu_state ||
13984 			    aux->alu_state == BPF_ALU_NON_POINTER)
13985 				continue;
13986 
13987 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
13988 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
13989 				BPF_ALU_SANITIZE_SRC;
13990 			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
13991 
13992 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
13993 			if (isimm) {
13994 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13995 			} else {
13996 				if (isneg)
13997 					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13998 				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13999 				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
14000 				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
14001 				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
14002 				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
14003 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
14004 			}
14005 			if (!issrc)
14006 				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
14007 			insn->src_reg = BPF_REG_AX;
14008 			if (isneg)
14009 				insn->code = insn->code == code_add ?
14010 					     code_sub : code_add;
14011 			*patch++ = *insn;
14012 			if (issrc && isneg && !isimm)
14013 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
14014 			cnt = patch - insn_buf;
14015 
14016 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14017 			if (!new_prog)
14018 				return -ENOMEM;
14019 
14020 			delta    += cnt - 1;
14021 			env->prog = prog = new_prog;
14022 			insn      = new_prog->insnsi + i + delta;
14023 			continue;
14024 		}
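		/* Editor's note (illustrative): when the offset is not a known
		 * constant, the branch-free masking sequence above effectively
		 * computes
		 *
		 *     ax = (off_reg < 0 || off_reg > alu_limit) ? 0 : off_reg;
		 *
		 * so even under mis-speculation the pointer ALU cannot step
		 * outside the bounds the verifier has proven.
		 */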
14025 
14026 		if (insn->code != (BPF_JMP | BPF_CALL))
14027 			continue;
14028 		if (insn->src_reg == BPF_PSEUDO_CALL)
14029 			continue;
14030 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
14031 			ret = fixup_kfunc_call(env, insn);
14032 			if (ret)
14033 				return ret;
14034 			continue;
14035 		}
14036 
14037 		if (insn->imm == BPF_FUNC_get_route_realm)
14038 			prog->dst_needed = 1;
14039 		if (insn->imm == BPF_FUNC_get_prandom_u32)
14040 			bpf_user_rnd_init_once();
14041 		if (insn->imm == BPF_FUNC_override_return)
14042 			prog->kprobe_override = 1;
14043 		if (insn->imm == BPF_FUNC_tail_call) {
14044 			/* If we tail call into other programs, we
14045 			 * cannot make any assumptions since they can
14046 			 * be replaced dynamically during runtime in
14047 			 * the program array.
14048 			 */
14049 			prog->cb_access = 1;
14050 			if (!allow_tail_call_in_subprogs(env))
14051 				prog->aux->stack_depth = MAX_BPF_STACK;
14052 			prog->aux->max_pkt_offset = MAX_PACKET_OFF;
14053 
14054 			/* mark bpf_tail_call as different opcode to avoid
14055 			 * conditional branch in the interpreter for every normal
14056 			 * call and to prevent accidental JITing by JIT compiler
14057 			 * that doesn't support bpf_tail_call yet
14058 			 */
14059 			insn->imm = 0;
14060 			insn->code = BPF_JMP | BPF_TAIL_CALL;
14061 
14062 			aux = &env->insn_aux_data[i + delta];
14063 			if (env->bpf_capable && !prog->blinding_requested &&
14064 			    prog->jit_requested &&
14065 			    !bpf_map_key_poisoned(aux) &&
14066 			    !bpf_map_ptr_poisoned(aux) &&
14067 			    !bpf_map_ptr_unpriv(aux)) {
14068 				struct bpf_jit_poke_descriptor desc = {
14069 					.reason = BPF_POKE_REASON_TAIL_CALL,
14070 					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
14071 					.tail_call.key = bpf_map_key_immediate(aux),
14072 					.insn_idx = i + delta,
14073 				};
14074 
14075 				ret = bpf_jit_add_poke_descriptor(prog, &desc);
14076 				if (ret < 0) {
14077 					verbose(env, "adding tail call poke descriptor failed\n");
14078 					return ret;
14079 				}
14080 
14081 				insn->imm = ret + 1;
14082 				continue;
14083 			}
14084 
14085 			if (!bpf_map_ptr_unpriv(aux))
14086 				continue;
14087 
14088 			/* instead of changing every JIT dealing with tail_call
14089 			 * emit two extra insns:
14090 			 * if (index >= max_entries) goto out;
14091 			 * index &= array->index_mask;
14092 			 * to avoid out-of-bounds cpu speculation
14093 			 */
14094 			if (bpf_map_ptr_poisoned(aux)) {
14095 				verbose(env, "tail_call abusing map_ptr\n");
14096 				return -EINVAL;
14097 			}
14098 
14099 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
14100 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
14101 						  map_ptr->max_entries, 2);
14102 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
14103 						    container_of(map_ptr,
14104 								 struct bpf_array,
14105 								 map)->index_mask);
14106 			insn_buf[2] = *insn;
14107 			cnt = 3;
14108 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14109 			if (!new_prog)
14110 				return -ENOMEM;
14111 
14112 			delta    += cnt - 1;
14113 			env->prog = prog = new_prog;
14114 			insn      = new_prog->insnsi + i + delta;
14115 			continue;
14116 		}
14117 
14118 		if (insn->imm == BPF_FUNC_timer_set_callback) {
14119 			/* The verifier will process callback_fn as many times as necessary
14120 			 * with different maps and the register states prepared by
14121 			 * set_timer_callback_state will be accurate.
14122 			 *
14123 			 * The following use case is valid:
14124 			 *   map1 is shared by prog1, prog2, prog3.
14125 			 *   prog1 calls bpf_timer_init for some map1 elements
14126 			 *   prog2 calls bpf_timer_set_callback for some map1 elements.
14127 			 *     Those that were not bpf_timer_init-ed will return -EINVAL.
14128 			 *   prog3 calls bpf_timer_start for some map1 elements.
14129 			 *     Those that were not both bpf_timer_init-ed and
14130 			 *     bpf_timer_set_callback-ed will return -EINVAL.
14131 			 */
14132 			struct bpf_insn ld_addrs[2] = {
14133 				BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
14134 			};
14135 
14136 			insn_buf[0] = ld_addrs[0];
14137 			insn_buf[1] = ld_addrs[1];
14138 			insn_buf[2] = *insn;
14139 			cnt = 3;
14140 
14141 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14142 			if (!new_prog)
14143 				return -ENOMEM;
14144 
14145 			delta    += cnt - 1;
14146 			env->prog = prog = new_prog;
14147 			insn      = new_prog->insnsi + i + delta;
14148 			goto patch_call_imm;
14149 		}
14150 
14151 		if (insn->imm == BPF_FUNC_task_storage_get ||
14152 		    insn->imm == BPF_FUNC_sk_storage_get ||
14153 		    insn->imm == BPF_FUNC_inode_storage_get) {
14154 			if (env->prog->aux->sleepable)
14155 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
14156 			else
14157 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
14158 			insn_buf[1] = *insn;
14159 			cnt = 2;
14160 
14161 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14162 			if (!new_prog)
14163 				return -ENOMEM;
14164 
14165 			delta += cnt - 1;
14166 			env->prog = prog = new_prog;
14167 			insn = new_prog->insnsi + i + delta;
14168 			goto patch_call_imm;
14169 		}
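		/* Editor's note: the patch above materializes the storage
		 * helpers' hidden gfp_t argument in R5, so sleepable programs
		 * may use GFP_KERNEL allocations while non-sleepable ones stay
		 * on GFP_ATOMIC.
		 */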
14170 
14171 		/* The BPF_EMIT_CALL() assumptions in map_gen_lookup and the
14172 		 * other inlining handlers below currently hold on 64-bit
14173 		 * hosts only.
14174 		 */
14175 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
14176 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
14177 		     insn->imm == BPF_FUNC_map_update_elem ||
14178 		     insn->imm == BPF_FUNC_map_delete_elem ||
14179 		     insn->imm == BPF_FUNC_map_push_elem   ||
14180 		     insn->imm == BPF_FUNC_map_pop_elem    ||
14181 		     insn->imm == BPF_FUNC_map_peek_elem   ||
14182 		     insn->imm == BPF_FUNC_redirect_map    ||
14183 		     insn->imm == BPF_FUNC_for_each_map_elem ||
14184 		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
14185 			aux = &env->insn_aux_data[i + delta];
14186 			if (bpf_map_ptr_poisoned(aux))
14187 				goto patch_call_imm;
14188 
14189 			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
14190 			ops = map_ptr->ops;
14191 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
14192 			    ops->map_gen_lookup) {
14193 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
14194 				if (cnt == -EOPNOTSUPP)
14195 					goto patch_map_ops_generic;
14196 				if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
14197 					verbose(env, "bpf verifier is misconfigured\n");
14198 					return -EINVAL;
14199 				}
14200 
14201 				new_prog = bpf_patch_insn_data(env, i + delta,
14202 							       insn_buf, cnt);
14203 				if (!new_prog)
14204 					return -ENOMEM;
14205 
14206 				delta    += cnt - 1;
14207 				env->prog = prog = new_prog;
14208 				insn      = new_prog->insnsi + i + delta;
14209 				continue;
14210 			}
14211 
14212 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
14213 				     (void *(*)(struct bpf_map *map, void *key))NULL));
14214 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
14215 				     (int (*)(struct bpf_map *map, void *key))NULL));
14216 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
14217 				     (int (*)(struct bpf_map *map, void *key, void *value,
14218 					      u64 flags))NULL));
14219 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
14220 				     (int (*)(struct bpf_map *map, void *value,
14221 					      u64 flags))NULL));
14222 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
14223 				     (int (*)(struct bpf_map *map, void *value))NULL));
14224 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
14225 				     (int (*)(struct bpf_map *map, void *value))NULL));
14226 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
14227 				     (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
14228 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
14229 				     (int (*)(struct bpf_map *map,
14230 					      bpf_callback_t callback_fn,
14231 					      void *callback_ctx,
14232 					      u64 flags))NULL));
14233 			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
14234 				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
14235 
14236 patch_map_ops_generic:
14237 			switch (insn->imm) {
14238 			case BPF_FUNC_map_lookup_elem:
14239 				insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
14240 				continue;
14241 			case BPF_FUNC_map_update_elem:
14242 				insn->imm = BPF_CALL_IMM(ops->map_update_elem);
14243 				continue;
14244 			case BPF_FUNC_map_delete_elem:
14245 				insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
14246 				continue;
14247 			case BPF_FUNC_map_push_elem:
14248 				insn->imm = BPF_CALL_IMM(ops->map_push_elem);
14249 				continue;
14250 			case BPF_FUNC_map_pop_elem:
14251 				insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
14252 				continue;
14253 			case BPF_FUNC_map_peek_elem:
14254 				insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
14255 				continue;
14256 			case BPF_FUNC_redirect_map:
14257 				insn->imm = BPF_CALL_IMM(ops->map_redirect);
14258 				continue;
14259 			case BPF_FUNC_for_each_map_elem:
14260 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
14261 				continue;
14262 			case BPF_FUNC_map_lookup_percpu_elem:
14263 				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
14264 				continue;
14265 			}
14266 
14267 			goto patch_call_imm;
14268 		}
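		/* Editor's note (illustrative): for jitted 64-bit programs the
		 * generic helper call is thus either expanded in place via
		 * ops->map_gen_lookup() or rewritten into a direct call to the
		 * map's own implementation (e.g. something like
		 * htab_map_lookup_elem for a hash map), avoiding the generic
		 * helper dispatch at run time.
		 */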
14269 
14270 		/* Implement bpf_jiffies64 inline. */
14271 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
14272 		    insn->imm == BPF_FUNC_jiffies64) {
14273 			struct bpf_insn ld_jiffies_addr[2] = {
14274 				BPF_LD_IMM64(BPF_REG_0,
14275 					     (unsigned long)&jiffies),
14276 			};
14277 
14278 			insn_buf[0] = ld_jiffies_addr[0];
14279 			insn_buf[1] = ld_jiffies_addr[1];
14280 			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
14281 						  BPF_REG_0, 0);
14282 			cnt = 3;
14283 
14284 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
14285 						       cnt);
14286 			if (!new_prog)
14287 				return -ENOMEM;
14288 
14289 			delta    += cnt - 1;
14290 			env->prog = prog = new_prog;
14291 			insn      = new_prog->insnsi + i + delta;
14292 			continue;
14293 		}
14294 
14295 		/* Implement bpf_get_func_arg inline. */
14296 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14297 		    insn->imm == BPF_FUNC_get_func_arg) {
14298 			/* Load nr_args from ctx - 8 */
14299 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14300 			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
14301 			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
14302 			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
14303 			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
14304 			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
14305 			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
14306 			insn_buf[7] = BPF_JMP_A(1);
14307 			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
14308 			cnt = 9;
14309 
14310 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14311 			if (!new_prog)
14312 				return -ENOMEM;
14313 
14314 			delta    += cnt - 1;
14315 			env->prog = prog = new_prog;
14316 			insn      = new_prog->insnsi + i + delta;
14317 			continue;
14318 		}
14319 
14320 		/* Implement bpf_get_func_ret inline. */
14321 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14322 		    insn->imm == BPF_FUNC_get_func_ret) {
14323 			if (eatype == BPF_TRACE_FEXIT ||
14324 			    eatype == BPF_MODIFY_RETURN) {
14325 				/* Load nr_args from ctx - 8 */
14326 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14327 				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
14328 				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
14329 				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
14330 				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
14331 				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
14332 				cnt = 6;
14333 			} else {
14334 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
14335 				cnt = 1;
14336 			}
14337 
14338 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14339 			if (!new_prog)
14340 				return -ENOMEM;
14341 
14342 			delta    += cnt - 1;
14343 			env->prog = prog = new_prog;
14344 			insn      = new_prog->insnsi + i + delta;
14345 			continue;
14346 		}
14347 
14348 		/* Implement get_func_arg_cnt inline. */
14349 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14350 		    insn->imm == BPF_FUNC_get_func_arg_cnt) {
14351 			/* Load nr_args from ctx - 8 */
14352 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14353 
14354 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
14355 			if (!new_prog)
14356 				return -ENOMEM;
14357 
14358 			env->prog = prog = new_prog;
14359 			insn      = new_prog->insnsi + i + delta;
14360 			continue;
14361 		}
14362 
14363 		/* Implement bpf_get_func_ip inline. */
14364 		if (prog_type == BPF_PROG_TYPE_TRACING &&
14365 		    insn->imm == BPF_FUNC_get_func_ip) {
14366 			/* Load IP address from ctx - 16 */
14367 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
14368 
14369 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
14370 			if (!new_prog)
14371 				return -ENOMEM;
14372 
14373 			env->prog = prog = new_prog;
14374 			insn      = new_prog->insnsi + i + delta;
14375 			continue;
14376 		}
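		/* Editor's note: the inlinings above rely on the BPF
		 * trampoline ctx layout, where the argument count is stored at
		 * ctx - 8, the traced function's IP (when requested) at
		 * ctx - 16, and argument n at ctx + 8 * n.
		 */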
14377 
14378 patch_call_imm:
14379 		fn = env->ops->get_func_proto(insn->imm, env->prog);
14380 		/* all functions that have a prototype and that the verifier
14381 		 * allowed programs to call must be real in-kernel functions
14382 		 */
14383 		if (!fn->func) {
14384 			verbose(env,
14385 				"kernel subsystem misconfigured func %s#%d\n",
14386 				func_id_name(insn->imm), insn->imm);
14387 			return -EFAULT;
14388 		}
14389 		insn->imm = fn->func - __bpf_call_base;
14390 	}
14391 
14392 	/* Since poke tab is now finalized, publish aux to tracker. */
14393 	for (i = 0; i < prog->aux->size_poke_tab; i++) {
14394 		map_ptr = prog->aux->poke_tab[i].tail_call.map;
14395 		if (!map_ptr->ops->map_poke_track ||
14396 		    !map_ptr->ops->map_poke_untrack ||
14397 		    !map_ptr->ops->map_poke_run) {
14398 			verbose(env, "bpf verifier is misconfigured\n");
14399 			return -EINVAL;
14400 		}
14401 
14402 		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
14403 		if (ret < 0) {
14404 			verbose(env, "tracking tail call prog failed\n");
14405 			return ret;
14406 		}
14407 	}
14408 
14409 	sort_kfunc_descs_by_imm(env->prog);
14410 
14411 	return 0;
14412 }
14413 
14414 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
14415 					int position,
14416 					s32 stack_base,
14417 					u32 callback_subprogno,
14418 					u32 *cnt)
14419 {
14420 	s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
14421 	s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
14422 	s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
14423 	int reg_loop_max = BPF_REG_6;
14424 	int reg_loop_cnt = BPF_REG_7;
14425 	int reg_loop_ctx = BPF_REG_8;
14426 
14427 	struct bpf_prog *new_prog;
14428 	u32 callback_start;
14429 	u32 call_insn_offset;
14430 	s32 callback_offset;
14431 
14432 	/* This represents an inlined version of bpf_iter.c:bpf_loop;
14433 	 * be careful to keep the two in sync when modifying either one.
14434 	 */
14435 	struct bpf_insn insn_buf[] = {
14436 		/* Return error and jump to the end of the patch if
14437 		 * expected number of iterations is too big.
14438 		 */
14439 		BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
14440 		BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
14441 		BPF_JMP_IMM(BPF_JA, 0, 0, 16),
14442 		/* spill R6, R7, R8 to use these as loop vars */
14443 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
14444 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
14445 		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
14446 		/* initialize loop vars */
14447 		BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
14448 		BPF_MOV32_IMM(reg_loop_cnt, 0),
14449 		BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
14450 		/* loop header,
14451 		 * if reg_loop_cnt >= reg_loop_max skip the loop body
14452 		 */
14453 		BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
14454 		/* callback call,
14455 		 * correct callback offset would be set after patching
14456 		 */
14457 		BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
14458 		BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
14459 		BPF_CALL_REL(0),
14460 		/* increment loop counter */
14461 		BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
14462 		/* jump to loop header if callback returned 0 */
14463 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
14464 		/* return value of bpf_loop,
14465 		 * set R0 to the number of iterations
14466 		 */
14467 		BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
14468 		/* restore original values of R6, R7, R8 */
14469 		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
14470 		BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
14471 		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
14472 	};
14473 
14474 	*cnt = ARRAY_SIZE(insn_buf);
14475 	new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
14476 	if (!new_prog)
14477 		return new_prog;
14478 
14479 	/* callback start is known only after patching */
14480 	callback_start = env->subprog_info[callback_subprogno].start;
14481 	/* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
14482 	call_insn_offset = position + 12;
14483 	callback_offset = callback_start - call_insn_offset - 1;
14484 	new_prog->insnsi[call_insn_offset].imm = callback_offset;
14485 
14486 	return new_prog;
14487 }
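/* Editor's note (illustrative): BPF_CALL_REL() offsets are relative to the
 * instruction following the call, hence the '- 1' above. E.g. with the
 * patched BPF_CALL_REL at insn 100 and the callback subprog starting at
 * insn 140, the stored imm is 140 - 100 - 1 = 39.
 */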
14488 
14489 static bool is_bpf_loop_call(struct bpf_insn *insn)
14490 {
14491 	return insn->code == (BPF_JMP | BPF_CALL) &&
14492 		insn->src_reg == 0 &&
14493 		insn->imm == BPF_FUNC_loop;
14494 }
14495 
14496 /* For all sub-programs in the program (including main) check
14497  * insn_aux_data to see if there are bpf_loop calls that require
14498  * inlining. If such calls are found, they are replaced with a
14499  * sequence of instructions produced by the `inline_bpf_loop` function,
14500  * and the subprog's stack_depth is increased by the size of 3 registers.
14501  * This stack space is used to spill the values of R6, R7 and R8.  These
14502  * registers are used to store the loop bound, counter and context
14503  * variables.
14504  */
14505 static int optimize_bpf_loop(struct bpf_verifier_env *env)
14506 {
14507 	struct bpf_subprog_info *subprogs = env->subprog_info;
14508 	int i, cur_subprog = 0, cnt, delta = 0;
14509 	struct bpf_insn *insn = env->prog->insnsi;
14510 	int insn_cnt = env->prog->len;
14511 	u16 stack_depth = subprogs[cur_subprog].stack_depth;
14512 	u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
14513 	u16 stack_depth_extra = 0;
14514 
14515 	for (i = 0; i < insn_cnt; i++, insn++) {
14516 		struct bpf_loop_inline_state *inline_state =
14517 			&env->insn_aux_data[i + delta].loop_inline_state;
14518 
14519 		if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
14520 			struct bpf_prog *new_prog;
14521 
14522 			stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
14523 			new_prog = inline_bpf_loop(env,
14524 						   i + delta,
14525 						   -(stack_depth + stack_depth_extra),
14526 						   inline_state->callback_subprogno,
14527 						   &cnt);
14528 			if (!new_prog)
14529 				return -ENOMEM;
14530 
14531 			delta     += cnt - 1;
14532 			env->prog  = new_prog;
14533 			insn       = new_prog->insnsi + i + delta;
14534 		}
14535 
14536 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
14537 			subprogs[cur_subprog].stack_depth += stack_depth_extra;
14538 			cur_subprog++;
14539 			stack_depth = subprogs[cur_subprog].stack_depth;
14540 			stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
14541 			stack_depth_extra = 0;
14542 		}
14543 	}
14544 
14545 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
14546 
14547 	return 0;
14548 }
14549 
14550 static void free_states(struct bpf_verifier_env *env)
14551 {
14552 	struct bpf_verifier_state_list *sl, *sln;
14553 	int i;
14554 
14555 	sl = env->free_list;
14556 	while (sl) {
14557 		sln = sl->next;
14558 		free_verifier_state(&sl->state, false);
14559 		kfree(sl);
14560 		sl = sln;
14561 	}
14562 	env->free_list = NULL;
14563 
14564 	if (!env->explored_states)
14565 		return;
14566 
14567 	for (i = 0; i < state_htab_size(env); i++) {
14568 		sl = env->explored_states[i];
14569 
14570 		while (sl) {
14571 			sln = sl->next;
14572 			free_verifier_state(&sl->state, false);
14573 			kfree(sl);
14574 			sl = sln;
14575 		}
14576 		env->explored_states[i] = NULL;
14577 	}
14578 }
14579 
14580 static int do_check_common(struct bpf_verifier_env *env, int subprog)
14581 {
14582 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
14583 	struct bpf_verifier_state *state;
14584 	struct bpf_reg_state *regs;
14585 	int ret, i;
14586 
14587 	env->prev_linfo = NULL;
14588 	env->pass_cnt++;
14589 
14590 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
14591 	if (!state)
14592 		return -ENOMEM;
14593 	state->curframe = 0;
14594 	state->speculative = false;
14595 	state->branches = 1;
14596 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
14597 	if (!state->frame[0]) {
14598 		kfree(state);
14599 		return -ENOMEM;
14600 	}
14601 	env->cur_state = state;
14602 	init_func_state(env, state->frame[0],
14603 			BPF_MAIN_FUNC /* callsite */,
14604 			0 /* frameno */,
14605 			subprog);
14606 
14607 	regs = state->frame[state->curframe]->regs;
14608 	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
14609 		ret = btf_prepare_func_args(env, subprog, regs);
14610 		if (ret)
14611 			goto out;
14612 		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
14613 			if (regs[i].type == PTR_TO_CTX)
14614 				mark_reg_known_zero(env, regs, i);
14615 			else if (regs[i].type == SCALAR_VALUE)
14616 				mark_reg_unknown(env, regs, i);
14617 			else if (base_type(regs[i].type) == PTR_TO_MEM) {
14618 				const u32 mem_size = regs[i].mem_size;
14619 
14620 				mark_reg_known_zero(env, regs, i);
14621 				regs[i].mem_size = mem_size;
14622 				regs[i].id = ++env->id_gen;
14623 			}
14624 		}
14625 	} else {
14626 		/* 1st arg to a function */
14627 		regs[BPF_REG_1].type = PTR_TO_CTX;
14628 		mark_reg_known_zero(env, regs, BPF_REG_1);
14629 		ret = btf_check_subprog_arg_match(env, subprog, regs);
14630 		if (ret == -EFAULT)
14631 			/* unlikely verifier bug. abort.
14632 			 * ret == 0 and ret < 0 are sadly acceptable for
14633 			 * main() function due to backward compatibility.
14634 			 * E.g. a socket filter program may be written as:
14635 			 * int bpf_prog(struct pt_regs *ctx)
14636 			 * and never dereference that ctx in the program.
14637 			 * 'struct pt_regs' is a type mismatch for socket
14638 			 * filter that should be using 'struct __sk_buff'.
14639 			 */
14640 			goto out;
14641 	}
14642 
14643 	ret = do_check(env);
14644 out:
14645 	/* check for NULL is necessary, since cur_state can be freed inside
14646 	 * do_check() under memory pressure.
14647 	 */
14648 	if (env->cur_state) {
14649 		free_verifier_state(env->cur_state, true);
14650 		env->cur_state = NULL;
14651 	}
14652 	while (!pop_stack(env, NULL, NULL, false));
14653 	if (!ret && pop_log)
14654 		bpf_vlog_reset(&env->log, 0);
14655 	free_states(env);
14656 	return ret;
14657 }
14658 
14659 /* Verify all global functions in a BPF program one by one based on their BTF.
14660  * All global functions must pass verification. Otherwise the whole program is rejected.
14661  * Consider:
14662  * int bar(int);
14663  * int foo(int f)
14664  * {
14665  *    return bar(f);
14666  * }
14667  * int bar(int b)
14668  * {
14669  *    ...
14670  * }
14671  * foo() will be verified first for R1=any_scalar_value. During verification it
14672  * will be assumed that bar() already verified successfully and call to bar()
14673  * from foo() will be checked for type match only. Later bar() will be verified
14674  * independently to check that it's safe for R1=any_scalar_value.
14675  */
14676 static int do_check_subprogs(struct bpf_verifier_env *env)
14677 {
14678 	struct bpf_prog_aux *aux = env->prog->aux;
14679 	int i, ret;
14680 
14681 	if (!aux->func_info)
14682 		return 0;
14683 
14684 	for (i = 1; i < env->subprog_cnt; i++) {
14685 		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
14686 			continue;
14687 		env->insn_idx = env->subprog_info[i].start;
14688 		WARN_ON_ONCE(env->insn_idx == 0);
14689 		ret = do_check_common(env, i);
14690 		if (ret) {
14691 			return ret;
14692 		} else if (env->log.level & BPF_LOG_LEVEL) {
14693 			verbose(env,
14694 				"Func#%d is safe for any args that match its prototype\n",
14695 				i);
14696 		}
14697 	}
14698 	return 0;
14699 }
14700 
14701 static int do_check_main(struct bpf_verifier_env *env)
14702 {
14703 	int ret;
14704 
14705 	env->insn_idx = 0;
14706 	ret = do_check_common(env, 0);
14707 	if (!ret)
14708 		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
14709 	return ret;
14710 }
14711 
14712 
14713 static void print_verification_stats(struct bpf_verifier_env *env)
14714 {
14715 	int i;
14716 
14717 	if (env->log.level & BPF_LOG_STATS) {
14718 		verbose(env, "verification time %lld usec\n",
14719 			div_u64(env->verification_time, 1000));
14720 		verbose(env, "stack depth ");
14721 		for (i = 0; i < env->subprog_cnt; i++) {
14722 			u32 depth = env->subprog_info[i].stack_depth;
14723 
14724 			verbose(env, "%d", depth);
14725 			if (i + 1 < env->subprog_cnt)
14726 				verbose(env, "+");
14727 		}
14728 		verbose(env, "\n");
14729 	}
14730 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
14731 		"total_states %d peak_states %d mark_read %d\n",
14732 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
14733 		env->max_states_per_insn, env->total_states,
14734 		env->peak_states, env->longest_mark_read_walk);
14735 }
14736 
14737 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
14738 {
14739 	const struct btf_type *t, *func_proto;
14740 	const struct bpf_struct_ops *st_ops;
14741 	const struct btf_member *member;
14742 	struct bpf_prog *prog = env->prog;
14743 	u32 btf_id, member_idx;
14744 	const char *mname;
14745 
14746 	if (!prog->gpl_compatible) {
14747 		verbose(env, "struct ops programs must have a GPL compatible license\n");
14748 		return -EINVAL;
14749 	}
14750 
14751 	btf_id = prog->aux->attach_btf_id;
14752 	st_ops = bpf_struct_ops_find(btf_id);
14753 	if (!st_ops) {
14754 		verbose(env, "attach_btf_id %u is not a supported struct\n",
14755 			btf_id);
14756 		return -ENOTSUPP;
14757 	}
14758 
14759 	t = st_ops->type;
14760 	member_idx = prog->expected_attach_type;
14761 	if (member_idx >= btf_type_vlen(t)) {
14762 		verbose(env, "attach to invalid member idx %u of struct %s\n",
14763 			member_idx, st_ops->name);
14764 		return -EINVAL;
14765 	}
14766 
14767 	member = &btf_type_member(t)[member_idx];
14768 	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
14769 	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
14770 					       NULL);
14771 	if (!func_proto) {
14772 		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
14773 			mname, member_idx, st_ops->name);
14774 		return -EINVAL;
14775 	}
14776 
14777 	if (st_ops->check_member) {
14778 		int err = st_ops->check_member(t, member);
14779 
14780 		if (err) {
14781 			verbose(env, "attach to unsupported member %s of struct %s\n",
14782 				mname, st_ops->name);
14783 			return err;
14784 		}
14785 	}
14786 
14787 	prog->aux->attach_func_proto = func_proto;
14788 	prog->aux->attach_func_name = mname;
14789 	env->ops = st_ops->verifier_ops;
14790 
14791 	return 0;
14792 }
14793 #define SECURITY_PREFIX "security_"
14794 
14795 static int check_attach_modify_return(unsigned long addr, const char *func_name)
14796 {
14797 	if (within_error_injection_list(addr) ||
14798 	    !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
14799 		return 0;
14800 
14801 	return -EINVAL;
14802 }
14803 
14804 /* list of non-sleepable functions that are otherwise on
14805  * ALLOW_ERROR_INJECTION list
14806  */
14807 BTF_SET_START(btf_non_sleepable_error_inject)
14808 /* Three functions below can be called from sleepable and non-sleepable context.
14809  * Assume non-sleepable from the bpf safety point of view.
14810  */
14811 BTF_ID(func, __filemap_add_folio)
14812 BTF_ID(func, should_fail_alloc_page)
14813 BTF_ID(func, should_failslab)
14814 BTF_SET_END(btf_non_sleepable_error_inject)
14815 
14816 static int check_non_sleepable_error_inject(u32 btf_id)
14817 {
14818 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
14819 }
14820 
14821 int bpf_check_attach_target(struct bpf_verifier_log *log,
14822 			    const struct bpf_prog *prog,
14823 			    const struct bpf_prog *tgt_prog,
14824 			    u32 btf_id,
14825 			    struct bpf_attach_target_info *tgt_info)
14826 {
14827 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
14828 	const char prefix[] = "btf_trace_";
14829 	int ret = 0, subprog = -1, i;
14830 	const struct btf_type *t;
14831 	bool conservative = true;
14832 	const char *tname;
14833 	struct btf *btf;
14834 	long addr = 0;
14835 
14836 	if (!btf_id) {
14837 		bpf_log(log, "Tracing programs must provide btf_id\n");
14838 		return -EINVAL;
14839 	}
14840 	btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
14841 	if (!btf) {
14842 		bpf_log(log,
14843 			"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
14844 		return -EINVAL;
14845 	}
14846 	t = btf_type_by_id(btf, btf_id);
14847 	if (!t) {
14848 		bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
14849 		return -EINVAL;
14850 	}
14851 	tname = btf_name_by_offset(btf, t->name_off);
14852 	if (!tname) {
14853 		bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
14854 		return -EINVAL;
14855 	}
14856 	if (tgt_prog) {
14857 		struct bpf_prog_aux *aux = tgt_prog->aux;
14858 
14859 		for (i = 0; i < aux->func_info_cnt; i++)
14860 			if (aux->func_info[i].type_id == btf_id) {
14861 				subprog = i;
14862 				break;
14863 			}
14864 		if (subprog == -1) {
14865 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
14866 			return -EINVAL;
14867 		}
14868 		conservative = aux->func_info_aux[subprog].unreliable;
14869 		if (prog_extension) {
14870 			if (conservative) {
14871 				bpf_log(log,
14872 					"Cannot replace static functions\n");
14873 				return -EINVAL;
14874 			}
14875 			if (!prog->jit_requested) {
14876 				bpf_log(log,
14877 					"Extension programs should be JITed\n");
14878 				return -EINVAL;
14879 			}
14880 		}
14881 		if (!tgt_prog->jited) {
14882 			bpf_log(log, "Can attach to only JITed progs\n");
14883 			return -EINVAL;
14884 		}
14885 		if (tgt_prog->type == prog->type) {
14886 			/* Cannot attach fentry/fexit to another fentry/fexit program.
14887 			 * Cannot attach a program extension to another extension.
14888 			 * It's ok to attach fentry/fexit to an extension program.
14889 			 */
14890 			bpf_log(log, "Cannot recursively attach\n");
14891 			return -EINVAL;
14892 		}
14893 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
14894 		    prog_extension &&
14895 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
14896 		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
14897 			/* Program extensions can extend all program types
14898 			 * except fentry/fexit, for the following reason.
14899 			 * Fentry/fexit programs are used for performance
14900 			 * analysis and statistics and can be attached to any
14901 			 * program type except themselves. When an extension
14902 			 * program replaces an XDP function, performance
14903 			 * analysis of all functions must remain possible:
14904 			 * both the original XDP program and its extension.
14905 			 * Hence attaching fentry/fexit to BPF_PROG_TYPE_EXT
14906 			 * is allowed. If extending fentry/fexit were allowed,
14907 			 * it would be possible to create a long call chain
14908 			 * fentry->extension->fentry->extension beyond a
14909 			 * reasonable stack size. Hence extending fentry is
14910 			 * not allowed.
14911 			 */
14912 			bpf_log(log, "Cannot extend fentry/fexit\n");
14913 			return -EINVAL;
14914 		}
14915 	} else {
14916 		if (prog_extension) {
14917 			bpf_log(log, "Cannot replace kernel functions\n");
14918 			return -EINVAL;
14919 		}
14920 	}
14921 
14922 	switch (prog->expected_attach_type) {
14923 	case BPF_TRACE_RAW_TP:
14924 		if (tgt_prog) {
14925 			bpf_log(log,
14926 				"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
14927 			return -EINVAL;
14928 		}
14929 		if (!btf_type_is_typedef(t)) {
14930 			bpf_log(log, "attach_btf_id %u is not a typedef\n",
14931 				btf_id);
14932 			return -EINVAL;
14933 		}
14934 		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
14935 			bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
14936 				btf_id, tname);
14937 			return -EINVAL;
14938 		}
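		/* skip the "btf_trace_" prefix to get the raw tracepoint name */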
14939 		tname += sizeof(prefix) - 1;
14940 		t = btf_type_by_id(btf, t->type);
14941 		if (!btf_type_is_ptr(t))
14942 			/* should never happen in valid vmlinux build */
14943 			return -EINVAL;
14944 		t = btf_type_by_id(btf, t->type);
14945 		if (!btf_type_is_func_proto(t))
14946 			/* should never happen in valid vmlinux build */
14947 			return -EINVAL;
14948 
14949 		break;
14950 	case BPF_TRACE_ITER:
14951 		if (!btf_type_is_func(t)) {
14952 			bpf_log(log, "attach_btf_id %u is not a function\n",
14953 				btf_id);
14954 			return -EINVAL;
14955 		}
14956 		t = btf_type_by_id(btf, t->type);
14957 		if (!btf_type_is_func_proto(t))
14958 			return -EINVAL;
14959 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14960 		if (ret)
14961 			return ret;
14962 		break;
14963 	default:
14964 		if (!prog_extension)
14965 			return -EINVAL;
14966 		fallthrough;
14967 	case BPF_MODIFY_RETURN:
14968 	case BPF_LSM_MAC:
14969 	case BPF_LSM_CGROUP:
14970 	case BPF_TRACE_FENTRY:
14971 	case BPF_TRACE_FEXIT:
14972 		if (!btf_type_is_func(t)) {
14973 			bpf_log(log, "attach_btf_id %u is not a function\n",
14974 				btf_id);
14975 			return -EINVAL;
14976 		}
14977 		if (prog_extension &&
14978 		    btf_check_type_match(log, prog, btf, t))
14979 			return -EINVAL;
14980 		t = btf_type_by_id(btf, t->type);
14981 		if (!btf_type_is_func_proto(t))
14982 			return -EINVAL;
14983 
14984 		if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
14985 		    (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
14986 		     prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
14987 			return -EINVAL;
14988 
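		/* BTF info for this subprog is marked unreliable; pass a NULL
		 * prototype so btf_distill_func_proto() falls back to a
		 * conservative function model.
		 */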
14989 		if (tgt_prog && conservative)
14990 			t = NULL;
14991 
14992 		ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14993 		if (ret < 0)
14994 			return ret;
14995 
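		/* Resolve the address to attach to: the JITed image of the
		 * target (sub)program, or the kernel symbol for tname.
		 */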
14996 		if (tgt_prog) {
14997 			if (subprog == 0)
14998 				addr = (long) tgt_prog->bpf_func;
14999 			else
15000 				addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
15001 		} else {
15002 			addr = kallsyms_lookup_name(tname);
15003 			if (!addr) {
15004 				bpf_log(log,
15005 					"The address of function %s cannot be found\n",
15006 					tname);
15007 				return -ENOENT;
15008 			}
15009 		}
15010 
15011 		if (prog->aux->sleepable) {
15012 			ret = -EINVAL;
15013 			switch (prog->type) {
15014 			case BPF_PROG_TYPE_TRACING:
15015 				/* fentry/fexit/fmod_ret progs can be sleepable only if they attach
15016 				 * to ALLOW_ERROR_INJECTION functions and are not in the denylist.
15017 				 */
15018 				if (!check_non_sleepable_error_inject(btf_id) &&
15019 				    within_error_injection_list(addr))
15020 					ret = 0;
15021 				break;
15022 			case BPF_PROG_TYPE_LSM:
15023 				/* LSM progs may only be attached to bpf_lsm_*() hooks, and
15024 				 * only some of those hooks are sleepable.
15025 				 */
15026 				if (bpf_lsm_is_sleepable_hook(btf_id))
15027 					ret = 0;
15028 				break;
15029 			default:
15030 				break;
15031 			}
15032 			if (ret) {
15033 				bpf_log(log, "%s is not sleepable\n", tname);
15034 				return ret;
15035 			}
15036 		} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
15037 			if (tgt_prog) {
15038 				bpf_log(log, "can't modify return codes of BPF programs\n");
15039 				return -EINVAL;
15040 			}
15041 			ret = check_attach_modify_return(addr, tname);
15042 			if (ret) {
15043 				bpf_log(log, "%s() is not modifiable\n", tname);
15044 				return ret;
15045 			}
15046 		}
15047 
15048 		break;
15049 	}
15050 	tgt_info->tgt_addr = addr;
15051 	tgt_info->tgt_name = tname;
15052 	tgt_info->tgt_type = t;
15053 	return 0;
15054 }
15055 
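/* Kernel functions that tracing programs are never allowed to attach to. */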
15056 BTF_SET_START(btf_id_deny)
15057 BTF_ID_UNUSED
15058 #ifdef CONFIG_SMP
15059 BTF_ID(func, migrate_disable)
15060 BTF_ID(func, migrate_enable)
15061 #endif
15062 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
15063 BTF_ID(func, rcu_read_unlock_strict)
15064 #endif
15065 BTF_SET_END(btf_id_deny)
15066 
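/* Resolve and validate the attach target named by attach_btf_id for
 * struct_ops, tracing, LSM and extension programs, and acquire the
 * trampoline used for the attachment where one is needed.
 */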
15067 static int check_attach_btf_id(struct bpf_verifier_env *env)
15068 {
15069 	struct bpf_prog *prog = env->prog;
15070 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
15071 	struct bpf_attach_target_info tgt_info = {};
15072 	u32 btf_id = prog->aux->attach_btf_id;
15073 	struct bpf_trampoline *tr;
15074 	int ret;
15075 	u64 key;
15076 
15077 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
15078 		if (prog->aux->sleepable)
15079 			/* attach_btf_id checked to be zero already */
15080 			return 0;
15081 		verbose(env, "Syscall programs can only be sleepable\n");
15082 		return -EINVAL;
15083 	}
15084 
15085 	if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
15086 	    prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) {
15087 		verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n");
15088 		return -EINVAL;
15089 	}
15090 
15091 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
15092 		return check_struct_ops_btf_id(env);
15093 
15094 	if (prog->type != BPF_PROG_TYPE_TRACING &&
15095 	    prog->type != BPF_PROG_TYPE_LSM &&
15096 	    prog->type != BPF_PROG_TYPE_EXT)
15097 		return 0;
15098 
15099 	ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
15100 	if (ret)
15101 		return ret;
15102 
15103 	if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
15104 		/* To make freplace programs equivalent to their targets, they
15105 		 * need to inherit env->ops and expected_attach_type for the
15106 		 * rest of verification.
15107 		 */
15108 		env->ops = bpf_verifier_ops[tgt_prog->type];
15109 		prog->expected_attach_type = tgt_prog->expected_attach_type;
15110 	}
15111 
15112 	/* store info about the attachment target that will be used later */
15113 	prog->aux->attach_func_proto = tgt_info.tgt_type;
15114 	prog->aux->attach_func_name = tgt_info.tgt_name;
15115 
15116 	if (tgt_prog) {
15117 		prog->aux->saved_dst_prog_type = tgt_prog->type;
15118 		prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
15119 	}
15120 
15121 	if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
15122 		prog->aux->attach_btf_trace = true;
15123 		return 0;
15124 	} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
15125 		if (!bpf_iter_prog_supported(prog))
15126 			return -EINVAL;
15127 		return 0;
15128 	}
15129 
15130 	if (prog->type == BPF_PROG_TYPE_LSM) {
15131 		ret = bpf_lsm_verify_prog(&env->log, prog);
15132 		if (ret < 0)
15133 			return ret;
15134 	} else if (prog->type == BPF_PROG_TYPE_TRACING &&
15135 		   btf_id_set_contains(&btf_id_deny, btf_id)) {
15136 		return -EINVAL;
15137 	}
15138 
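	/* fentry/fexit/fmod_ret, LSM and freplace attachments go through a
	 * BPF trampoline; look it up (or create it) now and stash it on the
	 * program for the actual attach later.
	 */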
15139 	key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
15140 	tr = bpf_trampoline_get(key, &tgt_info);
15141 	if (!tr)
15142 		return -ENOMEM;
15143 
15144 	prog->aux->dst_trampoline = tr;
15145 	return 0;
15146 }
15147 
15148 struct btf *bpf_get_btf_vmlinux(void)
15149 {
15150 	if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
15151 		mutex_lock(&bpf_verifier_lock);
15152 		if (!btf_vmlinux)
15153 			btf_vmlinux = btf_parse_vmlinux();
15154 		mutex_unlock(&bpf_verifier_lock);
15155 	}
15156 	return btf_vmlinux;
15157 }
15158 
15159 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
15160 {
15161 	u64 start_time = ktime_get_ns();
15162 	struct bpf_verifier_env *env;
15163 	struct bpf_verifier_log *log;
15164 	int i, len, ret = -EINVAL;
15165 	bool is_priv;
15166 
15167 	/* if no program types are registered, no program is valid */
15168 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
15169 		return -EINVAL;
15170 
15171 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
15172 	 * allocate/free it every time bpf_check() is called
15173 	 */
15174 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
15175 	if (!env)
15176 		return -ENOMEM;
15177 	log = &env->log;
15178 
15179 	len = (*prog)->len;
15180 	env->insn_aux_data =
15181 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
15182 	ret = -ENOMEM;
15183 	if (!env->insn_aux_data)
15184 		goto err_free_env;
15185 	for (i = 0; i < len; i++)
15186 		env->insn_aux_data[i].orig_idx = i;
15187 	env->prog = *prog;
15188 	env->ops = bpf_verifier_ops[env->prog->type];
15189 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
15190 	is_priv = bpf_capable();
15191 
15192 	bpf_get_btf_vmlinux();
15193 
15194 	/* grab the mutex to protect a few globals used by the verifier */
15195 	if (!is_priv)
15196 		mutex_lock(&bpf_verifier_lock);
15197 
15198 	if (attr->log_level || attr->log_buf || attr->log_size) {
15199 		/* user requested verbose verifier output
15200 		 * and supplied a buffer to store the verification trace
15201 		 */
15202 		log->level = attr->log_level;
15203 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
15204 		log->len_total = attr->log_size;
15205 
15206 		/* log attributes have to be sane */
15207 		if (!bpf_verifier_log_attr_valid(log)) {
15208 			ret = -EINVAL;
15209 			goto err_unlock;
15210 		}
15211 	}
15212 
15213 	mark_verifier_state_clean(env);
15214 
15215 	if (IS_ERR(btf_vmlinux)) {
15216 		/* Either gcc, pahole, or the kernel is broken. */
15217 		verbose(env, "in-kernel BTF is malformed\n");
15218 		ret = PTR_ERR(btf_vmlinux);
15219 		goto skip_full_check;
15220 	}
15221 
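	/* Alignment checking is strict if the user requested it or the
	 * architecture requires it, unless BPF_F_ANY_ALIGNMENT overrides both.
	 */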
15222 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
15223 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
15224 		env->strict_alignment = true;
15225 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
15226 		env->strict_alignment = false;
15227 
15228 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
15229 	env->allow_uninit_stack = bpf_allow_uninit_stack();
15230 	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
15231 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
15232 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
15233 	env->bpf_capable = bpf_capable();
15234 
15235 	if (is_priv)
15236 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
15237 
15238 	env->explored_states = kvcalloc(state_htab_size(env),
15239 				       sizeof(struct bpf_verifier_state_list *),
15240 				       GFP_USER);
15241 	ret = -ENOMEM;
15242 	if (!env->explored_states)
15243 		goto skip_full_check;
15244 
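	/* Front-end passes: discover subprograms and kfunc calls, validate
	 * subprogram layout and BTF info, resolve the attach target and
	 * pseudo ld_imm64 instructions, and check the CFG before the main
	 * instruction-by-instruction verification below.
	 */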
15245 	ret = add_subprog_and_kfunc(env);
15246 	if (ret < 0)
15247 		goto skip_full_check;
15248 
15249 	ret = check_subprogs(env);
15250 	if (ret < 0)
15251 		goto skip_full_check;
15252 
15253 	ret = check_btf_info(env, attr, uattr);
15254 	if (ret < 0)
15255 		goto skip_full_check;
15256 
15257 	ret = check_attach_btf_id(env);
15258 	if (ret)
15259 		goto skip_full_check;
15260 
15261 	ret = resolve_pseudo_ldimm64(env);
15262 	if (ret < 0)
15263 		goto skip_full_check;
15264 
15265 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
15266 		ret = bpf_prog_offload_verifier_prep(env->prog);
15267 		if (ret)
15268 			goto skip_full_check;
15269 	}
15270 
15271 	ret = check_cfg(env);
15272 	if (ret < 0)
15273 		goto skip_full_check;
15274 
15275 	ret = do_check_subprogs(env);
15276 	ret = ret ?: do_check_main(env);
15277 
15278 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
15279 		ret = bpf_prog_offload_finalize(env);
15280 
15281 skip_full_check:
15282 	kvfree(env->explored_states);
15283 
15284 	if (ret == 0)
15285 		ret = check_max_stack_depth(env);
15286 
15287 	/* instruction rewrites happen after this point */
15288 	if (ret == 0)
15289 		ret = optimize_bpf_loop(env);
15290 
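	/* For privileged loaders dead code is removed and dead branches are
	 * hard-wired; for unprivileged loaders dead code is only neutralized
	 * in place by sanitize_dead_code().
	 */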
15291 	if (is_priv) {
15292 		if (ret == 0)
15293 			opt_hard_wire_dead_code_branches(env);
15294 		if (ret == 0)
15295 			ret = opt_remove_dead_code(env);
15296 		if (ret == 0)
15297 			ret = opt_remove_nops(env);
15298 	} else {
15299 		if (ret == 0)
15300 			sanitize_dead_code(env);
15301 	}
15302 
15303 	if (ret == 0)
15304 		/* program is valid, convert *(u32*)(ctx + off) accesses */
15305 		ret = convert_ctx_accesses(env);
15306 
15307 	if (ret == 0)
15308 		ret = do_misc_fixups(env);
15309 
15310 	/* do 32-bit optimization after insn patching is done, so that the
15311 	 * patched insns can be handled correctly.
15312 	 */
15313 	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
15314 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
15315 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
15316 								     : false;
15317 	}
15318 
15319 	if (ret == 0)
15320 		ret = fixup_call_args(env);
15321 
15322 	env->verification_time = ktime_get_ns() - start_time;
15323 	print_verification_stats(env);
15324 	env->prog->aux->verified_insns = env->insn_processed;
15325 
15326 	if (log->level && bpf_verifier_log_full(log))
15327 		ret = -ENOSPC;
15328 	if (log->level && !log->ubuf) {
15329 		ret = -EFAULT;
15330 		goto err_release_maps;
15331 	}
15332 
15333 	if (ret)
15334 		goto err_release_maps;
15335 
15336 	if (env->used_map_cnt) {
15337 		/* if program passed verifier, update used_maps in bpf_prog_info */
15338 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
15339 							  sizeof(env->used_maps[0]),
15340 							  GFP_KERNEL);
15341 
15342 		if (!env->prog->aux->used_maps) {
15343 			ret = -ENOMEM;
15344 			goto err_release_maps;
15345 		}
15346 
15347 		memcpy(env->prog->aux->used_maps, env->used_maps,
15348 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
15349 		env->prog->aux->used_map_cnt = env->used_map_cnt;
15350 	}
15351 	if (env->used_btf_cnt) {
15352 		/* if program passed verifier, update used_btfs in bpf_prog_aux */
15353 		env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
15354 							  sizeof(env->used_btfs[0]),
15355 							  GFP_KERNEL);
15356 		if (!env->prog->aux->used_btfs) {
15357 			ret = -ENOMEM;
15358 			goto err_release_maps;
15359 		}
15360 
15361 		memcpy(env->prog->aux->used_btfs, env->used_btfs,
15362 		       sizeof(env->used_btfs[0]) * env->used_btf_cnt);
15363 		env->prog->aux->used_btf_cnt = env->used_btf_cnt;
15364 	}
15365 	if (env->used_map_cnt || env->used_btf_cnt) {
15366 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
15367 		 * bpf_ld_imm64 instructions
15368 		 */
15369 		convert_pseudo_ld_imm64(env);
15370 	}
15371 
15372 	adjust_btf_func(env);
15373 
15374 err_release_maps:
15375 	if (!env->prog->aux->used_maps)
15376 		/* if we didn't copy map pointers into bpf_prog_info, release
15377 		 * them now. Otherwise free_used_maps() will release them.
15378 		 */
15379 		release_maps(env);
15380 	if (!env->prog->aux->used_btfs)
15381 		release_btfs(env);
15382 
15383 	/* extension progs temporarily inherit the attach_type of their targets
15384 	 * for verification purposes, so set it back to zero before returning
15385 	 */
15386 	if (env->prog->type == BPF_PROG_TYPE_EXT)
15387 		env->prog->expected_attach_type = 0;
15388 
15389 	*prog = env->prog;
15390 err_unlock:
15391 	if (!is_priv)
15392 		mutex_unlock(&bpf_verifier_lock);
15393 	vfree(env->insn_aux_data);
15394 err_free_env:
15395 	kfree(env);
15396 	return ret;
15397 }
15398