xref: /linux/kernel/bpf/verifier.c (revision 10a708c24a31ae1be1ea23d1c38da2691d1fd65c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/slab.h>
10 #include <linux/bpf.h>
11 #include <linux/btf.h>
12 #include <linux/bpf_verifier.h>
13 #include <linux/filter.h>
14 #include <net/netlink.h>
15 #include <linux/file.h>
16 #include <linux/vmalloc.h>
17 #include <linux/stringify.h>
18 #include <linux/bsearch.h>
19 #include <linux/sort.h>
20 #include <linux/perf_event.h>
21 #include <linux/ctype.h>
22 
23 #include "disasm.h"
24 
25 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
26 #define BPF_PROG_TYPE(_id, _name) \
27 	[_id] = & _name ## _verifier_ops,
28 #define BPF_MAP_TYPE(_id, _ops)
29 #include <linux/bpf_types.h>
30 #undef BPF_PROG_TYPE
31 #undef BPF_MAP_TYPE
32 };
33 
34 /* bpf_check() is a static code analyzer that walks eBPF program
35  * instruction by instruction and updates register/stack state.
36  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
37  *
38  * The first pass is depth-first-search to check that the program is a DAG.
39  * It rejects the following programs:
40  * - larger than BPF_MAXINSNS insns
41  * - if loop is present (detected via back-edge)
42  * - unreachable insns exist (shouldn't be a forest. program = one function)
43  * - out of bounds or malformed jumps
44  * The second pass is all possible path descent from the 1st insn.
45  * Since it's analyzing all paths through the program, the length of the
46  * analysis is limited to 64k insn, which may be hit even if the total number of
47  * insns is less than 4K, but there are too many branches that change stack/regs.
48  * Number of 'branches to be analyzed' is limited to 1k
49  *
50  * On entry to each instruction, each register has a type, and the instruction
51  * changes the types of the registers depending on instruction semantics.
52  * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
53  * copied to R1.
54  *
55  * All registers are 64-bit.
56  * R0 - return register
57  * R1-R5 argument passing registers
58  * R6-R9 callee saved registers
59  * R10 - frame pointer read-only
60  *
61  * At the start of BPF program the register R1 contains a pointer to bpf_context
62  * and has type PTR_TO_CTX.
63  *
64  * Verifier tracks arithmetic operations on pointers in case:
65  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
66  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
67  * 1st insn copies R10 (which has FRAME_PTR) type into R1
68  * and 2nd arithmetic instruction is pattern matched to recognize
69  * that it wants to construct a pointer to some element within stack.
70  * So after 2nd insn, the register R1 has type PTR_TO_STACK
71  * (and -20 constant is saved for further stack bounds checking).
72  * Meaning that this reg is a pointer to stack plus known immediate constant.
73  *
74  * Most of the time the registers have SCALAR_VALUE type, which
75  * means the register has some value, but it's not a valid pointer.
76  * (like pointer plus pointer becomes SCALAR_VALUE type)
77  *
78  * When verifier sees load or store instructions the type of base register
79  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
80  * four pointer types recognized by check_mem_access() function.
81  *
82  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
83  * and the range of [ptr, ptr + map's value_size) is accessible.
84  *
85  * registers used to pass values to function calls are checked against
86  * function argument constraints.
87  *
88  * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
89  * It means that the register type passed to this function must be
90  * PTR_TO_STACK and it will be used inside the function as
91  * 'pointer to map element key'
92  *
93  * For example the argument constraints for bpf_map_lookup_elem():
94  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
95  *   .arg1_type = ARG_CONST_MAP_PTR,
96  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
97  *
98  * ret_type says that this function returns 'pointer to map elem value or null'.
99  * The function expects the 1st argument to be a const pointer to 'struct bpf_map' and
100  * the 2nd argument to be a pointer to stack, which will be used inside
101  * the helper function as a pointer to map element key.
102  *
103  * On the kernel side the helper function looks like:
104  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
105  * {
106  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
107  *    void *key = (void *) (unsigned long) r2;
108  *    void *value;
109  *
110  *    here kernel can access 'key' and 'map' pointers safely, knowing that
111  *    [key, key + map->key_size) bytes are valid and were initialized on
112  *    the stack of eBPF program.
113  * }
114  *
115  * Corresponding eBPF program may look like:
116  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
117  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
118  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
119  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
120  * here verifier looks at prototype of map_lookup_elem() and sees:
121  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
122  * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
123  *
124  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
125  * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
126  * and were initialized prior to this call.
127  * If it's ok, then verifier allows this BPF_CALL insn and looks at
128  * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
129  * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
130  * returns either a pointer to map value or NULL.
131  *
132  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
133  * insn, the register holding that pointer in the true branch changes state to
134  * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
135  * branch. See check_cond_jmp_op().
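 *
 * For instance (an illustrative sketch, not a complete program), the NULL
 * check following the lookup above could look like:
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),  // if R0 == NULL, skip the store
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),    // here R0 type is PTR_TO_MAP_VALUE
 *    BPF_EXIT_INSN(),
 * In the fall-through (non-NULL) branch R0 becomes PTR_TO_MAP_VALUE and the
 * 8-byte store is accepted; in the taken branch R0 is known to be 0.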
136  *
137  * After the call R0 is set to return type of the function and registers R1-R5
138  * are set to NOT_INIT to indicate that they are no longer readable.
139  *
140  * The following reference types represent a potential reference to a kernel
141  * resource which, after first being allocated, must be checked and freed by
142  * the BPF program:
143  * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
144  *
145  * When the verifier sees a helper call return a reference type, it allocates a
146  * pointer id for the reference and stores it in the current function state.
147  * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
148  * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
149  * passes through a NULL-check conditional. For the branch wherein the state is
150  * changed to CONST_IMM, the verifier releases the reference.
151  *
152  * For each helper function that allocates a reference, such as
153  * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
154  * bpf_sk_release(). When a reference type passes into the release function,
155  * the verifier also releases the reference. If any unchecked or unreleased
156  * reference remains at the end of the program, the verifier rejects it.
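 *
 * A sketch of the acquire/release pattern (using the helpers named above;
 * tuple setup and error handling are abbreviated):
 *    sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 *    if (sk) {                   // PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET
 *            ... use sk ...
 *            bpf_sk_release(sk); // the acquired reference is released here
 *    }
 * Omitting bpf_sk_release() on some path would leave an unreleased reference
 * and the program would be rejected.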
157  */
158 
159 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
160 struct bpf_verifier_stack_elem {
161 	/* verifier state is 'st'
162 	 * before processing instruction 'insn_idx'
163 	 * and after processing instruction 'prev_insn_idx'
164 	 */
165 	struct bpf_verifier_state st;
166 	int insn_idx;
167 	int prev_insn_idx;
168 	struct bpf_verifier_stack_elem *next;
169 };
170 
171 #define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
172 #define BPF_COMPLEXITY_LIMIT_STATES	64
173 
174 #define BPF_MAP_PTR_UNPRIV	1UL
175 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
176 					  POISON_POINTER_DELTA))
177 #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
178 
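/* A sketch of the aux->map_state encoding used by the helpers below:
 *
 *   bit  0     : BPF_MAP_PTR_UNPRIV - the map is used by an unprivileged
 *                program
 *   bits 63..1 : the struct bpf_map pointer itself (map pointers have at
 *                least 2-byte alignment, so bit 0 is always free), or
 *                BPF_MAP_PTR_POISON when no single map can be associated
 *                with the instruction
 *
 * e.g. bpf_map_ptr_store(aux, map, true) stores
 *      (unsigned long)map | BPF_MAP_PTR_UNPRIV.
 */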
179 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
180 {
181 	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
182 }
183 
184 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
185 {
186 	return aux->map_state & BPF_MAP_PTR_UNPRIV;
187 }
188 
189 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
190 			      const struct bpf_map *map, bool unpriv)
191 {
192 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
193 	unpriv |= bpf_map_ptr_unpriv(aux);
194 	aux->map_state = (unsigned long)map |
195 			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
196 }
197 
198 struct bpf_call_arg_meta {
199 	struct bpf_map *map_ptr;
200 	bool raw_mode;
201 	bool pkt_access;
202 	int regno;
203 	int access_size;
204 	s64 msize_smax_value;
205 	u64 msize_umax_value;
206 	int ref_obj_id;
207 	int func_id;
208 };
209 
210 static DEFINE_MUTEX(bpf_verifier_lock);
211 
212 static const struct bpf_line_info *
213 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
214 {
215 	const struct bpf_line_info *linfo;
216 	const struct bpf_prog *prog;
217 	u32 i, nr_linfo;
218 
219 	prog = env->prog;
220 	nr_linfo = prog->aux->nr_linfo;
221 
222 	if (!nr_linfo || insn_off >= prog->len)
223 		return NULL;
224 
225 	linfo = prog->aux->linfo;
226 	for (i = 1; i < nr_linfo; i++)
227 		if (insn_off < linfo[i].insn_off)
228 			break;
229 
230 	return &linfo[i - 1];
231 }
232 
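/* Format one log line into log->kbuf and stream it straight to the user
 * supplied buffer. A short sketch of the behaviour implemented below: the
 * line is appended at log->ubuf + log->len_used, and if the copy to user
 * space fails, log->ubuf is set to NULL so later output is silently dropped.
 */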
233 void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
234 		       va_list args)
235 {
236 	unsigned int n;
237 
238 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
239 
240 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
241 		  "verifier log line truncated - local buffer too short\n");
242 
243 	n = min(log->len_total - log->len_used - 1, n);
244 	log->kbuf[n] = '\0';
245 
246 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
247 		log->len_used += n;
248 	else
249 		log->ubuf = NULL;
250 }
251 
252 /* log_level controls verbosity level of eBPF verifier.
253  * bpf_verifier_log_write() is used to dump the verification trace to the log,
254  * so the user can figure out what's wrong with the program
255  */
256 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
257 					   const char *fmt, ...)
258 {
259 	va_list args;
260 
261 	if (!bpf_verifier_log_needed(&env->log))
262 		return;
263 
264 	va_start(args, fmt);
265 	bpf_verifier_vlog(&env->log, fmt, args);
266 	va_end(args);
267 }
268 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
269 
270 __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
271 {
272 	struct bpf_verifier_env *env = private_data;
273 	va_list args;
274 
275 	if (!bpf_verifier_log_needed(&env->log))
276 		return;
277 
278 	va_start(args, fmt);
279 	bpf_verifier_vlog(&env->log, fmt, args);
280 	va_end(args);
281 }
282 
283 static const char *ltrim(const char *s)
284 {
285 	while (isspace(*s))
286 		s++;
287 
288 	return s;
289 }
290 
291 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
292 					 u32 insn_off,
293 					 const char *prefix_fmt, ...)
294 {
295 	const struct bpf_line_info *linfo;
296 
297 	if (!bpf_verifier_log_needed(&env->log))
298 		return;
299 
300 	linfo = find_linfo(env, insn_off);
301 	if (!linfo || linfo == env->prev_linfo)
302 		return;
303 
304 	if (prefix_fmt) {
305 		va_list args;
306 
307 		va_start(args, prefix_fmt);
308 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
309 		va_end(args);
310 	}
311 
312 	verbose(env, "%s\n",
313 		ltrim(btf_name_by_offset(env->prog->aux->btf,
314 					 linfo->line_off)));
315 
316 	env->prev_linfo = linfo;
317 }
318 
319 static bool type_is_pkt_pointer(enum bpf_reg_type type)
320 {
321 	return type == PTR_TO_PACKET ||
322 	       type == PTR_TO_PACKET_META;
323 }
324 
325 static bool type_is_sk_pointer(enum bpf_reg_type type)
326 {
327 	return type == PTR_TO_SOCKET ||
328 		type == PTR_TO_SOCK_COMMON ||
329 		type == PTR_TO_TCP_SOCK ||
330 		type == PTR_TO_XDP_SOCK;
331 }
332 
333 static bool reg_type_may_be_null(enum bpf_reg_type type)
334 {
335 	return type == PTR_TO_MAP_VALUE_OR_NULL ||
336 	       type == PTR_TO_SOCKET_OR_NULL ||
337 	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
338 	       type == PTR_TO_TCP_SOCK_OR_NULL;
339 }
340 
341 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
342 {
343 	return reg->type == PTR_TO_MAP_VALUE &&
344 		map_value_has_spin_lock(reg->map_ptr);
345 }
346 
347 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
348 {
349 	return type == PTR_TO_SOCKET ||
350 		type == PTR_TO_SOCKET_OR_NULL ||
351 		type == PTR_TO_TCP_SOCK ||
352 		type == PTR_TO_TCP_SOCK_OR_NULL;
353 }
354 
355 static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
356 {
357 	return type == ARG_PTR_TO_SOCK_COMMON;
358 }
359 
360 /* Determine whether the function releases some resources allocated by another
361  * function call. The first reference type argument will be assumed to be
362  * released by release_reference().
363  */
364 static bool is_release_function(enum bpf_func_id func_id)
365 {
366 	return func_id == BPF_FUNC_sk_release;
367 }
368 
369 static bool is_acquire_function(enum bpf_func_id func_id)
370 {
371 	return func_id == BPF_FUNC_sk_lookup_tcp ||
372 		func_id == BPF_FUNC_sk_lookup_udp ||
373 		func_id == BPF_FUNC_skc_lookup_tcp;
374 }
375 
376 static bool is_ptr_cast_function(enum bpf_func_id func_id)
377 {
378 	return func_id == BPF_FUNC_tcp_sock ||
379 		func_id == BPF_FUNC_sk_fullsock;
380 }
381 
382 /* string representation of 'enum bpf_reg_type' */
383 static const char * const reg_type_str[] = {
384 	[NOT_INIT]		= "?",
385 	[SCALAR_VALUE]		= "inv",
386 	[PTR_TO_CTX]		= "ctx",
387 	[CONST_PTR_TO_MAP]	= "map_ptr",
388 	[PTR_TO_MAP_VALUE]	= "map_value",
389 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
390 	[PTR_TO_STACK]		= "fp",
391 	[PTR_TO_PACKET]		= "pkt",
392 	[PTR_TO_PACKET_META]	= "pkt_meta",
393 	[PTR_TO_PACKET_END]	= "pkt_end",
394 	[PTR_TO_FLOW_KEYS]	= "flow_keys",
395 	[PTR_TO_SOCKET]		= "sock",
396 	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
397 	[PTR_TO_SOCK_COMMON]	= "sock_common",
398 	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
399 	[PTR_TO_TCP_SOCK]	= "tcp_sock",
400 	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
401 	[PTR_TO_TP_BUFFER]	= "tp_buffer",
402 	[PTR_TO_XDP_SOCK]	= "xdp_sock",
403 };
404 
405 static char slot_type_char[] = {
406 	[STACK_INVALID]	= '?',
407 	[STACK_SPILL]	= 'r',
408 	[STACK_MISC]	= 'm',
409 	[STACK_ZERO]	= '0',
410 };
411 
412 static void print_liveness(struct bpf_verifier_env *env,
413 			   enum bpf_reg_liveness live)
414 {
415 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
416 		verbose(env, "_");
417 	if (live & REG_LIVE_READ)
418 		verbose(env, "r");
419 	if (live & REG_LIVE_WRITTEN)
420 		verbose(env, "w");
421 	if (live & REG_LIVE_DONE)
422 		verbose(env, "D");
423 }
424 
425 static struct bpf_func_state *func(struct bpf_verifier_env *env,
426 				   const struct bpf_reg_state *reg)
427 {
428 	struct bpf_verifier_state *cur = env->cur_state;
429 
430 	return cur->frame[reg->frameno];
431 }
432 
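/* An illustrative (hand-written, not captured) line in the format produced
 * by print_verifier_state() below:
 *
 *    frame1: R1_w=ctx(id=0,off=0,imm=0) R6=inv42 R10=fp0 fp-8=mmmmmmmm refs=2
 *
 * R1 was just written ('w') and points to the context, R6 is the known
 * scalar 42, R10 is the frame pointer, the eight bytes at fp-8 hold
 * STACK_MISC data and one reference with id 2 is currently held.
 */
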
433 static void print_verifier_state(struct bpf_verifier_env *env,
434 				 const struct bpf_func_state *state)
435 {
436 	const struct bpf_reg_state *reg;
437 	enum bpf_reg_type t;
438 	int i;
439 
440 	if (state->frameno)
441 		verbose(env, " frame%d:", state->frameno);
442 	for (i = 0; i < MAX_BPF_REG; i++) {
443 		reg = &state->regs[i];
444 		t = reg->type;
445 		if (t == NOT_INIT)
446 			continue;
447 		verbose(env, " R%d", i);
448 		print_liveness(env, reg->live);
449 		verbose(env, "=%s", reg_type_str[t]);
450 		if (t == SCALAR_VALUE && reg->precise)
451 			verbose(env, "P");
452 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
453 		    tnum_is_const(reg->var_off)) {
454 			/* reg->off should be 0 for SCALAR_VALUE */
455 			verbose(env, "%lld", reg->var_off.value + reg->off);
456 		} else {
457 			verbose(env, "(id=%d", reg->id);
458 			if (reg_type_may_be_refcounted_or_null(t))
459 				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
460 			if (t != SCALAR_VALUE)
461 				verbose(env, ",off=%d", reg->off);
462 			if (type_is_pkt_pointer(t))
463 				verbose(env, ",r=%d", reg->range);
464 			else if (t == CONST_PTR_TO_MAP ||
465 				 t == PTR_TO_MAP_VALUE ||
466 				 t == PTR_TO_MAP_VALUE_OR_NULL)
467 				verbose(env, ",ks=%d,vs=%d",
468 					reg->map_ptr->key_size,
469 					reg->map_ptr->value_size);
470 			if (tnum_is_const(reg->var_off)) {
471 				/* Typically an immediate SCALAR_VALUE, but
472 				 * could be a pointer whose offset is too big
473 				 * for reg->off
474 				 */
475 				verbose(env, ",imm=%llx", reg->var_off.value);
476 			} else {
477 				if (reg->smin_value != reg->umin_value &&
478 				    reg->smin_value != S64_MIN)
479 					verbose(env, ",smin_value=%lld",
480 						(long long)reg->smin_value);
481 				if (reg->smax_value != reg->umax_value &&
482 				    reg->smax_value != S64_MAX)
483 					verbose(env, ",smax_value=%lld",
484 						(long long)reg->smax_value);
485 				if (reg->umin_value != 0)
486 					verbose(env, ",umin_value=%llu",
487 						(unsigned long long)reg->umin_value);
488 				if (reg->umax_value != U64_MAX)
489 					verbose(env, ",umax_value=%llu",
490 						(unsigned long long)reg->umax_value);
491 				if (!tnum_is_unknown(reg->var_off)) {
492 					char tn_buf[48];
493 
494 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
495 					verbose(env, ",var_off=%s", tn_buf);
496 				}
497 			}
498 			verbose(env, ")");
499 		}
500 	}
501 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
502 		char types_buf[BPF_REG_SIZE + 1];
503 		bool valid = false;
504 		int j;
505 
506 		for (j = 0; j < BPF_REG_SIZE; j++) {
507 			if (state->stack[i].slot_type[j] != STACK_INVALID)
508 				valid = true;
509 			types_buf[j] = slot_type_char[
510 					state->stack[i].slot_type[j]];
511 		}
512 		types_buf[BPF_REG_SIZE] = 0;
513 		if (!valid)
514 			continue;
515 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
516 		print_liveness(env, state->stack[i].spilled_ptr.live);
517 		if (state->stack[i].slot_type[0] == STACK_SPILL) {
518 			reg = &state->stack[i].spilled_ptr;
519 			t = reg->type;
520 			verbose(env, "=%s", reg_type_str[t]);
521 			if (t == SCALAR_VALUE && reg->precise)
522 				verbose(env, "P");
523 			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
524 				verbose(env, "%lld", reg->var_off.value + reg->off);
525 		} else {
526 			verbose(env, "=%s", types_buf);
527 		}
528 	}
529 	if (state->acquired_refs && state->refs[0].id) {
530 		verbose(env, " refs=%d", state->refs[0].id);
531 		for (i = 1; i < state->acquired_refs; i++)
532 			if (state->refs[i].id)
533 				verbose(env, ",%d", state->refs[i].id);
534 	}
535 	verbose(env, "\n");
536 }
537 
538 #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
539 static int copy_##NAME##_state(struct bpf_func_state *dst,		\
540 			       const struct bpf_func_state *src)	\
541 {									\
542 	if (!src->FIELD)						\
543 		return 0;						\
544 	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
545 		/* internal bug, make state invalid to reject the program */ \
546 		memset(dst, 0, sizeof(*dst));				\
547 		return -EFAULT;						\
548 	}								\
549 	memcpy(dst->FIELD, src->FIELD,					\
550 	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
551 	return 0;							\
552 }
553 /* copy_reference_state() */
554 COPY_STATE_FN(reference, acquired_refs, refs, 1)
555 /* copy_stack_state() */
556 COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
557 #undef COPY_STATE_FN
558 
559 #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
560 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
561 				  bool copy_old)			\
562 {									\
563 	u32 old_size = state->COUNT;					\
564 	struct bpf_##NAME##_state *new_##FIELD;				\
565 	int slot = size / SIZE;						\
566 									\
567 	if (size <= old_size || !size) {				\
568 		if (copy_old)						\
569 			return 0;					\
570 		state->COUNT = slot * SIZE;				\
571 		if (!size && old_size) {				\
572 			kfree(state->FIELD);				\
573 			state->FIELD = NULL;				\
574 		}							\
575 		return 0;						\
576 	}								\
577 	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
578 				    GFP_KERNEL);			\
579 	if (!new_##FIELD)						\
580 		return -ENOMEM;						\
581 	if (copy_old) {							\
582 		if (state->FIELD)					\
583 			memcpy(new_##FIELD, state->FIELD,		\
584 			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
585 		memset(new_##FIELD + old_size / SIZE, 0,		\
586 		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
587 	}								\
588 	state->COUNT = slot * SIZE;					\
589 	kfree(state->FIELD);						\
590 	state->FIELD = new_##FIELD;					\
591 	return 0;							\
592 }
593 /* realloc_reference_state() */
594 REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
595 /* realloc_stack_state() */
596 REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
597 #undef REALLOC_STATE_FN
598 
599 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to
600  * make it consume a minimal amount of memory. On a stack access from the
601  * program, check_stack_write() calls into realloc_func_state() to grow the stack.
602  * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
603  * which realloc_stack_state() copies over. It points to previous
604  * bpf_verifier_state which is never reallocated.
605  */
606 static int realloc_func_state(struct bpf_func_state *state, int stack_size,
607 			      int refs_size, bool copy_old)
608 {
609 	int err = realloc_reference_state(state, refs_size, copy_old);
610 	if (err)
611 		return err;
612 	return realloc_stack_state(state, stack_size, copy_old);
613 }
614 
615 /* Acquire a pointer id from the env and update the state->refs to include
616  * this new pointer reference.
617  * On success, returns a valid pointer id to associate with the register.
618  * On failure, returns a negative errno.
619  */
620 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
621 {
622 	struct bpf_func_state *state = cur_func(env);
623 	int new_ofs = state->acquired_refs;
624 	int id, err;
625 
626 	err = realloc_reference_state(state, state->acquired_refs + 1, true);
627 	if (err)
628 		return err;
629 	id = ++env->id_gen;
630 	state->refs[new_ofs].id = id;
631 	state->refs[new_ofs].insn_idx = insn_idx;
632 
633 	return id;
634 }
635 
636 /* release function corresponding to acquire_reference_state(). Idempotent. */
637 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
638 {
639 	int i, last_idx;
640 
641 	last_idx = state->acquired_refs - 1;
642 	for (i = 0; i < state->acquired_refs; i++) {
643 		if (state->refs[i].id == ptr_id) {
644 			if (last_idx && i != last_idx)
645 				memcpy(&state->refs[i], &state->refs[last_idx],
646 				       sizeof(*state->refs));
647 			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
648 			state->acquired_refs--;
649 			return 0;
650 		}
651 	}
652 	return -EINVAL;
653 }
654 
655 static int transfer_reference_state(struct bpf_func_state *dst,
656 				    struct bpf_func_state *src)
657 {
658 	int err = realloc_reference_state(dst, src->acquired_refs, false);
659 	if (err)
660 		return err;
661 	err = copy_reference_state(dst, src);
662 	if (err)
663 		return err;
664 	return 0;
665 }
666 
667 static void free_func_state(struct bpf_func_state *state)
668 {
669 	if (!state)
670 		return;
671 	kfree(state->refs);
672 	kfree(state->stack);
673 	kfree(state);
674 }
675 
676 static void clear_jmp_history(struct bpf_verifier_state *state)
677 {
678 	kfree(state->jmp_history);
679 	state->jmp_history = NULL;
680 	state->jmp_history_cnt = 0;
681 }
682 
683 static void free_verifier_state(struct bpf_verifier_state *state,
684 				bool free_self)
685 {
686 	int i;
687 
688 	for (i = 0; i <= state->curframe; i++) {
689 		free_func_state(state->frame[i]);
690 		state->frame[i] = NULL;
691 	}
692 	clear_jmp_history(state);
693 	if (free_self)
694 		kfree(state);
695 }
696 
697 /* copy the verifier state from src to dst, growing dst stack space
698  * when necessary to accommodate a larger src stack
699  */
700 static int copy_func_state(struct bpf_func_state *dst,
701 			   const struct bpf_func_state *src)
702 {
703 	int err;
704 
705 	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
706 				 false);
707 	if (err)
708 		return err;
709 	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
710 	err = copy_reference_state(dst, src);
711 	if (err)
712 		return err;
713 	return copy_stack_state(dst, src);
714 }
715 
716 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
717 			       const struct bpf_verifier_state *src)
718 {
719 	struct bpf_func_state *dst;
720 	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
721 	int i, err;
722 
723 	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
724 		kfree(dst_state->jmp_history);
725 		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
726 		if (!dst_state->jmp_history)
727 			return -ENOMEM;
728 	}
729 	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
730 	dst_state->jmp_history_cnt = src->jmp_history_cnt;
731 
732 	/* if dst has more stack frames than src, free them */
733 	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
734 		free_func_state(dst_state->frame[i]);
735 		dst_state->frame[i] = NULL;
736 	}
737 	dst_state->speculative = src->speculative;
738 	dst_state->curframe = src->curframe;
739 	dst_state->active_spin_lock = src->active_spin_lock;
740 	dst_state->branches = src->branches;
741 	dst_state->parent = src->parent;
742 	dst_state->first_insn_idx = src->first_insn_idx;
743 	dst_state->last_insn_idx = src->last_insn_idx;
744 	for (i = 0; i <= src->curframe; i++) {
745 		dst = dst_state->frame[i];
746 		if (!dst) {
747 			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
748 			if (!dst)
749 				return -ENOMEM;
750 			dst_state->frame[i] = dst;
751 		}
752 		err = copy_func_state(dst, src->frame[i]);
753 		if (err)
754 			return err;
755 	}
756 	return 0;
757 }
758 
759 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
760 {
761 	while (st) {
762 		u32 br = --st->branches;
763 
764 		/* WARN_ON(br > 1) technically makes sense here,
765 		 * but see comment in push_stack(), hence:
766 		 */
767 		WARN_ONCE((int)br < 0,
768 			  "BUG update_branch_counts:branches_to_explore=%d\n",
769 			  br);
770 		if (br)
771 			break;
772 		st = st->parent;
773 	}
774 }
775 
776 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
777 		     int *insn_idx)
778 {
779 	struct bpf_verifier_state *cur = env->cur_state;
780 	struct bpf_verifier_stack_elem *elem, *head = env->head;
781 	int err;
782 
783 	if (env->head == NULL)
784 		return -ENOENT;
785 
786 	if (cur) {
787 		err = copy_verifier_state(cur, &head->st);
788 		if (err)
789 			return err;
790 	}
791 	if (insn_idx)
792 		*insn_idx = head->insn_idx;
793 	if (prev_insn_idx)
794 		*prev_insn_idx = head->prev_insn_idx;
795 	elem = head->next;
796 	free_verifier_state(&head->st, false);
797 	kfree(head);
798 	env->head = elem;
799 	env->stack_size--;
800 	return 0;
801 }
802 
803 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
804 					     int insn_idx, int prev_insn_idx,
805 					     bool speculative)
806 {
807 	struct bpf_verifier_state *cur = env->cur_state;
808 	struct bpf_verifier_stack_elem *elem;
809 	int err;
810 
811 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
812 	if (!elem)
813 		goto err;
814 
815 	elem->insn_idx = insn_idx;
816 	elem->prev_insn_idx = prev_insn_idx;
817 	elem->next = env->head;
818 	env->head = elem;
819 	env->stack_size++;
820 	err = copy_verifier_state(&elem->st, cur);
821 	if (err)
822 		goto err;
823 	elem->st.speculative |= speculative;
824 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
825 		verbose(env, "The sequence of %d jumps is too complex.\n",
826 			env->stack_size);
827 		goto err;
828 	}
829 	if (elem->st.parent) {
830 		++elem->st.parent->branches;
831 		/* WARN_ON(branches > 2) technically makes sense here,
832 		 * but
833 		 * 1. speculative states will bump 'branches' for non-branch
834 		 * instructions
835 		 * 2. is_state_visited() heuristics may decide not to create
836 		 * a new state for a sequence of branches and all such current
837 		 * and cloned states will be pointing to a single parent state
838 		 * which might have large 'branches' count.
839 		 */
840 	}
841 	return &elem->st;
842 err:
843 	free_verifier_state(env->cur_state, true);
844 	env->cur_state = NULL;
845 	/* pop all elements and return */
846 	while (!pop_stack(env, NULL, NULL));
847 	return NULL;
848 }
849 
850 #define CALLER_SAVED_REGS 6
851 static const int caller_saved[CALLER_SAVED_REGS] = {
852 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
853 };
854 
855 static void __mark_reg_not_init(struct bpf_reg_state *reg);
856 
857 /* Mark the unknown part of a register (variable offset or scalar value) as
858  * known to have the value @imm.
859  */
860 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
861 {
862 	/* Clear id, off, and union(map_ptr, range) */
863 	memset(((u8 *)reg) + sizeof(reg->type), 0,
864 	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
865 	reg->var_off = tnum_const(imm);
866 	reg->smin_value = (s64)imm;
867 	reg->smax_value = (s64)imm;
868 	reg->umin_value = imm;
869 	reg->umax_value = imm;
870 }
871 
872 /* Mark the 'variable offset' part of a register as zero.  This should be
873  * used only on registers holding a pointer type.
874  */
875 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
876 {
877 	__mark_reg_known(reg, 0);
878 }
879 
880 static void __mark_reg_const_zero(struct bpf_reg_state *reg)
881 {
882 	__mark_reg_known(reg, 0);
883 	reg->type = SCALAR_VALUE;
884 }
885 
886 static void mark_reg_known_zero(struct bpf_verifier_env *env,
887 				struct bpf_reg_state *regs, u32 regno)
888 {
889 	if (WARN_ON(regno >= MAX_BPF_REG)) {
890 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
891 		/* Something bad happened, let's kill all regs */
892 		for (regno = 0; regno < MAX_BPF_REG; regno++)
893 			__mark_reg_not_init(regs + regno);
894 		return;
895 	}
896 	__mark_reg_known_zero(regs + regno);
897 }
898 
899 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
900 {
901 	return type_is_pkt_pointer(reg->type);
902 }
903 
904 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
905 {
906 	return reg_is_pkt_pointer(reg) ||
907 	       reg->type == PTR_TO_PACKET_END;
908 }
909 
910 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
911 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
912 				    enum bpf_reg_type which)
913 {
914 	/* The register can already have a range from prior markings.
915 	 * This is fine as long as it hasn't been advanced from its
916 	 * origin.
917 	 */
918 	return reg->type == which &&
919 	       reg->id == 0 &&
920 	       reg->off == 0 &&
921 	       tnum_equals_const(reg->var_off, 0);
922 }
923 
924 /* Attempts to improve min/max values based on var_off information */
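/* For example (a sketch): with var_off = (value=0x8; mask=0x7) the known
 * bits already constrain the value to [8, 15], so umin_value can be raised
 * to at least 8 and umax_value lowered to at most 15 (0x8 | 0x7) below.
 */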
925 static void __update_reg_bounds(struct bpf_reg_state *reg)
926 {
927 	/* min signed is max(sign bit) | min(other bits) */
928 	reg->smin_value = max_t(s64, reg->smin_value,
929 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
930 	/* max signed is min(sign bit) | max(other bits) */
931 	reg->smax_value = min_t(s64, reg->smax_value,
932 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
933 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
934 	reg->umax_value = min(reg->umax_value,
935 			      reg->var_off.value | reg->var_off.mask);
936 }
937 
938 /* Uses signed min/max values to inform unsigned, and vice-versa */
939 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
940 {
941 	/* Learn sign from signed bounds.
942 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
943 	 * are the same, so combine.  This works even in the negative case, e.g.
944 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
945 	 */
946 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
947 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
948 							  reg->umin_value);
949 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
950 							  reg->umax_value);
951 		return;
952 	}
953 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
954 	 * boundary, so we must be careful.
955 	 */
956 	if ((s64)reg->umax_value >= 0) {
957 		/* Positive.  We can't learn anything from the smin, but smax
958 		 * is positive, hence safe.
959 		 */
960 		reg->smin_value = reg->umin_value;
961 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
962 							  reg->umax_value);
963 	} else if ((s64)reg->umin_value < 0) {
964 		/* Negative.  We can't learn anything from the smax, but smin
965 		 * is negative, hence safe.
966 		 */
967 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
968 							  reg->umin_value);
969 		reg->smax_value = reg->umax_value;
970 	}
971 }
972 
973 /* Attempts to improve var_off based on unsigned min/max information */
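/* For example (a sketch): if umin_value is 16 and umax_value is 23, then
 * tnum_range(16, 23) is (value=0x10; mask=0x7), so intersecting it with
 * var_off turns every bit above bit 2 into a known bit.
 */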
974 static void __reg_bound_offset(struct bpf_reg_state *reg)
975 {
976 	reg->var_off = tnum_intersect(reg->var_off,
977 				      tnum_range(reg->umin_value,
978 						 reg->umax_value));
979 }
980 
981 /* Reset the min/max bounds of a register */
982 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
983 {
984 	reg->smin_value = S64_MIN;
985 	reg->smax_value = S64_MAX;
986 	reg->umin_value = 0;
987 	reg->umax_value = U64_MAX;
988 
989 	/* constant backtracking is enabled for root only for now */
990 	reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
991 }
992 
993 /* Mark a register as having a completely unknown (scalar) value. */
994 static void __mark_reg_unknown(struct bpf_reg_state *reg)
995 {
996 	/*
997 	 * Clear type, id, off, and union(map_ptr, range) and
998 	 * padding between 'type' and union
999 	 */
1000 	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
1001 	reg->type = SCALAR_VALUE;
1002 	reg->var_off = tnum_unknown;
1003 	reg->frameno = 0;
1004 	__mark_reg_unbounded(reg);
1005 }
1006 
1007 static void mark_reg_unknown(struct bpf_verifier_env *env,
1008 			     struct bpf_reg_state *regs, u32 regno)
1009 {
1010 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1011 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
1012 		/* Something bad happened, let's kill all regs except FP */
1013 		for (regno = 0; regno < BPF_REG_FP; regno++)
1014 			__mark_reg_not_init(regs + regno);
1015 		return;
1016 	}
1017 	__mark_reg_unknown(regs + regno);
1018 }
1019 
1020 static void __mark_reg_not_init(struct bpf_reg_state *reg)
1021 {
1022 	__mark_reg_unknown(reg);
1023 	reg->type = NOT_INIT;
1024 }
1025 
1026 static void mark_reg_not_init(struct bpf_verifier_env *env,
1027 			      struct bpf_reg_state *regs, u32 regno)
1028 {
1029 	if (WARN_ON(regno >= MAX_BPF_REG)) {
1030 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
1031 		/* Something bad happened, let's kill all regs except FP */
1032 		for (regno = 0; regno < BPF_REG_FP; regno++)
1033 			__mark_reg_not_init(regs + regno);
1034 		return;
1035 	}
1036 	__mark_reg_not_init(regs + regno);
1037 }
1038 
1039 #define DEF_NOT_SUBREG	(0)
1040 static void init_reg_state(struct bpf_verifier_env *env,
1041 			   struct bpf_func_state *state)
1042 {
1043 	struct bpf_reg_state *regs = state->regs;
1044 	int i;
1045 
1046 	for (i = 0; i < MAX_BPF_REG; i++) {
1047 		mark_reg_not_init(env, regs, i);
1048 		regs[i].live = REG_LIVE_NONE;
1049 		regs[i].parent = NULL;
1050 		regs[i].subreg_def = DEF_NOT_SUBREG;
1051 	}
1052 
1053 	/* frame pointer */
1054 	regs[BPF_REG_FP].type = PTR_TO_STACK;
1055 	mark_reg_known_zero(env, regs, BPF_REG_FP);
1056 	regs[BPF_REG_FP].frameno = state->frameno;
1057 
1058 	/* 1st arg to a function */
1059 	regs[BPF_REG_1].type = PTR_TO_CTX;
1060 	mark_reg_known_zero(env, regs, BPF_REG_1);
1061 }
1062 
1063 #define BPF_MAIN_FUNC (-1)
1064 static void init_func_state(struct bpf_verifier_env *env,
1065 			    struct bpf_func_state *state,
1066 			    int callsite, int frameno, int subprogno)
1067 {
1068 	state->callsite = callsite;
1069 	state->frameno = frameno;
1070 	state->subprogno = subprogno;
1071 	init_reg_state(env, state);
1072 }
1073 
1074 enum reg_arg_type {
1075 	SRC_OP,		/* register is used as source operand */
1076 	DST_OP,		/* register is used as destination operand */
1077 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
1078 };
1079 
1080 static int cmp_subprogs(const void *a, const void *b)
1081 {
1082 	return ((struct bpf_subprog_info *)a)->start -
1083 	       ((struct bpf_subprog_info *)b)->start;
1084 }
1085 
1086 static int find_subprog(struct bpf_verifier_env *env, int off)
1087 {
1088 	struct bpf_subprog_info *p;
1089 
1090 	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1091 		    sizeof(env->subprog_info[0]), cmp_subprogs);
1092 	if (!p)
1093 		return -ENOENT;
1094 	return p - env->subprog_info;
1095 
1096 }
1097 
1098 static int add_subprog(struct bpf_verifier_env *env, int off)
1099 {
1100 	int insn_cnt = env->prog->len;
1101 	int ret;
1102 
1103 	if (off >= insn_cnt || off < 0) {
1104 		verbose(env, "call to invalid destination\n");
1105 		return -EINVAL;
1106 	}
1107 	ret = find_subprog(env, off);
1108 	if (ret >= 0)
1109 		return 0;
1110 	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
1111 		verbose(env, "too many subprograms\n");
1112 		return -E2BIG;
1113 	}
1114 	env->subprog_info[env->subprog_cnt++].start = off;
1115 	sort(env->subprog_info, env->subprog_cnt,
1116 	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
1117 	return 0;
1118 }
1119 
1120 static int check_subprogs(struct bpf_verifier_env *env)
1121 {
1122 	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
1123 	struct bpf_subprog_info *subprog = env->subprog_info;
1124 	struct bpf_insn *insn = env->prog->insnsi;
1125 	int insn_cnt = env->prog->len;
1126 
1127 	/* Add entry function. */
1128 	ret = add_subprog(env, 0);
1129 	if (ret < 0)
1130 		return ret;
1131 
1132 	/* determine subprog starts. The end is one before the next starts */
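	/* E.g. (a sketch): for a program of insns 0..9 where insn 3 is a
	 * pseudo call with imm = 2, the subprog starts become {0, 6} and the
	 * fake 'exit' subprog added below gets start = 10.
	 */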
1133 	for (i = 0; i < insn_cnt; i++) {
1134 		if (insn[i].code != (BPF_JMP | BPF_CALL))
1135 			continue;
1136 		if (insn[i].src_reg != BPF_PSEUDO_CALL)
1137 			continue;
1138 		if (!env->allow_ptr_leaks) {
1139 			verbose(env, "function calls to other bpf functions are allowed for root only\n");
1140 			return -EPERM;
1141 		}
1142 		ret = add_subprog(env, i + insn[i].imm + 1);
1143 		if (ret < 0)
1144 			return ret;
1145 	}
1146 
1147 	/* Add a fake 'exit' subprog which could simplify subprog iteration
1148 	 * logic. 'subprog_cnt' should not be increased.
1149 	 */
1150 	subprog[env->subprog_cnt].start = insn_cnt;
1151 
1152 	if (env->log.level & BPF_LOG_LEVEL2)
1153 		for (i = 0; i < env->subprog_cnt; i++)
1154 			verbose(env, "func#%d @%d\n", i, subprog[i].start);
1155 
1156 	/* now check that all jumps are within the same subprog */
1157 	subprog_start = subprog[cur_subprog].start;
1158 	subprog_end = subprog[cur_subprog + 1].start;
1159 	for (i = 0; i < insn_cnt; i++) {
1160 		u8 code = insn[i].code;
1161 
1162 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
1163 			goto next;
1164 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
1165 			goto next;
1166 		off = i + insn[i].off + 1;
1167 		if (off < subprog_start || off >= subprog_end) {
1168 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
1169 			return -EINVAL;
1170 		}
1171 next:
1172 		if (i == subprog_end - 1) {
1173 			/* to avoid fall-through from one subprog into another
1174 			 * the last insn of the subprog should be either exit
1175 			 * or unconditional jump back
1176 			 */
1177 			if (code != (BPF_JMP | BPF_EXIT) &&
1178 			    code != (BPF_JMP | BPF_JA)) {
1179 				verbose(env, "last insn is not an exit or jmp\n");
1180 				return -EINVAL;
1181 			}
1182 			subprog_start = subprog_end;
1183 			cur_subprog++;
1184 			if (cur_subprog < env->subprog_cnt)
1185 				subprog_end = subprog[cur_subprog + 1].start;
1186 		}
1187 	}
1188 	return 0;
1189 }
1190 
1191 /* Parentage chain of this register (or stack slot) should take care of all
1192  * issues like callee-saved registers, stack slot allocation time, etc.
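 *
 * A sketch of the walk below: if r6 is read in state S2 whose parent chain
 * is S1 then S0, and S1's r6 carries REG_LIVE_WRITTEN, the loop marks r6 as
 * live-read in S1 and stops there - the write in S1 screens the read from
 * S0, so S0's r6 stays unmarked.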
1193  */
1194 static int mark_reg_read(struct bpf_verifier_env *env,
1195 			 const struct bpf_reg_state *state,
1196 			 struct bpf_reg_state *parent, u8 flag)
1197 {
1198 	bool writes = parent == state->parent; /* Observe write marks */
1199 	int cnt = 0;
1200 
1201 	while (parent) {
1202 		/* if read wasn't screened by an earlier write ... */
1203 		if (writes && state->live & REG_LIVE_WRITTEN)
1204 			break;
1205 		if (parent->live & REG_LIVE_DONE) {
1206 			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1207 				reg_type_str[parent->type],
1208 				parent->var_off.value, parent->off);
1209 			return -EFAULT;
1210 		}
1211 		/* The first condition is more likely to be true than the
1212 		 * second, so check it first.
1213 		 */
1214 		if ((parent->live & REG_LIVE_READ) == flag ||
1215 		    parent->live & REG_LIVE_READ64)
1216 			/* The parentage chain never changes and
1217 			 * this parent was already marked as LIVE_READ.
1218 			 * There is no need to keep walking the chain again and
1219 			 * keep re-marking all parents as LIVE_READ.
1220 			 * This case happens when the same register is read
1221 			 * multiple times without writes into it in-between.
1222 			 * Also, if parent has the stronger REG_LIVE_READ64 set,
1223 			 * then no need to set the weak REG_LIVE_READ32.
1224 			 */
1225 			break;
1226 		/* ... then we depend on parent's value */
1227 		parent->live |= flag;
1228 		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
1229 		if (flag == REG_LIVE_READ64)
1230 			parent->live &= ~REG_LIVE_READ32;
1231 		state = parent;
1232 		parent = state->parent;
1233 		writes = true;
1234 		cnt++;
1235 	}
1236 
1237 	if (env->longest_mark_read_walk < cnt)
1238 		env->longest_mark_read_walk = cnt;
1239 	return 0;
1240 }
1241 
1242 /* This function is supposed to be used by the following 32-bit optimization
1243  * code only. It returns TRUE if the source or destination register operates
1244  * on 64 bits, otherwise it returns FALSE.
1245  */
1246 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
1247 		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
1248 {
1249 	u8 code, class, op;
1250 
1251 	code = insn->code;
1252 	class = BPF_CLASS(code);
1253 	op = BPF_OP(code);
1254 	if (class == BPF_JMP) {
1255 		/* BPF_EXIT for "main" will reach here. Return TRUE
1256 		 * conservatively.
1257 		 */
1258 		if (op == BPF_EXIT)
1259 			return true;
1260 		if (op == BPF_CALL) {
1261 			/* BPF to BPF call will reach here because of marking
1262 			 * caller saved clobbers with DST_OP_NO_MARK, for which we
1263 			 * don't care about the register def because they are
1264 			 * already marked as NOT_INIT.
1265 			 */
1266 			if (insn->src_reg == BPF_PSEUDO_CALL)
1267 				return false;
1268 			/* Helper call will reach here because of arg type
1269 			 * check, conservatively return TRUE.
1270 			 */
1271 			if (t == SRC_OP)
1272 				return true;
1273 
1274 			return false;
1275 		}
1276 	}
1277 
1278 	if (class == BPF_ALU64 || class == BPF_JMP ||
1279 	    /* BPF_END always uses BPF_ALU class. */
1280 	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
1281 		return true;
1282 
1283 	if (class == BPF_ALU || class == BPF_JMP32)
1284 		return false;
1285 
1286 	if (class == BPF_LDX) {
1287 		if (t != SRC_OP)
1288 			return BPF_SIZE(code) == BPF_DW;
1289 		/* LDX source must be ptr. */
1290 		return true;
1291 	}
1292 
1293 	if (class == BPF_STX) {
1294 		if (reg->type != SCALAR_VALUE)
1295 			return true;
1296 		return BPF_SIZE(code) == BPF_DW;
1297 	}
1298 
1299 	if (class == BPF_LD) {
1300 		u8 mode = BPF_MODE(code);
1301 
1302 		/* LD_IMM64 */
1303 		if (mode == BPF_IMM)
1304 			return true;
1305 
1306 		/* Both LD_IND and LD_ABS return 32-bit data. */
1307 		if (t != SRC_OP)
1308 			return  false;
1309 
1310 		/* Implicit ctx ptr. */
1311 		if (regno == BPF_REG_6)
1312 			return true;
1313 
1314 		/* Explicit source could be any width. */
1315 		return true;
1316 	}
1317 
1318 	if (class == BPF_ST)
1319 		/* The only source register for BPF_ST is a ptr. */
1320 		return true;
1321 
1322 	/* Conservatively return true at default. */
1323 	return true;
1324 }
1325 
1326 /* Return TRUE if INSN doesn't define an explicit value. */
1327 static bool insn_no_def(struct bpf_insn *insn)
1328 {
1329 	u8 class = BPF_CLASS(insn->code);
1330 
1331 	return (class == BPF_JMP || class == BPF_JMP32 ||
1332 		class == BPF_STX || class == BPF_ST);
1333 }
1334 
1335 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
1336 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
1337 {
1338 	if (insn_no_def(insn))
1339 		return false;
1340 
1341 	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
1342 }
1343 
1344 static void mark_insn_zext(struct bpf_verifier_env *env,
1345 			   struct bpf_reg_state *reg)
1346 {
1347 	s32 def_idx = reg->subreg_def;
1348 
1349 	if (def_idx == DEF_NOT_SUBREG)
1350 		return;
1351 
1352 	env->insn_aux_data[def_idx - 1].zext_dst = true;
1353 	/* The dst will be zero extended, so won't be sub-register anymore. */
1354 	reg->subreg_def = DEF_NOT_SUBREG;
1355 }
1356 
1357 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
1358 			 enum reg_arg_type t)
1359 {
1360 	struct bpf_verifier_state *vstate = env->cur_state;
1361 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1362 	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
1363 	struct bpf_reg_state *reg, *regs = state->regs;
1364 	bool rw64;
1365 
1366 	if (regno >= MAX_BPF_REG) {
1367 		verbose(env, "R%d is invalid\n", regno);
1368 		return -EINVAL;
1369 	}
1370 
1371 	reg = &regs[regno];
1372 	rw64 = is_reg64(env, insn, regno, reg, t);
1373 	if (t == SRC_OP) {
1374 		/* check whether register used as source operand can be read */
1375 		if (reg->type == NOT_INIT) {
1376 			verbose(env, "R%d !read_ok\n", regno);
1377 			return -EACCES;
1378 		}
1379 		/* We don't need to worry about FP liveness because it's read-only */
1380 		if (regno == BPF_REG_FP)
1381 			return 0;
1382 
1383 		if (rw64)
1384 			mark_insn_zext(env, reg);
1385 
1386 		return mark_reg_read(env, reg, reg->parent,
1387 				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
1388 	} else {
1389 		/* check whether register used as dest operand can be written to */
1390 		if (regno == BPF_REG_FP) {
1391 			verbose(env, "frame pointer is read only\n");
1392 			return -EACCES;
1393 		}
1394 		reg->live |= REG_LIVE_WRITTEN;
1395 		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
1396 		if (t == DST_OP)
1397 			mark_reg_unknown(env, regs, regno);
1398 	}
1399 	return 0;
1400 }
1401 
1402 /* for any branch, call, exit record the history of jmps in the given state */
1403 static int push_jmp_history(struct bpf_verifier_env *env,
1404 			    struct bpf_verifier_state *cur)
1405 {
1406 	u32 cnt = cur->jmp_history_cnt;
1407 	struct bpf_idx_pair *p;
1408 
1409 	cnt++;
1410 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
1411 	if (!p)
1412 		return -ENOMEM;
1413 	p[cnt - 1].idx = env->insn_idx;
1414 	p[cnt - 1].prev_idx = env->prev_insn_idx;
1415 	cur->jmp_history = p;
1416 	cur->jmp_history_cnt = cnt;
1417 	return 0;
1418 }
1419 
1420 /* Backtrack one insn at a time. If idx is not at the top of recorded
1421  * history then previous instruction came from straight line execution.
1422  */
1423 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
1424 			     u32 *history)
1425 {
1426 	u32 cnt = *history;
1427 
1428 	if (cnt && st->jmp_history[cnt - 1].idx == i) {
1429 		i = st->jmp_history[cnt - 1].prev_idx;
1430 		(*history)--;
1431 	} else {
1432 		i--;
1433 	}
1434 	return i;
1435 }
1436 
1437 /* For given verifier state backtrack_insn() is called from the last insn to
1438  * the first insn. Its purpose is to compute a bitmask of registers and
1439  * stack slots that need precision in the parent verifier state.
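 *
 * A small illustration (a sketch): for the sequence
 *    r1 = r2
 *    r3 += r1
 * if r3 is in the mask after the add, backtracking the add also puts r1 in
 * the mask, and backtracking the mov then replaces r1 with r2 in the mask.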
1440  */
1441 static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1442 			  u32 *reg_mask, u64 *stack_mask)
1443 {
1444 	const struct bpf_insn_cbs cbs = {
1445 		.cb_print	= verbose,
1446 		.private_data	= env,
1447 	};
1448 	struct bpf_insn *insn = env->prog->insnsi + idx;
1449 	u8 class = BPF_CLASS(insn->code);
1450 	u8 opcode = BPF_OP(insn->code);
1451 	u8 mode = BPF_MODE(insn->code);
1452 	u32 dreg = 1u << insn->dst_reg;
1453 	u32 sreg = 1u << insn->src_reg;
1454 	u32 spi;
1455 
1456 	if (insn->code == 0)
1457 		return 0;
1458 	if (env->log.level & BPF_LOG_LEVEL) {
1459 		verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1460 		verbose(env, "%d: ", idx);
1461 		print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1462 	}
1463 
1464 	if (class == BPF_ALU || class == BPF_ALU64) {
1465 		if (!(*reg_mask & dreg))
1466 			return 0;
1467 		if (opcode == BPF_MOV) {
1468 			if (BPF_SRC(insn->code) == BPF_X) {
1469 				/* dreg = sreg
1470 				 * dreg needs precision after this insn
1471 				 * sreg needs precision before this insn
1472 				 */
1473 				*reg_mask &= ~dreg;
1474 				*reg_mask |= sreg;
1475 			} else {
1476 				/* dreg = K
1477 				 * dreg needs precision after this insn.
1478 				 * Corresponding register is already marked
1479 				 * as precise=true in this verifier state.
1480 				 * No further markings in parent are necessary
1481 				 */
1482 				*reg_mask &= ~dreg;
1483 			}
1484 		} else {
1485 			if (BPF_SRC(insn->code) == BPF_X) {
1486 				/* dreg += sreg
1487 				 * both dreg and sreg need precision
1488 				 * before this insn
1489 				 */
1490 				*reg_mask |= sreg;
1491 			} /* else dreg += K
1492 			   * dreg still needs precision before this insn
1493 			   */
1494 		}
1495 	} else if (class == BPF_LDX) {
1496 		if (!(*reg_mask & dreg))
1497 			return 0;
1498 		*reg_mask &= ~dreg;
1499 
1500 		/* scalars can only be spilled into stack w/o losing precision.
1501 		 * Load from any other memory can be zero extended.
1502 		 * The desire to keep that precision is already indicated
1503 		 * by 'precise' mark in corresponding register of this state.
1504 		 * No further tracking necessary.
1505 		 */
1506 		if (insn->src_reg != BPF_REG_FP)
1507 			return 0;
1508 		if (BPF_SIZE(insn->code) != BPF_DW)
1509 			return 0;
1510 
1511 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
1512 		 * that [fp - off] slot contains scalar that needs to be
1513 		 * tracked with precision
1514 		 */
1515 		spi = (-insn->off - 1) / BPF_REG_SIZE;
1516 		if (spi >= 64) {
1517 			verbose(env, "BUG spi %d\n", spi);
1518 			WARN_ONCE(1, "verifier backtracking bug");
1519 			return -EFAULT;
1520 		}
1521 		*stack_mask |= 1ull << spi;
1522 	} else if (class == BPF_STX || class == BPF_ST) {
1523 		if (*reg_mask & dreg)
1524 			/* stx & st shouldn't be using _scalar_ dst_reg
1525 			 * to access memory. It means backtracking
1526 			 * encountered a case of pointer subtraction.
1527 			 */
1528 			return -ENOTSUPP;
1529 		/* scalars can only be spilled into stack */
1530 		if (insn->dst_reg != BPF_REG_FP)
1531 			return 0;
1532 		if (BPF_SIZE(insn->code) != BPF_DW)
1533 			return 0;
1534 		spi = (-insn->off - 1) / BPF_REG_SIZE;
1535 		if (spi >= 64) {
1536 			verbose(env, "BUG spi %d\n", spi);
1537 			WARN_ONCE(1, "verifier backtracking bug");
1538 			return -EFAULT;
1539 		}
1540 		if (!(*stack_mask & (1ull << spi)))
1541 			return 0;
1542 		*stack_mask &= ~(1ull << spi);
1543 		if (class == BPF_STX)
1544 			*reg_mask |= sreg;
1545 	} else if (class == BPF_JMP || class == BPF_JMP32) {
1546 		if (opcode == BPF_CALL) {
1547 			if (insn->src_reg == BPF_PSEUDO_CALL)
1548 				return -ENOTSUPP;
1549 			/* regular helper call sets R0 */
1550 			*reg_mask &= ~1;
1551 			if (*reg_mask & 0x3f) {
1552 				/* if backtracking was looking for registers R1-R5
1553 				 * they should have been found already.
1554 				 */
1555 				verbose(env, "BUG regs %x\n", *reg_mask);
1556 				WARN_ONCE(1, "verifier backtracking bug");
1557 				return -EFAULT;
1558 			}
1559 		} else if (opcode == BPF_EXIT) {
1560 			return -ENOTSUPP;
1561 		}
1562 	} else if (class == BPF_LD) {
1563 		if (!(*reg_mask & dreg))
1564 			return 0;
1565 		*reg_mask &= ~dreg;
1566 		/* It's ld_imm64 or ld_abs or ld_ind.
1567 		 * For ld_imm64 no further tracking of precision
1568 		 * into parent is necessary
1569 		 */
1570 		if (mode == BPF_IND || mode == BPF_ABS)
1571 			/* to be analyzed */
1572 			return -ENOTSUPP;
1573 	}
1574 	return 0;
1575 }
1576 
1577 /* the scalar precision tracking algorithm:
1578  * . at the start all registers have precise=false.
1579  * . scalar ranges are tracked as normal through alu and jmp insns.
1580  * . once precise value of the scalar register is used in:
1581  *   .  ptr + scalar alu
1582  *   . if (scalar cond K|scalar)
1583  *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
1584  *   backtrack through the verifier states and mark as precise all registers
1585  *   and stack slots with spilled constants that were used to compute these
1586  *   scalar registers.
1587  * . during state pruning two registers (or spilled stack slots)
1588  *   are equivalent if both are not precise.
1589  *
1590  * Note the verifier cannot simply walk register parentage chain,
1591  * since many different registers and stack slots could have been
1592  * used to compute single precise scalar.
1593  *
1594  * The approach of starting with precise=true for all registers and then
1595  * backtrack to mark a register as not precise when the verifier detects
1596  * that the program doesn't care about the specific value (e.g., when a helper
1597  * takes a register as an ARG_ANYTHING parameter) is not safe.
1598  *
1599  * It's ok to walk single parentage chain of the verifier states.
1600  * It's possible that this backtracking will go all the way till 1st insn.
1601  * All other branches will be explored for needing precision later.
1602  *
1603  * The backtracking needs to deal with cases like:
1604  *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1605  * r9 -= r8
1606  * r5 = r9
1607  * if r5 > 0x79f goto pc+7
1608  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1609  * r5 += 1
1610  * ...
1611  * call bpf_perf_event_output#25
1612  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1613  *
1614  * and this case:
1615  * r6 = 1
1616  * call foo // uses callee's r6 inside to compute r0
1617  * r0 += r6
1618  * if r0 == 0 goto
1619  *
1620  * to track above reg_mask/stack_mask needs to be independent for each frame.
1621  *
1622  * Also if parent's curframe > frame where backtracking started,
1623  * the verifier needs to mark registers in both frames, otherwise callees
1624  * may incorrectly prune callers. This is similar to
1625  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1626  *
1627  * For now backtracking falls back into conservative marking.
1628  * For now backtracking falls back into conservative marking.
 */
1629 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1630 				     struct bpf_verifier_state *st)
1631 {
1632 	struct bpf_func_state *func;
1633 	struct bpf_reg_state *reg;
1634 	int i, j;
1635 
1636 	/* big hammer: mark all scalars precise in this path.
1637 	 * pop_stack may still get !precise scalars.
1638 	 */
1639 	for (; st; st = st->parent)
1640 		for (i = 0; i <= st->curframe; i++) {
1641 			func = st->frame[i];
1642 			for (j = 0; j < BPF_REG_FP; j++) {
1643 				reg = &func->regs[j];
1644 				if (reg->type != SCALAR_VALUE)
1645 					continue;
1646 				reg->precise = true;
1647 			}
1648 			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1649 				if (func->stack[j].slot_type[0] != STACK_SPILL)
1650 					continue;
1651 				reg = &func->stack[j].spilled_ptr;
1652 				if (reg->type != SCALAR_VALUE)
1653 					continue;
1654 				reg->precise = true;
1655 			}
1656 		}
1657 }
1658 
1659 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1660 				  int spi)
1661 {
1662 	struct bpf_verifier_state *st = env->cur_state;
1663 	int first_idx = st->first_insn_idx;
1664 	int last_idx = env->insn_idx;
1665 	struct bpf_func_state *func;
1666 	struct bpf_reg_state *reg;
1667 	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1668 	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
1669 	bool skip_first = true;
1670 	bool new_marks = false;
1671 	int i, err;
1672 
1673 	if (!env->allow_ptr_leaks)
1674 		/* backtracking is root only for now */
1675 		return 0;
1676 
1677 	func = st->frame[st->curframe];
1678 	if (regno >= 0) {
1679 		reg = &func->regs[regno];
1680 		if (reg->type != SCALAR_VALUE) {
1681 			WARN_ONCE(1, "backtracking misuse");
1682 			return -EFAULT;
1683 		}
1684 		if (!reg->precise)
1685 			new_marks = true;
1686 		else
1687 			reg_mask = 0;
1688 		reg->precise = true;
1689 	}
1690 
1691 	while (spi >= 0) {
1692 		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
1693 			stack_mask = 0;
1694 			break;
1695 		}
1696 		reg = &func->stack[spi].spilled_ptr;
1697 		if (reg->type != SCALAR_VALUE) {
1698 			stack_mask = 0;
1699 			break;
1700 		}
1701 		if (!reg->precise)
1702 			new_marks = true;
1703 		else
1704 			stack_mask = 0;
1705 		reg->precise = true;
1706 		break;
1707 	}
1708 
1709 	if (!new_marks)
1710 		return 0;
1711 	if (!reg_mask && !stack_mask)
1712 		return 0;
1713 	for (;;) {
1714 		DECLARE_BITMAP(mask, 64);
1715 		u32 history = st->jmp_history_cnt;
1716 
1717 		if (env->log.level & BPF_LOG_LEVEL)
1718 			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
1719 		for (i = last_idx;;) {
1720 			if (skip_first) {
1721 				err = 0;
1722 				skip_first = false;
1723 			} else {
1724 				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
1725 			}
1726 			if (err == -ENOTSUPP) {
1727 				mark_all_scalars_precise(env, st);
1728 				return 0;
1729 			} else if (err) {
1730 				return err;
1731 			}
1732 			if (!reg_mask && !stack_mask)
1733 				/* Found assignment(s) into tracked register in this state.
1734 				 * Since this state is already marked, just return.
1735 				 * Nothing to be tracked further in the parent state.
1736 				 */
1737 				return 0;
1738 			if (i == first_idx)
1739 				break;
1740 			i = get_prev_insn_idx(st, i, &history);
1741 			if (i >= env->prog->len) {
1742 				/* This can happen if backtracking reached insn 0
1743 			 * and there are still reg_mask or stack_mask bits
1744 			 * left to backtrack.
1745 			 * It means the backtracking missed the spot where a
1746 			 * particular register was initialized with a constant.
1747 				 */
1748 				verbose(env, "BUG backtracking idx %d\n", i);
1749 				WARN_ONCE(1, "verifier backtracking bug");
1750 				return -EFAULT;
1751 			}
1752 		}
1753 		st = st->parent;
1754 		if (!st)
1755 			break;
1756 
1757 		new_marks = false;
1758 		func = st->frame[st->curframe];
1759 		bitmap_from_u64(mask, reg_mask);
1760 		for_each_set_bit(i, mask, 32) {
1761 			reg = &func->regs[i];
1762 			if (reg->type != SCALAR_VALUE) {
1763 				reg_mask &= ~(1u << i);
1764 				continue;
1765 			}
1766 			if (!reg->precise)
1767 				new_marks = true;
1768 			reg->precise = true;
1769 		}
1770 
1771 		bitmap_from_u64(mask, stack_mask);
1772 		for_each_set_bit(i, mask, 64) {
1773 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
1774 				/* This can happen if backtracking
1775 				 * is propagating stack precision where
1776 				 * caller has larger stack frame
1777 				 * than callee, but backtrack_insn() should
1778 				 * have returned -ENOTSUPP.
1779 				 */
1780 				verbose(env, "BUG spi %d stack_size %d\n",
1781 					i, func->allocated_stack);
1782 				WARN_ONCE(1, "verifier backtracking bug");
1783 				return -EFAULT;
1784 			}
1785 
1786 			if (func->stack[i].slot_type[0] != STACK_SPILL) {
1787 				stack_mask &= ~(1ull << i);
1788 				continue;
1789 			}
1790 			reg = &func->stack[i].spilled_ptr;
1791 			if (reg->type != SCALAR_VALUE) {
1792 				stack_mask &= ~(1ull << i);
1793 				continue;
1794 			}
1795 			if (!reg->precise)
1796 				new_marks = true;
1797 			reg->precise = true;
1798 		}
1799 		if (env->log.level & BPF_LOG_LEVEL) {
1800 			print_verifier_state(env, func);
1801 			verbose(env, "parent %s regs=%x stack=%llx marks\n",
1802 				new_marks ? "didn't have" : "already had",
1803 				reg_mask, stack_mask);
1804 		}
1805 
1806 		if (!reg_mask && !stack_mask)
1807 			break;
1808 		if (!new_marks)
1809 			break;
1810 
1811 		last_idx = st->last_insn_idx;
1812 		first_idx = st->first_insn_idx;
1813 	}
1814 	return 0;
1815 }
1816 
1817 static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
1818 {
1819 	return __mark_chain_precision(env, regno, -1);
1820 }
1821 
1822 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
1823 {
1824 	return __mark_chain_precision(env, -1, spi);
1825 }
1826 
1827 static bool is_spillable_regtype(enum bpf_reg_type type)
1828 {
1829 	switch (type) {
1830 	case PTR_TO_MAP_VALUE:
1831 	case PTR_TO_MAP_VALUE_OR_NULL:
1832 	case PTR_TO_STACK:
1833 	case PTR_TO_CTX:
1834 	case PTR_TO_PACKET:
1835 	case PTR_TO_PACKET_META:
1836 	case PTR_TO_PACKET_END:
1837 	case PTR_TO_FLOW_KEYS:
1838 	case CONST_PTR_TO_MAP:
1839 	case PTR_TO_SOCKET:
1840 	case PTR_TO_SOCKET_OR_NULL:
1841 	case PTR_TO_SOCK_COMMON:
1842 	case PTR_TO_SOCK_COMMON_OR_NULL:
1843 	case PTR_TO_TCP_SOCK:
1844 	case PTR_TO_TCP_SOCK_OR_NULL:
1845 	case PTR_TO_XDP_SOCK:
1846 		return true;
1847 	default:
1848 		return false;
1849 	}
1850 }
1851 
1852 /* Does this register contain a constant zero? */
1853 static bool register_is_null(struct bpf_reg_state *reg)
1854 {
1855 	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1856 }
1857 
1858 static bool register_is_const(struct bpf_reg_state *reg)
1859 {
1860 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
1861 }
1862 
1863 static void save_register_state(struct bpf_func_state *state,
1864 				int spi, struct bpf_reg_state *reg)
1865 {
1866 	int i;
1867 
1868 	state->stack[spi].spilled_ptr = *reg;
1869 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1870 
1871 	for (i = 0; i < BPF_REG_SIZE; i++)
1872 		state->stack[spi].slot_type[i] = STACK_SPILL;
1873 }
1874 
1875 /* check_stack_read/write functions track spill/fill of registers,
1876  * stack boundary and alignment are checked in check_mem_access()
1877  */
1878 static int check_stack_write(struct bpf_verifier_env *env,
1879 			     struct bpf_func_state *state, /* func where register points to */
1880 			     int off, int size, int value_regno, int insn_idx)
1881 {
1882 	struct bpf_func_state *cur; /* state of the current function */
1883 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1884 	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
1885 	struct bpf_reg_state *reg = NULL;
1886 
1887 	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1888 				 state->acquired_refs, true);
1889 	if (err)
1890 		return err;
1891 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1892 	 * so it's an aligned access and [off, off + size) is within stack limits
1893 	 */
1894 	if (!env->allow_ptr_leaks &&
1895 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
1896 	    size != BPF_REG_SIZE) {
1897 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
1898 		return -EACCES;
1899 	}
1900 
1901 	cur = env->cur_state->frame[env->cur_state->curframe];
1902 	if (value_regno >= 0)
1903 		reg = &cur->regs[value_regno];
1904 
1905 	if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
1906 	    !register_is_null(reg) && env->allow_ptr_leaks) {
1907 		if (dst_reg != BPF_REG_FP) {
1908 			/* The backtracking logic can only recognize explicit
1909 			 * stack slot address like [fp - 8]. Other spills of a
1910 			 * scalar via a different register have to be conservative.
1911 			 * Backtrack from here and mark all registers as precise
1912 			 * that contributed into 'reg' being a constant.
1913 			 */
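			/* A hypothetical example (editorial): a spill like
			 *   *(u64 *)(r10 - 8) = r1
			 * uses fp directly and can be found by backtracking,
			 * while
			 *   r2 = r10
			 *   r2 += -8
			 *   *(u64 *)(r2 + 0) = r1
			 * spills through r2, so the constant in r1 is marked
			 * precise conservatively right here.
			 */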
1914 			err = mark_chain_precision(env, value_regno);
1915 			if (err)
1916 				return err;
1917 		}
1918 		save_register_state(state, spi, reg);
1919 	} else if (reg && is_spillable_regtype(reg->type)) {
1920 		/* register containing pointer is being spilled into stack */
1921 		if (size != BPF_REG_SIZE) {
1922 			verbose_linfo(env, insn_idx, "; ");
1923 			verbose(env, "invalid size of register spill\n");
1924 			return -EACCES;
1925 		}
1926 
1927 		if (state != cur && reg->type == PTR_TO_STACK) {
1928 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
1929 			return -EINVAL;
1930 		}
1931 
1932 		if (!env->allow_ptr_leaks) {
1933 			bool sanitize = false;
1934 
1935 			if (state->stack[spi].slot_type[0] == STACK_SPILL &&
1936 			    register_is_const(&state->stack[spi].spilled_ptr))
1937 				sanitize = true;
1938 			for (i = 0; i < BPF_REG_SIZE; i++)
1939 				if (state->stack[spi].slot_type[i] == STACK_MISC) {
1940 					sanitize = true;
1941 					break;
1942 				}
1943 			if (sanitize) {
1944 				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
1945 				int soff = (-spi - 1) * BPF_REG_SIZE;
1946 
1947 				/* detected reuse of integer stack slot with a pointer
1948 				 * which means either llvm is reusing the stack slot or
1949 				 * an attacker is trying to exploit CVE-2018-3639
1950 				 * (speculative store bypass)
1951 				 * Have to sanitize that slot with preemptive
1952 				 * store of zero.
1953 				 */
1954 				if (*poff && *poff != soff) {
1955 					/* disallow programs where single insn stores
1956 					 * into two different stack slots, since verifier
1957 					 * cannot sanitize them
1958 					 */
1959 					verbose(env,
1960 						"insn %d cannot access two stack slots fp%d and fp%d",
1961 						insn_idx, *poff, soff);
1962 					return -EINVAL;
1963 				}
1964 				*poff = soff;
1965 			}
1966 		}
1967 		save_register_state(state, spi, reg);
1968 	} else {
1969 		u8 type = STACK_MISC;
1970 
1971 		/* regular write of data into stack destroys any spilled ptr */
1972 		state->stack[spi].spilled_ptr.type = NOT_INIT;
1973 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
1974 		if (state->stack[spi].slot_type[0] == STACK_SPILL)
1975 			for (i = 0; i < BPF_REG_SIZE; i++)
1976 				state->stack[spi].slot_type[i] = STACK_MISC;
1977 
1978 		/* only mark the slot as written if all 8 bytes were written,
1979 		 * otherwise read propagation may incorrectly stop too soon
1980 		 * when stack slots are partially written.
1981 		 * This heuristic means that read propagation will be
1982 		 * conservative, since it will add reg_live_read marks
1983 		 * to stack slots all the way to the first state when a program
1984 		 * writes+reads less than 8 bytes.
1985 		 */
1986 		if (size == BPF_REG_SIZE)
1987 			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1988 
1989 		/* when we zero initialize stack slots mark them as such */
1990 		if (reg && register_is_null(reg)) {
1991 			/* backtracking doesn't work for STACK_ZERO yet. */
1992 			err = mark_chain_precision(env, value_regno);
1993 			if (err)
1994 				return err;
1995 			type = STACK_ZERO;
1996 		}
1997 
1998 		/* Mark slots affected by this stack write. */
1999 		for (i = 0; i < size; i++)
2000 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
2001 				type;
2002 	}
2003 	return 0;
2004 }
2005 
2006 static int check_stack_read(struct bpf_verifier_env *env,
2007 			    struct bpf_func_state *reg_state /* func where register points to */,
2008 			    int off, int size, int value_regno)
2009 {
2010 	struct bpf_verifier_state *vstate = env->cur_state;
2011 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2012 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
2013 	struct bpf_reg_state *reg;
2014 	u8 *stype;
2015 
2016 	if (reg_state->allocated_stack <= slot) {
2017 		verbose(env, "invalid read from stack off %d+0 size %d\n",
2018 			off, size);
2019 		return -EACCES;
2020 	}
2021 	stype = reg_state->stack[spi].slot_type;
2022 	reg = &reg_state->stack[spi].spilled_ptr;
2023 
2024 	if (stype[0] == STACK_SPILL) {
2025 		if (size != BPF_REG_SIZE) {
2026 			if (reg->type != SCALAR_VALUE) {
2027 				verbose_linfo(env, env->insn_idx, "; ");
2028 				verbose(env, "invalid size of register fill\n");
2029 				return -EACCES;
2030 			}
2031 			if (value_regno >= 0) {
2032 				mark_reg_unknown(env, state->regs, value_regno);
2033 				state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2034 			}
2035 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2036 			return 0;
2037 		}
2038 		for (i = 1; i < BPF_REG_SIZE; i++) {
2039 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
2040 				verbose(env, "corrupted spill memory\n");
2041 				return -EACCES;
2042 			}
2043 		}
2044 
2045 		if (value_regno >= 0) {
2046 			/* restore register state from stack */
2047 			state->regs[value_regno] = *reg;
2048 			/* mark reg as written since spilled pointer state likely
2049 			 * has its liveness marks cleared by is_state_visited()
2050 			 * which resets stack/reg liveness for state transitions
2051 			 */
2052 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2053 		}
2054 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2055 	} else {
2056 		int zeros = 0;
2057 
2058 		for (i = 0; i < size; i++) {
2059 			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2060 				continue;
2061 			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2062 				zeros++;
2063 				continue;
2064 			}
2065 			verbose(env, "invalid read from stack off %d+%d size %d\n",
2066 				off, i, size);
2067 			return -EACCES;
2068 		}
2069 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2070 		if (value_regno >= 0) {
2071 			if (zeros == size) {
2072 				/* any size read into register is zero extended,
2073 				 * so the whole register == const_zero
2074 				 */
2075 				__mark_reg_const_zero(&state->regs[value_regno]);
2076 				/* backtracking doesn't support STACK_ZERO yet,
2077 				 * so mark it precise here, so that later
2078 				 * backtracking can stop here.
2079 				 * Backtracking may not need this if this register
2080 				 * doesn't participate in pointer adjustment.
2081 				 * Forward propagation of precise flag is not
2082 				 * necessary either. This mark is only to stop
2083 				 * backtracking. Any register that contributed
2084 				 * to const 0 was marked precise before spill.
2085 				 */
2086 				state->regs[value_regno].precise = true;
2087 			} else {
2088 				/* have read misc data from the stack */
2089 				mark_reg_unknown(env, state->regs, value_regno);
2090 			}
2091 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2092 		}
2093 	}
2094 	return 0;
2095 }
2096 
2097 static int check_stack_access(struct bpf_verifier_env *env,
2098 			      const struct bpf_reg_state *reg,
2099 			      int off, int size)
2100 {
2101 	/* Stack accesses must be at a fixed offset, so that we
2102 	 * can determine what type of data is returned. See
2103 	 * check_stack_read().
2104 	 */
2105 	if (!tnum_is_const(reg->var_off)) {
2106 		char tn_buf[48];
2107 
2108 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2109 		verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
2110 			tn_buf, off, size);
2111 		return -EACCES;
2112 	}
2113 
2114 	if (off >= 0 || off < -MAX_BPF_STACK) {
2115 		verbose(env, "invalid stack off=%d size=%d\n", off, size);
2116 		return -EACCES;
2117 	}
2118 
2119 	return 0;
2120 }
2121 
2122 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2123 				 int off, int size, enum bpf_access_type type)
2124 {
2125 	struct bpf_reg_state *regs = cur_regs(env);
2126 	struct bpf_map *map = regs[regno].map_ptr;
2127 	u32 cap = bpf_map_flags_to_cap(map);
2128 
2129 	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2130 		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2131 			map->value_size, off, size);
2132 		return -EACCES;
2133 	}
2134 
2135 	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2136 		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2137 			map->value_size, off, size);
2138 		return -EACCES;
2139 	}
2140 
2141 	return 0;
2142 }
2143 
2144 /* check read/write into map element returned by bpf_map_lookup_elem() */
2145 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
2146 			      int size, bool zero_size_allowed)
2147 {
2148 	struct bpf_reg_state *regs = cur_regs(env);
2149 	struct bpf_map *map = regs[regno].map_ptr;
2150 
2151 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2152 	    off + size > map->value_size) {
2153 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
2154 			map->value_size, off, size);
2155 		return -EACCES;
2156 	}
2157 	return 0;
2158 }
2159 
2160 /* check read/write into a map element with possible variable offset */
2161 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
2162 			    int off, int size, bool zero_size_allowed)
2163 {
2164 	struct bpf_verifier_state *vstate = env->cur_state;
2165 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2166 	struct bpf_reg_state *reg = &state->regs[regno];
2167 	int err;
2168 
2169 	/* We may have adjusted the register to this map value, so we
2170 	 * need to try adding each of min_value and max_value to off
2171 	 * to make sure our theoretical access will be safe.
2172 	 */
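	/* Editorial example: for a map with value_size=32, a 4-byte access at
	 * off=0 through a register with umax_value=30 is rejected below, since
	 * 30 + 0 + 4 > 32 ("max value is outside of the array range").
	 */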
2173 	if (env->log.level & BPF_LOG_LEVEL)
2174 		print_verifier_state(env, state);
2175 
2176 	/* The minimum value is only important with signed
2177 	 * comparisons where we can't assume the floor of a
2178 	 * value is 0.  If we are using signed variables for our
2179 	 * indexes we need to make sure that whatever we use
2180 	 * will have a set floor within our range.
2181 	 */
2182 	if (reg->smin_value < 0 &&
2183 	    (reg->smin_value == S64_MIN ||
2184 	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2185 	      reg->smin_value + off < 0)) {
2186 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2187 			regno);
2188 		return -EACCES;
2189 	}
2190 	err = __check_map_access(env, regno, reg->smin_value + off, size,
2191 				 zero_size_allowed);
2192 	if (err) {
2193 		verbose(env, "R%d min value is outside of the array range\n",
2194 			regno);
2195 		return err;
2196 	}
2197 
2198 	/* If we haven't set a max value then we need to bail since we can't be
2199 	 * sure we won't do bad things.
2200 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
2201 	 */
2202 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
2203 		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
2204 			regno);
2205 		return -EACCES;
2206 	}
2207 	err = __check_map_access(env, regno, reg->umax_value + off, size,
2208 				 zero_size_allowed);
2209 	if (err)
2210 		verbose(env, "R%d max value is outside of the array range\n",
2211 			regno);
2212 
2213 	if (map_value_has_spin_lock(reg->map_ptr)) {
2214 		u32 lock = reg->map_ptr->spin_lock_off;
2215 
2216 		/* if any part of struct bpf_spin_lock can be touched by
2217 		 * load/store reject this program.
2218 		 * To check that [x1, x2) overlaps with [y1, y2)
2219 		 * it is sufficient to check x1 < y2 && y1 < x2.
2220 		 */
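		/* Editorial example: an access covering [8, 12) against a lock
		 * at [16, 20): 8 < 20 holds but 16 < 12 does not, so there is
		 * no overlap and the access is allowed.
		 */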
2221 		if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2222 		     lock < reg->umax_value + off + size) {
2223 			verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2224 			return -EACCES;
2225 		}
2226 	}
2227 	return err;
2228 }
2229 
2230 #define MAX_PACKET_OFF 0xffff
2231 
2232 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
2233 				       const struct bpf_call_arg_meta *meta,
2234 				       enum bpf_access_type t)
2235 {
2236 	switch (env->prog->type) {
2237 	/* Program types with direct read access only go here! */
2238 	case BPF_PROG_TYPE_LWT_IN:
2239 	case BPF_PROG_TYPE_LWT_OUT:
2240 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2241 	case BPF_PROG_TYPE_SK_REUSEPORT:
2242 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2243 	case BPF_PROG_TYPE_CGROUP_SKB:
2244 		if (t == BPF_WRITE)
2245 			return false;
2246 		/* fallthrough */
2247 
2248 	/* Program types with direct read + write access go here! */
2249 	case BPF_PROG_TYPE_SCHED_CLS:
2250 	case BPF_PROG_TYPE_SCHED_ACT:
2251 	case BPF_PROG_TYPE_XDP:
2252 	case BPF_PROG_TYPE_LWT_XMIT:
2253 	case BPF_PROG_TYPE_SK_SKB:
2254 	case BPF_PROG_TYPE_SK_MSG:
2255 		if (meta)
2256 			return meta->pkt_access;
2257 
2258 		env->seen_direct_write = true;
2259 		return true;
2260 
2261 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2262 		if (t == BPF_WRITE)
2263 			env->seen_direct_write = true;
2264 
2265 		return true;
2266 
2267 	default:
2268 		return false;
2269 	}
2270 }
2271 
2272 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
2273 				 int off, int size, bool zero_size_allowed)
2274 {
2275 	struct bpf_reg_state *regs = cur_regs(env);
2276 	struct bpf_reg_state *reg = &regs[regno];
2277 
2278 	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2279 	    (u64)off + size > reg->range) {
2280 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
2281 			off, size, regno, reg->id, reg->off, reg->range);
2282 		return -EACCES;
2283 	}
2284 	return 0;
2285 }
2286 
2287 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
2288 			       int size, bool zero_size_allowed)
2289 {
2290 	struct bpf_reg_state *regs = cur_regs(env);
2291 	struct bpf_reg_state *reg = &regs[regno];
2292 	int err;
2293 
2294 	/* We may have added a variable offset to the packet pointer; but any
2295 	 * reg->range we have comes after that.  We are only checking the fixed
2296 	 * offset.
2297 	 */
2298 
2299 	/* We don't allow negative numbers, because we aren't tracking enough
2300 	 * detail to prove they're safe.
2301 	 */
2302 	if (reg->smin_value < 0) {
2303 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2304 			regno);
2305 		return -EACCES;
2306 	}
2307 	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
2308 	if (err) {
2309 		verbose(env, "R%d offset is outside of the packet\n", regno);
2310 		return err;
2311 	}
2312 
2313 	/* __check_packet_access has made sure "off + size - 1" is within u16.
2314 	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2315 	 * otherwise find_good_pkt_pointers would have refused to set range info,
2316 	 * in which case __check_packet_access would have rejected this pkt access.
2317 	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2318 	 */
2319 	env->prog->aux->max_pkt_offset =
2320 		max_t(u32, env->prog->aux->max_pkt_offset,
2321 		      off + reg->umax_value + size - 1);
2322 
2323 	return err;
2324 }
2325 
2326 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
2327 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
2328 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
2329 {
2330 	struct bpf_insn_access_aux info = {
2331 		.reg_type = *reg_type,
2332 	};
2333 
2334 	if (env->ops->is_valid_access &&
2335 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
2336 		/* A non-zero info.ctx_field_size indicates that this field is a
2337 		 * candidate for later verifier transformation to load the whole
2338 		 * field and then apply a mask when accessed with a narrower
2339 		 * access than actual ctx access size. A zero info.ctx_field_size
2340 		 * will only allow for whole field access and rejects any other
2341 		 * type of narrower access.
2342 		 */
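		/* Editorial example: a 1-byte read of a 4-byte ctx field can
		 * later be rewritten by the verifier into a 4-byte load plus
		 * shift/mask, provided the callback reported ctx_field_size=4.
		 */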
2343 		*reg_type = info.reg_type;
2344 
2345 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
2346 		/* remember the offset of last byte accessed in ctx */
2347 		if (env->prog->aux->max_ctx_offset < off + size)
2348 			env->prog->aux->max_ctx_offset = off + size;
2349 		return 0;
2350 	}
2351 
2352 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
2353 	return -EACCES;
2354 }
2355 
2356 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2357 				  int size)
2358 {
2359 	if (size < 0 || off < 0 ||
2360 	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
2361 		verbose(env, "invalid access to flow keys off=%d size=%d\n",
2362 			off, size);
2363 		return -EACCES;
2364 	}
2365 	return 0;
2366 }
2367 
2368 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2369 			     u32 regno, int off, int size,
2370 			     enum bpf_access_type t)
2371 {
2372 	struct bpf_reg_state *regs = cur_regs(env);
2373 	struct bpf_reg_state *reg = &regs[regno];
2374 	struct bpf_insn_access_aux info = {};
2375 	bool valid;
2376 
2377 	if (reg->smin_value < 0) {
2378 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2379 			regno);
2380 		return -EACCES;
2381 	}
2382 
2383 	switch (reg->type) {
2384 	case PTR_TO_SOCK_COMMON:
2385 		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2386 		break;
2387 	case PTR_TO_SOCKET:
2388 		valid = bpf_sock_is_valid_access(off, size, t, &info);
2389 		break;
2390 	case PTR_TO_TCP_SOCK:
2391 		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2392 		break;
2393 	case PTR_TO_XDP_SOCK:
2394 		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2395 		break;
2396 	default:
2397 		valid = false;
2398 	}
2399 
2400 
2401 	if (valid) {
2402 		env->insn_aux_data[insn_idx].ctx_field_size =
2403 			info.ctx_field_size;
2404 		return 0;
2405 	}
2406 
2407 	verbose(env, "R%d invalid %s access off=%d size=%d\n",
2408 		regno, reg_type_str[reg->type], off, size);
2409 
2410 	return -EACCES;
2411 }
2412 
2413 static bool __is_pointer_value(bool allow_ptr_leaks,
2414 			       const struct bpf_reg_state *reg)
2415 {
2416 	if (allow_ptr_leaks)
2417 		return false;
2418 
2419 	return reg->type != SCALAR_VALUE;
2420 }
2421 
2422 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2423 {
2424 	return cur_regs(env) + regno;
2425 }
2426 
2427 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2428 {
2429 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
2430 }
2431 
2432 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2433 {
2434 	const struct bpf_reg_state *reg = reg_state(env, regno);
2435 
2436 	return reg->type == PTR_TO_CTX;
2437 }
2438 
2439 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2440 {
2441 	const struct bpf_reg_state *reg = reg_state(env, regno);
2442 
2443 	return type_is_sk_pointer(reg->type);
2444 }
2445 
2446 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2447 {
2448 	const struct bpf_reg_state *reg = reg_state(env, regno);
2449 
2450 	return type_is_pkt_pointer(reg->type);
2451 }
2452 
2453 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2454 {
2455 	const struct bpf_reg_state *reg = reg_state(env, regno);
2456 
2457 	/* Separate from is_ctx_reg() since we still want to allow BPF_ST here. */
2458 	return reg->type == PTR_TO_FLOW_KEYS;
2459 }
2460 
2461 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2462 				   const struct bpf_reg_state *reg,
2463 				   int off, int size, bool strict)
2464 {
2465 	struct tnum reg_off;
2466 	int ip_align;
2467 
2468 	/* Byte size accesses are always allowed. */
2469 	if (!strict || size == 1)
2470 		return 0;
2471 
2472 	/* For platforms that do not have a Kconfig enabling
2473 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2474 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
2475 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2476 	 * to this code only in strict mode where we want to emulate
2477 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
2478 	 * unconditional IP align value of '2'.
2479 	 */
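	/* Editorial example (assuming reg->off and var_off are zero): with the
	 * emulated NET_IP_ALIGN of 2, a 2-byte load at packet offset 12 checks
	 * 2 + 12 = 14 and passes, while offset 13 yields 15 and is rejected as
	 * misaligned.
	 */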
2480 	ip_align = 2;
2481 
2482 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2483 	if (!tnum_is_aligned(reg_off, size)) {
2484 		char tn_buf[48];
2485 
2486 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2487 		verbose(env,
2488 			"misaligned packet access off %d+%s+%d+%d size %d\n",
2489 			ip_align, tn_buf, reg->off, off, size);
2490 		return -EACCES;
2491 	}
2492 
2493 	return 0;
2494 }
2495 
2496 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2497 				       const struct bpf_reg_state *reg,
2498 				       const char *pointer_desc,
2499 				       int off, int size, bool strict)
2500 {
2501 	struct tnum reg_off;
2502 
2503 	/* Byte size accesses are always allowed. */
2504 	if (!strict || size == 1)
2505 		return 0;
2506 
2507 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2508 	if (!tnum_is_aligned(reg_off, size)) {
2509 		char tn_buf[48];
2510 
2511 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2512 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
2513 			pointer_desc, tn_buf, reg->off, off, size);
2514 		return -EACCES;
2515 	}
2516 
2517 	return 0;
2518 }
2519 
2520 static int check_ptr_alignment(struct bpf_verifier_env *env,
2521 			       const struct bpf_reg_state *reg, int off,
2522 			       int size, bool strict_alignment_once)
2523 {
2524 	bool strict = env->strict_alignment || strict_alignment_once;
2525 	const char *pointer_desc = "";
2526 
2527 	switch (reg->type) {
2528 	case PTR_TO_PACKET:
2529 	case PTR_TO_PACKET_META:
2530 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
2531 		 * right in front, treat it the very same way.
2532 		 */
2533 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
2534 	case PTR_TO_FLOW_KEYS:
2535 		pointer_desc = "flow keys ";
2536 		break;
2537 	case PTR_TO_MAP_VALUE:
2538 		pointer_desc = "value ";
2539 		break;
2540 	case PTR_TO_CTX:
2541 		pointer_desc = "context ";
2542 		break;
2543 	case PTR_TO_STACK:
2544 		pointer_desc = "stack ";
2545 		/* The stack spill tracking logic in check_stack_write()
2546 		 * and check_stack_read() relies on stack accesses being
2547 		 * aligned.
2548 		 */
2549 		strict = true;
2550 		break;
2551 	case PTR_TO_SOCKET:
2552 		pointer_desc = "sock ";
2553 		break;
2554 	case PTR_TO_SOCK_COMMON:
2555 		pointer_desc = "sock_common ";
2556 		break;
2557 	case PTR_TO_TCP_SOCK:
2558 		pointer_desc = "tcp_sock ";
2559 		break;
2560 	case PTR_TO_XDP_SOCK:
2561 		pointer_desc = "xdp_sock ";
2562 		break;
2563 	default:
2564 		break;
2565 	}
2566 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2567 					   strict);
2568 }
2569 
2570 static int update_stack_depth(struct bpf_verifier_env *env,
2571 			      const struct bpf_func_state *func,
2572 			      int off)
2573 {
2574 	u16 stack = env->subprog_info[func->subprogno].stack_depth;
2575 
2576 	if (stack >= -off)
2577 		return 0;
2578 
2579 	/* update known max for given subprogram */
2580 	env->subprog_info[func->subprogno].stack_depth = -off;
2581 	return 0;
2582 }
2583 
2584 /* starting from main bpf function walk all instructions of the function
2585  * and recursively walk all callees that the given function can call.
2586  * Ignore jump and exit insns.
2587  * Since recursion is prevented by check_cfg() this algorithm
2588  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2589  */
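/* Editorial example: a main function using 64 bytes of stack that calls a
 * subprog using 40 bytes yields a combined depth of round_up(64, 32) +
 * round_up(40, 32) = 64 + 64 = 128, which must stay within MAX_BPF_STACK.
 */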
2590 static int check_max_stack_depth(struct bpf_verifier_env *env)
2591 {
2592 	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
2593 	struct bpf_subprog_info *subprog = env->subprog_info;
2594 	struct bpf_insn *insn = env->prog->insnsi;
2595 	int ret_insn[MAX_CALL_FRAMES];
2596 	int ret_prog[MAX_CALL_FRAMES];
2597 
2598 process_func:
2599 	/* round up to 32 bytes, since this is the granularity
2600 	 * of the interpreter stack size
2601 	 */
2602 	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
2603 	if (depth > MAX_BPF_STACK) {
2604 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
2605 			frame + 1, depth);
2606 		return -EACCES;
2607 	}
2608 continue_func:
2609 	subprog_end = subprog[idx + 1].start;
2610 	for (; i < subprog_end; i++) {
2611 		if (insn[i].code != (BPF_JMP | BPF_CALL))
2612 			continue;
2613 		if (insn[i].src_reg != BPF_PSEUDO_CALL)
2614 			continue;
2615 		/* remember insn and function to return to */
2616 		ret_insn[frame] = i + 1;
2617 		ret_prog[frame] = idx;
2618 
2619 		/* find the callee */
2620 		i = i + insn[i].imm + 1;
2621 		idx = find_subprog(env, i);
2622 		if (idx < 0) {
2623 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2624 				  i);
2625 			return -EFAULT;
2626 		}
2627 		frame++;
2628 		if (frame >= MAX_CALL_FRAMES) {
2629 			verbose(env, "the call stack of %d frames is too deep!\n",
2630 				frame);
2631 			return -E2BIG;
2632 		}
2633 		goto process_func;
2634 	}
2635 	/* end of for() loop means the last insn of the 'subprog'
2636 	 * was reached. Doesn't matter whether it was JA or EXIT
2637 	 */
2638 	if (frame == 0)
2639 		return 0;
2640 	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
2641 	frame--;
2642 	i = ret_insn[frame];
2643 	idx = ret_prog[frame];
2644 	goto continue_func;
2645 }
2646 
2647 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2648 static int get_callee_stack_depth(struct bpf_verifier_env *env,
2649 				  const struct bpf_insn *insn, int idx)
2650 {
2651 	int start = idx + insn->imm + 1, subprog;
2652 
2653 	subprog = find_subprog(env, start);
2654 	if (subprog < 0) {
2655 		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2656 			  start);
2657 		return -EFAULT;
2658 	}
2659 	return env->subprog_info[subprog].stack_depth;
2660 }
2661 #endif
2662 
2663 static int check_ctx_reg(struct bpf_verifier_env *env,
2664 			 const struct bpf_reg_state *reg, int regno)
2665 {
2666 	/* Access to ctx or passing it to a helper is only allowed in
2667 	 * its original, unmodified form.
2668 	 */
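	/* Editorial example: after 'r1 += 8' a load through r1 is rejected
	 * with "dereference of modified ctx ptr", since reg->off is no
	 * longer zero.
	 */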
2669 
2670 	if (reg->off) {
2671 		verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
2672 			regno, reg->off);
2673 		return -EACCES;
2674 	}
2675 
2676 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2677 		char tn_buf[48];
2678 
2679 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2680 		verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
2681 		return -EACCES;
2682 	}
2683 
2684 	return 0;
2685 }
2686 
2687 static int check_tp_buffer_access(struct bpf_verifier_env *env,
2688 				  const struct bpf_reg_state *reg,
2689 				  int regno, int off, int size)
2690 {
2691 	if (off < 0) {
2692 		verbose(env,
2693 			"R%d invalid tracepoint buffer access: off=%d, size=%d",
2694 			regno, off, size);
2695 		return -EACCES;
2696 	}
2697 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2698 		char tn_buf[48];
2699 
2700 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2701 		verbose(env,
2702 			"R%d invalid variable buffer offset: off=%d, var_off=%s",
2703 			regno, off, tn_buf);
2704 		return -EACCES;
2705 	}
2706 	if (off + size > env->prog->aux->max_tp_access)
2707 		env->prog->aux->max_tp_access = off + size;
2708 
2709 	return 0;
2710 }
2711 
2712 
2713 /* truncate register to smaller size (in bytes)
2714  * must be called with size < BPF_REG_SIZE
2715  */
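/* Editorial example for the helper below: for size == 4 (a BPF_W load) the
 * mask is 0xffffffff; if umin_value and umax_value agree in the upper 32 bits
 * only the low 32 bits are kept, otherwise the bounds widen to
 * [0, 0xffffffff].
 */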
2716 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
2717 {
2718 	u64 mask;
2719 
2720 	/* clear high bits in bit representation */
2721 	reg->var_off = tnum_cast(reg->var_off, size);
2722 
2723 	/* fix arithmetic bounds */
2724 	mask = ((u64)1 << (size * 8)) - 1;
2725 	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
2726 		reg->umin_value &= mask;
2727 		reg->umax_value &= mask;
2728 	} else {
2729 		reg->umin_value = 0;
2730 		reg->umax_value = mask;
2731 	}
2732 	reg->smin_value = reg->umin_value;
2733 	reg->smax_value = reg->umax_value;
2734 }
2735 
2736 /* check whether memory at (regno + off) is accessible for t = (read | write)
2737  * if t==write, value_regno is a register which value is stored into memory
2738  * if t==read, value_regno is a register which will receive the value from memory
2739  * if t==write && value_regno==-1, some unknown value is stored into memory
2740  * if t==read && value_regno==-1, don't care what we read from memory
2741  */
2742 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2743 			    int off, int bpf_size, enum bpf_access_type t,
2744 			    int value_regno, bool strict_alignment_once)
2745 {
2746 	struct bpf_reg_state *regs = cur_regs(env);
2747 	struct bpf_reg_state *reg = regs + regno;
2748 	struct bpf_func_state *state;
2749 	int size, err = 0;
2750 
2751 	size = bpf_size_to_bytes(bpf_size);
2752 	if (size < 0)
2753 		return size;
2754 
2755 	/* alignment checks will add in reg->off themselves */
2756 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
2757 	if (err)
2758 		return err;
2759 
2760 	/* for access checks, reg->off is just part of off */
2761 	off += reg->off;
2762 
2763 	if (reg->type == PTR_TO_MAP_VALUE) {
2764 		if (t == BPF_WRITE && value_regno >= 0 &&
2765 		    is_pointer_value(env, value_regno)) {
2766 			verbose(env, "R%d leaks addr into map\n", value_regno);
2767 			return -EACCES;
2768 		}
2769 		err = check_map_access_type(env, regno, off, size, t);
2770 		if (err)
2771 			return err;
2772 		err = check_map_access(env, regno, off, size, false);
2773 		if (!err && t == BPF_READ && value_regno >= 0)
2774 			mark_reg_unknown(env, regs, value_regno);
2775 
2776 	} else if (reg->type == PTR_TO_CTX) {
2777 		enum bpf_reg_type reg_type = SCALAR_VALUE;
2778 
2779 		if (t == BPF_WRITE && value_regno >= 0 &&
2780 		    is_pointer_value(env, value_regno)) {
2781 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
2782 			return -EACCES;
2783 		}
2784 
2785 		err = check_ctx_reg(env, reg, regno);
2786 		if (err < 0)
2787 			return err;
2788 
2789 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
2790 		if (!err && t == BPF_READ && value_regno >= 0) {
2791 			/* ctx access returns either a scalar, or a
2792 			 * PTR_TO_PACKET[_META,_END]. In the latter
2793 			 * case, we know the offset is zero.
2794 			 */
2795 			if (reg_type == SCALAR_VALUE) {
2796 				mark_reg_unknown(env, regs, value_regno);
2797 			} else {
2798 				mark_reg_known_zero(env, regs,
2799 						    value_regno);
2800 				if (reg_type_may_be_null(reg_type))
2801 					regs[value_regno].id = ++env->id_gen;
2802 				/* A load of ctx field could have different
2803 				 * actual load size with the one encoded in the
2804 				 * insn. When the dst is PTR, it is for sure not
2805 				 * a sub-register.
2806 				 */
2807 				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
2808 			}
2809 			regs[value_regno].type = reg_type;
2810 		}
2811 
2812 	} else if (reg->type == PTR_TO_STACK) {
2813 		off += reg->var_off.value;
2814 		err = check_stack_access(env, reg, off, size);
2815 		if (err)
2816 			return err;
2817 
2818 		state = func(env, reg);
2819 		err = update_stack_depth(env, state, off);
2820 		if (err)
2821 			return err;
2822 
2823 		if (t == BPF_WRITE)
2824 			err = check_stack_write(env, state, off, size,
2825 						value_regno, insn_idx);
2826 		else
2827 			err = check_stack_read(env, state, off, size,
2828 					       value_regno);
2829 	} else if (reg_is_pkt_pointer(reg)) {
2830 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
2831 			verbose(env, "cannot write into packet\n");
2832 			return -EACCES;
2833 		}
2834 		if (t == BPF_WRITE && value_regno >= 0 &&
2835 		    is_pointer_value(env, value_regno)) {
2836 			verbose(env, "R%d leaks addr into packet\n",
2837 				value_regno);
2838 			return -EACCES;
2839 		}
2840 		err = check_packet_access(env, regno, off, size, false);
2841 		if (!err && t == BPF_READ && value_regno >= 0)
2842 			mark_reg_unknown(env, regs, value_regno);
2843 	} else if (reg->type == PTR_TO_FLOW_KEYS) {
2844 		if (t == BPF_WRITE && value_regno >= 0 &&
2845 		    is_pointer_value(env, value_regno)) {
2846 			verbose(env, "R%d leaks addr into flow keys\n",
2847 				value_regno);
2848 			return -EACCES;
2849 		}
2850 
2851 		err = check_flow_keys_access(env, off, size);
2852 		if (!err && t == BPF_READ && value_regno >= 0)
2853 			mark_reg_unknown(env, regs, value_regno);
2854 	} else if (type_is_sk_pointer(reg->type)) {
2855 		if (t == BPF_WRITE) {
2856 			verbose(env, "R%d cannot write into %s\n",
2857 				regno, reg_type_str[reg->type]);
2858 			return -EACCES;
2859 		}
2860 		err = check_sock_access(env, insn_idx, regno, off, size, t);
2861 		if (!err && value_regno >= 0)
2862 			mark_reg_unknown(env, regs, value_regno);
2863 	} else if (reg->type == PTR_TO_TP_BUFFER) {
2864 		err = check_tp_buffer_access(env, reg, regno, off, size);
2865 		if (!err && t == BPF_READ && value_regno >= 0)
2866 			mark_reg_unknown(env, regs, value_regno);
2867 	} else {
2868 		verbose(env, "R%d invalid mem access '%s'\n", regno,
2869 			reg_type_str[reg->type]);
2870 		return -EACCES;
2871 	}
2872 
2873 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
2874 	    regs[value_regno].type == SCALAR_VALUE) {
2875 		/* b/h/w load zero-extends, mark upper bits as known 0 */
2876 		coerce_reg_to_size(&regs[value_regno], size);
2877 	}
2878 	return err;
2879 }
2880 
2881 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
2882 {
2883 	int err;
2884 
2885 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
2886 	    insn->imm != 0) {
2887 		verbose(env, "BPF_XADD uses reserved fields\n");
2888 		return -EINVAL;
2889 	}
2890 
2891 	/* check src1 operand */
2892 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
2893 	if (err)
2894 		return err;
2895 
2896 	/* check src2 operand */
2897 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2898 	if (err)
2899 		return err;
2900 
2901 	if (is_pointer_value(env, insn->src_reg)) {
2902 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
2903 		return -EACCES;
2904 	}
2905 
2906 	if (is_ctx_reg(env, insn->dst_reg) ||
2907 	    is_pkt_reg(env, insn->dst_reg) ||
2908 	    is_flow_key_reg(env, insn->dst_reg) ||
2909 	    is_sk_reg(env, insn->dst_reg)) {
2910 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2911 			insn->dst_reg,
2912 			reg_type_str[reg_state(env, insn->dst_reg)->type]);
2913 		return -EACCES;
2914 	}
2915 
2916 	/* check whether atomic_add can read the memory */
2917 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2918 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
2919 	if (err)
2920 		return err;
2921 
2922 	/* check whether atomic_add can write into the same memory */
2923 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
2924 				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
2925 }
2926 
2927 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
2928 				  int off, int access_size,
2929 				  bool zero_size_allowed)
2930 {
2931 	struct bpf_reg_state *reg = reg_state(env, regno);
2932 
2933 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
2934 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
2935 		if (tnum_is_const(reg->var_off)) {
2936 			verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
2937 				regno, off, access_size);
2938 		} else {
2939 			char tn_buf[48];
2940 
2941 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2942 			verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
2943 				regno, tn_buf, access_size);
2944 		}
2945 		return -EACCES;
2946 	}
2947 	return 0;
2948 }
2949 
2950 /* when register 'regno' is passed into a function that will read 'access_size'
2951  * bytes from that pointer, make sure that it's within the stack boundary
2952  * and all elements of the stack are initialized.
2953  * Unlike most pointer bounds-checking functions, this one doesn't take an
2954  * 'off' argument, so it has to add in reg->off itself.
2955  */
2956 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
2957 				int access_size, bool zero_size_allowed,
2958 				struct bpf_call_arg_meta *meta)
2959 {
2960 	struct bpf_reg_state *reg = reg_state(env, regno);
2961 	struct bpf_func_state *state = func(env, reg);
2962 	int err, min_off, max_off, i, j, slot, spi;
2963 
2964 	if (reg->type != PTR_TO_STACK) {
2965 		/* Allow zero-byte read from NULL, regardless of pointer type */
2966 		if (zero_size_allowed && access_size == 0 &&
2967 		    register_is_null(reg))
2968 			return 0;
2969 
2970 		verbose(env, "R%d type=%s expected=%s\n", regno,
2971 			reg_type_str[reg->type],
2972 			reg_type_str[PTR_TO_STACK]);
2973 		return -EACCES;
2974 	}
2975 
2976 	if (tnum_is_const(reg->var_off)) {
2977 		min_off = max_off = reg->var_off.value + reg->off;
2978 		err = __check_stack_boundary(env, regno, min_off, access_size,
2979 					     zero_size_allowed);
2980 		if (err)
2981 			return err;
2982 	} else {
2983 		/* Variable offset is prohibited for unprivileged mode for
2984 		 * simplicity since it requires corresponding support in
2985 		 * Spectre masking for stack ALU.
2986 		 * See also retrieve_ptr_limit().
2987 		 */
2988 		if (!env->allow_ptr_leaks) {
2989 			char tn_buf[48];
2990 
2991 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2992 			verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
2993 				regno, tn_buf);
2994 			return -EACCES;
2995 		}
2996 		/* Only an initialized buffer on the stack is allowed to be
2997 		 * accessed with a variable offset. With an uninitialized buffer
2998 		 * it's hard to guarantee that the whole memory is marked as
2999 		 * initialized on helper return, since the specific bounds are
3000 		 * unknown, which may cause uninitialized stack leaking.
3001 		 */
3002 		if (meta && meta->raw_mode)
3003 			meta = NULL;
3004 
3005 		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3006 		    reg->smax_value <= -BPF_MAX_VAR_OFF) {
3007 			verbose(env, "R%d unbounded indirect variable offset stack access\n",
3008 				regno);
3009 			return -EACCES;
3010 		}
3011 		min_off = reg->smin_value + reg->off;
3012 		max_off = reg->smax_value + reg->off;
3013 		err = __check_stack_boundary(env, regno, min_off, access_size,
3014 					     zero_size_allowed);
3015 		if (err) {
3016 			verbose(env, "R%d min value is outside of stack bound\n",
3017 				regno);
3018 			return err;
3019 		}
3020 		err = __check_stack_boundary(env, regno, max_off, access_size,
3021 					     zero_size_allowed);
3022 		if (err) {
3023 			verbose(env, "R%d max value is outside of stack bound\n",
3024 				regno);
3025 			return err;
3026 		}
3027 	}
3028 
3029 	if (meta && meta->raw_mode) {
3030 		meta->access_size = access_size;
3031 		meta->regno = regno;
3032 		return 0;
3033 	}
3034 
3035 	for (i = min_off; i < max_off + access_size; i++) {
3036 		u8 *stype;
3037 
3038 		slot = -i - 1;
3039 		spi = slot / BPF_REG_SIZE;
3040 		if (state->allocated_stack <= slot)
3041 			goto err;
3042 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3043 		if (*stype == STACK_MISC)
3044 			goto mark;
3045 		if (*stype == STACK_ZERO) {
3046 			/* helper can write anything into the stack */
3047 			*stype = STACK_MISC;
3048 			goto mark;
3049 		}
3050 		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3051 		    state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
3052 			__mark_reg_unknown(&state->stack[spi].spilled_ptr);
3053 			for (j = 0; j < BPF_REG_SIZE; j++)
3054 				state->stack[spi].slot_type[j] = STACK_MISC;
3055 			goto mark;
3056 		}
3057 
3058 err:
3059 		if (tnum_is_const(reg->var_off)) {
3060 			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3061 				min_off, i - min_off, access_size);
3062 		} else {
3063 			char tn_buf[48];
3064 
3065 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3066 			verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3067 				tn_buf, i - min_off, access_size);
3068 		}
3069 		return -EACCES;
3070 mark:
3071 		/* reading any byte out of 8-byte 'spill_slot' will cause
3072 		 * the whole slot to be marked as 'read'
3073 		 */
3074 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
3075 			      state->stack[spi].spilled_ptr.parent,
3076 			      REG_LIVE_READ64);
3077 	}
3078 	return update_stack_depth(env, state, min_off);
3079 }
3080 
3081 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3082 				   int access_size, bool zero_size_allowed,
3083 				   struct bpf_call_arg_meta *meta)
3084 {
3085 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3086 
3087 	switch (reg->type) {
3088 	case PTR_TO_PACKET:
3089 	case PTR_TO_PACKET_META:
3090 		return check_packet_access(env, regno, reg->off, access_size,
3091 					   zero_size_allowed);
3092 	case PTR_TO_MAP_VALUE:
3093 		if (check_map_access_type(env, regno, reg->off, access_size,
3094 					  meta && meta->raw_mode ? BPF_WRITE :
3095 					  BPF_READ))
3096 			return -EACCES;
3097 		return check_map_access(env, regno, reg->off, access_size,
3098 					zero_size_allowed);
3099 	default: /* scalar_value|ptr_to_stack or invalid ptr */
3100 		return check_stack_boundary(env, regno, access_size,
3101 					    zero_size_allowed, meta);
3102 	}
3103 }
3104 
3105 /* Implementation details:
3106  * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3107  * Two bpf_map_lookups (even with the same key) will have different reg->id.
3108  * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3109  * value_or_null->value transition, since the verifier only cares about
3110  * the range of access to valid map value pointer and doesn't care about actual
3111  * address of the map element.
3112  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3113  * reg->id > 0 after value_or_null->value transition. By doing so
3114  * two bpf_map_lookups will be considered two different pointers that
3115  * point to different bpf_spin_locks.
3116  * The verifier allows taking only one bpf_spin_lock at a time to avoid
3117  * dead-locks.
3118  * deadlocks.
3119  * reg_is_refcounted() logic. The verifier needs to remember only
3120  * one spin_lock instead of array of acquired_refs.
3121  * cur_state->active_spin_lock remembers which map value element got locked
3122  * and clears it after bpf_spin_unlock.
3123  */
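/* A minimal usage sketch (editorial; assumes a map value type with an
 * embedded 'struct bpf_spin_lock lock' member and a 'counter' field):
 *
 *   struct val *v = bpf_map_lookup_elem(&map, &key);
 *   if (v) {
 *           bpf_spin_lock(&v->lock);
 *           v->counter++;
 *           bpf_spin_unlock(&v->lock);
 *   }
 */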
3124 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3125 			     bool is_lock)
3126 {
3127 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3128 	struct bpf_verifier_state *cur = env->cur_state;
3129 	bool is_const = tnum_is_const(reg->var_off);
3130 	struct bpf_map *map = reg->map_ptr;
3131 	u64 val = reg->var_off.value;
3132 
3133 	if (reg->type != PTR_TO_MAP_VALUE) {
3134 		verbose(env, "R%d is not a pointer to map_value\n", regno);
3135 		return -EINVAL;
3136 	}
3137 	if (!is_const) {
3138 		verbose(env,
3139 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3140 			regno);
3141 		return -EINVAL;
3142 	}
3143 	if (!map->btf) {
3144 		verbose(env,
3145 			"map '%s' has to have BTF in order to use bpf_spin_lock\n",
3146 			map->name);
3147 		return -EINVAL;
3148 	}
3149 	if (!map_value_has_spin_lock(map)) {
3150 		if (map->spin_lock_off == -E2BIG)
3151 			verbose(env,
3152 				"map '%s' has more than one 'struct bpf_spin_lock'\n",
3153 				map->name);
3154 		else if (map->spin_lock_off == -ENOENT)
3155 			verbose(env,
3156 				"map '%s' doesn't have 'struct bpf_spin_lock'\n",
3157 				map->name);
3158 		else
3159 			verbose(env,
3160 				"map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3161 				map->name);
3162 		return -EINVAL;
3163 	}
3164 	if (map->spin_lock_off != val + reg->off) {
3165 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3166 			val + reg->off);
3167 		return -EINVAL;
3168 	}
3169 	if (is_lock) {
3170 		if (cur->active_spin_lock) {
3171 			verbose(env,
3172 				"Locking two bpf_spin_locks is not allowed\n");
3173 			return -EINVAL;
3174 		}
3175 		cur->active_spin_lock = reg->id;
3176 	} else {
3177 		if (!cur->active_spin_lock) {
3178 			verbose(env, "bpf_spin_unlock without taking a lock\n");
3179 			return -EINVAL;
3180 		}
3181 		if (cur->active_spin_lock != reg->id) {
3182 			verbose(env, "bpf_spin_unlock of different lock\n");
3183 			return -EINVAL;
3184 		}
3185 		cur->active_spin_lock = 0;
3186 	}
3187 	return 0;
3188 }
3189 
3190 static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3191 {
3192 	return type == ARG_PTR_TO_MEM ||
3193 	       type == ARG_PTR_TO_MEM_OR_NULL ||
3194 	       type == ARG_PTR_TO_UNINIT_MEM;
3195 }
3196 
3197 static bool arg_type_is_mem_size(enum bpf_arg_type type)
3198 {
3199 	return type == ARG_CONST_SIZE ||
3200 	       type == ARG_CONST_SIZE_OR_ZERO;
3201 }
3202 
3203 static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3204 {
3205 	return type == ARG_PTR_TO_INT ||
3206 	       type == ARG_PTR_TO_LONG;
3207 }
3208 
3209 static int int_ptr_type_to_size(enum bpf_arg_type type)
3210 {
3211 	if (type == ARG_PTR_TO_INT)
3212 		return sizeof(u32);
3213 	else if (type == ARG_PTR_TO_LONG)
3214 		return sizeof(u64);
3215 
3216 	return -EINVAL;
3217 }
3218 
3219 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
3220 			  enum bpf_arg_type arg_type,
3221 			  struct bpf_call_arg_meta *meta)
3222 {
3223 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3224 	enum bpf_reg_type expected_type, type = reg->type;
3225 	int err = 0;
3226 
3227 	if (arg_type == ARG_DONTCARE)
3228 		return 0;
3229 
3230 	err = check_reg_arg(env, regno, SRC_OP);
3231 	if (err)
3232 		return err;
3233 
3234 	if (arg_type == ARG_ANYTHING) {
3235 		if (is_pointer_value(env, regno)) {
3236 			verbose(env, "R%d leaks addr into helper function\n",
3237 				regno);
3238 			return -EACCES;
3239 		}
3240 		return 0;
3241 	}
3242 
3243 	if (type_is_pkt_pointer(type) &&
3244 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
3245 		verbose(env, "helper access to the packet is not allowed\n");
3246 		return -EACCES;
3247 	}
3248 
3249 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
3250 	    arg_type == ARG_PTR_TO_MAP_VALUE ||
3251 	    arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3252 	    arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
3253 		expected_type = PTR_TO_STACK;
3254 		if (register_is_null(reg) &&
3255 		    arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3256 			/* final test in check_stack_boundary() */;
3257 		else if (!type_is_pkt_pointer(type) &&
3258 			 type != PTR_TO_MAP_VALUE &&
3259 			 type != expected_type)
3260 			goto err_type;
3261 	} else if (arg_type == ARG_CONST_SIZE ||
3262 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
3263 		expected_type = SCALAR_VALUE;
3264 		if (type != expected_type)
3265 			goto err_type;
3266 	} else if (arg_type == ARG_CONST_MAP_PTR) {
3267 		expected_type = CONST_PTR_TO_MAP;
3268 		if (type != expected_type)
3269 			goto err_type;
3270 	} else if (arg_type == ARG_PTR_TO_CTX) {
3271 		expected_type = PTR_TO_CTX;
3272 		if (type != expected_type)
3273 			goto err_type;
3274 		err = check_ctx_reg(env, reg, regno);
3275 		if (err < 0)
3276 			return err;
3277 	} else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3278 		expected_type = PTR_TO_SOCK_COMMON;
3279 		/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3280 		if (!type_is_sk_pointer(type))
3281 			goto err_type;
3282 		if (reg->ref_obj_id) {
3283 			if (meta->ref_obj_id) {
3284 				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3285 					regno, reg->ref_obj_id,
3286 					meta->ref_obj_id);
3287 				return -EFAULT;
3288 			}
3289 			meta->ref_obj_id = reg->ref_obj_id;
3290 		}
3291 	} else if (arg_type == ARG_PTR_TO_SOCKET) {
3292 		expected_type = PTR_TO_SOCKET;
3293 		if (type != expected_type)
3294 			goto err_type;
3295 	} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3296 		if (meta->func_id == BPF_FUNC_spin_lock) {
3297 			if (process_spin_lock(env, regno, true))
3298 				return -EACCES;
3299 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
3300 			if (process_spin_lock(env, regno, false))
3301 				return -EACCES;
3302 		} else {
3303 			verbose(env, "verifier internal error\n");
3304 			return -EFAULT;
3305 		}
3306 	} else if (arg_type_is_mem_ptr(arg_type)) {
3307 		expected_type = PTR_TO_STACK;
3308 		/* One exception here. In case the function allows NULL to be
3309 		 * passed in as an argument, it's a SCALAR_VALUE type. Final test
3310 		 * happens during stack boundary checking.
3311 		 */
3312 		if (register_is_null(reg) &&
3313 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
3314 			/* final test in check_stack_boundary() */;
3315 		else if (!type_is_pkt_pointer(type) &&
3316 			 type != PTR_TO_MAP_VALUE &&
3317 			 type != expected_type)
3318 			goto err_type;
3319 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
3320 	} else if (arg_type_is_int_ptr(arg_type)) {
3321 		expected_type = PTR_TO_STACK;
3322 		if (!type_is_pkt_pointer(type) &&
3323 		    type != PTR_TO_MAP_VALUE &&
3324 		    type != expected_type)
3325 			goto err_type;
3326 	} else {
3327 		verbose(env, "unsupported arg_type %d\n", arg_type);
3328 		return -EFAULT;
3329 	}
3330 
3331 	if (arg_type == ARG_CONST_MAP_PTR) {
3332 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
3333 		meta->map_ptr = reg->map_ptr;
3334 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
3335 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
3336 		 * check that [key, key + map->key_size) are within
3337 		 * stack limits and initialized
3338 		 */
3339 		if (!meta->map_ptr) {
3340 			/* In the function declaration map_ptr must come before
3341 			 * map_key, so that it's verified and known before
3342 			 * we have to check map_key here. Otherwise it means
3343 			 * that the kernel subsystem misconfigured the verifier.
3344 			 */
3345 			verbose(env, "invalid map_ptr to access map->key\n");
3346 			return -EACCES;
3347 		}
3348 		err = check_helper_mem_access(env, regno,
3349 					      meta->map_ptr->key_size, false,
3350 					      NULL);
3351 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
3352 		   (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
3353 		    !register_is_null(reg)) ||
3354 		   arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
3355 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
3356 		 * check [value, value + map->value_size) validity
3357 		 */
3358 		if (!meta->map_ptr) {
3359 			/* kernel subsystem misconfigured verifier */
3360 			verbose(env, "invalid map_ptr to access map->value\n");
3361 			return -EACCES;
3362 		}
3363 		meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
3364 		err = check_helper_mem_access(env, regno,
3365 					      meta->map_ptr->value_size, false,
3366 					      meta);
3367 	} else if (arg_type_is_mem_size(arg_type)) {
3368 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
3369 
3370 		/* remember the mem_size which may be used later
3371 		 * to refine return values.
3372 		 */
3373 		meta->msize_smax_value = reg->smax_value;
3374 		meta->msize_umax_value = reg->umax_value;
3375 
3376 		/* The register is SCALAR_VALUE; the access check
3377 		 * happens using its boundaries.
3378 		 */
3379 		if (!tnum_is_const(reg->var_off))
3380 			/* For unprivileged variable accesses, disable raw
3381 			 * mode so that the program is required to
3382 			 * initialize all the memory that the helper could
3383 			 * just partially fill up.
3384 			 */
3385 			meta = NULL;
3386 
3387 		if (reg->smin_value < 0) {
3388 			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
3389 				regno);
3390 			return -EACCES;
3391 		}
3392 
3393 		if (reg->umin_value == 0) {
3394 			err = check_helper_mem_access(env, regno - 1, 0,
3395 						      zero_size_allowed,
3396 						      meta);
3397 			if (err)
3398 				return err;
3399 		}
3400 
3401 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
3402 			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
3403 				regno);
3404 			return -EACCES;
3405 		}
3406 		err = check_helper_mem_access(env, regno - 1,
3407 					      reg->umax_value,
3408 					      zero_size_allowed, meta);
3409 		if (!err)
3410 			err = mark_chain_precision(env, regno);
3411 	} else if (arg_type_is_int_ptr(arg_type)) {
3412 		int size = int_ptr_type_to_size(arg_type);
3413 
3414 		err = check_helper_mem_access(env, regno, size, false, meta);
3415 		if (err)
3416 			return err;
3417 		err = check_ptr_alignment(env, reg, 0, size, true);
3418 	}
3419 
3420 	return err;
3421 err_type:
3422 	verbose(env, "R%d type=%s expected=%s\n", regno,
3423 		reg_type_str[type], reg_type_str[expected_type]);
3424 	return -EACCES;
3425 }
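
/* Illustrative sketch (not part of the original source): the canonical
 * bpf_map_lookup_elem() sequence that passes the argument checks above,
 * with 'map_fd' as a placeholder map file descriptor:
 *
 *   BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),     // initialize the key slot
 *   BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),     // r2 = fp - 8, PTR_TO_STACK
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd),          // r1 becomes CONST_PTR_TO_MAP
 *   BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
 *
 * R1 satisfies ARG_CONST_MAP_PTR, R2 satisfies ARG_PTR_TO_MAP_KEY, and
 * check_helper_mem_access() then validates [r2, r2 + key_size).
 */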
3426 
3427 static int check_map_func_compatibility(struct bpf_verifier_env *env,
3428 					struct bpf_map *map, int func_id)
3429 {
3430 	if (!map)
3431 		return 0;
3432 
3433 	/* We need a two-way check; the first is from the map's perspective ... */
3434 	switch (map->map_type) {
3435 	case BPF_MAP_TYPE_PROG_ARRAY:
3436 		if (func_id != BPF_FUNC_tail_call)
3437 			goto error;
3438 		break;
3439 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3440 		if (func_id != BPF_FUNC_perf_event_read &&
3441 		    func_id != BPF_FUNC_perf_event_output &&
3442 		    func_id != BPF_FUNC_perf_event_read_value)
3443 			goto error;
3444 		break;
3445 	case BPF_MAP_TYPE_STACK_TRACE:
3446 		if (func_id != BPF_FUNC_get_stackid)
3447 			goto error;
3448 		break;
3449 	case BPF_MAP_TYPE_CGROUP_ARRAY:
3450 		if (func_id != BPF_FUNC_skb_under_cgroup &&
3451 		    func_id != BPF_FUNC_current_task_under_cgroup)
3452 			goto error;
3453 		break;
3454 	case BPF_MAP_TYPE_CGROUP_STORAGE:
3455 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
3456 		if (func_id != BPF_FUNC_get_local_storage)
3457 			goto error;
3458 		break;
3459 	case BPF_MAP_TYPE_DEVMAP:
3460 	case BPF_MAP_TYPE_DEVMAP_HASH:
3461 		if (func_id != BPF_FUNC_redirect_map &&
3462 		    func_id != BPF_FUNC_map_lookup_elem)
3463 			goto error;
3464 		break;
3465 	/* Restrict bpf side of cpumap and xskmap, open when use-cases
3466 	 * appear.
3467 	 */
3468 	case BPF_MAP_TYPE_CPUMAP:
3469 		if (func_id != BPF_FUNC_redirect_map)
3470 			goto error;
3471 		break;
3472 	case BPF_MAP_TYPE_XSKMAP:
3473 		if (func_id != BPF_FUNC_redirect_map &&
3474 		    func_id != BPF_FUNC_map_lookup_elem)
3475 			goto error;
3476 		break;
3477 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
3478 	case BPF_MAP_TYPE_HASH_OF_MAPS:
3479 		if (func_id != BPF_FUNC_map_lookup_elem)
3480 			goto error;
3481 		break;
3482 	case BPF_MAP_TYPE_SOCKMAP:
3483 		if (func_id != BPF_FUNC_sk_redirect_map &&
3484 		    func_id != BPF_FUNC_sock_map_update &&
3485 		    func_id != BPF_FUNC_map_delete_elem &&
3486 		    func_id != BPF_FUNC_msg_redirect_map)
3487 			goto error;
3488 		break;
3489 	case BPF_MAP_TYPE_SOCKHASH:
3490 		if (func_id != BPF_FUNC_sk_redirect_hash &&
3491 		    func_id != BPF_FUNC_sock_hash_update &&
3492 		    func_id != BPF_FUNC_map_delete_elem &&
3493 		    func_id != BPF_FUNC_msg_redirect_hash)
3494 			goto error;
3495 		break;
3496 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
3497 		if (func_id != BPF_FUNC_sk_select_reuseport)
3498 			goto error;
3499 		break;
3500 	case BPF_MAP_TYPE_QUEUE:
3501 	case BPF_MAP_TYPE_STACK:
3502 		if (func_id != BPF_FUNC_map_peek_elem &&
3503 		    func_id != BPF_FUNC_map_pop_elem &&
3504 		    func_id != BPF_FUNC_map_push_elem)
3505 			goto error;
3506 		break;
3507 	case BPF_MAP_TYPE_SK_STORAGE:
3508 		if (func_id != BPF_FUNC_sk_storage_get &&
3509 		    func_id != BPF_FUNC_sk_storage_delete)
3510 			goto error;
3511 		break;
3512 	default:
3513 		break;
3514 	}
3515 
3516 	/* ... and the second is from the function itself. */
3517 	switch (func_id) {
3518 	case BPF_FUNC_tail_call:
3519 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
3520 			goto error;
3521 		if (env->subprog_cnt > 1) {
3522 			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3523 			return -EINVAL;
3524 		}
3525 		break;
3526 	case BPF_FUNC_perf_event_read:
3527 	case BPF_FUNC_perf_event_output:
3528 	case BPF_FUNC_perf_event_read_value:
3529 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3530 			goto error;
3531 		break;
3532 	case BPF_FUNC_get_stackid:
3533 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3534 			goto error;
3535 		break;
3536 	case BPF_FUNC_current_task_under_cgroup:
3537 	case BPF_FUNC_skb_under_cgroup:
3538 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
3539 			goto error;
3540 		break;
3541 	case BPF_FUNC_redirect_map:
3542 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
3543 		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
3544 		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
3545 		    map->map_type != BPF_MAP_TYPE_XSKMAP)
3546 			goto error;
3547 		break;
3548 	case BPF_FUNC_sk_redirect_map:
3549 	case BPF_FUNC_msg_redirect_map:
3550 	case BPF_FUNC_sock_map_update:
3551 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
3552 			goto error;
3553 		break;
3554 	case BPF_FUNC_sk_redirect_hash:
3555 	case BPF_FUNC_msg_redirect_hash:
3556 	case BPF_FUNC_sock_hash_update:
3557 		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
3558 			goto error;
3559 		break;
3560 	case BPF_FUNC_get_local_storage:
3561 		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
3562 		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
3563 			goto error;
3564 		break;
3565 	case BPF_FUNC_sk_select_reuseport:
3566 		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
3567 			goto error;
3568 		break;
3569 	case BPF_FUNC_map_peek_elem:
3570 	case BPF_FUNC_map_pop_elem:
3571 	case BPF_FUNC_map_push_elem:
3572 		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
3573 		    map->map_type != BPF_MAP_TYPE_STACK)
3574 			goto error;
3575 		break;
3576 	case BPF_FUNC_sk_storage_get:
3577 	case BPF_FUNC_sk_storage_delete:
3578 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
3579 			goto error;
3580 		break;
3581 	default:
3582 		break;
3583 	}
3584 
3585 	return 0;
3586 error:
3587 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
3588 		map->map_type, func_id_name(func_id), func_id);
3589 	return -EINVAL;
3590 }
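
/* Illustrative sketch (not part of the original source): the two-way check
 * above rejects, for example, bpf_map_lookup_elem() on a
 * BPF_MAP_TYPE_PROG_ARRAY (only bpf_tail_call() may use that map type) and,
 * conversely, bpf_tail_call() on any other map type, both with the
 * "cannot pass map_type %d into func %s#%d" error.
 */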
3591 
3592 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
3593 {
3594 	int count = 0;
3595 
3596 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
3597 		count++;
3598 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
3599 		count++;
3600 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
3601 		count++;
3602 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
3603 		count++;
3604 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
3605 		count++;
3606 
3607 	/* We only support one arg being in raw mode at the moment,
3608 	 * which is sufficient for the helper functions we have
3609 	 * right now.
3610 	 */
3611 	return count <= 1;
3612 }
3613 
3614 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
3615 				    enum bpf_arg_type arg_next)
3616 {
3617 	return (arg_type_is_mem_ptr(arg_curr) &&
3618 	        !arg_type_is_mem_size(arg_next)) ||
3619 	       (!arg_type_is_mem_ptr(arg_curr) &&
3620 		arg_type_is_mem_size(arg_next));
3621 }
3622 
3623 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
3624 {
3625 	/* bpf_xxx(..., buf, len) call will access 'len'
3626 	 * bytes from memory 'buf'. Both arg types need
3627 	 * to be paired, so make sure there's no buggy
3628 	 * helper function specification.
3629 	 */
3630 	if (arg_type_is_mem_size(fn->arg1_type) ||
3631 	    arg_type_is_mem_ptr(fn->arg5_type)  ||
3632 	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
3633 	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
3634 	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
3635 	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
3636 		return false;
3637 
3638 	return true;
3639 }
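
/* Illustrative sketch (not part of the original source): a well-formed proto
 * pairs each memory pointer with the size argument that follows it. A
 * hypothetical helper bpf_foo(void *buf, u32 len) would declare:
 *
 *   .arg1_type = ARG_PTR_TO_MEM,    // or ARG_PTR_TO_UNINIT_MEM
 *   .arg2_type = ARG_CONST_SIZE,    // or ARG_CONST_SIZE_OR_ZERO
 *
 * A size type in arg1, a mem pointer in arg5, or an unpaired combination
 * trips check_args_pair_invalid() and the proto is rejected.
 */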
3640 
3641 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
3642 {
3643 	int count = 0;
3644 
3645 	if (arg_type_may_be_refcounted(fn->arg1_type))
3646 		count++;
3647 	if (arg_type_may_be_refcounted(fn->arg2_type))
3648 		count++;
3649 	if (arg_type_may_be_refcounted(fn->arg3_type))
3650 		count++;
3651 	if (arg_type_may_be_refcounted(fn->arg4_type))
3652 		count++;
3653 	if (arg_type_may_be_refcounted(fn->arg5_type))
3654 		count++;
3655 
3656 	/* A reference acquiring function cannot acquire
3657 	 * another refcounted ptr.
3658 	 */
3659 	if (is_acquire_function(func_id) && count)
3660 		return false;
3661 
3662 	/* We only support one refcounted arg at the moment,
3663 	 * which is sufficient for the helper functions we have right now.
3664 	 */
3665 	return count <= 1;
3666 }
3667 
3668 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
3669 {
3670 	return check_raw_mode_ok(fn) &&
3671 	       check_arg_pair_ok(fn) &&
3672 	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
3673 }
3674 
3675 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
3676  * are now invalid, so turn them into unknown SCALAR_VALUE.
3677  */
3678 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
3679 				     struct bpf_func_state *state)
3680 {
3681 	struct bpf_reg_state *regs = state->regs, *reg;
3682 	int i;
3683 
3684 	for (i = 0; i < MAX_BPF_REG; i++)
3685 		if (reg_is_pkt_pointer_any(&regs[i]))
3686 			mark_reg_unknown(env, regs, i);
3687 
3688 	bpf_for_each_spilled_reg(i, state, reg) {
3689 		if (!reg)
3690 			continue;
3691 		if (reg_is_pkt_pointer_any(reg))
3692 			__mark_reg_unknown(reg);
3693 	}
3694 }
3695 
3696 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
3697 {
3698 	struct bpf_verifier_state *vstate = env->cur_state;
3699 	int i;
3700 
3701 	for (i = 0; i <= vstate->curframe; i++)
3702 		__clear_all_pkt_pointers(env, vstate->frame[i]);
3703 }
3704 
3705 static void release_reg_references(struct bpf_verifier_env *env,
3706 				   struct bpf_func_state *state,
3707 				   int ref_obj_id)
3708 {
3709 	struct bpf_reg_state *regs = state->regs, *reg;
3710 	int i;
3711 
3712 	for (i = 0; i < MAX_BPF_REG; i++)
3713 		if (regs[i].ref_obj_id == ref_obj_id)
3714 			mark_reg_unknown(env, regs, i);
3715 
3716 	bpf_for_each_spilled_reg(i, state, reg) {
3717 		if (!reg)
3718 			continue;
3719 		if (reg->ref_obj_id == ref_obj_id)
3720 			__mark_reg_unknown(reg);
3721 	}
3722 }
3723 
3724 /* The pointer with the specified id has released its reference to kernel
3725  * resources. Identify all copies of the same pointer and clear the reference.
3726  */
3727 static int release_reference(struct bpf_verifier_env *env,
3728 			     int ref_obj_id)
3729 {
3730 	struct bpf_verifier_state *vstate = env->cur_state;
3731 	int err;
3732 	int i;
3733 
3734 	err = release_reference_state(cur_func(env), ref_obj_id);
3735 	if (err)
3736 		return err;
3737 
3738 	for (i = 0; i <= vstate->curframe; i++)
3739 		release_reg_references(env, vstate->frame[i], ref_obj_id);
3740 
3741 	return 0;
3742 }
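
/* Illustrative sketch (not part of the original source): after
 *
 *   struct bpf_sock *sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple),
 *                                            BPF_F_CURRENT_NETNS, 0);
 *   ...
 *   bpf_sk_release(sk);
 *
 * the release drops the ref_obj_id from the reference state, and every
 * register in every frame still holding a copy of 'sk' is marked unknown,
 * so a later dereference of such a copy is rejected.
 */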
3743 
3744 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
3745 			   int *insn_idx)
3746 {
3747 	struct bpf_verifier_state *state = env->cur_state;
3748 	struct bpf_func_state *caller, *callee;
3749 	int i, err, subprog, target_insn;
3750 
3751 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
3752 		verbose(env, "the call stack of %d frames is too deep\n",
3753 			state->curframe + 2);
3754 		return -E2BIG;
3755 	}
3756 
3757 	target_insn = *insn_idx + insn->imm;
3758 	subprog = find_subprog(env, target_insn + 1);
3759 	if (subprog < 0) {
3760 		verbose(env, "verifier bug. No program starts at insn %d\n",
3761 			target_insn + 1);
3762 		return -EFAULT;
3763 	}
3764 
3765 	caller = state->frame[state->curframe];
3766 	if (state->frame[state->curframe + 1]) {
3767 		verbose(env, "verifier bug. Frame %d already allocated\n",
3768 			state->curframe + 1);
3769 		return -EFAULT;
3770 	}
3771 
3772 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
3773 	if (!callee)
3774 		return -ENOMEM;
3775 	state->frame[state->curframe + 1] = callee;
3776 
3777 	/* callee cannot access r0, r6 - r9 for reading and has to write
3778 	 * into its own stack before reading from it.
3779 	 * callee can read/write into caller's stack
3780 	 */
3781 	init_func_state(env, callee,
3782 			/* remember the callsite, it will be used by bpf_exit */
3783 			*insn_idx /* callsite */,
3784 			state->curframe + 1 /* frameno within this callchain */,
3785 			subprog /* subprog number within this prog */);
3786 
3787 	/* Transfer references to the callee */
3788 	err = transfer_reference_state(callee, caller);
3789 	if (err)
3790 		return err;
3791 
3792 	/* copy r1 - r5 args that callee can access.  The copy includes parent
3793 	 * pointers, which connects us up to the liveness chain
3794 	 */
3795 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
3796 		callee->regs[i] = caller->regs[i];
3797 
3798 	/* after the call registers r0 - r5 were scratched */
3799 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
3800 		mark_reg_not_init(env, caller->regs, caller_saved[i]);
3801 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3802 	}
3803 
3804 	/* only increment it after check_reg_arg() finished */
3805 	state->curframe++;
3806 
3807 	/* and go analyze first insn of the callee */
3808 	*insn_idx = target_insn;
3809 
3810 	if (env->log.level & BPF_LOG_LEVEL) {
3811 		verbose(env, "caller:\n");
3812 		print_verifier_state(env, caller);
3813 		verbose(env, "callee:\n");
3814 		print_verifier_state(env, callee);
3815 	}
3816 	return 0;
3817 }
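
/* Illustrative sketch (not part of the original source): for a bpf-to-bpf
 * call (e.g. emitted via BPF_CALL_REL()), insn->imm is relative to the
 * instruction following the call. A call at insn_idx 5 with imm == 2 gives
 * target_insn = 5 + 2 = 7, the callee subprog must start at
 * target_insn + 1 = 8, and verification continues at insn 8 in the new frame.
 */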
3818 
3819 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
3820 {
3821 	struct bpf_verifier_state *state = env->cur_state;
3822 	struct bpf_func_state *caller, *callee;
3823 	struct bpf_reg_state *r0;
3824 	int err;
3825 
3826 	callee = state->frame[state->curframe];
3827 	r0 = &callee->regs[BPF_REG_0];
3828 	if (r0->type == PTR_TO_STACK) {
3829 		/* Technically it's ok to return the caller's stack pointer
3830 		 * (or the caller's caller's pointer) back to the caller,
3831 		 * since those pointers remain valid. Only the current frame's
3832 		 * stack pointer becomes invalid as soon as the function exits,
3833 		 * but let's be conservative.
3834 		 */
3835 		verbose(env, "cannot return stack pointer to the caller\n");
3836 		return -EINVAL;
3837 	}
3838 
3839 	state->curframe--;
3840 	caller = state->frame[state->curframe];
3841 	/* return to the caller whatever r0 had in the callee */
3842 	caller->regs[BPF_REG_0] = *r0;
3843 
3844 	/* Transfer references to the caller */
3845 	err = transfer_reference_state(caller, callee);
3846 	if (err)
3847 		return err;
3848 
3849 	*insn_idx = callee->callsite + 1;
3850 	if (env->log.level & BPF_LOG_LEVEL) {
3851 		verbose(env, "returning from callee:\n");
3852 		print_verifier_state(env, callee);
3853 		verbose(env, "to caller at %d:\n", *insn_idx);
3854 		print_verifier_state(env, caller);
3855 	}
3856 	/* clear everything in the callee */
3857 	free_func_state(callee);
3858 	state->frame[state->curframe + 1] = NULL;
3859 	return 0;
3860 }
3861 
3862 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
3863 				   int func_id,
3864 				   struct bpf_call_arg_meta *meta)
3865 {
3866 	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
3867 
3868 	if (ret_type != RET_INTEGER ||
3869 	    (func_id != BPF_FUNC_get_stack &&
3870 	     func_id != BPF_FUNC_probe_read_str))
3871 		return;
3872 
3873 	ret_reg->smax_value = meta->msize_smax_value;
3874 	ret_reg->umax_value = meta->msize_umax_value;
3875 	__reg_deduce_bounds(ret_reg);
3876 	__reg_bound_offset(ret_reg);
3877 }
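
/* Illustrative sketch (not part of the original source): bpf_get_stack(ctx,
 * buf, size, flags) copies at most 'size' bytes and returns the number of
 * bytes copied (or a negative error), so R0's smax/umax values after the
 * call are clamped to the bounds of the size argument remembered in
 * meta->msize_smax_value/msize_umax_value.
 */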
3878 
3879 static int
3880 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
3881 		int func_id, int insn_idx)
3882 {
3883 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
3884 	struct bpf_map *map = meta->map_ptr;
3885 
3886 	if (func_id != BPF_FUNC_tail_call &&
3887 	    func_id != BPF_FUNC_map_lookup_elem &&
3888 	    func_id != BPF_FUNC_map_update_elem &&
3889 	    func_id != BPF_FUNC_map_delete_elem &&
3890 	    func_id != BPF_FUNC_map_push_elem &&
3891 	    func_id != BPF_FUNC_map_pop_elem &&
3892 	    func_id != BPF_FUNC_map_peek_elem)
3893 		return 0;
3894 
3895 	if (map == NULL) {
3896 		verbose(env, "kernel subsystem misconfigured verifier\n");
3897 		return -EINVAL;
3898 	}
3899 
3900 	/* In case of a read-only map, some additional restrictions
3901 	 * need to be applied in order to prevent altering the
3902 	 * state of the map from the program side.
3903 	 */
3904 	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
3905 	    (func_id == BPF_FUNC_map_delete_elem ||
3906 	     func_id == BPF_FUNC_map_update_elem ||
3907 	     func_id == BPF_FUNC_map_push_elem ||
3908 	     func_id == BPF_FUNC_map_pop_elem)) {
3909 		verbose(env, "write into map forbidden\n");
3910 		return -EACCES;
3911 	}
3912 
3913 	if (!BPF_MAP_PTR(aux->map_state))
3914 		bpf_map_ptr_store(aux, meta->map_ptr,
3915 				  meta->map_ptr->unpriv_array);
3916 	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
3917 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
3918 				  meta->map_ptr->unpriv_array);
3919 	return 0;
3920 }
3921 
3922 static int check_reference_leak(struct bpf_verifier_env *env)
3923 {
3924 	struct bpf_func_state *state = cur_func(env);
3925 	int i;
3926 
3927 	for (i = 0; i < state->acquired_refs; i++) {
3928 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
3929 			state->refs[i].id, state->refs[i].insn_idx);
3930 	}
3931 	return state->acquired_refs ? -EINVAL : 0;
3932 }
3933 
3934 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
3935 {
3936 	const struct bpf_func_proto *fn = NULL;
3937 	struct bpf_reg_state *regs;
3938 	struct bpf_call_arg_meta meta;
3939 	bool changes_data;
3940 	int i, err;
3941 
3942 	/* find function prototype */
3943 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
3944 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
3945 			func_id);
3946 		return -EINVAL;
3947 	}
3948 
3949 	if (env->ops->get_func_proto)
3950 		fn = env->ops->get_func_proto(func_id, env->prog);
3951 	if (!fn) {
3952 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
3953 			func_id);
3954 		return -EINVAL;
3955 	}
3956 
3957 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
3958 	if (!env->prog->gpl_compatible && fn->gpl_only) {
3959 		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
3960 		return -EINVAL;
3961 	}
3962 
3963 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
3964 	changes_data = bpf_helper_changes_pkt_data(fn->func);
3965 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
3966 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
3967 			func_id_name(func_id), func_id);
3968 		return -EINVAL;
3969 	}
3970 
3971 	memset(&meta, 0, sizeof(meta));
3972 	meta.pkt_access = fn->pkt_access;
3973 
3974 	err = check_func_proto(fn, func_id);
3975 	if (err) {
3976 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
3977 			func_id_name(func_id), func_id);
3978 		return err;
3979 	}
3980 
3981 	meta.func_id = func_id;
3982 	/* check args */
3983 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
3984 	if (err)
3985 		return err;
3986 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
3987 	if (err)
3988 		return err;
3989 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
3990 	if (err)
3991 		return err;
3992 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
3993 	if (err)
3994 		return err;
3995 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
3996 	if (err)
3997 		return err;
3998 
3999 	err = record_func_map(env, &meta, func_id, insn_idx);
4000 	if (err)
4001 		return err;
4002 
4003 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
4004 	 * is inferred from register state.
4005 	 */
4006 	for (i = 0; i < meta.access_size; i++) {
4007 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4008 				       BPF_WRITE, -1, false);
4009 		if (err)
4010 			return err;
4011 	}
4012 
4013 	if (func_id == BPF_FUNC_tail_call) {
4014 		err = check_reference_leak(env);
4015 		if (err) {
4016 			verbose(env, "tail_call would lead to reference leak\n");
4017 			return err;
4018 		}
4019 	} else if (is_release_function(func_id)) {
4020 		err = release_reference(env, meta.ref_obj_id);
4021 		if (err) {
4022 			verbose(env, "func %s#%d reference has not been acquired before\n",
4023 				func_id_name(func_id), func_id);
4024 			return err;
4025 		}
4026 	}
4027 
4028 	regs = cur_regs(env);
4029 
4030 	/* Check that the flags argument in get_local_storage(map, flags) is 0;
4031 	 * this is required because get_local_storage() can't return an error.
4032 	 */
4033 	if (func_id == BPF_FUNC_get_local_storage &&
4034 	    !register_is_null(&regs[BPF_REG_2])) {
4035 		verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4036 		return -EINVAL;
4037 	}
4038 
4039 	/* reset caller saved regs */
4040 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
4041 		mark_reg_not_init(env, regs, caller_saved[i]);
4042 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4043 	}
4044 
4045 	/* helper call returns 64-bit value. */
4046 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4047 
4048 	/* update return register (already marked as written above) */
4049 	if (fn->ret_type == RET_INTEGER) {
4050 		/* sets type to SCALAR_VALUE */
4051 		mark_reg_unknown(env, regs, BPF_REG_0);
4052 	} else if (fn->ret_type == RET_VOID) {
4053 		regs[BPF_REG_0].type = NOT_INIT;
4054 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4055 		   fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4056 		/* There is no offset yet applied, variable or fixed */
4057 		mark_reg_known_zero(env, regs, BPF_REG_0);
4058 		/* remember map_ptr, so that check_map_access()
4059 		 * can check 'value_size' boundary of memory access
4060 		 * to map element returned from bpf_map_lookup_elem()
4061 		 */
4062 		if (meta.map_ptr == NULL) {
4063 			verbose(env,
4064 				"kernel subsystem misconfigured verifier\n");
4065 			return -EINVAL;
4066 		}
4067 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
4068 		if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4069 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
4070 			if (map_value_has_spin_lock(meta.map_ptr))
4071 				regs[BPF_REG_0].id = ++env->id_gen;
4072 		} else {
4073 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4074 			regs[BPF_REG_0].id = ++env->id_gen;
4075 		}
4076 	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4077 		mark_reg_known_zero(env, regs, BPF_REG_0);
4078 		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
4079 		regs[BPF_REG_0].id = ++env->id_gen;
4080 	} else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4081 		mark_reg_known_zero(env, regs, BPF_REG_0);
4082 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4083 		regs[BPF_REG_0].id = ++env->id_gen;
4084 	} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4085 		mark_reg_known_zero(env, regs, BPF_REG_0);
4086 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4087 		regs[BPF_REG_0].id = ++env->id_gen;
4088 	} else {
4089 		verbose(env, "unknown return type %d of func %s#%d\n",
4090 			fn->ret_type, func_id_name(func_id), func_id);
4091 		return -EINVAL;
4092 	}
4093 
4094 	if (is_ptr_cast_function(func_id)) {
4095 		/* For release_reference() */
4096 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
4097 	} else if (is_acquire_function(func_id)) {
4098 		int id = acquire_reference_state(env, insn_idx);
4099 
4100 		if (id < 0)
4101 			return id;
4102 		/* For mark_ptr_or_null_reg() */
4103 		regs[BPF_REG_0].id = id;
4104 		/* For release_reference() */
4105 		regs[BPF_REG_0].ref_obj_id = id;
4106 	}
4107 
4108 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4109 
4110 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
4111 	if (err)
4112 		return err;
4113 
4114 	if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4115 		const char *err_str;
4116 
4117 #ifdef CONFIG_PERF_EVENTS
4118 		err = get_callchain_buffers(sysctl_perf_event_max_stack);
4119 		err_str = "cannot get callchain buffer for func %s#%d\n";
4120 #else
4121 		err = -ENOTSUPP;
4122 		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4123 #endif
4124 		if (err) {
4125 			verbose(env, err_str, func_id_name(func_id), func_id);
4126 			return err;
4127 		}
4128 
4129 		env->prog->has_callchain_buf = true;
4130 	}
4131 
4132 	if (changes_data)
4133 		clear_all_pkt_pointers(env);
4134 	return 0;
4135 }
4136 
4137 static bool signed_add_overflows(s64 a, s64 b)
4138 {
4139 	/* Do the add in u64, where overflow is well-defined */
4140 	s64 res = (s64)((u64)a + (u64)b);
4141 
4142 	if (b < 0)
4143 		return res > a;
4144 	return res < a;
4145 }
4146 
4147 static bool signed_sub_overflows(s64 a, s64 b)
4148 {
4149 	/* Do the sub in u64, where overflow is well-defined */
4150 	s64 res = (s64)((u64)a - (u64)b);
4151 
4152 	if (b < 0)
4153 		return res < a;
4154 	return res > a;
4155 }
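
/* Illustrative sketch (not part of the original source): doing the math in
 * u64 makes the wrap-around well defined, and the sign of 'b' tells us which
 * way a wrapped result must have moved. E.g. signed_add_overflows(S64_MAX, 1)
 * wraps to res == S64_MIN < a with b >= 0, reporting overflow, and
 * signed_sub_overflows(S64_MIN, 1) wraps to res == S64_MAX > a with b >= 0,
 * likewise reporting overflow (underflow).
 */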
4156 
4157 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4158 				  const struct bpf_reg_state *reg,
4159 				  enum bpf_reg_type type)
4160 {
4161 	bool known = tnum_is_const(reg->var_off);
4162 	s64 val = reg->var_off.value;
4163 	s64 smin = reg->smin_value;
4164 
4165 	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4166 		verbose(env, "math between %s pointer and %lld is not allowed\n",
4167 			reg_type_str[type], val);
4168 		return false;
4169 	}
4170 
4171 	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4172 		verbose(env, "%s pointer offset %d is not allowed\n",
4173 			reg_type_str[type], reg->off);
4174 		return false;
4175 	}
4176 
4177 	if (smin == S64_MIN) {
4178 		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4179 			reg_type_str[type]);
4180 		return false;
4181 	}
4182 
4183 	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4184 		verbose(env, "value %lld makes %s pointer be out of bounds\n",
4185 			smin, reg_type_str[type]);
4186 		return false;
4187 	}
4188 
4189 	return true;
4190 }
4191 
4192 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4193 {
4194 	return &env->insn_aux_data[env->insn_idx];
4195 }
4196 
4197 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4198 			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
4199 {
4200 	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
4201 			    (opcode == BPF_SUB && !off_is_neg);
4202 	u32 off;
4203 
4204 	switch (ptr_reg->type) {
4205 	case PTR_TO_STACK:
4206 		/* Indirect variable offset stack access is prohibited in
4207 		 * unprivileged mode so it's not handled here.
4208 		 */
4209 		off = ptr_reg->off + ptr_reg->var_off.value;
4210 		if (mask_to_left)
4211 			*ptr_limit = MAX_BPF_STACK + off;
4212 		else
4213 			*ptr_limit = -off;
4214 		return 0;
4215 	case PTR_TO_MAP_VALUE:
4216 		if (mask_to_left) {
4217 			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4218 		} else {
4219 			off = ptr_reg->smin_value + ptr_reg->off;
4220 			*ptr_limit = ptr_reg->map_ptr->value_size - off;
4221 		}
4222 		return 0;
4223 	default:
4224 		return -EINVAL;
4225 	}
4226 }
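
/* Illustrative sketch (not part of the original source): for a PTR_TO_STACK
 * register with off == -16 and a constant var_off of 0, a BPF_ADD of a
 * negative offset masks to the left and yields
 * ptr_limit = MAX_BPF_STACK + (-16) = 512 - 16 = 496 bytes towards the
 * bottom of the stack, while a non-negative BPF_ADD offset yields
 * ptr_limit = -(-16) = 16 bytes back up towards the frame pointer.
 */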
4227 
4228 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4229 				    const struct bpf_insn *insn)
4230 {
4231 	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
4232 }
4233 
4234 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4235 				       u32 alu_state, u32 alu_limit)
4236 {
4237 	/* If we arrived here from different branches with different
4238 	 * state or limits to sanitize, then this won't work.
4239 	 */
4240 	if (aux->alu_state &&
4241 	    (aux->alu_state != alu_state ||
4242 	     aux->alu_limit != alu_limit))
4243 		return -EACCES;
4244 
4245 	/* Corresponding fixup done in fixup_bpf_calls(). */
4246 	aux->alu_state = alu_state;
4247 	aux->alu_limit = alu_limit;
4248 	return 0;
4249 }
4250 
4251 static int sanitize_val_alu(struct bpf_verifier_env *env,
4252 			    struct bpf_insn *insn)
4253 {
4254 	struct bpf_insn_aux_data *aux = cur_aux(env);
4255 
4256 	if (can_skip_alu_sanitation(env, insn))
4257 		return 0;
4258 
4259 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4260 }
4261 
4262 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4263 			    struct bpf_insn *insn,
4264 			    const struct bpf_reg_state *ptr_reg,
4265 			    struct bpf_reg_state *dst_reg,
4266 			    bool off_is_neg)
4267 {
4268 	struct bpf_verifier_state *vstate = env->cur_state;
4269 	struct bpf_insn_aux_data *aux = cur_aux(env);
4270 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
4271 	u8 opcode = BPF_OP(insn->code);
4272 	u32 alu_state, alu_limit;
4273 	struct bpf_reg_state tmp;
4274 	bool ret;
4275 
4276 	if (can_skip_alu_sanitation(env, insn))
4277 		return 0;
4278 
4279 	/* We already marked aux for masking from non-speculative
4280 	 * paths, thus we got here in the first place. We only care
4281 	 * to explore bad access from here.
4282 	 */
4283 	if (vstate->speculative)
4284 		goto do_sim;
4285 
4286 	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4287 	alu_state |= ptr_is_dst_reg ?
4288 		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4289 
4290 	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4291 		return 0;
4292 	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
4293 		return -EACCES;
4294 do_sim:
4295 	/* Simulate and find potential out-of-bounds access under
4296 	 * speculative execution from truncation as a result of
4297 	 * masking when off was not within expected range. If off
4298 	 * sits in dst, then we temporarily need to move ptr there
4299 	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4300 	 * for cases where we use K-based arithmetic in one direction
4301 	 * and truncated reg-based in the other in order to explore
4302 	 * bad access.
4303 	 */
4304 	if (!ptr_is_dst_reg) {
4305 		tmp = *dst_reg;
4306 		*dst_reg = *ptr_reg;
4307 	}
4308 	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
4309 	if (!ptr_is_dst_reg && ret)
4310 		*dst_reg = tmp;
4311 	return !ret ? -EFAULT : 0;
4312 }
4313 
4314 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
4315  * Caller should also handle BPF_MOV case separately.
4316  * If we return -EACCES, caller may want to try again treating pointer as a
4317  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
4318  */
4319 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4320 				   struct bpf_insn *insn,
4321 				   const struct bpf_reg_state *ptr_reg,
4322 				   const struct bpf_reg_state *off_reg)
4323 {
4324 	struct bpf_verifier_state *vstate = env->cur_state;
4325 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4326 	struct bpf_reg_state *regs = state->regs, *dst_reg;
4327 	bool known = tnum_is_const(off_reg->var_off);
4328 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4329 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4330 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4331 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
4332 	u32 dst = insn->dst_reg, src = insn->src_reg;
4333 	u8 opcode = BPF_OP(insn->code);
4334 	int ret;
4335 
4336 	dst_reg = &regs[dst];
4337 
4338 	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4339 	    smin_val > smax_val || umin_val > umax_val) {
4340 		/* Taint dst register if offset had invalid bounds derived from
4341 		 * e.g. dead branches.
4342 		 */
4343 		__mark_reg_unknown(dst_reg);
4344 		return 0;
4345 	}
4346 
4347 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
4348 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
4349 		verbose(env,
4350 			"R%d 32-bit pointer arithmetic prohibited\n",
4351 			dst);
4352 		return -EACCES;
4353 	}
4354 
4355 	switch (ptr_reg->type) {
4356 	case PTR_TO_MAP_VALUE_OR_NULL:
4357 		verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
4358 			dst, reg_type_str[ptr_reg->type]);
4359 		return -EACCES;
4360 	case CONST_PTR_TO_MAP:
4361 	case PTR_TO_PACKET_END:
4362 	case PTR_TO_SOCKET:
4363 	case PTR_TO_SOCKET_OR_NULL:
4364 	case PTR_TO_SOCK_COMMON:
4365 	case PTR_TO_SOCK_COMMON_OR_NULL:
4366 	case PTR_TO_TCP_SOCK:
4367 	case PTR_TO_TCP_SOCK_OR_NULL:
4368 	case PTR_TO_XDP_SOCK:
4369 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
4370 			dst, reg_type_str[ptr_reg->type]);
4371 		return -EACCES;
4372 	case PTR_TO_MAP_VALUE:
4373 		if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
4374 			verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
4375 				off_reg == dst_reg ? dst : src);
4376 			return -EACCES;
4377 		}
4378 		/* fall-through */
4379 	default:
4380 		break;
4381 	}
4382 
4383 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
4384 	 * The id may be overwritten later if we create a new variable offset.
4385 	 */
4386 	dst_reg->type = ptr_reg->type;
4387 	dst_reg->id = ptr_reg->id;
4388 
4389 	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
4390 	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
4391 		return -EINVAL;
4392 
4393 	switch (opcode) {
4394 	case BPF_ADD:
4395 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4396 		if (ret < 0) {
4397 			verbose(env, "R%d tried to add from different maps or paths\n", dst);
4398 			return ret;
4399 		}
4400 		/* We can take a fixed offset as long as it doesn't overflow
4401 		 * the s32 'off' field
4402 		 */
4403 		if (known && (ptr_reg->off + smin_val ==
4404 			      (s64)(s32)(ptr_reg->off + smin_val))) {
4405 			/* pointer += K.  Accumulate it into fixed offset */
4406 			dst_reg->smin_value = smin_ptr;
4407 			dst_reg->smax_value = smax_ptr;
4408 			dst_reg->umin_value = umin_ptr;
4409 			dst_reg->umax_value = umax_ptr;
4410 			dst_reg->var_off = ptr_reg->var_off;
4411 			dst_reg->off = ptr_reg->off + smin_val;
4412 			dst_reg->raw = ptr_reg->raw;
4413 			break;
4414 		}
4415 		/* A new variable offset is created.  Note that off_reg->off
4416 		 * == 0, since it's a scalar.
4417 		 * dst_reg gets the pointer type and since some unknown scalar
4418 		 * value was added to the pointer, give it a new 'id'
4419 		 * if it's a PTR_TO_PACKET.
4420 		 * this creates a new 'base' pointer, off_reg (variable) gets
4421 		 * added into the variable offset, and we copy the fixed offset
4422 		 * from ptr_reg.
4423 		 */
4424 		if (signed_add_overflows(smin_ptr, smin_val) ||
4425 		    signed_add_overflows(smax_ptr, smax_val)) {
4426 			dst_reg->smin_value = S64_MIN;
4427 			dst_reg->smax_value = S64_MAX;
4428 		} else {
4429 			dst_reg->smin_value = smin_ptr + smin_val;
4430 			dst_reg->smax_value = smax_ptr + smax_val;
4431 		}
4432 		if (umin_ptr + umin_val < umin_ptr ||
4433 		    umax_ptr + umax_val < umax_ptr) {
4434 			dst_reg->umin_value = 0;
4435 			dst_reg->umax_value = U64_MAX;
4436 		} else {
4437 			dst_reg->umin_value = umin_ptr + umin_val;
4438 			dst_reg->umax_value = umax_ptr + umax_val;
4439 		}
4440 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
4441 		dst_reg->off = ptr_reg->off;
4442 		dst_reg->raw = ptr_reg->raw;
4443 		if (reg_is_pkt_pointer(ptr_reg)) {
4444 			dst_reg->id = ++env->id_gen;
4445 			/* something was added to pkt_ptr, set range to zero */
4446 			dst_reg->raw = 0;
4447 		}
4448 		break;
4449 	case BPF_SUB:
4450 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4451 		if (ret < 0) {
4452 			verbose(env, "R%d tried to sub from different maps or paths\n", dst);
4453 			return ret;
4454 		}
4455 		if (dst_reg == off_reg) {
4456 			/* scalar -= pointer.  Creates an unknown scalar */
4457 			verbose(env, "R%d tried to subtract pointer from scalar\n",
4458 				dst);
4459 			return -EACCES;
4460 		}
4461 		/* We don't allow subtraction from FP, because (according to
4462 		 * the test_verifier.c test "invalid fp arithmetic") JITs might not
4463 		 * be able to deal with it.
4464 		 */
4465 		if (ptr_reg->type == PTR_TO_STACK) {
4466 			verbose(env, "R%d subtraction from stack pointer prohibited\n",
4467 				dst);
4468 			return -EACCES;
4469 		}
4470 		if (known && (ptr_reg->off - smin_val ==
4471 			      (s64)(s32)(ptr_reg->off - smin_val))) {
4472 			/* pointer -= K.  Subtract it from fixed offset */
4473 			dst_reg->smin_value = smin_ptr;
4474 			dst_reg->smax_value = smax_ptr;
4475 			dst_reg->umin_value = umin_ptr;
4476 			dst_reg->umax_value = umax_ptr;
4477 			dst_reg->var_off = ptr_reg->var_off;
4478 			dst_reg->id = ptr_reg->id;
4479 			dst_reg->off = ptr_reg->off - smin_val;
4480 			dst_reg->raw = ptr_reg->raw;
4481 			break;
4482 		}
4483 		/* A new variable offset is created.  If the subtrahend is known
4484 		 * nonnegative, then any reg->range we had before is still good.
4485 		 */
4486 		if (signed_sub_overflows(smin_ptr, smax_val) ||
4487 		    signed_sub_overflows(smax_ptr, smin_val)) {
4488 			/* Overflow possible, we know nothing */
4489 			dst_reg->smin_value = S64_MIN;
4490 			dst_reg->smax_value = S64_MAX;
4491 		} else {
4492 			dst_reg->smin_value = smin_ptr - smax_val;
4493 			dst_reg->smax_value = smax_ptr - smin_val;
4494 		}
4495 		if (umin_ptr < umax_val) {
4496 			/* Overflow possible, we know nothing */
4497 			dst_reg->umin_value = 0;
4498 			dst_reg->umax_value = U64_MAX;
4499 		} else {
4500 			/* Cannot overflow (as long as bounds are consistent) */
4501 			dst_reg->umin_value = umin_ptr - umax_val;
4502 			dst_reg->umax_value = umax_ptr - umin_val;
4503 		}
4504 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
4505 		dst_reg->off = ptr_reg->off;
4506 		dst_reg->raw = ptr_reg->raw;
4507 		if (reg_is_pkt_pointer(ptr_reg)) {
4508 			dst_reg->id = ++env->id_gen;
4509 			/* a negative value may have been subtracted, i.e. the pointer may have advanced, so clear the range */
4510 			if (smin_val < 0)
4511 				dst_reg->raw = 0;
4512 		}
4513 		break;
4514 	case BPF_AND:
4515 	case BPF_OR:
4516 	case BPF_XOR:
4517 		/* bitwise ops on pointers are troublesome, prohibit. */
4518 		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
4519 			dst, bpf_alu_string[opcode >> 4]);
4520 		return -EACCES;
4521 	default:
4522 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
4523 		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
4524 			dst, bpf_alu_string[opcode >> 4]);
4525 		return -EACCES;
4526 	}
4527 
4528 	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
4529 		return -EINVAL;
4530 
4531 	__update_reg_bounds(dst_reg);
4532 	__reg_deduce_bounds(dst_reg);
4533 	__reg_bound_offset(dst_reg);
4534 
4535 	/* For unprivileged we require that resulting offset must be in bounds
4536 	 * in order to be able to sanitize access later on.
4537 	 */
4538 	if (!env->allow_ptr_leaks) {
4539 		if (dst_reg->type == PTR_TO_MAP_VALUE &&
4540 		    check_map_access(env, dst, dst_reg->off, 1, false)) {
4541 			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
4542 				"prohibited for !root\n", dst);
4543 			return -EACCES;
4544 		} else if (dst_reg->type == PTR_TO_STACK &&
4545 			   check_stack_access(env, dst_reg, dst_reg->off +
4546 					      dst_reg->var_off.value, 1)) {
4547 			verbose(env, "R%d stack pointer arithmetic goes out of range, "
4548 				"prohibited for !root\n", dst);
4549 			return -EACCES;
4550 		}
4551 	}
4552 
4553 	return 0;
4554 }
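
/* Illustrative sketch (not part of the original source): given r0 of type
 * PTR_TO_MAP_VALUE with off == 0,
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
 *
 * takes the known-constant path above and simply produces off == 4, while
 * adding a bounded unknown scalar, say r1 in [0, 16], keeps off == 0 and
 * folds r1 into the variable offset (smin/umin 0, smax/umax 16 plus the
 * matching var_off), which check_map_access() later bounds-checks.
 */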
4555 
4556 /* WARNING: This function does calculations on 64-bit values, but the actual
4557  * execution may occur on 32-bit values. Therefore, things like bitshifts
4558  * need extra checks in the 32-bit case.
4559  */
4560 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
4561 				      struct bpf_insn *insn,
4562 				      struct bpf_reg_state *dst_reg,
4563 				      struct bpf_reg_state src_reg)
4564 {
4565 	struct bpf_reg_state *regs = cur_regs(env);
4566 	u8 opcode = BPF_OP(insn->code);
4567 	bool src_known, dst_known;
4568 	s64 smin_val, smax_val;
4569 	u64 umin_val, umax_val;
4570 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
4571 	u32 dst = insn->dst_reg;
4572 	int ret;
4573 
4574 	if (insn_bitness == 32) {
4575 		/* Relevant for 32-bit RSH: Information can propagate towards
4576 		 * LSB, so it isn't sufficient to only truncate the output to
4577 		 * 32 bits.
4578 		 */
4579 		coerce_reg_to_size(dst_reg, 4);
4580 		coerce_reg_to_size(&src_reg, 4);
4581 	}
4582 
4583 	smin_val = src_reg.smin_value;
4584 	smax_val = src_reg.smax_value;
4585 	umin_val = src_reg.umin_value;
4586 	umax_val = src_reg.umax_value;
4587 	src_known = tnum_is_const(src_reg.var_off);
4588 	dst_known = tnum_is_const(dst_reg->var_off);
4589 
4590 	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
4591 	    smin_val > smax_val || umin_val > umax_val) {
4592 		/* Taint dst register if offset had invalid bounds derived from
4593 		 * e.g. dead branches.
4594 		 */
4595 		__mark_reg_unknown(dst_reg);
4596 		return 0;
4597 	}
4598 
4599 	if (!src_known &&
4600 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
4601 		__mark_reg_unknown(dst_reg);
4602 		return 0;
4603 	}
4604 
4605 	switch (opcode) {
4606 	case BPF_ADD:
4607 		ret = sanitize_val_alu(env, insn);
4608 		if (ret < 0) {
4609 			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
4610 			return ret;
4611 		}
4612 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
4613 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
4614 			dst_reg->smin_value = S64_MIN;
4615 			dst_reg->smax_value = S64_MAX;
4616 		} else {
4617 			dst_reg->smin_value += smin_val;
4618 			dst_reg->smax_value += smax_val;
4619 		}
4620 		if (dst_reg->umin_value + umin_val < umin_val ||
4621 		    dst_reg->umax_value + umax_val < umax_val) {
4622 			dst_reg->umin_value = 0;
4623 			dst_reg->umax_value = U64_MAX;
4624 		} else {
4625 			dst_reg->umin_value += umin_val;
4626 			dst_reg->umax_value += umax_val;
4627 		}
4628 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
4629 		break;
4630 	case BPF_SUB:
4631 		ret = sanitize_val_alu(env, insn);
4632 		if (ret < 0) {
4633 			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
4634 			return ret;
4635 		}
4636 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
4637 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
4638 			/* Overflow possible, we know nothing */
4639 			dst_reg->smin_value = S64_MIN;
4640 			dst_reg->smax_value = S64_MAX;
4641 		} else {
4642 			dst_reg->smin_value -= smax_val;
4643 			dst_reg->smax_value -= smin_val;
4644 		}
4645 		if (dst_reg->umin_value < umax_val) {
4646 			/* Overflow possible, we know nothing */
4647 			dst_reg->umin_value = 0;
4648 			dst_reg->umax_value = U64_MAX;
4649 		} else {
4650 			/* Cannot overflow (as long as bounds are consistent) */
4651 			dst_reg->umin_value -= umax_val;
4652 			dst_reg->umax_value -= umin_val;
4653 		}
4654 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
4655 		break;
4656 	case BPF_MUL:
4657 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
4658 		if (smin_val < 0 || dst_reg->smin_value < 0) {
4659 			/* Ain't nobody got time to multiply that sign */
4660 			__mark_reg_unbounded(dst_reg);
4661 			__update_reg_bounds(dst_reg);
4662 			break;
4663 		}
4664 		/* Both values are positive, so we can work with unsigned and
4665 		 * copy the result to signed (unless it exceeds S64_MAX).
4666 		 */
4667 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
4668 			/* Potential overflow, we know nothing */
4669 			__mark_reg_unbounded(dst_reg);
4670 			/* (except what we can learn from the var_off) */
4671 			__update_reg_bounds(dst_reg);
4672 			break;
4673 		}
4674 		dst_reg->umin_value *= umin_val;
4675 		dst_reg->umax_value *= umax_val;
4676 		if (dst_reg->umax_value > S64_MAX) {
4677 			/* Overflow possible, we know nothing */
4678 			dst_reg->smin_value = S64_MIN;
4679 			dst_reg->smax_value = S64_MAX;
4680 		} else {
4681 			dst_reg->smin_value = dst_reg->umin_value;
4682 			dst_reg->smax_value = dst_reg->umax_value;
4683 		}
4684 		break;
4685 	case BPF_AND:
4686 		if (src_known && dst_known) {
4687 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
4688 						  src_reg.var_off.value);
4689 			break;
4690 		}
4691 		/* We get our minimum from the var_off, since that's inherently
4692 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
4693 		 */
4694 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
4695 		dst_reg->umin_value = dst_reg->var_off.value;
4696 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
4697 		if (dst_reg->smin_value < 0 || smin_val < 0) {
4698 			/* Lose signed bounds when ANDing negative numbers,
4699 			 * ain't nobody got time for that.
4700 			 */
4701 			dst_reg->smin_value = S64_MIN;
4702 			dst_reg->smax_value = S64_MAX;
4703 		} else {
4704 			/* ANDing two positives gives a positive, so safe to
4705 			 * cast result into s64.
4706 			 */
4707 			dst_reg->smin_value = dst_reg->umin_value;
4708 			dst_reg->smax_value = dst_reg->umax_value;
4709 		}
4710 		/* We may learn something more from the var_off */
4711 		__update_reg_bounds(dst_reg);
4712 		break;
4713 	case BPF_OR:
4714 		if (src_known && dst_known) {
4715 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
4716 						  src_reg.var_off.value);
4717 			break;
4718 		}
4719 		/* We get our maximum from the var_off, and our minimum is the
4720 		 * maximum of the operands' minima
4721 		 */
4722 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
4723 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
4724 		dst_reg->umax_value = dst_reg->var_off.value |
4725 				      dst_reg->var_off.mask;
4726 		if (dst_reg->smin_value < 0 || smin_val < 0) {
4727 			/* Lose signed bounds when ORing negative numbers,
4728 			 * ain't nobody got time for that.
4729 			 */
4730 			dst_reg->smin_value = S64_MIN;
4731 			dst_reg->smax_value = S64_MAX;
4732 		} else {
4733 			/* ORing two positives gives a positive, so safe to
4734 			 * cast result into s64.
4735 			 */
4736 			dst_reg->smin_value = dst_reg->umin_value;
4737 			dst_reg->smax_value = dst_reg->umax_value;
4738 		}
4739 		/* We may learn something more from the var_off */
4740 		__update_reg_bounds(dst_reg);
4741 		break;
4742 	case BPF_LSH:
4743 		if (umax_val >= insn_bitness) {
4744 			/* Shifts greater than 31 or 63 are undefined.
4745 			 * This includes shifts by a negative number.
4746 			 */
4747 			mark_reg_unknown(env, regs, insn->dst_reg);
4748 			break;
4749 		}
4750 		/* We lose all sign bit information (except what we can pick
4751 		 * up from var_off)
4752 		 */
4753 		dst_reg->smin_value = S64_MIN;
4754 		dst_reg->smax_value = S64_MAX;
4755 		/* If we might shift our top bit out, then we know nothing */
4756 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
4757 			dst_reg->umin_value = 0;
4758 			dst_reg->umax_value = U64_MAX;
4759 		} else {
4760 			dst_reg->umin_value <<= umin_val;
4761 			dst_reg->umax_value <<= umax_val;
4762 		}
4763 		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
4764 		/* We may learn something more from the var_off */
4765 		__update_reg_bounds(dst_reg);
4766 		break;
4767 	case BPF_RSH:
4768 		if (umax_val >= insn_bitness) {
4769 			/* Shifts greater than 31 or 63 are undefined.
4770 			 * This includes shifts by a negative number.
4771 			 */
4772 			mark_reg_unknown(env, regs, insn->dst_reg);
4773 			break;
4774 		}
4775 		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
4776 		 * be negative, then either:
4777 		 * 1) src_reg might be zero, so the sign bit of the result is
4778 		 *    unknown, so we lose our signed bounds
4779 		 * 2) it's known negative, thus the unsigned bounds capture the
4780 		 *    signed bounds
4781 		 * 3) the signed bounds cross zero, so they tell us nothing
4782 		 *    about the result
4783 		 * If the value in dst_reg is known nonnegative, then again the
4784 		 * unsigned bounds capture the signed bounds.
4785 		 * Thus, in all cases it suffices to blow away our signed bounds
4786 		 * and rely on inferring new ones from the unsigned bounds and
4787 		 * var_off of the result.
4788 		 */
4789 		dst_reg->smin_value = S64_MIN;
4790 		dst_reg->smax_value = S64_MAX;
4791 		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
4792 		dst_reg->umin_value >>= umax_val;
4793 		dst_reg->umax_value >>= umin_val;
4794 		/* We may learn something more from the var_off */
4795 		__update_reg_bounds(dst_reg);
4796 		break;
4797 	case BPF_ARSH:
4798 		if (umax_val >= insn_bitness) {
4799 			/* Shifts greater than 31 or 63 are undefined.
4800 			 * This includes shifts by a negative number.
4801 			 */
4802 			mark_reg_unknown(env, regs, insn->dst_reg);
4803 			break;
4804 		}
4805 
4806 		/* Upon reaching here, src_known is true and
4807 		 * umax_val is equal to umin_val.
4808 		 */
4809 		dst_reg->smin_value >>= umin_val;
4810 		dst_reg->smax_value >>= umin_val;
4811 		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
4812 
4813 		/* blow away the dst_reg umin_value/umax_value and rely on
4814 		 * dst_reg var_off to refine the result.
4815 		 */
4816 		dst_reg->umin_value = 0;
4817 		dst_reg->umax_value = U64_MAX;
4818 		__update_reg_bounds(dst_reg);
4819 		break;
4820 	default:
4821 		mark_reg_unknown(env, regs, insn->dst_reg);
4822 		break;
4823 	}
4824 
4825 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
4826 		/* 32-bit ALU ops are (32,32)->32 */
4827 		coerce_reg_to_size(dst_reg, 4);
4828 	}
4829 
4830 	__reg_deduce_bounds(dst_reg);
4831 	__reg_bound_offset(dst_reg);
4832 	return 0;
4833 }
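
/* Illustrative sketch (not part of the original source): for the common
 * bounding idiom
 *
 *   BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
 *
 * the BPF_AND case above computes var_off = tnum_and(dst, const 0xff), whose
 * value and mask are confined to the low 8 bits, so after
 * __update_reg_bounds() the register ends up with umax_value <= 255, which
 * is enough to satisfy a later bounds check against e.g. a map value.
 */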
4834 
4835 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
4836  * and var_off.
4837  */
4838 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
4839 				   struct bpf_insn *insn)
4840 {
4841 	struct bpf_verifier_state *vstate = env->cur_state;
4842 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
4843 	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
4844 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
4845 	u8 opcode = BPF_OP(insn->code);
4846 	int err;
4847 
4848 	dst_reg = &regs[insn->dst_reg];
4849 	src_reg = NULL;
4850 	if (dst_reg->type != SCALAR_VALUE)
4851 		ptr_reg = dst_reg;
4852 	if (BPF_SRC(insn->code) == BPF_X) {
4853 		src_reg = &regs[insn->src_reg];
4854 		if (src_reg->type != SCALAR_VALUE) {
4855 			if (dst_reg->type != SCALAR_VALUE) {
4856 				/* Combining two pointers by any ALU op yields
4857 				 * an arbitrary scalar. Disallow all math except
4858 				 * pointer subtraction
4859 				 */
4860 				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
4861 					mark_reg_unknown(env, regs, insn->dst_reg);
4862 					return 0;
4863 				}
4864 				verbose(env, "R%d pointer %s pointer prohibited\n",
4865 					insn->dst_reg,
4866 					bpf_alu_string[opcode >> 4]);
4867 				return -EACCES;
4868 			} else {
4869 				/* scalar += pointer
4870 				 * This is legal, but we have to reverse our
4871 				 * src/dest handling in computing the range
4872 				 */
4873 				err = mark_chain_precision(env, insn->dst_reg);
4874 				if (err)
4875 					return err;
4876 				return adjust_ptr_min_max_vals(env, insn,
4877 							       src_reg, dst_reg);
4878 			}
4879 		} else if (ptr_reg) {
4880 			/* pointer += scalar */
4881 			err = mark_chain_precision(env, insn->src_reg);
4882 			if (err)
4883 				return err;
4884 			return adjust_ptr_min_max_vals(env, insn,
4885 						       dst_reg, src_reg);
4886 		}
4887 	} else {
4888 		/* Pretend the src is a reg with a known value, since we only
4889 		 * need to be able to read from this state.
4890 		 */
4891 		off_reg.type = SCALAR_VALUE;
4892 		__mark_reg_known(&off_reg, insn->imm);
4893 		src_reg = &off_reg;
4894 		if (ptr_reg) /* pointer += K */
4895 			return adjust_ptr_min_max_vals(env, insn,
4896 						       ptr_reg, src_reg);
4897 	}
4898 
4899 	/* Got here implies adding two SCALAR_VALUEs */
4900 	if (WARN_ON_ONCE(ptr_reg)) {
4901 		print_verifier_state(env, state);
4902 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
4903 		return -EINVAL;
4904 	}
4905 	if (WARN_ON(!src_reg)) {
4906 		print_verifier_state(env, state);
4907 		verbose(env, "verifier internal error: no src_reg\n");
4908 		return -EINVAL;
4909 	}
4910 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
4911 }
4912 
4913 /* check validity of 32-bit and 64-bit arithmetic operations */
4914 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
4915 {
4916 	struct bpf_reg_state *regs = cur_regs(env);
4917 	u8 opcode = BPF_OP(insn->code);
4918 	int err;
4919 
4920 	if (opcode == BPF_END || opcode == BPF_NEG) {
4921 		if (opcode == BPF_NEG) {
4922 			if (BPF_SRC(insn->code) != 0 ||
4923 			    insn->src_reg != BPF_REG_0 ||
4924 			    insn->off != 0 || insn->imm != 0) {
4925 				verbose(env, "BPF_NEG uses reserved fields\n");
4926 				return -EINVAL;
4927 			}
4928 		} else {
4929 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
4930 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
4931 			    BPF_CLASS(insn->code) == BPF_ALU64) {
4932 				verbose(env, "BPF_END uses reserved fields\n");
4933 				return -EINVAL;
4934 			}
4935 		}
4936 
4937 		/* check src operand */
4938 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4939 		if (err)
4940 			return err;
4941 
4942 		if (is_pointer_value(env, insn->dst_reg)) {
4943 			verbose(env, "R%d pointer arithmetic prohibited\n",
4944 				insn->dst_reg);
4945 			return -EACCES;
4946 		}
4947 
4948 		/* check dest operand */
4949 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
4950 		if (err)
4951 			return err;
4952 
4953 	} else if (opcode == BPF_MOV) {
4954 
4955 		if (BPF_SRC(insn->code) == BPF_X) {
4956 			if (insn->imm != 0 || insn->off != 0) {
4957 				verbose(env, "BPF_MOV uses reserved fields\n");
4958 				return -EINVAL;
4959 			}
4960 
4961 			/* check src operand */
4962 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
4963 			if (err)
4964 				return err;
4965 		} else {
4966 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
4967 				verbose(env, "BPF_MOV uses reserved fields\n");
4968 				return -EINVAL;
4969 			}
4970 		}
4971 
4972 		/* check dest operand, mark as required later */
4973 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
4974 		if (err)
4975 			return err;
4976 
4977 		if (BPF_SRC(insn->code) == BPF_X) {
4978 			struct bpf_reg_state *src_reg = regs + insn->src_reg;
4979 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
4980 
4981 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
4982 				/* case: R1 = R2
4983 				 * copy register state to dest reg
4984 				 */
4985 				*dst_reg = *src_reg;
4986 				dst_reg->live |= REG_LIVE_WRITTEN;
4987 				dst_reg->subreg_def = DEF_NOT_SUBREG;
4988 			} else {
4989 				/* R1 = (u32) R2 */
4990 				if (is_pointer_value(env, insn->src_reg)) {
4991 					verbose(env,
4992 						"R%d partial copy of pointer\n",
4993 						insn->src_reg);
4994 					return -EACCES;
4995 				} else if (src_reg->type == SCALAR_VALUE) {
4996 					*dst_reg = *src_reg;
4997 					dst_reg->live |= REG_LIVE_WRITTEN;
4998 					dst_reg->subreg_def = env->insn_idx + 1;
4999 				} else {
5000 					mark_reg_unknown(env, regs,
5001 							 insn->dst_reg);
5002 				}
5003 				coerce_reg_to_size(dst_reg, 4);
5004 			}
5005 		} else {
5006 			/* case: R = imm
5007 			 * remember the value we stored into this reg
5008 			 */
5009 			/* clear any state __mark_reg_known doesn't set */
5010 			mark_reg_unknown(env, regs, insn->dst_reg);
5011 			regs[insn->dst_reg].type = SCALAR_VALUE;
5012 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
5013 				__mark_reg_known(regs + insn->dst_reg,
5014 						 insn->imm);
5015 			} else {
5016 				__mark_reg_known(regs + insn->dst_reg,
5017 						 (u32)insn->imm);
5018 			}
5019 		}
5020 
5021 	} else if (opcode > BPF_END) {
5022 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
5023 		return -EINVAL;
5024 
5025 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
5026 
5027 		if (BPF_SRC(insn->code) == BPF_X) {
5028 			if (insn->imm != 0 || insn->off != 0) {
5029 				verbose(env, "BPF_ALU uses reserved fields\n");
5030 				return -EINVAL;
5031 			}
5032 			/* check src1 operand */
5033 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
5034 			if (err)
5035 				return err;
5036 		} else {
5037 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
5038 				verbose(env, "BPF_ALU uses reserved fields\n");
5039 				return -EINVAL;
5040 			}
5041 		}
5042 
5043 		/* check src2 operand */
5044 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5045 		if (err)
5046 			return err;
5047 
5048 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
5049 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
5050 			verbose(env, "div by zero\n");
5051 			return -EINVAL;
5052 		}
5053 
5054 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
5055 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
5056 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
5057 
5058 			if (insn->imm < 0 || insn->imm >= size) {
5059 				verbose(env, "invalid shift %d\n", insn->imm);
5060 				return -EINVAL;
5061 			}
5062 		}
5063 
5064 		/* check dest operand */
5065 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
5066 		if (err)
5067 			return err;
5068 
5069 		return adjust_reg_min_max_vals(env, insn);
5070 	}
5071 
5072 	return 0;
5073 }
5074 
5075 static void __find_good_pkt_pointers(struct bpf_func_state *state,
5076 				     struct bpf_reg_state *dst_reg,
5077 				     enum bpf_reg_type type, u16 new_range)
5078 {
5079 	struct bpf_reg_state *reg;
5080 	int i;
5081 
5082 	for (i = 0; i < MAX_BPF_REG; i++) {
5083 		reg = &state->regs[i];
5084 		if (reg->type == type && reg->id == dst_reg->id)
5085 			/* keep the maximum range already checked */
5086 			reg->range = max(reg->range, new_range);
5087 	}
5088 
5089 	bpf_for_each_spilled_reg(i, state, reg) {
5090 		if (!reg)
5091 			continue;
5092 		if (reg->type == type && reg->id == dst_reg->id)
5093 			reg->range = max(reg->range, new_range);
5094 	}
5095 }
5096 
5097 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
5098 				   struct bpf_reg_state *dst_reg,
5099 				   enum bpf_reg_type type,
5100 				   bool range_right_open)
5101 {
5102 	u16 new_range;
5103 	int i;
5104 
5105 	if (dst_reg->off < 0 ||
5106 	    (dst_reg->off == 0 && range_right_open))
5107 		/* This doesn't give us any range */
5108 		return;
5109 
5110 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
5111 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
5112 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
5113 		 * than pkt_end, but that's because it's also less than pkt.
5114 		 */
5115 		return;
5116 
5117 	new_range = dst_reg->off;
5118 	if (range_right_open)
5119 		new_range--;
5120 
5121 	/* Examples for register markings:
5122 	 *
5123 	 * pkt_data in dst register:
5124 	 *
5125 	 *   r2 = r3;
5126 	 *   r2 += 8;
5127 	 *   if (r2 > pkt_end) goto <handle exception>
5128 	 *   <access okay>
5129 	 *
5130 	 *   r2 = r3;
5131 	 *   r2 += 8;
5132 	 *   if (r2 < pkt_end) goto <access okay>
5133 	 *   <handle exception>
5134 	 *
5135 	 *   Where:
5136 	 *     r2 == dst_reg, pkt_end == src_reg
5137 	 *     r2=pkt(id=n,off=8,r=0)
5138 	 *     r3=pkt(id=n,off=0,r=0)
5139 	 *
5140 	 * pkt_data in src register:
5141 	 *
5142 	 *   r2 = r3;
5143 	 *   r2 += 8;
5144 	 *   if (pkt_end >= r2) goto <access okay>
5145 	 *   <handle exception>
5146 	 *
5147 	 *   r2 = r3;
5148 	 *   r2 += 8;
5149 	 *   if (pkt_end <= r2) goto <handle exception>
5150 	 *   <access okay>
5151 	 *
5152 	 *   Where:
5153 	 *     pkt_end == dst_reg, r2 == src_reg
5154 	 *     r2=pkt(id=n,off=8,r=0)
5155 	 *     r3=pkt(id=n,off=0,r=0)
5156 	 *
5157 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
5158 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
5159 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
5160 	 * the check.
5161 	 */
5162 
5163 	/* If our ids match, then we must have the same max_value.  And we
5164 	 * don't care about the other reg's fixed offset, since if it's too big
5165 	 * the range won't allow anything.
5166 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
5167 	 */
5168 	for (i = 0; i <= vstate->curframe; i++)
5169 		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
5170 					 new_range);
5171 }
5172 
5173 /* compute branch direction of the expression "if (reg opcode val) goto target;"
5174  * and return:
5175  *  1 - branch will be taken and "goto target" will be executed
5176  *  0 - branch will not be taken and fall-through to next insn
5177  * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
5178  */
5179 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
5180 			   bool is_jmp32)
5181 {
5182 	struct bpf_reg_state reg_lo;
5183 	s64 sval;
5184 
5185 	if (__is_pointer_value(false, reg))
5186 		return -1;
5187 
5188 	if (is_jmp32) {
5189 		reg_lo = *reg;
5190 		reg = &reg_lo;
5191 		/* For JMP32, only low 32 bits are compared, coerce_reg_to_size
5192 		 * could truncate high bits and update umin/umax according to
5193 		 * information of low bits.
5194 		 */
5195 		coerce_reg_to_size(reg, 4);
5196 		/* smin/smax need special handling. For example, after coerce,
5197 		 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
5198 		 * used as operand to JMP32. It is a negative number from s32's
5199 		 * point of view, while it is a positive number when seen as
5200 		 * s64. The smin/smax are kept as s64, therefore, when used with
5201 		 * JMP32, they need to be transformed into s32, then sign
5202 		 * extended back to s64.
5203 		 *
5204 		 * Also, smin/smax were copied from umin/umax. If umin and umax
5205 		 * have different sign bits, the min/max relationship is not
5206 		 * preserved after casting to s32; in that case, set smin/smax
5207 		 * to the safest (widest) s32 range.
5208 		 */
5209 		if ((reg->umax_value ^ reg->umin_value) &
5210 		    (1ULL << 31)) {
5211 			reg->smin_value = S32_MIN;
5212 			reg->smax_value = S32_MAX;
5213 		}
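		/* Illustration with hypothetical bounds: umin = 0x7ffffffe and
		 * umax = 0x80000001 straddle bit 31, so as s32 they become
		 * 0x7ffffffe and -0x7fffffff and their ordering is lost; the
		 * check above then falls back to [S32_MIN, S32_MAX] before the
		 * sign extension below.
		 */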
5214 		reg->smin_value = (s64)(s32)reg->smin_value;
5215 		reg->smax_value = (s64)(s32)reg->smax_value;
5216 
5217 		val = (u32)val;
5218 		sval = (s64)(s32)val;
5219 	} else {
5220 		sval = (s64)val;
5221 	}
5222 
5223 	switch (opcode) {
5224 	case BPF_JEQ:
5225 		if (tnum_is_const(reg->var_off))
5226 			return !!tnum_equals_const(reg->var_off, val);
5227 		break;
5228 	case BPF_JNE:
5229 		if (tnum_is_const(reg->var_off))
5230 			return !tnum_equals_const(reg->var_off, val);
5231 		break;
5232 	case BPF_JSET:
5233 		if ((~reg->var_off.mask & reg->var_off.value) & val)
5234 			return 1;
5235 		if (!((reg->var_off.mask | reg->var_off.value) & val))
5236 			return 0;
5237 		break;
5238 	case BPF_JGT:
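		/* e.g. with hypothetical bounds, for "if r1 > 5": r1 in [6, 10]
		 * yields 1 (always taken), r1 in [0, 5] yields 0 (never taken),
		 * and r1 in [0, 10] falls through to the final -1 (unknown).
		 */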
5239 		if (reg->umin_value > val)
5240 			return 1;
5241 		else if (reg->umax_value <= val)
5242 			return 0;
5243 		break;
5244 	case BPF_JSGT:
5245 		if (reg->smin_value > sval)
5246 			return 1;
5247 		else if (reg->smax_value < sval)
5248 			return 0;
5249 		break;
5250 	case BPF_JLT:
5251 		if (reg->umax_value < val)
5252 			return 1;
5253 		else if (reg->umin_value >= val)
5254 			return 0;
5255 		break;
5256 	case BPF_JSLT:
5257 		if (reg->smax_value < sval)
5258 			return 1;
5259 		else if (reg->smin_value >= sval)
5260 			return 0;
5261 		break;
5262 	case BPF_JGE:
5263 		if (reg->umin_value >= val)
5264 			return 1;
5265 		else if (reg->umax_value < val)
5266 			return 0;
5267 		break;
5268 	case BPF_JSGE:
5269 		if (reg->smin_value >= sval)
5270 			return 1;
5271 		else if (reg->smax_value < sval)
5272 			return 0;
5273 		break;
5274 	case BPF_JLE:
5275 		if (reg->umax_value <= val)
5276 			return 1;
5277 		else if (reg->umin_value > val)
5278 			return 0;
5279 		break;
5280 	case BPF_JSLE:
5281 		if (reg->smax_value <= sval)
5282 			return 1;
5283 		else if (reg->smin_value > sval)
5284 			return 0;
5285 		break;
5286 	}
5287 
5288 	return -1;
5289 }
5290 
5291 /* Generate min value of the high 32-bit from TNUM info. */
5292 static u64 gen_hi_min(struct tnum var)
5293 {
5294 	return var.value & ~0xffffffffULL;
5295 }
5296 
5297 /* Generate max value of the high 32-bit from TNUM info. */
5298 static u64 gen_hi_max(struct tnum var)
5299 {
5300 	return (var.value | var.mask) & ~0xffffffffULL;
5301 }
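
/* For example, a hypothetical tnum {.value = 0x100000003, .mask = 0xf0} (only
 * bits 4-7 unknown) gives the same result, 0x100000000, from both helpers;
 * with {.value = 0x100000003, .mask = 0x200000000} the upper half itself is
 * unknown, so gen_hi_min() returns 0x100000000 and gen_hi_max() returns
 * 0x300000000.
 */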
5302 
5303 /* Return true if VAL is compared with an s64 value sign-extended from s32,
5304  * and they have the same signedness.
5305  */
5306 static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
5307 {
5308 	return ((s32)sval >= 0 &&
5309 		reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
5310 	       ((s32)sval < 0 &&
5311 		reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
5312 }
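
/* E.g. for a hypothetical sval of 5 this requires the register's s64 range to
 * lie within [0, S32_MAX]; for sval of -2 it requires the range to lie within
 * [S32_MIN, 0].  Otherwise the JMP32 handling below cannot safely reuse the
 * 64-bit smin/smax bounds.
 */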
5313 
5314 /* Adjusts the register min/max values in the case that the dst_reg is the
5315  * variable register that we are working on, and src_reg is a constant or we're
5316  * simply doing a BPF_K check.
5317  * In JEQ/JNE cases we also adjust the var_off values.
5318  */
5319 static void reg_set_min_max(struct bpf_reg_state *true_reg,
5320 			    struct bpf_reg_state *false_reg, u64 val,
5321 			    u8 opcode, bool is_jmp32)
5322 {
5323 	s64 sval;
5324 
5325 	/* If the dst_reg is a pointer, we can't learn anything about its
5326 	 * variable offset from the compare (unless src_reg were a pointer into
5327 	 * the same object, but we don't bother with that).
5328 	 * Since false_reg and true_reg have the same type by construction, we
5329 	 * only need to check one of them for pointerness.
5330 	 */
5331 	if (__is_pointer_value(false, false_reg))
5332 		return;
5333 
5334 	val = is_jmp32 ? (u32)val : val;
5335 	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
5336 
5337 	switch (opcode) {
5338 	case BPF_JEQ:
5339 	case BPF_JNE:
5340 	{
5341 		struct bpf_reg_state *reg =
5342 			opcode == BPF_JEQ ? true_reg : false_reg;
5343 
5344 		/* For BPF_JEQ, if this is false we know nothing Jon Snow, but
5345 		 * if it is true we know the value for sure. Likewise for
5346 		 * BPF_JNE.
5347 		 */
5348 		if (is_jmp32) {
5349 			u64 old_v = reg->var_off.value;
5350 			u64 hi_mask = ~0xffffffffULL;
5351 
5352 			reg->var_off.value = (old_v & hi_mask) | val;
5353 			reg->var_off.mask &= hi_mask;
5354 		} else {
5355 			__mark_reg_known(reg, val);
5356 		}
5357 		break;
5358 	}
5359 	case BPF_JSET:
5360 		false_reg->var_off = tnum_and(false_reg->var_off,
5361 					      tnum_const(~val));
5362 		if (is_power_of_2(val))
5363 			true_reg->var_off = tnum_or(true_reg->var_off,
5364 						    tnum_const(val));
5365 		break;
5366 	case BPF_JGE:
5367 	case BPF_JGT:
5368 	{
5369 		u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
5370 		u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
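		/* e.g. (hypothetical): "if r1 > 5" with r1 in [0, 10] gives
		 * false_umax = 5 and true_umin = 6, so the clamping below
		 * leaves the false branch with [0, 5] and the true branch
		 * with [6, 10].
		 */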
5371 
5372 		if (is_jmp32) {
5373 			false_umax += gen_hi_max(false_reg->var_off);
5374 			true_umin += gen_hi_min(true_reg->var_off);
5375 		}
5376 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
5377 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
5378 		break;
5379 	}
5380 	case BPF_JSGE:
5381 	case BPF_JSGT:
5382 	{
5383 		s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
5384 		s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
5385 
5386 		/* If the full s64 was not sign-extended from s32 then don't
5387 		 * deduce further info.
5388 		 */
5389 		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5390 			break;
5391 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
5392 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
5393 		break;
5394 	}
5395 	case BPF_JLE:
5396 	case BPF_JLT:
5397 	{
5398 		u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
5399 		u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
5400 
5401 		if (is_jmp32) {
5402 			false_umin += gen_hi_min(false_reg->var_off);
5403 			true_umax += gen_hi_max(true_reg->var_off);
5404 		}
5405 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
5406 		true_reg->umax_value = min(true_reg->umax_value, true_umax);
5407 		break;
5408 	}
5409 	case BPF_JSLE:
5410 	case BPF_JSLT:
5411 	{
5412 		s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
5413 		s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
5414 
5415 		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5416 			break;
5417 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
5418 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
5419 		break;
5420 	}
5421 	default:
5422 		break;
5423 	}
5424 
5425 	__reg_deduce_bounds(false_reg);
5426 	__reg_deduce_bounds(true_reg);
5427 	/* We might have learned some bits from the bounds. */
5428 	__reg_bound_offset(false_reg);
5429 	__reg_bound_offset(true_reg);
5430 	/* Intersecting with the old var_off might have improved our bounds
5431 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5432 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
5433 	 */
5434 	__update_reg_bounds(false_reg);
5435 	__update_reg_bounds(true_reg);
5436 }
5437 
5438 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
5439  * the variable reg.
5440  */
5441 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
5442 				struct bpf_reg_state *false_reg, u64 val,
5443 				u8 opcode, bool is_jmp32)
5444 {
5445 	s64 sval;
5446 
5447 	if (__is_pointer_value(false, false_reg))
5448 		return;
5449 
5450 	val = is_jmp32 ? (u32)val : val;
5451 	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
5452 
5453 	switch (opcode) {
5454 	case BPF_JEQ:
5455 	case BPF_JNE:
5456 	{
5457 		struct bpf_reg_state *reg =
5458 			opcode == BPF_JEQ ? true_reg : false_reg;
5459 
5460 		if (is_jmp32) {
5461 			u64 old_v = reg->var_off.value;
5462 			u64 hi_mask = ~0xffffffffULL;
5463 
5464 			reg->var_off.value = (old_v & hi_mask) | val;
5465 			reg->var_off.mask &= hi_mask;
5466 		} else {
5467 			__mark_reg_known(reg, val);
5468 		}
5469 		break;
5470 	}
5471 	case BPF_JSET:
5472 		false_reg->var_off = tnum_and(false_reg->var_off,
5473 					      tnum_const(~val));
5474 		if (is_power_of_2(val))
5475 			true_reg->var_off = tnum_or(true_reg->var_off,
5476 						    tnum_const(val));
5477 		break;
5478 	case BPF_JGE:
5479 	case BPF_JGT:
5480 	{
5481 		u64 false_umin = opcode == BPF_JGT ? val    : val + 1;
5482 		u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
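		/* e.g. (hypothetical): "if 5 > r1", i.e. dst holds the constant
		 * 5 and r1 is the variable src: the true branch implies r1 < 5,
		 * so true_umax = 4; the false branch implies r1 >= 5, so
		 * false_umin = 5.
		 */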
5483 
5484 		if (is_jmp32) {
5485 			false_umin += gen_hi_min(false_reg->var_off);
5486 			true_umax += gen_hi_max(true_reg->var_off);
5487 		}
5488 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
5489 		true_reg->umax_value = min(true_reg->umax_value, true_umax);
5490 		break;
5491 	}
5492 	case BPF_JSGE:
5493 	case BPF_JSGT:
5494 	{
5495 		s64 false_smin = opcode == BPF_JSGT ? sval    : sval + 1;
5496 		s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
5497 
5498 		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5499 			break;
5500 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
5501 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
5502 		break;
5503 	}
5504 	case BPF_JLE:
5505 	case BPF_JLT:
5506 	{
5507 		u64 false_umax = opcode == BPF_JLT ? val    : val - 1;
5508 		u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
5509 
5510 		if (is_jmp32) {
5511 			false_umax += gen_hi_max(false_reg->var_off);
5512 			true_umin += gen_hi_min(true_reg->var_off);
5513 		}
5514 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
5515 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
5516 		break;
5517 	}
5518 	case BPF_JSLE:
5519 	case BPF_JSLT:
5520 	{
5521 		s64 false_smax = opcode == BPF_JSLT ? sval    : sval - 1;
5522 		s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
5523 
5524 		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5525 			break;
5526 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
5527 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
5528 		break;
5529 	}
5530 	default:
5531 		break;
5532 	}
5533 
5534 	__reg_deduce_bounds(false_reg);
5535 	__reg_deduce_bounds(true_reg);
5536 	/* We might have learned some bits from the bounds. */
5537 	__reg_bound_offset(false_reg);
5538 	__reg_bound_offset(true_reg);
5539 	/* Intersecting with the old var_off might have improved our bounds
5540 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5541 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
5542 	 */
5543 	__update_reg_bounds(false_reg);
5544 	__update_reg_bounds(true_reg);
5545 }
5546 
5547 /* Regs are known to be equal, so intersect their min/max/var_off */
5548 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
5549 				  struct bpf_reg_state *dst_reg)
5550 {
5551 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
5552 							dst_reg->umin_value);
5553 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
5554 							dst_reg->umax_value);
5555 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
5556 							dst_reg->smin_value);
5557 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
5558 							dst_reg->smax_value);
5559 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
5560 							     dst_reg->var_off);
5561 	/* We might have learned new bounds from the var_off. */
5562 	__update_reg_bounds(src_reg);
5563 	__update_reg_bounds(dst_reg);
5564 	/* We might have learned something about the sign bit. */
5565 	__reg_deduce_bounds(src_reg);
5566 	__reg_deduce_bounds(dst_reg);
5567 	/* We might have learned some bits from the bounds. */
5568 	__reg_bound_offset(src_reg);
5569 	__reg_bound_offset(dst_reg);
5570 	/* Intersecting with the old var_off might have improved our bounds
5571 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5572 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
5573 	 */
5574 	__update_reg_bounds(src_reg);
5575 	__update_reg_bounds(dst_reg);
5576 }
5577 
5578 static void reg_combine_min_max(struct bpf_reg_state *true_src,
5579 				struct bpf_reg_state *true_dst,
5580 				struct bpf_reg_state *false_src,
5581 				struct bpf_reg_state *false_dst,
5582 				u8 opcode)
5583 {
5584 	switch (opcode) {
5585 	case BPF_JEQ:
5586 		__reg_combine_min_max(true_src, true_dst);
5587 		break;
5588 	case BPF_JNE:
5589 		__reg_combine_min_max(false_src, false_dst);
5590 		break;
5591 	}
5592 }
5593 
5594 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
5595 				 struct bpf_reg_state *reg, u32 id,
5596 				 bool is_null)
5597 {
5598 	if (reg_type_may_be_null(reg->type) && reg->id == id) {
5599 		/* Old offset (both fixed and variable parts) should
5600 		 * have been known-zero, because we don't allow pointer
5601 		 * arithmetic on pointers that might be NULL.
5602 		 */
5603 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
5604 				 !tnum_equals_const(reg->var_off, 0) ||
5605 				 reg->off)) {
5606 			__mark_reg_known_zero(reg);
5607 			reg->off = 0;
5608 		}
5609 		if (is_null) {
5610 			reg->type = SCALAR_VALUE;
5611 		} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
5612 			if (reg->map_ptr->inner_map_meta) {
5613 				reg->type = CONST_PTR_TO_MAP;
5614 				reg->map_ptr = reg->map_ptr->inner_map_meta;
5615 			} else if (reg->map_ptr->map_type ==
5616 				   BPF_MAP_TYPE_XSKMAP) {
5617 				reg->type = PTR_TO_XDP_SOCK;
5618 			} else {
5619 				reg->type = PTR_TO_MAP_VALUE;
5620 			}
5621 		} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
5622 			reg->type = PTR_TO_SOCKET;
5623 		} else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
5624 			reg->type = PTR_TO_SOCK_COMMON;
5625 		} else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
5626 			reg->type = PTR_TO_TCP_SOCK;
5627 		}
5628 		if (is_null) {
5629 			/* We don't need id and ref_obj_id from this point
5630 			 * onwards anymore, so reset them here so that state
5631 			 * pruning has a chance to take effect.
5632 			 */
5633 			reg->id = 0;
5634 			reg->ref_obj_id = 0;
5635 		} else if (!reg_may_point_to_spin_lock(reg)) {
5636 			/* For not-NULL ptr, reg->ref_obj_id will be reset
5637 			 * in release_reg_references().
5638 			 *
5639 			 * reg->id is still used by spin_lock ptr. Other
5640 			 * than spin_lock ptr type, reg->id can be reset.
5641 			 */
5642 			reg->id = 0;
5643 		}
5644 	}
5645 }
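
/* A typical pattern this handles (sketch, not from any specific program):
 *
 *   r0 = bpf_map_lookup_elem(...)     r0 is PTR_TO_MAP_VALUE_OR_NULL, id = N
 *   r6 = r0                           r6 shares id = N
 *   if (r0 == 0) goto err             NULL branch: r0 and r6 become SCALAR_VALUE
 *                                     non-NULL branch: both become PTR_TO_MAP_VALUE
 */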
5646 
5647 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
5648 				    bool is_null)
5649 {
5650 	struct bpf_reg_state *reg;
5651 	int i;
5652 
5653 	for (i = 0; i < MAX_BPF_REG; i++)
5654 		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
5655 
5656 	bpf_for_each_spilled_reg(i, state, reg) {
5657 		if (!reg)
5658 			continue;
5659 		mark_ptr_or_null_reg(state, reg, id, is_null);
5660 	}
5661 }
5662 
5663 /* The logic is similar to find_good_pkt_pointers(), both could eventually
5664  * be folded together at some point.
5665  */
5666 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
5667 				  bool is_null)
5668 {
5669 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
5670 	struct bpf_reg_state *regs = state->regs;
5671 	u32 ref_obj_id = regs[regno].ref_obj_id;
5672 	u32 id = regs[regno].id;
5673 	int i;
5674 
5675 	if (ref_obj_id && ref_obj_id == id && is_null)
5676 		/* regs[regno] is in the " == NULL" branch.
5677 		 * No one could have freed the reference state before
5678 		 * doing the NULL check.
5679 		 */
5680 		WARN_ON_ONCE(release_reference_state(state, id));
5681 
5682 	for (i = 0; i <= vstate->curframe; i++)
5683 		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
5684 }
5685 
5686 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
5687 				   struct bpf_reg_state *dst_reg,
5688 				   struct bpf_reg_state *src_reg,
5689 				   struct bpf_verifier_state *this_branch,
5690 				   struct bpf_verifier_state *other_branch)
5691 {
5692 	if (BPF_SRC(insn->code) != BPF_X)
5693 		return false;
5694 
5695 	/* Pointers are always 64-bit. */
5696 	if (BPF_CLASS(insn->code) == BPF_JMP32)
5697 		return false;
5698 
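
	/* Canonical shape this matches (sketch): with r2 = pkt_data + 14 and
	 * r3 = pkt_end, "if (r2 > r3) goto drop" hits the BPF_JGT case below;
	 * the fall-through branch then gets a 14 byte packet range via
	 * find_good_pkt_pointers().
	 */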
5699 	switch (BPF_OP(insn->code)) {
5700 	case BPF_JGT:
5701 		if ((dst_reg->type == PTR_TO_PACKET &&
5702 		     src_reg->type == PTR_TO_PACKET_END) ||
5703 		    (dst_reg->type == PTR_TO_PACKET_META &&
5704 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5705 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
5706 			find_good_pkt_pointers(this_branch, dst_reg,
5707 					       dst_reg->type, false);
5708 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
5709 			    src_reg->type == PTR_TO_PACKET) ||
5710 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5711 			    src_reg->type == PTR_TO_PACKET_META)) {
5712 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
5713 			find_good_pkt_pointers(other_branch, src_reg,
5714 					       src_reg->type, true);
5715 		} else {
5716 			return false;
5717 		}
5718 		break;
5719 	case BPF_JLT:
5720 		if ((dst_reg->type == PTR_TO_PACKET &&
5721 		     src_reg->type == PTR_TO_PACKET_END) ||
5722 		    (dst_reg->type == PTR_TO_PACKET_META &&
5723 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5724 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
5725 			find_good_pkt_pointers(other_branch, dst_reg,
5726 					       dst_reg->type, true);
5727 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
5728 			    src_reg->type == PTR_TO_PACKET) ||
5729 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5730 			    src_reg->type == PTR_TO_PACKET_META)) {
5731 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
5732 			find_good_pkt_pointers(this_branch, src_reg,
5733 					       src_reg->type, false);
5734 		} else {
5735 			return false;
5736 		}
5737 		break;
5738 	case BPF_JGE:
5739 		if ((dst_reg->type == PTR_TO_PACKET &&
5740 		     src_reg->type == PTR_TO_PACKET_END) ||
5741 		    (dst_reg->type == PTR_TO_PACKET_META &&
5742 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5743 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
5744 			find_good_pkt_pointers(this_branch, dst_reg,
5745 					       dst_reg->type, true);
5746 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
5747 			    src_reg->type == PTR_TO_PACKET) ||
5748 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5749 			    src_reg->type == PTR_TO_PACKET_META)) {
5750 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
5751 			find_good_pkt_pointers(other_branch, src_reg,
5752 					       src_reg->type, false);
5753 		} else {
5754 			return false;
5755 		}
5756 		break;
5757 	case BPF_JLE:
5758 		if ((dst_reg->type == PTR_TO_PACKET &&
5759 		     src_reg->type == PTR_TO_PACKET_END) ||
5760 		    (dst_reg->type == PTR_TO_PACKET_META &&
5761 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5762 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
5763 			find_good_pkt_pointers(other_branch, dst_reg,
5764 					       dst_reg->type, false);
5765 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
5766 			    src_reg->type == PTR_TO_PACKET) ||
5767 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5768 			    src_reg->type == PTR_TO_PACKET_META)) {
5769 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
5770 			find_good_pkt_pointers(this_branch, src_reg,
5771 					       src_reg->type, true);
5772 		} else {
5773 			return false;
5774 		}
5775 		break;
5776 	default:
5777 		return false;
5778 	}
5779 
5780 	return true;
5781 }
5782 
5783 static int check_cond_jmp_op(struct bpf_verifier_env *env,
5784 			     struct bpf_insn *insn, int *insn_idx)
5785 {
5786 	struct bpf_verifier_state *this_branch = env->cur_state;
5787 	struct bpf_verifier_state *other_branch;
5788 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
5789 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
5790 	u8 opcode = BPF_OP(insn->code);
5791 	bool is_jmp32;
5792 	int pred = -1;
5793 	int err;
5794 
5795 	/* Only conditional jumps are expected to reach here. */
5796 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
5797 		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
5798 		return -EINVAL;
5799 	}
5800 
5801 	if (BPF_SRC(insn->code) == BPF_X) {
5802 		if (insn->imm != 0) {
5803 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
5804 			return -EINVAL;
5805 		}
5806 
5807 		/* check src1 operand */
5808 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
5809 		if (err)
5810 			return err;
5811 
5812 		if (is_pointer_value(env, insn->src_reg)) {
5813 			verbose(env, "R%d pointer comparison prohibited\n",
5814 				insn->src_reg);
5815 			return -EACCES;
5816 		}
5817 		src_reg = &regs[insn->src_reg];
5818 	} else {
5819 		if (insn->src_reg != BPF_REG_0) {
5820 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
5821 			return -EINVAL;
5822 		}
5823 	}
5824 
5825 	/* check src2 operand */
5826 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
5827 	if (err)
5828 		return err;
5829 
5830 	dst_reg = &regs[insn->dst_reg];
5831 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
5832 
5833 	if (BPF_SRC(insn->code) == BPF_K)
5834 		pred = is_branch_taken(dst_reg, insn->imm,
5835 				       opcode, is_jmp32);
5836 	else if (src_reg->type == SCALAR_VALUE &&
5837 		 tnum_is_const(src_reg->var_off))
5838 		pred = is_branch_taken(dst_reg, src_reg->var_off.value,
5839 				       opcode, is_jmp32);
5840 	if (pred >= 0) {
5841 		err = mark_chain_precision(env, insn->dst_reg);
5842 		if (BPF_SRC(insn->code) == BPF_X && !err)
5843 			err = mark_chain_precision(env, insn->src_reg);
5844 		if (err)
5845 			return err;
5846 	}
5847 	if (pred == 1) {
5848 		/* only follow the goto, ignore fall-through */
5849 		*insn_idx += insn->off;
5850 		return 0;
5851 	} else if (pred == 0) {
5852 		/* only follow fall-through branch, since
5853 		 * that's where the program will go
5854 		 */
5855 		return 0;
5856 	}
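
	/* e.g. (hypothetical): for "if r1 > 5 goto +4" with r1 known to be 7,
	 * pred == 1 above and only the jump target is explored; with r1 known
	 * to be 3, pred == 0 and only the fall-through is explored.  Otherwise
	 * both paths are kept, with the jump target pushed below.
	 */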
5857 
5858 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
5859 				  false);
5860 	if (!other_branch)
5861 		return -EFAULT;
5862 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
5863 
5864 	/* detect if we are comparing against a constant value so we can adjust
5865 	 * our min/max values for our dst register.
5866 	 * this is only legit if both are scalars (or pointers to the same
5867 	 * object, I suppose, but we don't support that right now), because
5868 	 * otherwise the different base pointers mean the offsets aren't
5869 	 * comparable.
5870 	 */
5871 	if (BPF_SRC(insn->code) == BPF_X) {
5872 		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
5873 		struct bpf_reg_state lo_reg0 = *dst_reg;
5874 		struct bpf_reg_state lo_reg1 = *src_reg;
5875 		struct bpf_reg_state *src_lo, *dst_lo;
5876 
5877 		dst_lo = &lo_reg0;
5878 		src_lo = &lo_reg1;
5879 		coerce_reg_to_size(dst_lo, 4);
5880 		coerce_reg_to_size(src_lo, 4);
5881 
5882 		if (dst_reg->type == SCALAR_VALUE &&
5883 		    src_reg->type == SCALAR_VALUE) {
5884 			if (tnum_is_const(src_reg->var_off) ||
5885 			    (is_jmp32 && tnum_is_const(src_lo->var_off)))
5886 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
5887 						dst_reg,
5888 						is_jmp32
5889 						? src_lo->var_off.value
5890 						: src_reg->var_off.value,
5891 						opcode, is_jmp32);
5892 			else if (tnum_is_const(dst_reg->var_off) ||
5893 				 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
5894 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
5895 						    src_reg,
5896 						    is_jmp32
5897 						    ? dst_lo->var_off.value
5898 						    : dst_reg->var_off.value,
5899 						    opcode, is_jmp32);
5900 			else if (!is_jmp32 &&
5901 				 (opcode == BPF_JEQ || opcode == BPF_JNE))
5902 				/* Comparing for equality, we can combine knowledge */
5903 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
5904 						    &other_branch_regs[insn->dst_reg],
5905 						    src_reg, dst_reg, opcode);
5906 		}
5907 	} else if (dst_reg->type == SCALAR_VALUE) {
5908 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
5909 					dst_reg, insn->imm, opcode, is_jmp32);
5910 	}
5911 
5912 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
5913 	 * NOTE: these optimizations below are related with pointer comparison
5914 	 *       which will never be JMP32.
5915 	 */
5916 	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
5917 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
5918 	    reg_type_may_be_null(dst_reg->type)) {
5919 		/* Mark all identical registers in each branch as either
5920 		 * safe or unknown depending on the R == 0 or R != 0 conditional.
5921 		 */
5922 		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
5923 				      opcode == BPF_JNE);
5924 		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
5925 				      opcode == BPF_JEQ);
5926 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
5927 					   this_branch, other_branch) &&
5928 		   is_pointer_value(env, insn->dst_reg)) {
5929 		verbose(env, "R%d pointer comparison prohibited\n",
5930 			insn->dst_reg);
5931 		return -EACCES;
5932 	}
5933 	if (env->log.level & BPF_LOG_LEVEL)
5934 		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
5935 	return 0;
5936 }
5937 
5938 /* verify BPF_LD_IMM64 instruction */
5939 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
5940 {
5941 	struct bpf_insn_aux_data *aux = cur_aux(env);
5942 	struct bpf_reg_state *regs = cur_regs(env);
5943 	struct bpf_map *map;
5944 	int err;
5945 
5946 	if (BPF_SIZE(insn->code) != BPF_DW) {
5947 		verbose(env, "invalid BPF_LD_IMM insn\n");
5948 		return -EINVAL;
5949 	}
5950 	if (insn->off != 0) {
5951 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
5952 		return -EINVAL;
5953 	}
5954 
5955 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
5956 	if (err)
5957 		return err;
5958 
5959 	if (insn->src_reg == 0) {
5960 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
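		/* e.g. hypothetically, insn->imm = 0x11223344 and
		 * (insn + 1)->imm = 0x55667788 assemble to
		 * imm = 0x5566778811223344.
		 */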
5961 
5962 		regs[insn->dst_reg].type = SCALAR_VALUE;
5963 		__mark_reg_known(&regs[insn->dst_reg], imm);
5964 		return 0;
5965 	}
5966 
5967 	map = env->used_maps[aux->map_index];
5968 	mark_reg_known_zero(env, regs, insn->dst_reg);
5969 	regs[insn->dst_reg].map_ptr = map;
5970 
5971 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
5972 		regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
5973 		regs[insn->dst_reg].off = aux->map_off;
5974 		if (map_value_has_spin_lock(map))
5975 			regs[insn->dst_reg].id = ++env->id_gen;
5976 	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
5977 		regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
5978 	} else {
5979 		verbose(env, "bpf verifier is misconfigured\n");
5980 		return -EINVAL;
5981 	}
5982 
5983 	return 0;
5984 }
5985 
5986 static bool may_access_skb(enum bpf_prog_type type)
5987 {
5988 	switch (type) {
5989 	case BPF_PROG_TYPE_SOCKET_FILTER:
5990 	case BPF_PROG_TYPE_SCHED_CLS:
5991 	case BPF_PROG_TYPE_SCHED_ACT:
5992 		return true;
5993 	default:
5994 		return false;
5995 	}
5996 }
5997 
5998 /* verify safety of LD_ABS|LD_IND instructions:
5999  * - they can only appear in the programs where ctx == skb
6000  * - since they are wrappers of function calls, they scratch R1-R5 registers,
6001  *   preserve R6-R9, and store return value into R0
6002  *
6003  * Implicit input:
6004  *   ctx == skb == R6 == CTX
6005  *
6006  * Explicit input:
6007  *   SRC == any register
6008  *   IMM == 32-bit immediate
6009  *
6010  * Output:
6011  *   R0 - 8/16/32-bit skb data converted to cpu endianness
6012  */
6013 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
6014 {
6015 	struct bpf_reg_state *regs = cur_regs(env);
6016 	u8 mode = BPF_MODE(insn->code);
6017 	int i, err;
6018 
6019 	if (!may_access_skb(env->prog->type)) {
6020 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
6021 		return -EINVAL;
6022 	}
6023 
6024 	if (!env->ops->gen_ld_abs) {
6025 		verbose(env, "bpf verifier is misconfigured\n");
6026 		return -EINVAL;
6027 	}
6028 
6029 	if (env->subprog_cnt > 1) {
6030 		/* when a program has LD_ABS insns, JITs and the interpreter
6031 		 * assume that r1 == ctx == skb, which is not the case for
6032 		 * callees that can take arbitrary arguments. It's problematic
6033 		 * for the main prog as well, since JITs would need to analyze
6034 		 * all functions in order to make proper register save/restore
6035 		 * decisions in the main prog. Hence disallow LD_ABS with calls.
6036 		 */
6037 		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
6038 		return -EINVAL;
6039 	}
6040 
6041 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
6042 	    BPF_SIZE(insn->code) == BPF_DW ||
6043 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
6044 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
6045 		return -EINVAL;
6046 	}
6047 
6048 	/* check whether implicit source operand (register R6) is readable */
6049 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
6050 	if (err)
6051 		return err;
6052 
6053 	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
6054 	 * gen_ld_abs() may terminate the program at runtime, leading to
6055 	 * reference leak.
6056 	 */
6057 	err = check_reference_leak(env);
6058 	if (err) {
6059 		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
6060 		return err;
6061 	}
6062 
6063 	if (env->cur_state->active_spin_lock) {
6064 		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
6065 		return -EINVAL;
6066 	}
6067 
6068 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
6069 		verbose(env,
6070 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
6071 		return -EINVAL;
6072 	}
6073 
6074 	if (mode == BPF_IND) {
6075 		/* check explicit source operand */
6076 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
6077 		if (err)
6078 			return err;
6079 	}
6080 
6081 	/* reset caller saved regs to unreadable */
6082 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
6083 		mark_reg_not_init(env, regs, caller_saved[i]);
6084 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6085 	}
6086 
6087 	/* mark destination R0 register as readable, since it contains
6088 	 * the value fetched from the packet.
6089 	 * Already marked as written above.
6090 	 */
6091 	mark_reg_unknown(env, regs, BPF_REG_0);
6092 	/* ld_abs loads up to 32 bits of skb data. */
6093 	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
6094 	return 0;
6095 }
6096 
6097 static int check_return_code(struct bpf_verifier_env *env)
6098 {
6099 	struct tnum enforce_attach_type_range = tnum_unknown;
6100 	struct bpf_reg_state *reg;
6101 	struct tnum range = tnum_range(0, 1);
6102 
6103 	switch (env->prog->type) {
6104 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
6105 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
6106 		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
6107 			range = tnum_range(1, 1);
6108 		break;
6109 	case BPF_PROG_TYPE_CGROUP_SKB:
6110 		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
6111 			range = tnum_range(0, 3);
6112 			enforce_attach_type_range = tnum_range(2, 3);
6113 		}
6114 		break;
6115 	case BPF_PROG_TYPE_CGROUP_SOCK:
6116 	case BPF_PROG_TYPE_SOCK_OPS:
6117 	case BPF_PROG_TYPE_CGROUP_DEVICE:
6118 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
6119 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
6120 		break;
6121 	default:
6122 		return 0;
6123 	}
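
	/* For example, a hypothetical BPF_PROG_TYPE_CGROUP_SKB program attached
	 * at BPF_CGROUP_INET_EGRESS must keep R0 provably within [0, 3]; one
	 * that may return 4 is rejected below, and one whose return value is
	 * provably within [2, 3] additionally sets enforce_expected_attach_type.
	 */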
6124 
6125 	reg = cur_regs(env) + BPF_REG_0;
6126 	if (reg->type != SCALAR_VALUE) {
6127 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
6128 			reg_type_str[reg->type]);
6129 		return -EINVAL;
6130 	}
6131 
6132 	if (!tnum_in(range, reg->var_off)) {
6133 		char tn_buf[48];
6134 
6135 		verbose(env, "At program exit the register R0 ");
6136 		if (!tnum_is_unknown(reg->var_off)) {
6137 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6138 			verbose(env, "has value %s", tn_buf);
6139 		} else {
6140 			verbose(env, "has unknown scalar value");
6141 		}
6142 		tnum_strn(tn_buf, sizeof(tn_buf), range);
6143 		verbose(env, " should have been in %s\n", tn_buf);
6144 		return -EINVAL;
6145 	}
6146 
6147 	if (!tnum_is_unknown(enforce_attach_type_range) &&
6148 	    tnum_in(enforce_attach_type_range, reg->var_off))
6149 		env->prog->enforce_expected_attach_type = 1;
6150 	return 0;
6151 }
6152 
6153 /* non-recursive DFS pseudo code
6154  * 1  procedure DFS-iterative(G,v):
6155  * 2      label v as discovered
6156  * 3      let S be a stack
6157  * 4      S.push(v)
6158  * 5      while S is not empty
6159  * 6            t <- S.pop()
6160  * 7            if t is what we're looking for:
6161  * 8                return t
6162  * 9            for all edges e in G.adjacentEdges(t) do
6163  * 10               if edge e is already labelled
6164  * 11                   continue with the next edge
6165  * 12               w <- G.adjacentVertex(t,e)
6166  * 13               if vertex w is not discovered and not explored
6167  * 14                   label e as tree-edge
6168  * 15                   label w as discovered
6169  * 16                   S.push(w)
6170  * 17                   continue at 5
6171  * 18               else if vertex w is discovered
6172  * 19                   label e as back-edge
6173  * 20               else
6174  * 21                   // vertex w is explored
6175  * 22                   label e as forward- or cross-edge
6176  * 23           label t as explored
6177  * 24           S.pop()
6178  *
6179  * convention:
6180  * 0x10 - discovered
6181  * 0x11 - discovered and fall-through edge labelled
6182  * 0x12 - discovered and fall-through and branch edges labelled
6183  * 0x20 - explored
6184  */
6185 
6186 enum {
6187 	DISCOVERED = 0x10,
6188 	EXPLORED = 0x20,
6189 	FALLTHROUGH = 1,
6190 	BRANCH = 2,
6191 };
6192 
6193 static u32 state_htab_size(struct bpf_verifier_env *env)
6194 {
6195 	return env->prog->len;
6196 }
6197 
6198 static struct bpf_verifier_state_list **explored_state(
6199 					struct bpf_verifier_env *env,
6200 					int idx)
6201 {
6202 	struct bpf_verifier_state *cur = env->cur_state;
6203 	struct bpf_func_state *state = cur->frame[cur->curframe];
6204 
6205 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
6206 }
6207 
6208 static void init_explored_state(struct bpf_verifier_env *env, int idx)
6209 {
6210 	env->insn_aux_data[idx].prune_point = true;
6211 }
6212 
6213 /* t, w, e - match pseudo-code above:
6214  * t - index of current instruction
6215  * w - next instruction
6216  * e - edge
6217  */
6218 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
6219 		     bool loop_ok)
6220 {
6221 	int *insn_stack = env->cfg.insn_stack;
6222 	int *insn_state = env->cfg.insn_state;
6223 
6224 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
6225 		return 0;
6226 
6227 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
6228 		return 0;
6229 
6230 	if (w < 0 || w >= env->prog->len) {
6231 		verbose_linfo(env, t, "%d: ", t);
6232 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
6233 		return -EINVAL;
6234 	}
6235 
6236 	if (e == BRANCH)
6237 		/* mark branch target for state pruning */
6238 		init_explored_state(env, w);
6239 
6240 	if (insn_state[w] == 0) {
6241 		/* tree-edge */
6242 		insn_state[t] = DISCOVERED | e;
6243 		insn_state[w] = DISCOVERED;
6244 		if (env->cfg.cur_stack >= env->prog->len)
6245 			return -E2BIG;
6246 		insn_stack[env->cfg.cur_stack++] = w;
6247 		return 1;
6248 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
6249 		if (loop_ok && env->allow_ptr_leaks)
6250 			return 0;
6251 		verbose_linfo(env, t, "%d: ", t);
6252 		verbose_linfo(env, w, "%d: ", w);
6253 		verbose(env, "back-edge from insn %d to %d\n", t, w);
6254 		return -EINVAL;
6255 	} else if (insn_state[w] == EXPLORED) {
6256 		/* forward- or cross-edge */
6257 		insn_state[t] = DISCOVERED | e;
6258 	} else {
6259 		verbose(env, "insn state internal bug\n");
6260 		return -EFAULT;
6261 	}
6262 	return 0;
6263 }
6264 
6265 /* non-recursive depth-first-search to detect loops in BPF program
6266  * loop == back-edge in directed graph
6267  */
6268 static int check_cfg(struct bpf_verifier_env *env)
6269 {
6270 	struct bpf_insn *insns = env->prog->insnsi;
6271 	int insn_cnt = env->prog->len;
6272 	int *insn_stack, *insn_state;
6273 	int ret = 0;
6274 	int i, t;
6275 
6276 	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
6277 	if (!insn_state)
6278 		return -ENOMEM;
6279 
6280 	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
6281 	if (!insn_stack) {
6282 		kvfree(insn_state);
6283 		return -ENOMEM;
6284 	}
6285 
6286 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
6287 	insn_stack[0] = 0; /* 0 is the first instruction */
6288 	env->cfg.cur_stack = 1;
6289 
6290 peek_stack:
6291 	if (env->cfg.cur_stack == 0)
6292 		goto check_state;
6293 	t = insn_stack[env->cfg.cur_stack - 1];
6294 
6295 	if (BPF_CLASS(insns[t].code) == BPF_JMP ||
6296 	    BPF_CLASS(insns[t].code) == BPF_JMP32) {
6297 		u8 opcode = BPF_OP(insns[t].code);
6298 
6299 		if (opcode == BPF_EXIT) {
6300 			goto mark_explored;
6301 		} else if (opcode == BPF_CALL) {
6302 			ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
6303 			if (ret == 1)
6304 				goto peek_stack;
6305 			else if (ret < 0)
6306 				goto err_free;
6307 			if (t + 1 < insn_cnt)
6308 				init_explored_state(env, t + 1);
6309 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
6310 				init_explored_state(env, t);
6311 				ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
6312 						env, false);
6313 				if (ret == 1)
6314 					goto peek_stack;
6315 				else if (ret < 0)
6316 					goto err_free;
6317 			}
6318 		} else if (opcode == BPF_JA) {
6319 			if (BPF_SRC(insns[t].code) != BPF_K) {
6320 				ret = -EINVAL;
6321 				goto err_free;
6322 			}
6323 			/* unconditional jump with single edge */
6324 			ret = push_insn(t, t + insns[t].off + 1,
6325 					FALLTHROUGH, env, true);
6326 			if (ret == 1)
6327 				goto peek_stack;
6328 			else if (ret < 0)
6329 				goto err_free;
6330 			/* unconditional jmp is not a good pruning point,
6331 			 * but it's marked, since backtracking needs
6332 			 * to record jmp history in is_state_visited().
6333 			 */
6334 			init_explored_state(env, t + insns[t].off + 1);
6335 			/* tell verifier to check for equivalent states
6336 			 * after every call and jump
6337 			 */
6338 			if (t + 1 < insn_cnt)
6339 				init_explored_state(env, t + 1);
6340 		} else {
6341 			/* conditional jump with two edges */
6342 			init_explored_state(env, t);
6343 			ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
6344 			if (ret == 1)
6345 				goto peek_stack;
6346 			else if (ret < 0)
6347 				goto err_free;
6348 
6349 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
6350 			if (ret == 1)
6351 				goto peek_stack;
6352 			else if (ret < 0)
6353 				goto err_free;
6354 		}
6355 	} else {
6356 		/* all other non-branch instructions with single
6357 		 * fall-through edge
6358 		 */
6359 		ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
6360 		if (ret == 1)
6361 			goto peek_stack;
6362 		else if (ret < 0)
6363 			goto err_free;
6364 	}
6365 
6366 mark_explored:
6367 	insn_state[t] = EXPLORED;
6368 	if (env->cfg.cur_stack-- <= 0) {
6369 		verbose(env, "pop stack internal bug\n");
6370 		ret = -EFAULT;
6371 		goto err_free;
6372 	}
6373 	goto peek_stack;
6374 
6375 check_state:
6376 	for (i = 0; i < insn_cnt; i++) {
6377 		if (insn_state[i] != EXPLORED) {
6378 			verbose(env, "unreachable insn %d\n", i);
6379 			ret = -EINVAL;
6380 			goto err_free;
6381 		}
6382 	}
6383 	ret = 0; /* cfg looks good */
6384 
6385 err_free:
6386 	kvfree(insn_state);
6387 	kvfree(insn_stack);
6388 	env->cfg.insn_state = env->cfg.insn_stack = NULL;
6389 	return ret;
6390 }
6391 
6392 /* The minimum supported BTF func info size */
6393 #define MIN_BPF_FUNCINFO_SIZE	8
6394 #define MAX_FUNCINFO_REC_SIZE	252
6395 
6396 static int check_btf_func(struct bpf_verifier_env *env,
6397 			  const union bpf_attr *attr,
6398 			  union bpf_attr __user *uattr)
6399 {
6400 	u32 i, nfuncs, urec_size, min_size;
6401 	u32 krec_size = sizeof(struct bpf_func_info);
6402 	struct bpf_func_info *krecord;
6403 	const struct btf_type *type;
6404 	struct bpf_prog *prog;
6405 	const struct btf *btf;
6406 	void __user *urecord;
6407 	u32 prev_offset = 0;
6408 	int ret = 0;
6409 
6410 	nfuncs = attr->func_info_cnt;
6411 	if (!nfuncs)
6412 		return 0;
6413 
6414 	if (nfuncs != env->subprog_cnt) {
6415 		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
6416 		return -EINVAL;
6417 	}
6418 
6419 	urec_size = attr->func_info_rec_size;
6420 	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
6421 	    urec_size > MAX_FUNCINFO_REC_SIZE ||
6422 	    urec_size % sizeof(u32)) {
6423 		verbose(env, "invalid func info rec size %u\n", urec_size);
6424 		return -EINVAL;
6425 	}
6426 
6427 	prog = env->prog;
6428 	btf = prog->aux->btf;
6429 
6430 	urecord = u64_to_user_ptr(attr->func_info);
6431 	min_size = min_t(u32, krec_size, urec_size);
6432 
6433 	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
6434 	if (!krecord)
6435 		return -ENOMEM;
6436 
6437 	for (i = 0; i < nfuncs; i++) {
6438 		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
6439 		if (ret) {
6440 			if (ret == -E2BIG) {
6441 				verbose(env, "nonzero tailing record in func info");
6442 				/* set the size the kernel expects so the loader
6443 				 * can zero out the rest of the record.
6444 				 */
6445 				if (put_user(min_size, &uattr->func_info_rec_size))
6446 					ret = -EFAULT;
6447 			}
6448 			goto err_free;
6449 		}
6450 
6451 		if (copy_from_user(&krecord[i], urecord, min_size)) {
6452 			ret = -EFAULT;
6453 			goto err_free;
6454 		}
6455 
6456 		/* check insn_off */
6457 		if (i == 0) {
6458 			if (krecord[i].insn_off) {
6459 				verbose(env,
6460 					"nonzero insn_off %u for the first func info record",
6461 					krecord[i].insn_off);
6462 				ret = -EINVAL;
6463 				goto err_free;
6464 			}
6465 		} else if (krecord[i].insn_off <= prev_offset) {
6466 			verbose(env,
6467 				"same or smaller insn offset (%u) than previous func info record (%u)",
6468 				krecord[i].insn_off, prev_offset);
6469 			ret = -EINVAL;
6470 			goto err_free;
6471 		}
6472 
6473 		if (env->subprog_info[i].start != krecord[i].insn_off) {
6474 			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
6475 			ret = -EINVAL;
6476 			goto err_free;
6477 		}
6478 
6479 		/* check type_id */
6480 		type = btf_type_by_id(btf, krecord[i].type_id);
6481 		if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
6482 			verbose(env, "invalid type id %d in func info",
6483 				krecord[i].type_id);
6484 			ret = -EINVAL;
6485 			goto err_free;
6486 		}
6487 
6488 		prev_offset = krecord[i].insn_off;
6489 		urecord += urec_size;
6490 	}
6491 
6492 	prog->aux->func_info = krecord;
6493 	prog->aux->func_info_cnt = nfuncs;
6494 	return 0;
6495 
6496 err_free:
6497 	kvfree(krecord);
6498 	return ret;
6499 }
6500 
6501 static void adjust_btf_func(struct bpf_verifier_env *env)
6502 {
6503 	int i;
6504 
6505 	if (!env->prog->aux->func_info)
6506 		return;
6507 
6508 	for (i = 0; i < env->subprog_cnt; i++)
6509 		env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
6510 }
6511 
6512 #define MIN_BPF_LINEINFO_SIZE	(offsetof(struct bpf_line_info, line_col) + \
6513 		sizeof(((struct bpf_line_info *)(0))->line_col))
6514 #define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE
6515 
6516 static int check_btf_line(struct bpf_verifier_env *env,
6517 			  const union bpf_attr *attr,
6518 			  union bpf_attr __user *uattr)
6519 {
6520 	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
6521 	struct bpf_subprog_info *sub;
6522 	struct bpf_line_info *linfo;
6523 	struct bpf_prog *prog;
6524 	const struct btf *btf;
6525 	void __user *ulinfo;
6526 	int err;
6527 
6528 	nr_linfo = attr->line_info_cnt;
6529 	if (!nr_linfo)
6530 		return 0;
6531 
6532 	rec_size = attr->line_info_rec_size;
6533 	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
6534 	    rec_size > MAX_LINEINFO_REC_SIZE ||
6535 	    rec_size & (sizeof(u32) - 1))
6536 		return -EINVAL;
6537 
6538 	/* Need to zero it in case userspace passes in a
6539 	 * smaller bpf_line_info object.
6540 	 */
6541 	linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
6542 			 GFP_KERNEL | __GFP_NOWARN);
6543 	if (!linfo)
6544 		return -ENOMEM;
6545 
6546 	prog = env->prog;
6547 	btf = prog->aux->btf;
6548 
6549 	s = 0;
6550 	sub = env->subprog_info;
6551 	ulinfo = u64_to_user_ptr(attr->line_info);
6552 	expected_size = sizeof(struct bpf_line_info);
6553 	ncopy = min_t(u32, expected_size, rec_size);
6554 	for (i = 0; i < nr_linfo; i++) {
6555 		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
6556 		if (err) {
6557 			if (err == -E2BIG) {
6558 				verbose(env, "nonzero tailing record in line_info");
6559 				if (put_user(expected_size,
6560 					     &uattr->line_info_rec_size))
6561 					err = -EFAULT;
6562 			}
6563 			goto err_free;
6564 		}
6565 
6566 		if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
6567 			err = -EFAULT;
6568 			goto err_free;
6569 		}
6570 
6571 		/*
6572 		 * Check insn_off to ensure
6573 		 * 1) strictly increasing AND
6574 		 * 2) bounded by prog->len
6575 		 *
6576 		 * The linfo[0].insn_off == 0 check logically falls into
6577 		 * the later "missing bpf_line_info for func..." case
6578 		 * because the first linfo[0].insn_off must also belong to
6579 		 * the first sub, and the first sub must have
6580 		 * subprog_info[0].start == 0.
6581 		 */
6582 		if ((i && linfo[i].insn_off <= prev_offset) ||
6583 		    linfo[i].insn_off >= prog->len) {
6584 			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
6585 				i, linfo[i].insn_off, prev_offset,
6586 				prog->len);
6587 			err = -EINVAL;
6588 			goto err_free;
6589 		}
6590 
6591 		if (!prog->insnsi[linfo[i].insn_off].code) {
6592 			verbose(env,
6593 				"Invalid insn code at line_info[%u].insn_off\n",
6594 				i);
6595 			err = -EINVAL;
6596 			goto err_free;
6597 		}
6598 
6599 		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
6600 		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
6601 			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
6602 			err = -EINVAL;
6603 			goto err_free;
6604 		}
6605 
6606 		if (s != env->subprog_cnt) {
6607 			if (linfo[i].insn_off == sub[s].start) {
6608 				sub[s].linfo_idx = i;
6609 				s++;
6610 			} else if (sub[s].start < linfo[i].insn_off) {
6611 				verbose(env, "missing bpf_line_info for func#%u\n", s);
6612 				err = -EINVAL;
6613 				goto err_free;
6614 			}
6615 		}
6616 
6617 		prev_offset = linfo[i].insn_off;
6618 		ulinfo += rec_size;
6619 	}
6620 
6621 	if (s != env->subprog_cnt) {
6622 		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
6623 			env->subprog_cnt - s, s);
6624 		err = -EINVAL;
6625 		goto err_free;
6626 	}
6627 
6628 	prog->aux->linfo = linfo;
6629 	prog->aux->nr_linfo = nr_linfo;
6630 
6631 	return 0;
6632 
6633 err_free:
6634 	kvfree(linfo);
6635 	return err;
6636 }
6637 
6638 static int check_btf_info(struct bpf_verifier_env *env,
6639 			  const union bpf_attr *attr,
6640 			  union bpf_attr __user *uattr)
6641 {
6642 	struct btf *btf;
6643 	int err;
6644 
6645 	if (!attr->func_info_cnt && !attr->line_info_cnt)
6646 		return 0;
6647 
6648 	btf = btf_get_by_fd(attr->prog_btf_fd);
6649 	if (IS_ERR(btf))
6650 		return PTR_ERR(btf);
6651 	env->prog->aux->btf = btf;
6652 
6653 	err = check_btf_func(env, attr, uattr);
6654 	if (err)
6655 		return err;
6656 
6657 	err = check_btf_line(env, attr, uattr);
6658 	if (err)
6659 		return err;
6660 
6661 	return 0;
6662 }
6663 
6664 /* check %cur's range satisfies %old's */
6665 static bool range_within(struct bpf_reg_state *old,
6666 			 struct bpf_reg_state *cur)
6667 {
6668 	return old->umin_value <= cur->umin_value &&
6669 	       old->umax_value >= cur->umax_value &&
6670 	       old->smin_value <= cur->smin_value &&
6671 	       old->smax_value >= cur->smax_value;
6672 }
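/* Illustrative example (added for clarity): if the old (explored) register
 * had umin_value == 2 and umax_value == 10 while the current one has
 * umin_value == 4 and umax_value == 8 (and the signed bounds nest the same
 * way), range_within() returns true; a current range of [0, 12] would not
 * satisfy the old one.
 */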
6673 
6674 /* Maximum number of register states that can exist at once */
6675 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
6676 struct idpair {
6677 	u32 old;
6678 	u32 cur;
6679 };
6680 
6681 /* If in the old state two registers had the same id, then they need to have
6682  * the same id in the new state as well.  But that id could be different from
6683  * the old state, so we need to track the mapping from old to new ids.
6684  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
6685  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
6686  * regs with a different old id could still have new id 9, we don't care about
6687  * that.
6688  * So we look through our idmap to see if this old id has been seen before.  If
6689  * so, we require the new id to match; otherwise, we add the id pair to the map.
6690  */
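/* Illustrative example (added for clarity, not part of the original
 * comment): if the old state had r1.id == 5 and r2.id == 5 (both coming
 * from the same map lookup), and the current state presents r1 with
 * id == 9, then r2 must also carry id == 9 for the states to be
 * considered equivalent; otherwise a NULL check on one register could
 * not be assumed to cover the other.
 */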
6691 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
6692 {
6693 	unsigned int i;
6694 
6695 	for (i = 0; i < ID_MAP_SIZE; i++) {
6696 		if (!idmap[i].old) {
6697 			/* Reached an empty slot; haven't seen this id before */
6698 			idmap[i].old = old_id;
6699 			idmap[i].cur = cur_id;
6700 			return true;
6701 		}
6702 		if (idmap[i].old == old_id)
6703 			return idmap[i].cur == cur_id;
6704 	}
6705 	/* We ran out of idmap slots, which should be impossible */
6706 	WARN_ON_ONCE(1);
6707 	return false;
6708 }
6709 
6710 static void clean_func_state(struct bpf_verifier_env *env,
6711 			     struct bpf_func_state *st)
6712 {
6713 	enum bpf_reg_liveness live;
6714 	int i, j;
6715 
6716 	for (i = 0; i < BPF_REG_FP; i++) {
6717 		live = st->regs[i].live;
6718 		/* liveness must not touch this register anymore */
6719 		st->regs[i].live |= REG_LIVE_DONE;
6720 		if (!(live & REG_LIVE_READ))
6721 			/* since the register is unused, clear its state
6722 			 * to make further comparison simpler
6723 			 */
6724 			__mark_reg_not_init(&st->regs[i]);
6725 	}
6726 
6727 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
6728 		live = st->stack[i].spilled_ptr.live;
6729 		/* liveness must not touch this stack slot anymore */
6730 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
6731 		if (!(live & REG_LIVE_READ)) {
6732 			__mark_reg_not_init(&st->stack[i].spilled_ptr);
6733 			for (j = 0; j < BPF_REG_SIZE; j++)
6734 				st->stack[i].slot_type[j] = STACK_INVALID;
6735 		}
6736 	}
6737 }
6738 
6739 static void clean_verifier_state(struct bpf_verifier_env *env,
6740 				 struct bpf_verifier_state *st)
6741 {
6742 	int i;
6743 
6744 	if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
6745 		/* all regs in this state in all frames were already marked */
6746 		return;
6747 
6748 	for (i = 0; i <= st->curframe; i++)
6749 		clean_func_state(env, st->frame[i]);
6750 }
6751 
6752 /* the parentage chains form a tree.
6753  * the verifier states are added to state lists at given insn and
6754  * pushed into state stack for future exploration.
6755  * when the verifier reaches bpf_exit insn some of the verifier states
6756  * stored in the state lists have their final liveness state already,
6757  * but a lot of states will get revised from liveness point of view when
6758  * the verifier explores other branches.
6759  * Example:
6760  * 1: r0 = 1
6761  * 2: if r1 == 100 goto pc+1
6762  * 3: r0 = 2
6763  * 4: exit
6764  * when the verifier reaches exit insn the register r0 in the state list of
6765  * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
6766  * of insn 2 and goes exploring further. At the insn 4 it will walk the
6767  * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
6768  *
6769  * Since the verifier pushes the branch states as it sees them while exploring
6770  * the program, walking the branch instruction for the second
6771  * time means that all states below this branch were already explored and
6772  * their final liveness marks are already propagated.
6773  * Hence when the verifier completes the search of state list in is_state_visited()
6774  * we can call this clean_live_states() function to mark all liveness states
6775  * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
6776  * will not be used.
6777  * This function also clears the registers and stack slots that are !READ
6778  * to simplify state merging.
6779  *
6780  * Important note here that walking the same branch instruction in the callee
6781  * doesn't mean that the states are DONE. The verifier has to compare
6782  * the callsites as well.
6783  */
6784 static void clean_live_states(struct bpf_verifier_env *env, int insn,
6785 			      struct bpf_verifier_state *cur)
6786 {
6787 	struct bpf_verifier_state_list *sl;
6788 	int i;
6789 
6790 	sl = *explored_state(env, insn);
6791 	while (sl) {
6792 		if (sl->state.branches)
6793 			goto next;
6794 		if (sl->state.insn_idx != insn ||
6795 		    sl->state.curframe != cur->curframe)
6796 			goto next;
6797 		for (i = 0; i <= cur->curframe; i++)
6798 			if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
6799 				goto next;
6800 		clean_verifier_state(env, &sl->state);
6801 next:
6802 		sl = sl->next;
6803 	}
6804 }
6805 
6806 /* Returns true if (rold safe implies rcur safe) */
6807 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
6808 		    struct idpair *idmap)
6809 {
6810 	bool equal;
6811 
6812 	if (!(rold->live & REG_LIVE_READ))
6813 		/* explored state didn't use this */
6814 		return true;
6815 
6816 	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
6817 
6818 	if (rold->type == PTR_TO_STACK)
6819 		/* two stack pointers are equal only if they're pointing to
6820 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
6821 		 */
6822 		return equal && rold->frameno == rcur->frameno;
6823 
6824 	if (equal)
6825 		return true;
6826 
6827 	if (rold->type == NOT_INIT)
6828 		/* explored state can't have used this */
6829 		return true;
6830 	if (rcur->type == NOT_INIT)
6831 		return false;
6832 	switch (rold->type) {
6833 	case SCALAR_VALUE:
6834 		if (rcur->type == SCALAR_VALUE) {
6835 			if (!rold->precise && !rcur->precise)
6836 				return true;
6837 			/* new val must satisfy old val knowledge */
6838 			return range_within(rold, rcur) &&
6839 			       tnum_in(rold->var_off, rcur->var_off);
6840 		} else {
6841 			/* We're trying to use a pointer in place of a scalar.
6842 			 * Even if the scalar was unbounded, this could lead to
6843 			 * pointer leaks because scalars are allowed to leak
6844 			 * while pointers are not. We could make this safe in
6845 			 * special cases if root is calling us, but it's
6846 			 * probably not worth the hassle.
6847 			 */
6848 			return false;
6849 		}
6850 	case PTR_TO_MAP_VALUE:
6851 		/* If the new min/max/var_off satisfy the old ones and
6852 		 * everything else matches, we are OK.
6853 		 * 'id' is not compared, since it's only used for maps with
6854 		 * bpf_spin_lock inside map element and in such cases if
6855 		 * the rest of the prog is valid for one map element then
6856 		 * it's valid for all map elements regardless of the key
6857 		 * used in bpf_map_lookup()
6858 		 */
6859 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
6860 		       range_within(rold, rcur) &&
6861 		       tnum_in(rold->var_off, rcur->var_off);
6862 	case PTR_TO_MAP_VALUE_OR_NULL:
6863 		/* a PTR_TO_MAP_VALUE could be safe to use as a
6864 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
6865 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
6866 		 * checked, doing so could have affected others with the same
6867 		 * id, and we can't check for that because we lost the id when
6868 		 * we converted to a PTR_TO_MAP_VALUE.
6869 		 */
6870 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
6871 			return false;
6872 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
6873 			return false;
6874 		/* Check our ids match any regs they're supposed to */
6875 		return check_ids(rold->id, rcur->id, idmap);
6876 	case PTR_TO_PACKET_META:
6877 	case PTR_TO_PACKET:
6878 		if (rcur->type != rold->type)
6879 			return false;
6880 		/* We must have at least as much range as the old ptr
6881 		 * did, so that any accesses which were safe before are
6882 		 * still safe.  This is true even if old range < old off,
6883 		 * since someone could have accessed through (ptr - k), or
6884 		 * even done ptr -= k in a register, to get a safe access.
6885 		 */
6886 		if (rold->range > rcur->range)
6887 			return false;
6888 		/* If the offsets don't match, we can't trust our alignment;
6889 		 * nor can we be sure that we won't fall out of range.
6890 		 */
6891 		if (rold->off != rcur->off)
6892 			return false;
6893 		/* id relations must be preserved */
6894 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
6895 			return false;
6896 		/* new val must satisfy old val knowledge */
6897 		return range_within(rold, rcur) &&
6898 		       tnum_in(rold->var_off, rcur->var_off);
6899 	case PTR_TO_CTX:
6900 	case CONST_PTR_TO_MAP:
6901 	case PTR_TO_PACKET_END:
6902 	case PTR_TO_FLOW_KEYS:
6903 	case PTR_TO_SOCKET:
6904 	case PTR_TO_SOCKET_OR_NULL:
6905 	case PTR_TO_SOCK_COMMON:
6906 	case PTR_TO_SOCK_COMMON_OR_NULL:
6907 	case PTR_TO_TCP_SOCK:
6908 	case PTR_TO_TCP_SOCK_OR_NULL:
6909 	case PTR_TO_XDP_SOCK:
6910 		/* Only valid matches are exact, which memcmp() above
6911 		 * would have accepted
6912 		 */
6913 	default:
6914 		/* Don't know what's going on, just say it's not safe */
6915 		return false;
6916 	}
6917 
6918 	/* Shouldn't get here; if we do, say it's not safe */
6919 	WARN_ON_ONCE(1);
6920 	return false;
6921 }
6922 
6923 static bool stacksafe(struct bpf_func_state *old,
6924 		      struct bpf_func_state *cur,
6925 		      struct idpair *idmap)
6926 {
6927 	int i, spi;
6928 
6929 	/* walk slots of the explored stack and ignore any additional
6930 	 * slots in the current stack, since the explored (safe) state
6931 	 * didn't use them
6932 	 */
6933 	for (i = 0; i < old->allocated_stack; i++) {
6934 		spi = i / BPF_REG_SIZE;
6935 
6936 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
6937 			i += BPF_REG_SIZE - 1;
6938 			/* explored state didn't use this */
6939 			continue;
6940 		}
6941 
6942 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
6943 			continue;
6944 
6945 		/* explored stack has more populated slots than current stack
6946 		 * and these slots were used
6947 		 */
6948 		if (i >= cur->allocated_stack)
6949 			return false;
6950 
6951 		/* if old state was safe with misc data in the stack
6952 		 * it will be safe with zero-initialized stack.
6953 		 * The opposite is not true
6954 		 */
6955 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
6956 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
6957 			continue;
6958 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
6959 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
6960 			/* Ex: old explored (safe) state has STACK_SPILL in
6961 			 * this stack slot, but current has STACK_MISC ->
6962 			 * these verifier states are not equivalent,
6963 			 * return false to continue verification of this path
6964 			 */
6965 			return false;
6966 		if (i % BPF_REG_SIZE)
6967 			continue;
6968 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
6969 			continue;
6970 		if (!regsafe(&old->stack[spi].spilled_ptr,
6971 			     &cur->stack[spi].spilled_ptr,
6972 			     idmap))
6973 			/* when explored and current stack slot are both storing
6974 			 * spilled registers, check that the stored pointer types
6975 			 * are the same as well.
6976 			 * Ex: explored safe path could have stored
6977 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
6978 			 * but current path has stored:
6979 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
6980 			 * such verifier states are not equivalent.
6981 			 * return false to continue verification of this path
6982 			 */
6983 			return false;
6984 	}
6985 	return true;
6986 }
6987 
6988 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
6989 {
6990 	if (old->acquired_refs != cur->acquired_refs)
6991 		return false;
6992 	return !memcmp(old->refs, cur->refs,
6993 		       sizeof(*old->refs) * old->acquired_refs);
6994 }
6995 
6996 /* compare two verifier states
6997  *
6998  * all states stored in state_list are known to be valid, since
6999  * verifier reached 'bpf_exit' instruction through them
7000  *
7001  * this function is called when the verifier explores different branches of
7002  * execution popped from the state stack. If it sees an old state that has
7003  * a stricter register state and a stricter stack state, then this execution
7004  * branch doesn't need to be explored further, since the verifier has already
7005  * concluded that the stricter state leads to a valid finish.
7006  *
7007  * Therefore two states are equivalent if register state is more conservative
7008  * and explored stack state is more conservative than the current one.
7009  * Example:
7010  *       explored                   current
7011  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
7012  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
7013  *
7014  * In other words if current stack state (one being explored) has more
7015  * valid slots than old one that already passed validation, it means
7016  * the verifier can stop exploring and conclude that current state is valid too
7017  *
7018  * Similarly with registers. If explored state has register type as invalid
7019  * whereas register type in current state is meaningful, it means that
7020  * the current state will reach 'bpf_exit' instruction safely
7021  */
7022 static bool func_states_equal(struct bpf_func_state *old,
7023 			      struct bpf_func_state *cur)
7024 {
7025 	struct idpair *idmap;
7026 	bool ret = false;
7027 	int i;
7028 
7029 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7030 	/* If we failed to allocate the idmap, just say it's not safe */
7031 	if (!idmap)
7032 		return false;
7033 
7034 	for (i = 0; i < MAX_BPF_REG; i++) {
7035 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
7036 			goto out_free;
7037 	}
7038 
7039 	if (!stacksafe(old, cur, idmap))
7040 		goto out_free;
7041 
7042 	if (!refsafe(old, cur))
7043 		goto out_free;
7044 	ret = true;
7045 out_free:
7046 	kfree(idmap);
7047 	return ret;
7048 }
7049 
7050 static bool states_equal(struct bpf_verifier_env *env,
7051 			 struct bpf_verifier_state *old,
7052 			 struct bpf_verifier_state *cur)
7053 {
7054 	int i;
7055 
7056 	if (old->curframe != cur->curframe)
7057 		return false;
7058 
7059 	/* Verification state from speculative execution simulation
7060 	 * must never prune a non-speculative execution one.
7061 	 */
7062 	if (old->speculative && !cur->speculative)
7063 		return false;
7064 
7065 	if (old->active_spin_lock != cur->active_spin_lock)
7066 		return false;
7067 
7068 	/* for states to be equal callsites have to be the same
7069 	 * and all frame states need to be equivalent
7070 	 */
7071 	for (i = 0; i <= old->curframe; i++) {
7072 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
7073 			return false;
7074 		if (!func_states_equal(old->frame[i], cur->frame[i]))
7075 			return false;
7076 	}
7077 	return true;
7078 }
7079 
7080 /* Return 0 if no propagation happened. Return negative error code if error
7081  * happened. Otherwise, return the propagated bit.
7082  */
7083 static int propagate_liveness_reg(struct bpf_verifier_env *env,
7084 				  struct bpf_reg_state *reg,
7085 				  struct bpf_reg_state *parent_reg)
7086 {
7087 	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
7088 	u8 flag = reg->live & REG_LIVE_READ;
7089 	int err;
7090 
7091 	/* When we come here, the read flags of PARENT_REG or REG could be any of
7092 	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
7093 	 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
7094 	 */
7095 	if (parent_flag == REG_LIVE_READ64 ||
7096 	    /* Or if there is no read flag from REG. */
7097 	    !flag ||
7098 	    /* Or if the read flag from REG is the same as PARENT_REG. */
7099 	    parent_flag == flag)
7100 		return 0;
7101 
7102 	err = mark_reg_read(env, reg, parent_reg, flag);
7103 	if (err)
7104 		return err;
7105 
7106 	return flag;
7107 }
7108 
7109 /* A write screens off any subsequent reads; but write marks come from the
7110  * straight-line code between a state and its parent.  When we arrive at an
7111  * equivalent state (jump target or such) we didn't arrive by the straight-line
7112  * code, so read marks in the state must propagate to the parent regardless
7113  * of the state's write marks. That's what 'parent == state->parent' comparison
7114  * in mark_reg_read() is for.
7115  */
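/* A rough illustration (added for clarity, not part of the original
 * comment): suppose the explored state at a jump target recorded a read
 * of r6 somewhere in its continuation. A later path reaching the same
 * insn may have written r6 on its own way there; that write must not
 * hide the continuation's read from this path's parent, so the read
 * mark is propagated regardless of the current write marks.
 */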
7116 static int propagate_liveness(struct bpf_verifier_env *env,
7117 			      const struct bpf_verifier_state *vstate,
7118 			      struct bpf_verifier_state *vparent)
7119 {
7120 	struct bpf_reg_state *state_reg, *parent_reg;
7121 	struct bpf_func_state *state, *parent;
7122 	int i, frame, err = 0;
7123 
7124 	if (vparent->curframe != vstate->curframe) {
7125 		WARN(1, "propagate_live: parent frame %d current frame %d\n",
7126 		     vparent->curframe, vstate->curframe);
7127 		return -EFAULT;
7128 	}
7129 	/* Propagate read liveness of registers... */
7130 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
7131 	for (frame = 0; frame <= vstate->curframe; frame++) {
7132 		parent = vparent->frame[frame];
7133 		state = vstate->frame[frame];
7134 		parent_reg = parent->regs;
7135 		state_reg = state->regs;
7136 		/* We don't need to worry about FP liveness, it's read-only */
7137 		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
7138 			err = propagate_liveness_reg(env, &state_reg[i],
7139 						     &parent_reg[i]);
7140 			if (err < 0)
7141 				return err;
7142 			if (err == REG_LIVE_READ64)
7143 				mark_insn_zext(env, &parent_reg[i]);
7144 		}
7145 
7146 		/* Propagate stack slots. */
7147 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
7148 			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
7149 			parent_reg = &parent->stack[i].spilled_ptr;
7150 			state_reg = &state->stack[i].spilled_ptr;
7151 			err = propagate_liveness_reg(env, state_reg,
7152 						     parent_reg);
7153 			if (err < 0)
7154 				return err;
7155 		}
7156 	}
7157 	return 0;
7158 }
7159 
7160 /* find precise scalars in the previous equivalent state and
7161  * propagate them into the current state
7162  */
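/* Illustrative example (added for clarity): if the already-verified old
 * state needed r2's exact value to prove a bounds check (r2 is marked
 * precise there), the equivalent current state must mark r2 precise as
 * well via mark_chain_precision(), so its history keeps the exact value
 * knowledge instead of it being widened away.
 */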
7163 static int propagate_precision(struct bpf_verifier_env *env,
7164 			       const struct bpf_verifier_state *old)
7165 {
7166 	struct bpf_reg_state *state_reg;
7167 	struct bpf_func_state *state;
7168 	int i, err = 0;
7169 
7170 	state = old->frame[old->curframe];
7171 	state_reg = state->regs;
7172 	for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
7173 		if (state_reg->type != SCALAR_VALUE ||
7174 		    !state_reg->precise)
7175 			continue;
7176 		if (env->log.level & BPF_LOG_LEVEL2)
7177 			verbose(env, "propagating r%d\n", i);
7178 		err = mark_chain_precision(env, i);
7179 		if (err < 0)
7180 			return err;
7181 	}
7182 
7183 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
7184 		if (state->stack[i].slot_type[0] != STACK_SPILL)
7185 			continue;
7186 		state_reg = &state->stack[i].spilled_ptr;
7187 		if (state_reg->type != SCALAR_VALUE ||
7188 		    !state_reg->precise)
7189 			continue;
7190 		if (env->log.level & BPF_LOG_LEVEL2)
7191 			verbose(env, "propagating fp%d\n",
7192 				(-i - 1) * BPF_REG_SIZE);
7193 		err = mark_chain_precision_stack(env, i);
7194 		if (err < 0)
7195 			return err;
7196 	}
7197 	return 0;
7198 }
7199 
7200 static bool states_maybe_looping(struct bpf_verifier_state *old,
7201 				 struct bpf_verifier_state *cur)
7202 {
7203 	struct bpf_func_state *fold, *fcur;
7204 	int i, fr = cur->curframe;
7205 
7206 	if (old->curframe != fr)
7207 		return false;
7208 
7209 	fold = old->frame[fr];
7210 	fcur = cur->frame[fr];
7211 	for (i = 0; i < MAX_BPF_REG; i++)
7212 		if (memcmp(&fold->regs[i], &fcur->regs[i],
7213 			   offsetof(struct bpf_reg_state, parent)))
7214 			return false;
7215 	return true;
7216 }
7217 
7218 
7219 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
7220 {
7221 	struct bpf_verifier_state_list *new_sl;
7222 	struct bpf_verifier_state_list *sl, **pprev;
7223 	struct bpf_verifier_state *cur = env->cur_state, *new;
7224 	int i, j, err, states_cnt = 0;
7225 	bool add_new_state = false;
7226 
7227 	cur->last_insn_idx = env->prev_insn_idx;
7228 	if (!env->insn_aux_data[insn_idx].prune_point)
7229 		/* this 'insn_idx' instruction wasn't marked, so we will not
7230 		 * be doing state search here
7231 		 */
7232 		return 0;
7233 
7234 	/* bpf progs typically have pruning point every 4 instructions
7235 	 * http://vger.kernel.org/bpfconf2019.html#session-1
7236 	 * Do not add new state for future pruning if the verifier hasn't seen
7237 	 * at least 2 jumps and at least 8 instructions.
7238 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
7239 	 * In tests that amounts to up to a 50% reduction in total verifier
7240 	 * memory consumption and a 20% verifier time speedup.
7241 	 */
7242 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
7243 	    env->insn_processed - env->prev_insn_processed >= 8)
7244 		add_new_state = true;
7245 
7246 	pprev = explored_state(env, insn_idx);
7247 	sl = *pprev;
7248 
7249 	clean_live_states(env, insn_idx, cur);
7250 
7251 	while (sl) {
7252 		states_cnt++;
7253 		if (sl->state.insn_idx != insn_idx)
7254 			goto next;
7255 		if (sl->state.branches) {
7256 			if (states_maybe_looping(&sl->state, cur) &&
7257 			    states_equal(env, &sl->state, cur)) {
7258 				verbose_linfo(env, insn_idx, "; ");
7259 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
7260 				return -EINVAL;
7261 			}
7262 			/* if the verifier is processing a loop, avoid adding new state
7263 			 * too often, since different loop iterations have distinct
7264 			 * states and may not help future pruning.
7265 			 * This threshold shouldn't be too low to make sure that
7266 			 * a loop with large bound will be rejected quickly.
7267 			 * The most abusive loop will be:
7268 			 * r1 += 1
7269 			 * if r1 < 1000000 goto pc-2
7270 			 * 1M insn_processed limit / 100 == 10k peak states.
7271 			 * This threshold shouldn't be too high either, since states
7272 			 * at the end of the loop are likely to be useful in pruning.
7273 			 */
7274 			if (env->jmps_processed - env->prev_jmps_processed < 20 &&
7275 			    env->insn_processed - env->prev_insn_processed < 100)
7276 				add_new_state = false;
7277 			goto miss;
7278 		}
7279 		if (states_equal(env, &sl->state, cur)) {
7280 			sl->hit_cnt++;
7281 			/* reached equivalent register/stack state,
7282 			 * prune the search.
7283 			 * Registers read by the continuation are read by us.
7284 			 * If we have any write marks in env->cur_state, they
7285 			 * will prevent corresponding reads in the continuation
7286 			 * from reaching our parent (an explored_state).  Our
7287 			 * own state will get the read marks recorded, but
7288 			 * they'll be immediately forgotten as we're pruning
7289 			 * this state and will pop a new one.
7290 			 */
7291 			err = propagate_liveness(env, &sl->state, cur);
7292 
7293 			/* if previous state reached the exit with precision and
7294 			 * current state is equivalent to it (except precision marks)
7295 			 * the precision needs to be propagated back in
7296 			 * the current state.
7297 			 */
7298 			err = err ? : push_jmp_history(env, cur);
7299 			err = err ? : propagate_precision(env, &sl->state);
7300 			if (err)
7301 				return err;
7302 			return 1;
7303 		}
7304 miss:
7305 		/* when a new state is not going to be added, do not increase the miss count.
7306 		 * Otherwise several loop iterations will remove the state
7307 		 * recorded earlier. The goal of these heuristics is to have
7308 		 * states from some iterations of the loop (some in the beginning
7309 		 * and some at the end) to help pruning.
7310 		 */
7311 		if (add_new_state)
7312 			sl->miss_cnt++;
7313 		/* heuristic to determine whether this state is beneficial
7314 		 * to keep checking from state equivalence point of view.
7315 		 * Higher numbers increase max_states_per_insn and verification time,
7316 		 * but do not meaningfully decrease insn_processed.
7317 		 */
7318 		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
7319 			/* the state is unlikely to be useful. Remove it to
7320 			 * speed up verification
7321 			 */
7322 			*pprev = sl->next;
7323 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
7324 				u32 br = sl->state.branches;
7325 
7326 				WARN_ONCE(br,
7327 					  "BUG live_done but branches_to_explore %d\n",
7328 					  br);
7329 				free_verifier_state(&sl->state, false);
7330 				kfree(sl);
7331 				env->peak_states--;
7332 			} else {
7333 				/* cannot free this state, since parentage chain may
7334 				 * walk it later. Add it to the free_list instead to
7335 				 * be freed at the end of verification
7336 				 */
7337 				sl->next = env->free_list;
7338 				env->free_list = sl;
7339 			}
7340 			sl = *pprev;
7341 			continue;
7342 		}
7343 next:
7344 		pprev = &sl->next;
7345 		sl = *pprev;
7346 	}
7347 
7348 	if (env->max_states_per_insn < states_cnt)
7349 		env->max_states_per_insn = states_cnt;
7350 
7351 	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
7352 		return push_jmp_history(env, cur);
7353 
7354 	if (!add_new_state)
7355 		return push_jmp_history(env, cur);
7356 
7357 	/* There were no equivalent states, remember the current one.
7358 	 * Technically the current state is not proven to be safe yet,
7359 	 * but it will either reach the outermost bpf_exit (which means it's safe)
7360 	 * or it will be rejected. When there are no loops the verifier won't be
7361 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
7362 	 * again on the way to bpf_exit.
7363 	 * When looping the sl->state.branches will be > 0 and this state
7364 	 * will not be considered for equivalence until branches == 0.
7365 	 */
7366 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
7367 	if (!new_sl)
7368 		return -ENOMEM;
7369 	env->total_states++;
7370 	env->peak_states++;
7371 	env->prev_jmps_processed = env->jmps_processed;
7372 	env->prev_insn_processed = env->insn_processed;
7373 
7374 	/* add new state to the head of linked list */
7375 	new = &new_sl->state;
7376 	err = copy_verifier_state(new, cur);
7377 	if (err) {
7378 		free_verifier_state(new, false);
7379 		kfree(new_sl);
7380 		return err;
7381 	}
7382 	new->insn_idx = insn_idx;
7383 	WARN_ONCE(new->branches != 1,
7384 		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
7385 
7386 	cur->parent = new;
7387 	cur->first_insn_idx = insn_idx;
7388 	clear_jmp_history(cur);
7389 	new_sl->next = *explored_state(env, insn_idx);
7390 	*explored_state(env, insn_idx) = new_sl;
7391 	/* connect new state to parentage chain. Current frame needs all
7392 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
7393 	 * to the stack implicitly by JITs) so in callers' frames connect just
7394 	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
7395 	 * the state of the call instruction (with WRITTEN set), and r0 comes
7396 	 * from callee with its full parentage chain, anyway.
7397 	 */
7398 	/* clear write marks in current state: the writes we did are not writes
7399 	 * our child did, so they don't screen off its reads from us.
7400 	 * (There are no read marks in current state, because reads always mark
7401 	 * their parent and current state never has children yet.  Only
7402 	 * explored_states can get read marks.)
7403 	 */
7404 	for (j = 0; j <= cur->curframe; j++) {
7405 		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
7406 			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
7407 		for (i = 0; i < BPF_REG_FP; i++)
7408 			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
7409 	}
7410 
7411 	/* all stack frames are accessible from callee, clear them all */
7412 	for (j = 0; j <= cur->curframe; j++) {
7413 		struct bpf_func_state *frame = cur->frame[j];
7414 		struct bpf_func_state *newframe = new->frame[j];
7415 
7416 		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
7417 			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
7418 			frame->stack[i].spilled_ptr.parent =
7419 						&newframe->stack[i].spilled_ptr;
7420 		}
7421 	}
7422 	return 0;
7423 }
7424 
7425 /* Return true if it's OK to have the same insn return a different type. */
7426 static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7427 {
7428 	switch (type) {
7429 	case PTR_TO_CTX:
7430 	case PTR_TO_SOCKET:
7431 	case PTR_TO_SOCKET_OR_NULL:
7432 	case PTR_TO_SOCK_COMMON:
7433 	case PTR_TO_SOCK_COMMON_OR_NULL:
7434 	case PTR_TO_TCP_SOCK:
7435 	case PTR_TO_TCP_SOCK_OR_NULL:
7436 	case PTR_TO_XDP_SOCK:
7437 		return false;
7438 	default:
7439 		return true;
7440 	}
7441 }
7442 
7443 /* If an instruction was previously used with particular pointer types, then we
7444  * need to be careful to avoid cases such as the below, where it may be ok
7445  * for one branch to access the pointer, but not ok for the other branch:
7446  *
7447  * R1 = sock_ptr
7448  * goto X;
7449  * ...
7450  * R1 = some_other_valid_ptr;
7451  * goto X;
7452  * ...
7453  * R2 = *(u32 *)(R1 + 0);
7454  */
7455 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
7456 {
7457 	return src != prev && (!reg_type_mismatch_ok(src) ||
7458 			       !reg_type_mismatch_ok(prev));
7459 }
7460 
7461 static int do_check(struct bpf_verifier_env *env)
7462 {
7463 	struct bpf_verifier_state *state;
7464 	struct bpf_insn *insns = env->prog->insnsi;
7465 	struct bpf_reg_state *regs;
7466 	int insn_cnt = env->prog->len;
7467 	bool do_print_state = false;
7468 	int prev_insn_idx = -1;
7469 
7470 	env->prev_linfo = NULL;
7471 
7472 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
7473 	if (!state)
7474 		return -ENOMEM;
7475 	state->curframe = 0;
7476 	state->speculative = false;
7477 	state->branches = 1;
7478 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
7479 	if (!state->frame[0]) {
7480 		kfree(state);
7481 		return -ENOMEM;
7482 	}
7483 	env->cur_state = state;
7484 	init_func_state(env, state->frame[0],
7485 			BPF_MAIN_FUNC /* callsite */,
7486 			0 /* frameno */,
7487 			0 /* subprogno, zero == main subprog */);
7488 
7489 	for (;;) {
7490 		struct bpf_insn *insn;
7491 		u8 class;
7492 		int err;
7493 
7494 		env->prev_insn_idx = prev_insn_idx;
7495 		if (env->insn_idx >= insn_cnt) {
7496 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
7497 				env->insn_idx, insn_cnt);
7498 			return -EFAULT;
7499 		}
7500 
7501 		insn = &insns[env->insn_idx];
7502 		class = BPF_CLASS(insn->code);
7503 
7504 		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
7505 			verbose(env,
7506 				"BPF program is too large. Processed %d insn\n",
7507 				env->insn_processed);
7508 			return -E2BIG;
7509 		}
7510 
7511 		err = is_state_visited(env, env->insn_idx);
7512 		if (err < 0)
7513 			return err;
7514 		if (err == 1) {
7515 			/* found equivalent state, can prune the search */
7516 			if (env->log.level & BPF_LOG_LEVEL) {
7517 				if (do_print_state)
7518 					verbose(env, "\nfrom %d to %d%s: safe\n",
7519 						env->prev_insn_idx, env->insn_idx,
7520 						env->cur_state->speculative ?
7521 						" (speculative execution)" : "");
7522 				else
7523 					verbose(env, "%d: safe\n", env->insn_idx);
7524 			}
7525 			goto process_bpf_exit;
7526 		}
7527 
7528 		if (signal_pending(current))
7529 			return -EAGAIN;
7530 
7531 		if (need_resched())
7532 			cond_resched();
7533 
7534 		if (env->log.level & BPF_LOG_LEVEL2 ||
7535 		    (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
7536 			if (env->log.level & BPF_LOG_LEVEL2)
7537 				verbose(env, "%d:", env->insn_idx);
7538 			else
7539 				verbose(env, "\nfrom %d to %d%s:",
7540 					env->prev_insn_idx, env->insn_idx,
7541 					env->cur_state->speculative ?
7542 					" (speculative execution)" : "");
7543 			print_verifier_state(env, state->frame[state->curframe]);
7544 			do_print_state = false;
7545 		}
7546 
7547 		if (env->log.level & BPF_LOG_LEVEL) {
7548 			const struct bpf_insn_cbs cbs = {
7549 				.cb_print	= verbose,
7550 				.private_data	= env,
7551 			};
7552 
7553 			verbose_linfo(env, env->insn_idx, "; ");
7554 			verbose(env, "%d: ", env->insn_idx);
7555 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
7556 		}
7557 
7558 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
7559 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
7560 							   env->prev_insn_idx);
7561 			if (err)
7562 				return err;
7563 		}
7564 
7565 		regs = cur_regs(env);
7566 		env->insn_aux_data[env->insn_idx].seen = true;
7567 		prev_insn_idx = env->insn_idx;
7568 
7569 		if (class == BPF_ALU || class == BPF_ALU64) {
7570 			err = check_alu_op(env, insn);
7571 			if (err)
7572 				return err;
7573 
7574 		} else if (class == BPF_LDX) {
7575 			enum bpf_reg_type *prev_src_type, src_reg_type;
7576 
7577 			/* check for reserved fields is already done */
7578 
7579 			/* check src operand */
7580 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
7581 			if (err)
7582 				return err;
7583 
7584 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
7585 			if (err)
7586 				return err;
7587 
7588 			src_reg_type = regs[insn->src_reg].type;
7589 
7590 			/* check that memory (src_reg + off) is readable,
7591 			 * the state of dst_reg will be updated by this func
7592 			 */
7593 			err = check_mem_access(env, env->insn_idx, insn->src_reg,
7594 					       insn->off, BPF_SIZE(insn->code),
7595 					       BPF_READ, insn->dst_reg, false);
7596 			if (err)
7597 				return err;
7598 
7599 			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
7600 
7601 			if (*prev_src_type == NOT_INIT) {
7602 				/* saw a valid insn
7603 				 * dst_reg = *(u32 *)(src_reg + off)
7604 				 * save type to validate intersecting paths
7605 				 */
7606 				*prev_src_type = src_reg_type;
7607 
7608 			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
7609 				/* Abuser program is trying to use the same insn
7610 				 * dst_reg = *(u32*) (src_reg + off)
7611 				 * with different pointer types:
7612 				 * src_reg == ctx in one branch and
7613 				 * src_reg == stack|map in some other branch.
7614 				 * Reject it.
7615 				 */
7616 				verbose(env, "same insn cannot be used with different pointers\n");
7617 				return -EINVAL;
7618 			}
7619 
7620 		} else if (class == BPF_STX) {
7621 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
7622 
7623 			if (BPF_MODE(insn->code) == BPF_XADD) {
7624 				err = check_xadd(env, env->insn_idx, insn);
7625 				if (err)
7626 					return err;
7627 				env->insn_idx++;
7628 				continue;
7629 			}
7630 
7631 			/* check src1 operand */
7632 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
7633 			if (err)
7634 				return err;
7635 			/* check src2 operand */
7636 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7637 			if (err)
7638 				return err;
7639 
7640 			dst_reg_type = regs[insn->dst_reg].type;
7641 
7642 			/* check that memory (dst_reg + off) is writeable */
7643 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7644 					       insn->off, BPF_SIZE(insn->code),
7645 					       BPF_WRITE, insn->src_reg, false);
7646 			if (err)
7647 				return err;
7648 
7649 			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
7650 
7651 			if (*prev_dst_type == NOT_INIT) {
7652 				*prev_dst_type = dst_reg_type;
7653 			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
7654 				verbose(env, "same insn cannot be used with different pointers\n");
7655 				return -EINVAL;
7656 			}
7657 
7658 		} else if (class == BPF_ST) {
7659 			if (BPF_MODE(insn->code) != BPF_MEM ||
7660 			    insn->src_reg != BPF_REG_0) {
7661 				verbose(env, "BPF_ST uses reserved fields\n");
7662 				return -EINVAL;
7663 			}
7664 			/* check src operand */
7665 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7666 			if (err)
7667 				return err;
7668 
7669 			if (is_ctx_reg(env, insn->dst_reg)) {
7670 				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
7671 					insn->dst_reg,
7672 					reg_type_str[reg_state(env, insn->dst_reg)->type]);
7673 				return -EACCES;
7674 			}
7675 
7676 			/* check that memory (dst_reg + off) is writeable */
7677 			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7678 					       insn->off, BPF_SIZE(insn->code),
7679 					       BPF_WRITE, -1, false);
7680 			if (err)
7681 				return err;
7682 
7683 		} else if (class == BPF_JMP || class == BPF_JMP32) {
7684 			u8 opcode = BPF_OP(insn->code);
7685 
7686 			env->jmps_processed++;
7687 			if (opcode == BPF_CALL) {
7688 				if (BPF_SRC(insn->code) != BPF_K ||
7689 				    insn->off != 0 ||
7690 				    (insn->src_reg != BPF_REG_0 &&
7691 				     insn->src_reg != BPF_PSEUDO_CALL) ||
7692 				    insn->dst_reg != BPF_REG_0 ||
7693 				    class == BPF_JMP32) {
7694 					verbose(env, "BPF_CALL uses reserved fields\n");
7695 					return -EINVAL;
7696 				}
7697 
7698 				if (env->cur_state->active_spin_lock &&
7699 				    (insn->src_reg == BPF_PSEUDO_CALL ||
7700 				     insn->imm != BPF_FUNC_spin_unlock)) {
7701 					verbose(env, "function calls are not allowed while holding a lock\n");
7702 					return -EINVAL;
7703 				}
7704 				if (insn->src_reg == BPF_PSEUDO_CALL)
7705 					err = check_func_call(env, insn, &env->insn_idx);
7706 				else
7707 					err = check_helper_call(env, insn->imm, env->insn_idx);
7708 				if (err)
7709 					return err;
7710 
7711 			} else if (opcode == BPF_JA) {
7712 				if (BPF_SRC(insn->code) != BPF_K ||
7713 				    insn->imm != 0 ||
7714 				    insn->src_reg != BPF_REG_0 ||
7715 				    insn->dst_reg != BPF_REG_0 ||
7716 				    class == BPF_JMP32) {
7717 					verbose(env, "BPF_JA uses reserved fields\n");
7718 					return -EINVAL;
7719 				}
7720 
7721 				env->insn_idx += insn->off + 1;
7722 				continue;
7723 
7724 			} else if (opcode == BPF_EXIT) {
7725 				if (BPF_SRC(insn->code) != BPF_K ||
7726 				    insn->imm != 0 ||
7727 				    insn->src_reg != BPF_REG_0 ||
7728 				    insn->dst_reg != BPF_REG_0 ||
7729 				    class == BPF_JMP32) {
7730 					verbose(env, "BPF_EXIT uses reserved fields\n");
7731 					return -EINVAL;
7732 				}
7733 
7734 				if (env->cur_state->active_spin_lock) {
7735 					verbose(env, "bpf_spin_unlock is missing\n");
7736 					return -EINVAL;
7737 				}
7738 
7739 				if (state->curframe) {
7740 					/* exit from nested function */
7741 					err = prepare_func_exit(env, &env->insn_idx);
7742 					if (err)
7743 						return err;
7744 					do_print_state = true;
7745 					continue;
7746 				}
7747 
7748 				err = check_reference_leak(env);
7749 				if (err)
7750 					return err;
7751 
7752 				/* eBPF calling convention is such that R0 is used
7753 				 * to return the value from the eBPF program.
7754 				 * Make sure that it's readable at the time of
7755 				 * bpf_exit, which means that the program wrote
7756 				 * something into it earlier
7757 				 */
7758 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
7759 				if (err)
7760 					return err;
7761 
7762 				if (is_pointer_value(env, BPF_REG_0)) {
7763 					verbose(env, "R0 leaks addr as return value\n");
7764 					return -EACCES;
7765 				}
7766 
7767 				err = check_return_code(env);
7768 				if (err)
7769 					return err;
7770 process_bpf_exit:
7771 				update_branch_counts(env, env->cur_state);
7772 				err = pop_stack(env, &prev_insn_idx,
7773 						&env->insn_idx);
7774 				if (err < 0) {
7775 					if (err != -ENOENT)
7776 						return err;
7777 					break;
7778 				} else {
7779 					do_print_state = true;
7780 					continue;
7781 				}
7782 			} else {
7783 				err = check_cond_jmp_op(env, insn, &env->insn_idx);
7784 				if (err)
7785 					return err;
7786 			}
7787 		} else if (class == BPF_LD) {
7788 			u8 mode = BPF_MODE(insn->code);
7789 
7790 			if (mode == BPF_ABS || mode == BPF_IND) {
7791 				err = check_ld_abs(env, insn);
7792 				if (err)
7793 					return err;
7794 
7795 			} else if (mode == BPF_IMM) {
7796 				err = check_ld_imm(env, insn);
7797 				if (err)
7798 					return err;
7799 
7800 				env->insn_idx++;
7801 				env->insn_aux_data[env->insn_idx].seen = true;
7802 			} else {
7803 				verbose(env, "invalid BPF_LD mode\n");
7804 				return -EINVAL;
7805 			}
7806 		} else {
7807 			verbose(env, "unknown insn class %d\n", class);
7808 			return -EINVAL;
7809 		}
7810 
7811 		env->insn_idx++;
7812 	}
7813 
7814 	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
7815 	return 0;
7816 }
7817 
7818 static int check_map_prealloc(struct bpf_map *map)
7819 {
7820 	return (map->map_type != BPF_MAP_TYPE_HASH &&
7821 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
7822 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
7823 		!(map->map_flags & BPF_F_NO_PREALLOC);
7824 }
7825 
7826 static bool is_tracing_prog_type(enum bpf_prog_type type)
7827 {
7828 	switch (type) {
7829 	case BPF_PROG_TYPE_KPROBE:
7830 	case BPF_PROG_TYPE_TRACEPOINT:
7831 	case BPF_PROG_TYPE_PERF_EVENT:
7832 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
7833 		return true;
7834 	default:
7835 		return false;
7836 	}
7837 }
7838 
7839 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
7840 					struct bpf_map *map,
7841 					struct bpf_prog *prog)
7842 
7843 {
7844 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
7845 	 * preallocated hash maps, since doing memory allocation
7846 	 * in overflow_handler can crash depending on where nmi got
7847 	 * triggered.
7848 	 */
7849 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
7850 		if (!check_map_prealloc(map)) {
7851 			verbose(env, "perf_event programs can only use preallocated hash map\n");
7852 			return -EINVAL;
7853 		}
7854 		if (map->inner_map_meta &&
7855 		    !check_map_prealloc(map->inner_map_meta)) {
7856 			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
7857 			return -EINVAL;
7858 		}
7859 	}
7860 
7861 	if ((is_tracing_prog_type(prog->type) ||
7862 	     prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
7863 	    map_value_has_spin_lock(map)) {
7864 		verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
7865 		return -EINVAL;
7866 	}
7867 
7868 	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
7869 	    !bpf_offload_prog_map_match(prog, map)) {
7870 		verbose(env, "offload device mismatch between prog and map\n");
7871 		return -EINVAL;
7872 	}
7873 
7874 	return 0;
7875 }
7876 
7877 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
7878 {
7879 	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
7880 		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
7881 }
7882 
7883 /* look for pseudo eBPF instructions that access map FDs and
7884  * replace them with actual map pointers
7885  */
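/* Illustrative example (added for clarity, assuming the standard
 * BPF_LD_MAP_FD() helper macro and a map file descriptor of 3):
 *
 *   BPF_LD_MAP_FD(BPF_REG_1, 3)
 *
 * expands to a two-insn ld_imm64 pair with src_reg == BPF_PSEUDO_MAP_FD;
 * below, its two imm32 halves are rewritten to hold the low and high 32
 * bits of the in-kernel 'struct bpf_map *' address instead of the fd.
 */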
7886 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
7887 {
7888 	struct bpf_insn *insn = env->prog->insnsi;
7889 	int insn_cnt = env->prog->len;
7890 	int i, j, err;
7891 
7892 	err = bpf_prog_calc_tag(env->prog);
7893 	if (err)
7894 		return err;
7895 
7896 	for (i = 0; i < insn_cnt; i++, insn++) {
7897 		if (BPF_CLASS(insn->code) == BPF_LDX &&
7898 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
7899 			verbose(env, "BPF_LDX uses reserved fields\n");
7900 			return -EINVAL;
7901 		}
7902 
7903 		if (BPF_CLASS(insn->code) == BPF_STX &&
7904 		    ((BPF_MODE(insn->code) != BPF_MEM &&
7905 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
7906 			verbose(env, "BPF_STX uses reserved fields\n");
7907 			return -EINVAL;
7908 		}
7909 
7910 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
7911 			struct bpf_insn_aux_data *aux;
7912 			struct bpf_map *map;
7913 			struct fd f;
7914 			u64 addr;
7915 
7916 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
7917 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
7918 			    insn[1].off != 0) {
7919 				verbose(env, "invalid bpf_ld_imm64 insn\n");
7920 				return -EINVAL;
7921 			}
7922 
7923 			if (insn[0].src_reg == 0)
7924 				/* valid generic load 64-bit imm */
7925 				goto next_insn;
7926 
7927 			/* In the final convert_pseudo_ld_imm64() step, this is
7928 			 * converted into a regular 64-bit imm load insn.
7929 			 */
7930 			if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
7931 			     insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
7932 			    (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
7933 			     insn[1].imm != 0)) {
7934 				verbose(env,
7935 					"unrecognized bpf_ld_imm64 insn\n");
7936 				return -EINVAL;
7937 			}
7938 
7939 			f = fdget(insn[0].imm);
7940 			map = __bpf_map_get(f);
7941 			if (IS_ERR(map)) {
7942 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
7943 					insn[0].imm);
7944 				return PTR_ERR(map);
7945 			}
7946 
7947 			err = check_map_prog_compatibility(env, map, env->prog);
7948 			if (err) {
7949 				fdput(f);
7950 				return err;
7951 			}
7952 
7953 			aux = &env->insn_aux_data[i];
7954 			if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
7955 				addr = (unsigned long)map;
7956 			} else {
7957 				u32 off = insn[1].imm;
7958 
7959 				if (off >= BPF_MAX_VAR_OFF) {
7960 					verbose(env, "direct value offset of %u is not allowed\n", off);
7961 					fdput(f);
7962 					return -EINVAL;
7963 				}
7964 
7965 				if (!map->ops->map_direct_value_addr) {
7966 					verbose(env, "no direct value access support for this map type\n");
7967 					fdput(f);
7968 					return -EINVAL;
7969 				}
7970 
7971 				err = map->ops->map_direct_value_addr(map, &addr, off);
7972 				if (err) {
7973 					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
7974 						map->value_size, off);
7975 					fdput(f);
7976 					return err;
7977 				}
7978 
7979 				aux->map_off = off;
7980 				addr += off;
7981 			}
7982 
7983 			insn[0].imm = (u32)addr;
7984 			insn[1].imm = addr >> 32;
7985 
7986 			/* check whether we recorded this map already */
7987 			for (j = 0; j < env->used_map_cnt; j++) {
7988 				if (env->used_maps[j] == map) {
7989 					aux->map_index = j;
7990 					fdput(f);
7991 					goto next_insn;
7992 				}
7993 			}
7994 
7995 			if (env->used_map_cnt >= MAX_USED_MAPS) {
7996 				fdput(f);
7997 				return -E2BIG;
7998 			}
7999 
8000 			/* hold the map. If the program is rejected by the verifier,
8001 			 * the map will be released by release_maps() or it
8002 			 * will be used by the valid program until it's unloaded
8003 			 * and all maps are released in free_used_maps()
8004 			 */
8005 			map = bpf_map_inc(map, false);
8006 			if (IS_ERR(map)) {
8007 				fdput(f);
8008 				return PTR_ERR(map);
8009 			}
8010 
8011 			aux->map_index = env->used_map_cnt;
8012 			env->used_maps[env->used_map_cnt++] = map;
8013 
8014 			if (bpf_map_is_cgroup_storage(map) &&
8015 			    bpf_cgroup_storage_assign(env->prog, map)) {
8016 				verbose(env, "only one cgroup storage of each type is allowed\n");
8017 				fdput(f);
8018 				return -EBUSY;
8019 			}
8020 
8021 			fdput(f);
8022 next_insn:
8023 			insn++;
8024 			i++;
8025 			continue;
8026 		}
8027 
8028 		/* Basic sanity check before we invest more work here. */
8029 		if (!bpf_opcode_in_insntable(insn->code)) {
8030 			verbose(env, "unknown opcode %02x\n", insn->code);
8031 			return -EINVAL;
8032 		}
8033 	}
8034 
8035 	/* now all pseudo BPF_LD_IMM64 instructions load valid
8036 	 * 'struct bpf_map *' into a register instead of user map_fd.
8037 	 * These pointers will be used later by the verifier to validate map access.
8038 	 */
8039 	return 0;
8040 }
8041 
8042 /* drop refcnt of maps used by the rejected program */
8043 static void release_maps(struct bpf_verifier_env *env)
8044 {
8045 	enum bpf_cgroup_storage_type stype;
8046 	int i;
8047 
8048 	for_each_cgroup_storage_type(stype) {
8049 		if (!env->prog->aux->cgroup_storage[stype])
8050 			continue;
8051 		bpf_cgroup_storage_release(env->prog,
8052 			env->prog->aux->cgroup_storage[stype]);
8053 	}
8054 
8055 	for (i = 0; i < env->used_map_cnt; i++)
8056 		bpf_map_put(env->used_maps[i]);
8057 }
8058 
8059 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
8060 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
8061 {
8062 	struct bpf_insn *insn = env->prog->insnsi;
8063 	int insn_cnt = env->prog->len;
8064 	int i;
8065 
8066 	for (i = 0; i < insn_cnt; i++, insn++)
8067 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
8068 			insn->src_reg = 0;
8069 }
8070 
8071 /* single env->prog->insnsi[off] instruction was replaced with the range
8072  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
8073  * [0, off) and [off, end) to new locations, so the patched range stays zero
8074  */
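/* Illustrative example (added for clarity): patching insn 10 into a
 * 3-insn sequence (cnt == 3) copies aux data for insns [0, 10) as-is,
 * moves the old aux[10] to new index 12 together with everything that
 * followed it, and leaves the two fresh slots at indices 10 and 11
 * zeroed (only 'seen' and 'zext_dst' are filled in below).
 */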
8075 static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8076 				struct bpf_prog *new_prog, u32 off, u32 cnt)
8077 {
8078 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
8079 	struct bpf_insn *insn = new_prog->insnsi;
8080 	u32 prog_len;
8081 	int i;
8082 
8083 	/* aux info at OFF always needs adjustment, no matter whether the fast
8084 	 * path (cnt == 1) is taken or not. There is no guarantee the insn at OFF
8085 	 * is the original insn of the old prog.
8086 	 */
8087 	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
8088 
8089 	if (cnt == 1)
8090 		return 0;
8091 	prog_len = new_prog->len;
8092 	new_data = vzalloc(array_size(prog_len,
8093 				      sizeof(struct bpf_insn_aux_data)));
8094 	if (!new_data)
8095 		return -ENOMEM;
8096 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
8097 	memcpy(new_data + off + cnt - 1, old_data + off,
8098 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
8099 	for (i = off; i < off + cnt - 1; i++) {
8100 		new_data[i].seen = true;
8101 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
8102 	}
8103 	env->insn_aux_data = new_data;
8104 	vfree(old_data);
8105 	return 0;
8106 }
8107 
8108 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
8109 {
8110 	int i;
8111 
8112 	if (len == 1)
8113 		return;
8114 	/* NOTE: fake 'exit' subprog should be updated as well. */
8115 	for (i = 0; i <= env->subprog_cnt; i++) {
8116 		if (env->subprog_info[i].start <= off)
8117 			continue;
8118 		env->subprog_info[i].start += len - 1;
8119 	}
8120 }
8121 
8122 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
8123 					    const struct bpf_insn *patch, u32 len)
8124 {
8125 	struct bpf_prog *new_prog;
8126 
8127 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
8128 	if (IS_ERR(new_prog)) {
8129 		if (PTR_ERR(new_prog) == -ERANGE)
8130 			verbose(env,
8131 				"insn %d cannot be patched due to 16-bit range\n",
8132 				env->insn_aux_data[off].orig_idx);
8133 		return NULL;
8134 	}
8135 	if (adjust_insn_aux_data(env, new_prog, off, len))
8136 		return NULL;
8137 	adjust_subprog_starts(env, off, len);
8138 	return new_prog;
8139 }
8140 
8141 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
8142 					      u32 off, u32 cnt)
8143 {
8144 	int i, j;
8145 
8146 	/* find first prog starting at or after off (first to remove) */
8147 	for (i = 0; i < env->subprog_cnt; i++)
8148 		if (env->subprog_info[i].start >= off)
8149 			break;
8150 	/* find first prog starting at or after off + cnt (first to stay) */
8151 	for (j = i; j < env->subprog_cnt; j++)
8152 		if (env->subprog_info[j].start >= off + cnt)
8153 			break;
8154 	/* if j doesn't start exactly at off + cnt, we are just removing
8155 	 * the front of previous prog
8156 	 */
8157 	if (env->subprog_info[j].start != off + cnt)
8158 		j--;
8159 
8160 	if (j > i) {
8161 		struct bpf_prog_aux *aux = env->prog->aux;
8162 		int move;
8163 
8164 		/* move fake 'exit' subprog as well */
8165 		move = env->subprog_cnt + 1 - j;
8166 
8167 		memmove(env->subprog_info + i,
8168 			env->subprog_info + j,
8169 			sizeof(*env->subprog_info) * move);
8170 		env->subprog_cnt -= j - i;
8171 
8172 		/* remove func_info */
8173 		if (aux->func_info) {
8174 			move = aux->func_info_cnt - j;
8175 
8176 			memmove(aux->func_info + i,
8177 				aux->func_info + j,
8178 				sizeof(*aux->func_info) * move);
8179 			aux->func_info_cnt -= j - i;
8180 			/* func_info->insn_off is set after all code rewrites,
8181 			 * in adjust_btf_func() - no need to adjust
8182 			 */
8183 		}
8184 	} else {
8185 		/* convert i from "first prog to remove" to "first to adjust" */
8186 		if (env->subprog_info[i].start == off)
8187 			i++;
8188 	}
8189 
8190 	/* update fake 'exit' subprog as well */
8191 	for (; i <= env->subprog_cnt; i++)
8192 		env->subprog_info[i].start -= cnt;
8193 
8194 	return 0;
8195 }
8196 
8197 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
8198 				      u32 cnt)
8199 {
8200 	struct bpf_prog *prog = env->prog;
8201 	u32 i, l_off, l_cnt, nr_linfo;
8202 	struct bpf_line_info *linfo;
8203 
8204 	nr_linfo = prog->aux->nr_linfo;
8205 	if (!nr_linfo)
8206 		return 0;
8207 
8208 	linfo = prog->aux->linfo;
8209 
8210 	/* find first line info to remove, count lines to be removed */
8211 	for (i = 0; i < nr_linfo; i++)
8212 		if (linfo[i].insn_off >= off)
8213 			break;
8214 
8215 	l_off = i;
8216 	l_cnt = 0;
8217 	for (; i < nr_linfo; i++)
8218 		if (linfo[i].insn_off < off + cnt)
8219 			l_cnt++;
8220 		else
8221 			break;
8222 
8223 	/* If the first live insn doesn't match the first live linfo, it needs to
8224 	 * "inherit" the last removed linfo.  prog is already modified, so prog->len
8225 	 * == off means no live instructions remain (the tail of the program was removed).
8226 	 */
8227 	if (prog->len != off && l_cnt &&
8228 	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
8229 		l_cnt--;
8230 		linfo[--i].insn_off = off + cnt;
8231 	}
8232 
8233 	/* remove the line info which refer to the removed instructions */
8234 	if (l_cnt) {
8235 		memmove(linfo + l_off, linfo + i,
8236 			sizeof(*linfo) * (nr_linfo - i));
8237 
8238 		prog->aux->nr_linfo -= l_cnt;
8239 		nr_linfo = prog->aux->nr_linfo;
8240 	}
8241 
8242 	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
8243 	for (i = l_off; i < nr_linfo; i++)
8244 		linfo[i].insn_off -= cnt;
8245 
8246 	/* fix up all subprogs (incl. 'exit') which start >= off */
8247 	for (i = 0; i <= env->subprog_cnt; i++)
8248 		if (env->subprog_info[i].linfo_idx > l_off) {
8249 			/* program may have started in the removed region but
8250 			 * may not be fully removed
8251 			 */
8252 			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
8253 				env->subprog_info[i].linfo_idx -= l_cnt;
8254 			else
8255 				env->subprog_info[i].linfo_idx = l_off;
8256 		}
8257 
8258 	return 0;
8259 }
8260 
8261 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
8262 {
8263 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8264 	unsigned int orig_prog_len = env->prog->len;
8265 	int err;
8266 
8267 	if (bpf_prog_is_dev_bound(env->prog->aux))
8268 		bpf_prog_offload_remove_insns(env, off, cnt);
8269 
8270 	err = bpf_remove_insns(env->prog, off, cnt);
8271 	if (err)
8272 		return err;
8273 
8274 	err = adjust_subprog_starts_after_remove(env, off, cnt);
8275 	if (err)
8276 		return err;
8277 
8278 	err = bpf_adj_linfo_after_remove(env, off, cnt);
8279 	if (err)
8280 		return err;
8281 
8282 	memmove(aux_data + off,	aux_data + off + cnt,
8283 		sizeof(*aux_data) * (orig_prog_len - off - cnt));
8284 
8285 	return 0;
8286 }
8287 
8288 /* The verifier does more data flow analysis than llvm and will not
8289  * explore branches that are dead at run time. Malicious programs can
8290  * have dead code too. Therefore replace all dead at-run-time code
8291  * with 'ja -1'.
8292  *
8293  * Plain nops would not be optimal: if they sat at the end of the
8294  * program and, through another bug, we managed to jump there, we
8295  * would execute beyond program memory. Returning an exception
8296  * code also wouldn't work, since the dead code could be located
8297  * in subprogs.
8298  */
8299 static void sanitize_dead_code(struct bpf_verifier_env *env)
8300 {
8301 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8302 	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
8303 	struct bpf_insn *insn = env->prog->insnsi;
8304 	const int insn_cnt = env->prog->len;
8305 	int i;
8306 
8307 	for (i = 0; i < insn_cnt; i++) {
8308 		if (aux_data[i].seen)
8309 			continue;
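8310 		/* overwrite the never-seen insn with a jump onto itself
8310 		 * ('ja -1'), so even a stray jump into dead code cannot
8310 		 * escape the program image
8310 		 */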
8310 		memcpy(insn + i, &trap, sizeof(trap));
8311 	}
8312 }
8313 
8314 static bool insn_is_cond_jump(u8 code)
8315 {
8316 	u8 op;
8317 
8318 	if (BPF_CLASS(code) == BPF_JMP32)
8319 		return true;
8320 
8321 	if (BPF_CLASS(code) != BPF_JMP)
8322 		return false;
8323 
8324 	op = BPF_OP(code);
8325 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
8326 }
8327 
8328 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
8329 {
8330 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8331 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8332 	struct bpf_insn *insn = env->prog->insnsi;
8333 	const int insn_cnt = env->prog->len;
8334 	int i;
8335 
8336 	for (i = 0; i < insn_cnt; i++, insn++) {
8337 		if (!insn_is_cond_jump(insn->code))
8338 			continue;
8339 
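8339 		/* If the fall-through insn was never visited, the branch is
8339 		 * always taken: hard-wire it into an unconditional jump with the
8339 		 * same offset.  If the jump target was never visited, the branch
8339 		 * is never taken: turn it into a 'ja 0' no-op, to be cleaned up
8339 		 * later by opt_remove_nops().
8339 		 */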
8340 		if (!aux_data[i + 1].seen)
8341 			ja.off = insn->off;
8342 		else if (!aux_data[i + 1 + insn->off].seen)
8343 			ja.off = 0;
8344 		else
8345 			continue;
8346 
8347 		if (bpf_prog_is_dev_bound(env->prog->aux))
8348 			bpf_prog_offload_replace_insn(env, i, &ja);
8349 
8350 		memcpy(insn, &ja, sizeof(ja));
8351 	}
8352 }
8353 
8354 static int opt_remove_dead_code(struct bpf_verifier_env *env)
8355 {
8356 	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8357 	int insn_cnt = env->prog->len;
8358 	int i, err;
8359 
8360 	for (i = 0; i < insn_cnt; i++) {
8361 		int j;
8362 
8363 		j = 0;
8364 		while (i + j < insn_cnt && !aux_data[i + j].seen)
8365 			j++;
8366 		if (!j)
8367 			continue;
8368 
8369 		err = verifier_remove_insns(env, i, j);
8370 		if (err)
8371 			return err;
8372 		insn_cnt = env->prog->len;
8373 	}
8374 
8375 	return 0;
8376 }
8377 
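8377 /* Strip unconditional 'ja 0' jumps; they are pure no-ops, e.g. the ones left
8377  * behind by opt_hard_wire_dead_code_branches() above.
8377  */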
8378 static int opt_remove_nops(struct bpf_verifier_env *env)
8379 {
8380 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8381 	struct bpf_insn *insn = env->prog->insnsi;
8382 	int insn_cnt = env->prog->len;
8383 	int i, err;
8384 
8385 	for (i = 0; i < insn_cnt; i++) {
8386 		if (memcmp(&insn[i], &ja, sizeof(ja)))
8387 			continue;
8388 
8389 		err = verifier_remove_insns(env, i, 1);
8390 		if (err)
8391 			return err;
8392 		insn_cnt--;
8393 		i--;
8394 	}
8395 
8396 	return 0;
8397 }
8398 
8399 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
8400 					 const union bpf_attr *attr)
8401 {
8402 	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
8403 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
8404 	int i, patch_len, delta = 0, len = env->prog->len;
8405 	struct bpf_insn *insns = env->prog->insnsi;
8406 	struct bpf_prog *new_prog;
8407 	bool rnd_hi32;
8408 
8409 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
8410 	zext_patch[1] = BPF_ZEXT_REG(0);
8411 	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
8412 	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
8413 	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
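8413 	/* Two patch templates, completed per insn below: zext_patch appends an
8413 	 * explicit zero-extension of the destination (a special mov32 insn)
8413 	 * after a 32-bit def that needs one; rnd_hi32_patch, used with the
8413 	 * BPF_F_TEST_RND_HI32 test flag, instead ORs a random value shifted
8413 	 * into the upper 32 bits of the destination to poison them.
8413 	 */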
8414 	for (i = 0; i < len; i++) {
8415 		int adj_idx = i + delta;
8416 		struct bpf_insn insn;
8417 
8418 		insn = insns[adj_idx];
8419 		if (!aux[adj_idx].zext_dst) {
8420 			u8 code, class;
8421 			u32 imm_rnd;
8422 
8423 			if (!rnd_hi32)
8424 				continue;
8425 
8426 			code = insn.code;
8427 			class = BPF_CLASS(code);
8428 			if (insn_no_def(&insn))
8429 				continue;
8430 
8431 			/* NOTE: arg "reg" (the fourth one) is only used for
8432 			 *       BPF_STX, which has been ruled out by the check
8433 			 *       above, so it is safe to pass NULL here.
8434 			 */
8435 			if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
8436 				if (class == BPF_LD &&
8437 				    BPF_MODE(code) == BPF_IMM)
8438 					i++;
8439 				continue;
8440 			}
8441 
8442 			/* ctx load could be transformed into wider load. */
8443 			if (class == BPF_LDX &&
8444 			    aux[adj_idx].ptr_type == PTR_TO_CTX)
8445 				continue;
8446 
8447 			imm_rnd = get_random_int();
8448 			rnd_hi32_patch[0] = insn;
8449 			rnd_hi32_patch[1].imm = imm_rnd;
8450 			rnd_hi32_patch[3].dst_reg = insn.dst_reg;
8451 			patch = rnd_hi32_patch;
8452 			patch_len = 4;
8453 			goto apply_patch_buffer;
8454 		}
8455 
8456 		if (!bpf_jit_needs_zext())
8457 			continue;
8458 
8459 		zext_patch[0] = insn;
8460 		zext_patch[1].dst_reg = insn.dst_reg;
8461 		zext_patch[1].src_reg = insn.dst_reg;
8462 		patch = zext_patch;
8463 		patch_len = 2;
8464 apply_patch_buffer:
8465 		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
8466 		if (!new_prog)
8467 			return -ENOMEM;
8468 		env->prog = new_prog;
8469 		insns = new_prog->insnsi;
8470 		aux = env->insn_aux_data;
8471 		delta += patch_len - 1;
8472 	}
8473 
8474 	return 0;
8475 }
8476 
8477 /* convert load instructions that access fields of a context type into a
8478  * sequence of instructions that access fields of the underlying structure:
8479  *     struct __sk_buff    -> struct sk_buff
8480  *     struct bpf_sock_ops -> struct sock
8481  */
8482 static int convert_ctx_accesses(struct bpf_verifier_env *env)
8483 {
8484 	const struct bpf_verifier_ops *ops = env->ops;
8485 	int i, cnt, size, ctx_field_size, delta = 0;
8486 	const int insn_cnt = env->prog->len;
8487 	struct bpf_insn insn_buf[16], *insn;
8488 	u32 target_size, size_default, off;
8489 	struct bpf_prog *new_prog;
8490 	enum bpf_access_type type;
8491 	bool is_narrower_load;
8492 
8493 	if (ops->gen_prologue || env->seen_direct_write) {
8494 		if (!ops->gen_prologue) {
8495 			verbose(env, "bpf verifier is misconfigured\n");
8496 			return -EINVAL;
8497 		}
8498 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
8499 					env->prog);
8500 		if (cnt >= ARRAY_SIZE(insn_buf)) {
8501 			verbose(env, "bpf verifier is misconfigured\n");
8502 			return -EINVAL;
8503 		} else if (cnt) {
8504 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
8505 			if (!new_prog)
8506 				return -ENOMEM;
8507 
8508 			env->prog = new_prog;
8509 			delta += cnt - 1;
8510 		}
8511 	}
8512 
8513 	if (bpf_prog_is_dev_bound(env->prog->aux))
8514 		return 0;
8515 
8516 	insn = env->prog->insnsi + delta;
8517 
8518 	for (i = 0; i < insn_cnt; i++, insn++) {
8519 		bpf_convert_ctx_access_t convert_ctx_access;
8520 
8521 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
8522 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
8523 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
8524 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
8525 			type = BPF_READ;
8526 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
8527 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
8528 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
8529 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
8530 			type = BPF_WRITE;
8531 		else
8532 			continue;
8533 
8534 		if (type == BPF_WRITE &&
8535 		    env->insn_aux_data[i + delta].sanitize_stack_off) {
8536 			struct bpf_insn patch[] = {
8537 				/* Sanitize suspicious stack slot with zero.
8538 				 * There are no memory dependencies for this store,
8539 				 * since it's only using frame pointer and immediate
8540 				 * constant of zero
8541 				 */
8542 				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
8543 					   env->insn_aux_data[i + delta].sanitize_stack_off,
8544 					   0),
8545 				/* the original STX instruction will immediately
8546 				 * overwrite the same stack slot with appropriate value
8547 				 */
8548 				*insn,
8549 			};
8550 
8551 			cnt = ARRAY_SIZE(patch);
8552 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
8553 			if (!new_prog)
8554 				return -ENOMEM;
8555 
8556 			delta    += cnt - 1;
8557 			env->prog = new_prog;
8558 			insn      = new_prog->insnsi + i + delta;
8559 			continue;
8560 		}
8561 
8562 		switch (env->insn_aux_data[i + delta].ptr_type) {
8563 		case PTR_TO_CTX:
8564 			if (!ops->convert_ctx_access)
8565 				continue;
8566 			convert_ctx_access = ops->convert_ctx_access;
8567 			break;
8568 		case PTR_TO_SOCKET:
8569 		case PTR_TO_SOCK_COMMON:
8570 			convert_ctx_access = bpf_sock_convert_ctx_access;
8571 			break;
8572 		case PTR_TO_TCP_SOCK:
8573 			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8574 			break;
8575 		case PTR_TO_XDP_SOCK:
8576 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8577 			break;
8578 		default:
8579 			continue;
8580 		}
8581 
8582 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
8583 		size = BPF_LDST_BYTES(insn);
8584 
8585 		/* If the read access is a narrower load of the field,
8586 		 * convert it to a 4/8-byte load, to minimize program type
8587 		 * specific convert_ctx_access changes. If the conversion is
8588 		 * successful, we will apply the proper mask to the result.
8589 		 */
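8589 		/* Illustrative example (offsets made up): a 1-byte read at ctx
8589 		 * offset 14 of a 4-byte field starting at offset 12 is widened
8589 		 * to a 4-byte load at offset 12; on little-endian the shift
8589 		 * computed below is 8 * (14 & 3) == 16, so the patched sequence
8589 		 * ends with dst >>= 16 and dst &= 0xff to recover the byte.
8589 		 */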
8590 		is_narrower_load = size < ctx_field_size;
8591 		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
8592 		off = insn->off;
8593 		if (is_narrower_load) {
8594 			u8 size_code;
8595 
8596 			if (type == BPF_WRITE) {
8597 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
8598 				return -EINVAL;
8599 			}
8600 
8601 			size_code = BPF_H;
8602 			if (ctx_field_size == 4)
8603 				size_code = BPF_W;
8604 			else if (ctx_field_size == 8)
8605 				size_code = BPF_DW;
8606 
8607 			insn->off = off & ~(size_default - 1);
8608 			insn->code = BPF_LDX | BPF_MEM | size_code;
8609 		}
8610 
8611 		target_size = 0;
8612 		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
8613 					 &target_size);
8614 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
8615 		    (ctx_field_size && !target_size)) {
8616 			verbose(env, "bpf verifier is misconfigured\n");
8617 			return -EINVAL;
8618 		}
8619 
8620 		if (is_narrower_load && size < target_size) {
8621 			u8 shift = bpf_ctx_narrow_load_shift(off, size,
8622 							     size_default);
8623 			if (ctx_field_size <= 4) {
8624 				if (shift)
8625 					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
8626 									insn->dst_reg,
8627 									shift);
8628 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
8629 								(1 << size * 8) - 1);
8630 			} else {
8631 				if (shift)
8632 					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
8633 									insn->dst_reg,
8634 									shift);
8635 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
8636 								(1ULL << size * 8) - 1);
8637 			}
8638 		}
8639 
8640 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
8641 		if (!new_prog)
8642 			return -ENOMEM;
8643 
8644 		delta += cnt - 1;
8645 
8646 		/* keep walking new program and skip insns we just inserted */
8647 		env->prog = new_prog;
8648 		insn      = new_prog->insnsi + i + delta;
8649 	}
8650 
8651 	return 0;
8652 }
8653 
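8653 /* Split a program that uses bpf-to-bpf calls into one bpf_prog per subprog,
8653  * JIT each of them, patch every BPF_PSEUDO_CALL with its callee's JITed
8653  * address and then run a final JIT pass so the calls are emitted directly.
8653  */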
8654 static int jit_subprogs(struct bpf_verifier_env *env)
8655 {
8656 	struct bpf_prog *prog = env->prog, **func, *tmp;
8657 	int i, j, subprog_start, subprog_end = 0, len, subprog;
8658 	struct bpf_insn *insn;
8659 	void *old_bpf_func;
8660 	int err;
8661 
8662 	if (env->subprog_cnt <= 1)
8663 		return 0;
8664 
8665 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
8666 		if (insn->code != (BPF_JMP | BPF_CALL) ||
8667 		    insn->src_reg != BPF_PSEUDO_CALL)
8668 			continue;
8669 		/* Upon error here we cannot fall back to interpreter but
8670 		 * need a hard reject of the program. Thus -EFAULT is
8671 		 * propagated in any case.
8672 		 */
8673 		subprog = find_subprog(env, i + insn->imm + 1);
8674 		if (subprog < 0) {
8675 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
8676 				  i + insn->imm + 1);
8677 			return -EFAULT;
8678 		}
8679 		/* temporarily remember subprog id inside insn instead of
8680 		 * aux_data, since next loop will split up all insns into funcs
8681 		 */
8682 		insn->off = subprog;
8683 		/* remember original imm in case JIT fails and fallback
8684 		 * to interpreter will be needed
8685 		 */
8686 		env->insn_aux_data[i].call_imm = insn->imm;
8687 		/* point imm to __bpf_call_base+1 from the JIT's point of view */
8688 		insn->imm = 1;
8689 	}
8690 
8691 	err = bpf_prog_alloc_jited_linfo(prog);
8692 	if (err)
8693 		goto out_undo_insn;
8694 
8695 	err = -ENOMEM;
8696 	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
8697 	if (!func)
8698 		goto out_undo_insn;
8699 
8700 	for (i = 0; i < env->subprog_cnt; i++) {
8701 		subprog_start = subprog_end;
8702 		subprog_end = env->subprog_info[i + 1].start;
8703 
8704 		len = subprog_end - subprog_start;
8705 		/* BPF_PROG_RUN doesn't call subprogs directly,
8706 		 * hence main prog stats include the runtime of subprogs.
8707 		 * subprogs don't have IDs and are not reachable via prog_get_next_id,
8708 		 * so func[i]->aux->stats will never be accessed and stays NULL.
8709 		 */
8710 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
8711 		if (!func[i])
8712 			goto out_free;
8713 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
8714 		       len * sizeof(struct bpf_insn));
8715 		func[i]->type = prog->type;
8716 		func[i]->len = len;
8717 		if (bpf_prog_calc_tag(func[i]))
8718 			goto out_free;
8719 		func[i]->is_func = 1;
8720 		func[i]->aux->func_idx = i;
8721 		/* the btf and func_info will be freed only at prog->aux */
8722 		func[i]->aux->btf = prog->aux->btf;
8723 		func[i]->aux->func_info = prog->aux->func_info;
8724 
8725 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
8726 		 * Long term we would need debug info to populate the names.
8727 		 */
8728 		func[i]->aux->name[0] = 'F';
8729 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
8730 		func[i]->jit_requested = 1;
8731 		func[i]->aux->linfo = prog->aux->linfo;
8732 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
8733 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
8734 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
8735 		func[i] = bpf_int_jit_compile(func[i]);
8736 		if (!func[i]->jited) {
8737 			err = -ENOTSUPP;
8738 			goto out_free;
8739 		}
8740 		cond_resched();
8741 	}
8742 	/* at this point all bpf functions were successfully JITed
8743 	 * now populate all bpf_calls with correct addresses and
8744 	 * run last pass of JIT
8745 	 */
8746 	for (i = 0; i < env->subprog_cnt; i++) {
8747 		insn = func[i]->insnsi;
8748 		for (j = 0; j < func[i]->len; j++, insn++) {
8749 			if (insn->code != (BPF_JMP | BPF_CALL) ||
8750 			    insn->src_reg != BPF_PSEUDO_CALL)
8751 				continue;
8752 			subprog = insn->off;
8753 			insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
8754 				    __bpf_call_base;
8755 		}
8756 
8757 		/* we use the aux data to keep a list of the start addresses
8758 		 * of the JITed images for each function in the program
8759 		 *
8760 		 * for some architectures, such as powerpc64, the imm field
8761 		 * might not be large enough to hold the offset of the start
8762 		 * address of the callee's JITed image from __bpf_call_base
8763 		 *
8764 		 * in such cases, we can lookup the start address of a callee
8765 		 * by using its subprog id, available from the off field of
8766 		 * the call instruction, as an index for this list
8767 		 */
8768 		func[i]->aux->func = func;
8769 		func[i]->aux->func_cnt = env->subprog_cnt;
8770 	}
8771 	for (i = 0; i < env->subprog_cnt; i++) {
8772 		old_bpf_func = func[i]->bpf_func;
8773 		tmp = bpf_int_jit_compile(func[i]);
8774 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
8775 			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
8776 			err = -ENOTSUPP;
8777 			goto out_free;
8778 		}
8779 		cond_resched();
8780 	}
8781 
8782 	/* finally lock prog and jit images for all functions and
8783 	 * populate kallsyms
8784 	 */
8785 	for (i = 0; i < env->subprog_cnt; i++) {
8786 		bpf_prog_lock_ro(func[i]);
8787 		bpf_prog_kallsyms_add(func[i]);
8788 	}
8789 
8790 	/* Last step: make the now unused interpreter insns from the main
8791 	 * prog consistent for later dump requests, so they look the same
8792 	 * as if they had been interpreted only.
8793 	 */
8794 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
8795 		if (insn->code != (BPF_JMP | BPF_CALL) ||
8796 		    insn->src_reg != BPF_PSEUDO_CALL)
8797 			continue;
8798 		insn->off = env->insn_aux_data[i].call_imm;
8799 		subprog = find_subprog(env, i + insn->off + 1);
8800 		insn->imm = subprog;
8801 	}
8802 
8803 	prog->jited = 1;
8804 	prog->bpf_func = func[0]->bpf_func;
8805 	prog->aux->func = func;
8806 	prog->aux->func_cnt = env->subprog_cnt;
8807 	bpf_prog_free_unused_jited_linfo(prog);
8808 	return 0;
8809 out_free:
8810 	for (i = 0; i < env->subprog_cnt; i++)
8811 		if (func[i])
8812 			bpf_jit_free(func[i]);
8813 	kfree(func);
8814 out_undo_insn:
8815 	/* cleanup main prog to be interpreted */
8816 	prog->jit_requested = 0;
8817 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
8818 		if (insn->code != (BPF_JMP | BPF_CALL) ||
8819 		    insn->src_reg != BPF_PSEUDO_CALL)
8820 			continue;
8821 		insn->off = 0;
8822 		insn->imm = env->insn_aux_data[i].call_imm;
8823 	}
8824 	bpf_prog_free_jited_linfo(prog);
8825 	return err;
8826 }
8827 
8828 static int fixup_call_args(struct bpf_verifier_env *env)
8829 {
8830 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
8831 	struct bpf_prog *prog = env->prog;
8832 	struct bpf_insn *insn = prog->insnsi;
8833 	int i, depth;
8834 #endif
8835 	int err = 0;
8836 
8837 	if (env->prog->jit_requested &&
8838 	    !bpf_prog_is_dev_bound(env->prog->aux)) {
8839 		err = jit_subprogs(env);
8840 		if (err == 0)
8841 			return 0;
8842 		if (err == -EFAULT)
8843 			return err;
8844 	}
8845 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
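8845 	/* The subprogs were not JITed (or the JIT failed non-fatally): fall back
8845 	 * to the interpreter by patching each bpf-to-bpf call with its callee's
8845 	 * stack depth, so the interpreter can set up the callee's stack frame.
8845 	 */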
8846 	for (i = 0; i < prog->len; i++, insn++) {
8847 		if (insn->code != (BPF_JMP | BPF_CALL) ||
8848 		    insn->src_reg != BPF_PSEUDO_CALL)
8849 			continue;
8850 		depth = get_callee_stack_depth(env, insn, i);
8851 		if (depth < 0)
8852 			return depth;
8853 		bpf_patch_call_args(insn, depth);
8854 	}
8855 	err = 0;
8856 #endif
8857 	return err;
8858 }
8859 
8860 /* fixup insn->imm field of bpf_call instructions
8861  * and inline eligible helpers as explicit sequence of BPF instructions
8862  *
8863  * this function is called after eBPF program passed verification
8864  */
8865 static int fixup_bpf_calls(struct bpf_verifier_env *env)
8866 {
8867 	struct bpf_prog *prog = env->prog;
8868 	struct bpf_insn *insn = prog->insnsi;
8869 	const struct bpf_func_proto *fn;
8870 	const int insn_cnt = prog->len;
8871 	const struct bpf_map_ops *ops;
8872 	struct bpf_insn_aux_data *aux;
8873 	struct bpf_insn insn_buf[16];
8874 	struct bpf_prog *new_prog;
8875 	struct bpf_map *map_ptr;
8876 	int i, cnt, delta = 0;
8877 
8878 	for (i = 0; i < insn_cnt; i++, insn++) {
8879 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
8880 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
8881 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
8882 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
8883 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
8884 			struct bpf_insn mask_and_div[] = {
8885 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
8886 				/* Rx div 0 -> 0 */
8887 				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
8888 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
8889 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8890 				*insn,
8891 			};
8892 			struct bpf_insn mask_and_mod[] = {
8893 				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
8894 				/* Rx mod 0 -> Rx */
8895 				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
8896 				*insn,
8897 			};
8898 			struct bpf_insn *patchlet;
8899 
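8899 			/* Runtime semantics of the patchlets above: a zero divisor
8899 			 * skips the division and clears the destination
8899 			 * (Rx / 0 == 0), or skips the modulo so the destination is
8899 			 * left unchanged (Rx % 0 == Rx).  For 64-bit ops the leading
8899 			 * 32-bit truncating MOV is dropped, hence the is64
8899 			 * adjustments just below.
8899 			 */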
8900 			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
8901 			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
8902 				patchlet = mask_and_div + (is64 ? 1 : 0);
8903 				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
8904 			} else {
8905 				patchlet = mask_and_mod + (is64 ? 1 : 0);
8906 				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
8907 			}
8908 
8909 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
8910 			if (!new_prog)
8911 				return -ENOMEM;
8912 
8913 			delta    += cnt - 1;
8914 			env->prog = prog = new_prog;
8915 			insn      = new_prog->insnsi + i + delta;
8916 			continue;
8917 		}
8918 
8919 		if (BPF_CLASS(insn->code) == BPF_LD &&
8920 		    (BPF_MODE(insn->code) == BPF_ABS ||
8921 		     BPF_MODE(insn->code) == BPF_IND)) {
8922 			cnt = env->ops->gen_ld_abs(insn, insn_buf);
8923 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
8924 				verbose(env, "bpf verifier is misconfigured\n");
8925 				return -EINVAL;
8926 			}
8927 
8928 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
8929 			if (!new_prog)
8930 				return -ENOMEM;
8931 
8932 			delta    += cnt - 1;
8933 			env->prog = prog = new_prog;
8934 			insn      = new_prog->insnsi + i + delta;
8935 			continue;
8936 		}
8937 
8938 		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
8939 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
8940 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
8941 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
8942 			struct bpf_insn insn_buf[16];
8943 			struct bpf_insn *patch = &insn_buf[0];
8944 			bool issrc, isneg;
8945 			u32 off_reg;
8946 
8947 			aux = &env->insn_aux_data[i + delta];
8948 			if (!aux->alu_state ||
8949 			    aux->alu_state == BPF_ALU_NON_POINTER)
8950 				continue;
8951 
8952 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
8953 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
8954 				BPF_ALU_SANITIZE_SRC;
8955 
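8955 			/* The patch below computes, at run time, a mask in BPF_REG_AX
8955 			 * that is all ones when off_reg is within the verified
8955 			 * alu_limit and all zeroes when it is out of range:
8955 			 *   AX = alu_limit - 1; AX -= off_reg; AX |= off_reg;
8955 			 *   AX = -AX; AX s>>= 63;
8955 			 * ANDing off_reg with this mask forces an out-of-range
8955 			 * offset to zero, so even speculatively executed pointer
8955 			 * arithmetic stays within the verified bounds.
8955 			 */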
8956 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
8957 			if (isneg)
8958 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
8959 			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
8960 			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
8961 			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
8962 			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
8963 			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
8964 			if (issrc) {
8965 				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
8966 							 off_reg);
8967 				insn->src_reg = BPF_REG_AX;
8968 			} else {
8969 				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
8970 							 BPF_REG_AX);
8971 			}
8972 			if (isneg)
8973 				insn->code = insn->code == code_add ?
8974 					     code_sub : code_add;
8975 			*patch++ = *insn;
8976 			if (issrc && isneg)
8977 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
8978 			cnt = patch - insn_buf;
8979 
8980 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
8981 			if (!new_prog)
8982 				return -ENOMEM;
8983 
8984 			delta    += cnt - 1;
8985 			env->prog = prog = new_prog;
8986 			insn      = new_prog->insnsi + i + delta;
8987 			continue;
8988 		}
8989 
8990 		if (insn->code != (BPF_JMP | BPF_CALL))
8991 			continue;
8992 		if (insn->src_reg == BPF_PSEUDO_CALL)
8993 			continue;
8994 
8995 		if (insn->imm == BPF_FUNC_get_route_realm)
8996 			prog->dst_needed = 1;
8997 		if (insn->imm == BPF_FUNC_get_prandom_u32)
8998 			bpf_user_rnd_init_once();
8999 		if (insn->imm == BPF_FUNC_override_return)
9000 			prog->kprobe_override = 1;
9001 		if (insn->imm == BPF_FUNC_tail_call) {
9002 			/* If we tail call into other programs, we
9003 			 * cannot make any assumptions since they can
9004 			 * be replaced dynamically during runtime in
9005 			 * the program array.
9006 			 */
9007 			prog->cb_access = 1;
9008 			env->prog->aux->stack_depth = MAX_BPF_STACK;
9009 			env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
9010 
9011 			/* mark bpf_tail_call as different opcode to avoid
9012 			 * conditional branch in the interpreter for every normal
9013 			 * call and to prevent accidental JITing by a JIT compiler
9014 			 * that doesn't support bpf_tail_call yet
9015 			 */
9016 			insn->imm = 0;
9017 			insn->code = BPF_JMP | BPF_TAIL_CALL;
9018 
9019 			aux = &env->insn_aux_data[i + delta];
9020 			if (!bpf_map_ptr_unpriv(aux))
9021 				continue;
9022 
9023 			/* instead of changing every JIT dealing with tail_call
9024 			 * emit two extra insns:
9025 			 * if (index >= max_entries) goto out;
9026 			 * index &= array->index_mask;
9027 			 * to avoid out-of-bounds cpu speculation
9028 			 */
9029 			if (bpf_map_ptr_poisoned(aux)) {
9030 				verbose(env, "tail_call abusing map_ptr\n");
9031 				return -EINVAL;
9032 			}
9033 
9034 			map_ptr = BPF_MAP_PTR(aux->map_state);
9035 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9036 						  map_ptr->max_entries, 2);
9037 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9038 						    container_of(map_ptr,
9039 								 struct bpf_array,
9040 								 map)->index_mask);
9041 			insn_buf[2] = *insn;
9042 			cnt = 3;
9043 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9044 			if (!new_prog)
9045 				return -ENOMEM;
9046 
9047 			delta    += cnt - 1;
9048 			env->prog = prog = new_prog;
9049 			insn      = new_prog->insnsi + i + delta;
9050 			continue;
9051 		}
9052 
9053 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
9054 		 * and other inlining handlers are currently limited to 64 bit
9055 		 * only.
9056 		 */
9057 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
9058 		    (insn->imm == BPF_FUNC_map_lookup_elem ||
9059 		     insn->imm == BPF_FUNC_map_update_elem ||
9060 		     insn->imm == BPF_FUNC_map_delete_elem ||
9061 		     insn->imm == BPF_FUNC_map_push_elem   ||
9062 		     insn->imm == BPF_FUNC_map_pop_elem    ||
9063 		     insn->imm == BPF_FUNC_map_peek_elem)) {
9064 			aux = &env->insn_aux_data[i + delta];
9065 			if (bpf_map_ptr_poisoned(aux))
9066 				goto patch_call_imm;
9067 
9068 			map_ptr = BPF_MAP_PTR(aux->map_state);
9069 			ops = map_ptr->ops;
9070 			if (insn->imm == BPF_FUNC_map_lookup_elem &&
9071 			    ops->map_gen_lookup) {
9072 				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
9073 				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9074 					verbose(env, "bpf verifier is misconfigured\n");
9075 					return -EINVAL;
9076 				}
9077 
9078 				new_prog = bpf_patch_insn_data(env, i + delta,
9079 							       insn_buf, cnt);
9080 				if (!new_prog)
9081 					return -ENOMEM;
9082 
9083 				delta    += cnt - 1;
9084 				env->prog = prog = new_prog;
9085 				insn      = new_prog->insnsi + i + delta;
9086 				continue;
9087 			}
9088 
9089 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
9090 				     (void *(*)(struct bpf_map *map, void *key))NULL));
9091 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
9092 				     (int (*)(struct bpf_map *map, void *key))NULL));
9093 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
9094 				     (int (*)(struct bpf_map *map, void *key, void *value,
9095 					      u64 flags))NULL));
9096 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
9097 				     (int (*)(struct bpf_map *map, void *value,
9098 					      u64 flags))NULL));
9099 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
9100 				     (int (*)(struct bpf_map *map, void *value))NULL));
9101 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
9102 				     (int (*)(struct bpf_map *map, void *value))NULL));
9103 
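9103 			/* No per-map inlining applies; instead turn the generic
9103 			 * helper call into a direct call to this map's own
9103 			 * implementation by rewriting imm to the map op's address
9103 			 * relative to __bpf_call_base, just like an ordinary
9103 			 * helper fixup.
9103 			 */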
9104 			switch (insn->imm) {
9105 			case BPF_FUNC_map_lookup_elem:
9106 				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
9107 					    __bpf_call_base;
9108 				continue;
9109 			case BPF_FUNC_map_update_elem:
9110 				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
9111 					    __bpf_call_base;
9112 				continue;
9113 			case BPF_FUNC_map_delete_elem:
9114 				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
9115 					    __bpf_call_base;
9116 				continue;
9117 			case BPF_FUNC_map_push_elem:
9118 				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
9119 					    __bpf_call_base;
9120 				continue;
9121 			case BPF_FUNC_map_pop_elem:
9122 				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
9123 					    __bpf_call_base;
9124 				continue;
9125 			case BPF_FUNC_map_peek_elem:
9126 				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
9127 					    __bpf_call_base;
9128 				continue;
9129 			}
9130 
9131 			goto patch_call_imm;
9132 		}
9133 
9134 patch_call_imm:
9135 		fn = env->ops->get_func_proto(insn->imm, env->prog);
9136 		/* all functions that have a prototype and that the verifier
9137 		 * allowed programs to call must be real in-kernel functions
9138 		 */
9139 		if (!fn->func) {
9140 			verbose(env,
9141 				"kernel subsystem misconfigured func %s#%d\n",
9142 				func_id_name(insn->imm), insn->imm);
9143 			return -EFAULT;
9144 		}
9145 		insn->imm = fn->func - __bpf_call_base;
9146 	}
9147 
9148 	return 0;
9149 }
9150 
9151 static void free_states(struct bpf_verifier_env *env)
9152 {
9153 	struct bpf_verifier_state_list *sl, *sln;
9154 	int i;
9155 
9156 	sl = env->free_list;
9157 	while (sl) {
9158 		sln = sl->next;
9159 		free_verifier_state(&sl->state, false);
9160 		kfree(sl);
9161 		sl = sln;
9162 	}
9163 
9164 	if (!env->explored_states)
9165 		return;
9166 
9167 	for (i = 0; i < state_htab_size(env); i++) {
9168 		sl = env->explored_states[i];
9169 
9170 		while (sl) {
9171 			sln = sl->next;
9172 			free_verifier_state(&sl->state, false);
9173 			kfree(sl);
9174 			sl = sln;
9175 		}
9176 	}
9177 
9178 	kvfree(env->explored_states);
9179 }
9180 
9181 static void print_verification_stats(struct bpf_verifier_env *env)
9182 {
9183 	int i;
9184 
9185 	if (env->log.level & BPF_LOG_STATS) {
9186 		verbose(env, "verification time %lld usec\n",
9187 			div_u64(env->verification_time, 1000));
9188 		verbose(env, "stack depth ");
9189 		for (i = 0; i < env->subprog_cnt; i++) {
9190 			u32 depth = env->subprog_info[i].stack_depth;
9191 
9192 			verbose(env, "%d", depth);
9193 			if (i + 1 < env->subprog_cnt)
9194 				verbose(env, "+");
9195 		}
9196 		verbose(env, "\n");
9197 	}
9198 	verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9199 		"total_states %d peak_states %d mark_read %d\n",
9200 		env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9201 		env->max_states_per_insn, env->total_states,
9202 		env->peak_states, env->longest_mark_read_walk);
9203 }
9204 
9205 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
9206 	      union bpf_attr __user *uattr)
9207 {
9208 	u64 start_time = ktime_get_ns();
9209 	struct bpf_verifier_env *env;
9210 	struct bpf_verifier_log *log;
9211 	int i, len, ret = -EINVAL;
9212 	bool is_priv;
9213 
9214 	/* if there are no registered program types, no program can be valid */
9215 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
9216 		return -EINVAL;
9217 
9218 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
9219 	 * allocate/free it every time bpf_check() is called
9220 	 */
9221 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
9222 	if (!env)
9223 		return -ENOMEM;
9224 	log = &env->log;
9225 
9226 	len = (*prog)->len;
9227 	env->insn_aux_data =
9228 		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
9229 	ret = -ENOMEM;
9230 	if (!env->insn_aux_data)
9231 		goto err_free_env;
9232 	for (i = 0; i < len; i++)
9233 		env->insn_aux_data[i].orig_idx = i;
9234 	env->prog = *prog;
9235 	env->ops = bpf_verifier_ops[env->prog->type];
9236 	is_priv = capable(CAP_SYS_ADMIN);
9237 
9238 	/* grab the mutex to protect few globals used by verifier */
9239 	if (!is_priv)
9240 		mutex_lock(&bpf_verifier_lock);
9241 
9242 	if (attr->log_level || attr->log_buf || attr->log_size) {
9243 		/* user requested verbose verifier output
9244 		 * and supplied buffer to store the verification trace
9245 		 */
9246 		log->level = attr->log_level;
9247 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
9248 		log->len_total = attr->log_size;
9249 
9250 		ret = -EINVAL;
9251 		/* log attributes have to be sane */
9252 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
9253 		    !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
9254 			goto err_unlock;
9255 	}
9256 
9257 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
9258 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
9259 		env->strict_alignment = true;
9260 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
9261 		env->strict_alignment = false;
9262 
9263 	env->allow_ptr_leaks = is_priv;
9264 
9265 	ret = replace_map_fd_with_map_ptr(env);
9266 	if (ret < 0)
9267 		goto skip_full_check;
9268 
9269 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
9270 		ret = bpf_prog_offload_verifier_prep(env->prog);
9271 		if (ret)
9272 			goto skip_full_check;
9273 	}
9274 
9275 	env->explored_states = kvcalloc(state_htab_size(env),
9276 				       sizeof(struct bpf_verifier_state_list *),
9277 				       GFP_USER);
9278 	ret = -ENOMEM;
9279 	if (!env->explored_states)
9280 		goto skip_full_check;
9281 
9282 	ret = check_subprogs(env);
9283 	if (ret < 0)
9284 		goto skip_full_check;
9285 
9286 	ret = check_btf_info(env, attr, uattr);
9287 	if (ret < 0)
9288 		goto skip_full_check;
9289 
9290 	ret = check_cfg(env);
9291 	if (ret < 0)
9292 		goto skip_full_check;
9293 
9294 	ret = do_check(env);
9295 	if (env->cur_state) {
9296 		free_verifier_state(env->cur_state, true);
9297 		env->cur_state = NULL;
9298 	}
9299 
9300 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
9301 		ret = bpf_prog_offload_finalize(env);
9302 
9303 skip_full_check:
9304 	while (!pop_stack(env, NULL, NULL));
9305 	free_states(env);
9306 
9307 	if (ret == 0)
9308 		ret = check_max_stack_depth(env);
9309 
9310 	/* instruction rewrites happen after this point */
9311 	if (is_priv) {
9312 		if (ret == 0)
9313 			opt_hard_wire_dead_code_branches(env);
9314 		if (ret == 0)
9315 			ret = opt_remove_dead_code(env);
9316 		if (ret == 0)
9317 			ret = opt_remove_nops(env);
9318 	} else {
9319 		if (ret == 0)
9320 			sanitize_dead_code(env);
9321 	}
9322 
9323 	if (ret == 0)
9324 		/* program is valid, convert *(u32*)(ctx + off) accesses */
9325 		ret = convert_ctx_accesses(env);
9326 
9327 	if (ret == 0)
9328 		ret = fixup_bpf_calls(env);
9329 
9330 	/* do the 32-bit optimization after insn patching is done, so the
9331 	 * patched insns can be handled correctly.
9332 	 */
9333 	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
9334 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
9335 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
9336 								     : false;
9337 	}
9338 
9339 	if (ret == 0)
9340 		ret = fixup_call_args(env);
9341 
9342 	env->verification_time = ktime_get_ns() - start_time;
9343 	print_verification_stats(env);
9344 
9345 	if (log->level && bpf_verifier_log_full(log))
9346 		ret = -ENOSPC;
9347 	if (log->level && !log->ubuf) {
9348 		ret = -EFAULT;
9349 		goto err_release_maps;
9350 	}
9351 
9352 	if (ret == 0 && env->used_map_cnt) {
9353 		/* if program passed verifier, update used_maps in bpf_prog_info */
9354 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
9355 							  sizeof(env->used_maps[0]),
9356 							  GFP_KERNEL);
9357 
9358 		if (!env->prog->aux->used_maps) {
9359 			ret = -ENOMEM;
9360 			goto err_release_maps;
9361 		}
9362 
9363 		memcpy(env->prog->aux->used_maps, env->used_maps,
9364 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
9365 		env->prog->aux->used_map_cnt = env->used_map_cnt;
9366 
9367 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
9368 		 * bpf_ld_imm64 instructions
9369 		 */
9370 		convert_pseudo_ld_imm64(env);
9371 	}
9372 
9373 	if (ret == 0)
9374 		adjust_btf_func(env);
9375 
9376 err_release_maps:
9377 	if (!env->prog->aux->used_maps)
9378 		/* if we didn't copy map pointers into bpf_prog_info, release
9379 		 * them now. Otherwise free_used_maps() will release them.
9380 		 */
9381 		release_maps(env);
9382 	*prog = env->prog;
9383 err_unlock:
9384 	if (!is_priv)
9385 		mutex_unlock(&bpf_verifier_lock);
9386 	vfree(env->insn_aux_data);
9387 err_free_env:
9388 	kfree(env);
9389 	return ret;
9390 }
9391