xref: /linux/kernel/bpf/verifier.c (revision 2a171788ba7bb61995e98e8163204fc7880f63b2)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  * Copyright (c) 2016 Facebook
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/filter.h>
19 #include <net/netlink.h>
20 #include <linux/file.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stringify.h>
23 
24 #include "disasm.h"
25 
26 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
27 #define BPF_PROG_TYPE(_id, _name) \
28 	[_id] = & _name ## _verifier_ops,
29 #define BPF_MAP_TYPE(_id, _ops)
30 #include <linux/bpf_types.h>
31 #undef BPF_PROG_TYPE
32 #undef BPF_MAP_TYPE
33 };
34 
35 /* bpf_check() is a static code analyzer that walks the eBPF program
36  * instruction by instruction and updates register/stack state.
37  * All paths of conditional branches are analyzed until 'bpf_exit' insn.
38  *
39  * The first pass is a depth-first search that checks the program is a DAG.
40  * It rejects the following programs:
41  * - programs larger than BPF_MAXINSNS insns
42  * - programs containing a loop (detected via a back-edge)
43  * - programs with unreachable insns (must be one function, not a forest)
44  * - programs with out-of-bounds or malformed jumps
45  * The second pass descends through all possible paths from the 1st insn.
46  * Since it analyzes all paths through the program, the number of analyzed
47  * insns is limited to 128k (BPF_COMPLEXITY_LIMIT_INSNS), which may be hit
48  * even if the program has fewer than 4K insns but too many branches that
49  * change the stack/regs. The number of 'branches to be analyzed' is limited to 1k.
50  *
51  * On entry to each instruction, each register has a type, and the instruction
52  * changes the types of the registers depending on instruction semantics.
53  * If the instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then the type
54  * of R5 is copied to R1.
55  *
56  * All registers are 64-bit.
57  * R0 - return value register
58  * R1-R5 - argument passing registers
59  * R6-R9 - callee-saved registers
60  * R10 - read-only frame pointer
61  *
62  * At the start of BPF program the register R1 contains a pointer to bpf_context
63  * and has type PTR_TO_CTX.
64  *
65  * The verifier tracks arithmetic operations on pointers, e.g. in:
66  *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
67  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
68  * The 1st insn copies R10 (which has type PTR_TO_STACK, the frame pointer)
69  * into R1, and the 2nd arithmetic instruction is pattern matched to
70  * recognize that it constructs a pointer to some element within the stack.
71  * So after the 2nd insn, the register R1 has type PTR_TO_STACK
72  * (and the -20 constant is saved for further stack bounds checking),
73  * meaning that this reg is a pointer into the stack plus a known constant offset.
74  *
75  * Most of the time the registers have SCALAR_VALUE type, which
76  * means the register has some value, but it's not a valid pointer.
77  * (e.g. a pointer plus a pointer becomes a SCALAR_VALUE).
78  *
79  * When verifier sees load or store instructions the type of base register
80  * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
81  * types recognized by check_mem_access() function.
82  *
83  * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
84  * and the range of [ptr, ptr + map's value_size) is accessible.
85  *
86  * Registers used to pass values to function calls are checked against
87  * the function's argument constraints.
88  *
89  * ARG_PTR_TO_MAP_KEY is one such argument constraint.
90  * It means that the register type passed to this function must be
91  * PTR_TO_STACK and it will be used inside the function as
92  * 'pointer to map element key'
93  *
94  * For example the argument constraints for bpf_map_lookup_elem():
95  *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
96  *   .arg1_type = ARG_CONST_MAP_PTR,
97  *   .arg2_type = ARG_PTR_TO_MAP_KEY,
98  *
99  * ret_type says that this function returns 'pointer to map elem value or null'.
100  * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
101  * and the 2nd argument to be a pointer to the stack, which will be used inside
102  * the helper function as a pointer to the map element key.
103  *
104  * On the kernel side the helper function looks like:
105  * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
106  * {
107  *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
108  *    void *key = (void *) (unsigned long) r2;
109  *    void *value;
110  *
111  *    here the kernel can access the 'key' and 'map' pointers safely, knowing
112  *    that [key, key + map->key_size) bytes are valid and were initialized on
113  *    the stack of the eBPF program.
114  * }
115  *
116  * Corresponding eBPF program may look like:
117  *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is PTR_TO_STACK
118  *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 is PTR_TO_STACK with off=-4
119  *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
120  *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
121  * Here the verifier looks at the prototype of map_lookup_elem() and sees:
122  * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok.
123  * Now the verifier knows that this map has a key of R1->map_ptr->key_size bytes.
124  *
125  * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far.
126  * Now the verifier checks that [R2, R2 + map's key_size) is within stack limits
127  * and was initialized prior to this call.
128  * If that's ok, the verifier allows this BPF_CALL insn and looks at
129  * .ret_type, which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
130  * R0->type = PTR_TO_MAP_VALUE_OR_NULL, which means bpf_map_lookup_elem()
131  * returns either a pointer to a map value or NULL.
132  *
133  * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
134  * insn, the register holding that pointer in the true branch changes state to
135  * PTR_TO_MAP_VALUE and the same register changes state to a known-zero
136  * SCALAR_VALUE in the false branch. See check_cond_jmp_op().
137  *
138  * After the call, R0 is set to the return type of the function and registers R1-R5
139  * are set to NOT_INIT to indicate that they are no longer readable.
140  */
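
/* Illustrative example (editor's addition, not part of the original file):
 * combining the pieces above into a minimal program the verifier accepts,
 * assuming 'map_fd' refers to a map with key_size == 4:
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),           // initialize key at fp-4
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),          // R2 = fp-4 (PTR_TO_STACK)
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),               // R1 = CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),          // NULL check on R0
 *    BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),    // safe: R0 is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 *
 * Without the BPF_ST_MEM initialization or the NULL check the verifier
 * would reject the program ('invalid indirect read from stack' and
 * "invalid mem access 'map_value_or_null'" respectively).
 */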
141 
142 /* verifier_state + insn_idx are pushed to the stack when a branch is encountered */
143 struct bpf_verifier_stack_elem {
144 	/* verifier state is 'st'
145 	 * before processing instruction 'insn_idx'
146 	 * and after processing instruction 'prev_insn_idx'
147 	 */
148 	struct bpf_verifier_state st;
149 	int insn_idx;
150 	int prev_insn_idx;
151 	struct bpf_verifier_stack_elem *next;
152 };
153 
154 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
155 #define BPF_COMPLEXITY_LIMIT_STACK	1024
156 
157 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
158 
159 struct bpf_call_arg_meta {
160 	struct bpf_map *map_ptr;
161 	bool raw_mode;
162 	bool pkt_access;
163 	int regno;
164 	int access_size;
165 };
166 
167 static DEFINE_MUTEX(bpf_verifier_lock);
168 
169 /* log_level controls verbosity level of eBPF verifier.
170  * verbose() is used to dump the verification trace to the log, so the user
171  * can figure out what's wrong with the program.
172  */
173 static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
174 				   const char *fmt, ...)
175 {
176 	struct bpf_verifer_log *log = &env->log;
177 	unsigned int n;
178 	va_list args;
179 
180 	if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
181 		return;
182 
183 	va_start(args, fmt);
184 	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
185 	va_end(args);
186 
187 	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
188 		  "verifier log line truncated - local buffer too short\n");
189 
190 	n = min(log->len_total - log->len_used - 1, n);
191 	log->kbuf[n] = '\0';
192 
193 	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
194 		log->len_used += n;
195 	else
196 		log->ubuf = NULL;
197 }
198 
199 static bool type_is_pkt_pointer(enum bpf_reg_type type)
200 {
201 	return type == PTR_TO_PACKET ||
202 	       type == PTR_TO_PACKET_META;
203 }
204 
205 /* string representation of 'enum bpf_reg_type' */
206 static const char * const reg_type_str[] = {
207 	[NOT_INIT]		= "?",
208 	[SCALAR_VALUE]		= "inv",
209 	[PTR_TO_CTX]		= "ctx",
210 	[CONST_PTR_TO_MAP]	= "map_ptr",
211 	[PTR_TO_MAP_VALUE]	= "map_value",
212 	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
213 	[PTR_TO_STACK]		= "fp",
214 	[PTR_TO_PACKET]		= "pkt",
215 	[PTR_TO_PACKET_META]	= "pkt_meta",
216 	[PTR_TO_PACKET_END]	= "pkt_end",
217 };
218 
219 static void print_verifier_state(struct bpf_verifier_env *env,
220 				 struct bpf_verifier_state *state)
221 {
222 	struct bpf_reg_state *reg;
223 	enum bpf_reg_type t;
224 	int i;
225 
226 	for (i = 0; i < MAX_BPF_REG; i++) {
227 		reg = &state->regs[i];
228 		t = reg->type;
229 		if (t == NOT_INIT)
230 			continue;
231 		verbose(env, " R%d=%s", i, reg_type_str[t]);
232 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
233 		    tnum_is_const(reg->var_off)) {
234 			/* reg->off should be 0 for SCALAR_VALUE */
235 			verbose(env, "%lld", reg->var_off.value + reg->off);
236 		} else {
237 			verbose(env, "(id=%d", reg->id);
238 			if (t != SCALAR_VALUE)
239 				verbose(env, ",off=%d", reg->off);
240 			if (type_is_pkt_pointer(t))
241 				verbose(env, ",r=%d", reg->range);
242 			else if (t == CONST_PTR_TO_MAP ||
243 				 t == PTR_TO_MAP_VALUE ||
244 				 t == PTR_TO_MAP_VALUE_OR_NULL)
245 				verbose(env, ",ks=%d,vs=%d",
246 					reg->map_ptr->key_size,
247 					reg->map_ptr->value_size);
248 			if (tnum_is_const(reg->var_off)) {
249 				/* Typically an immediate SCALAR_VALUE, but
250 				 * could be a pointer whose offset is too big
251 				 * for reg->off
252 				 */
253 				verbose(env, ",imm=%llx", reg->var_off.value);
254 			} else {
255 				if (reg->smin_value != reg->umin_value &&
256 				    reg->smin_value != S64_MIN)
257 					verbose(env, ",smin_value=%lld",
258 						(long long)reg->smin_value);
259 				if (reg->smax_value != reg->umax_value &&
260 				    reg->smax_value != S64_MAX)
261 					verbose(env, ",smax_value=%lld",
262 						(long long)reg->smax_value);
263 				if (reg->umin_value != 0)
264 					verbose(env, ",umin_value=%llu",
265 						(unsigned long long)reg->umin_value);
266 				if (reg->umax_value != U64_MAX)
267 					verbose(env, ",umax_value=%llu",
268 						(unsigned long long)reg->umax_value);
269 				if (!tnum_is_unknown(reg->var_off)) {
270 					char tn_buf[48];
271 
272 					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
273 					verbose(env, ",var_off=%s", tn_buf);
274 				}
275 			}
276 			verbose(env, ")");
277 		}
278 	}
279 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
280 		if (state->stack[i].slot_type[0] == STACK_SPILL)
281 			verbose(env, " fp%d=%s",
282 				-MAX_BPF_STACK + i * BPF_REG_SIZE,
283 				reg_type_str[state->stack[i].spilled_ptr.type]);
284 	}
285 	verbose(env, "\n");
286 }
287 
288 static int copy_stack_state(struct bpf_verifier_state *dst,
289 			    const struct bpf_verifier_state *src)
290 {
291 	if (!src->stack)
292 		return 0;
293 	if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
294 		/* internal bug, make state invalid to reject the program */
295 		memset(dst, 0, sizeof(*dst));
296 		return -EFAULT;
297 	}
298 	memcpy(dst->stack, src->stack,
299 	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
300 	return 0;
301 }
302 
303 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to
304  * make it consume a minimal amount of memory. A stack write from the
305  * program makes check_stack_write() call into realloc_verifier_state()
306  * to grow the stack size. Note there is a non-zero 'parent' pointer inside
307  * bpf_verifier_state which this function copies over; it points to the
308  * previous bpf_verifier_state, which is never reallocated.
309  */
310 static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
311 				  bool copy_old)
312 {
313 	u32 old_size = state->allocated_stack;
314 	struct bpf_stack_state *new_stack;
315 	int slot = size / BPF_REG_SIZE;
316 
317 	if (size <= old_size || !size) {
318 		if (copy_old)
319 			return 0;
320 		state->allocated_stack = slot * BPF_REG_SIZE;
321 		if (!size && old_size) {
322 			kfree(state->stack);
323 			state->stack = NULL;
324 		}
325 		return 0;
326 	}
327 	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
328 				  GFP_KERNEL);
329 	if (!new_stack)
330 		return -ENOMEM;
331 	if (copy_old) {
332 		if (state->stack)
333 			memcpy(new_stack, state->stack,
334 			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
335 		memset(new_stack + old_size / BPF_REG_SIZE, 0,
336 		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
337 	}
338 	state->allocated_stack = slot * BPF_REG_SIZE;
339 	kfree(state->stack);
340 	state->stack = new_stack;
341 	return 0;
342 }
343 
344 static void free_verifier_state(struct bpf_verifier_state *state,
345 				bool free_self)
346 {
347 	kfree(state->stack);
348 	if (free_self)
349 		kfree(state);
350 }
351 
352 /* copy verifier state from src to dst growing dst stack space
353  * when necessary to accommodate larger src stack
354  */
355 static int copy_verifier_state(struct bpf_verifier_state *dst,
356 			       const struct bpf_verifier_state *src)
357 {
358 	int err;
359 
360 	err = realloc_verifier_state(dst, src->allocated_stack, false);
361 	if (err)
362 		return err;
363 	memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
364 	return copy_stack_state(dst, src);
365 }
366 
367 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
368 		     int *insn_idx)
369 {
370 	struct bpf_verifier_state *cur = env->cur_state;
371 	struct bpf_verifier_stack_elem *elem, *head = env->head;
372 	int err;
373 
374 	if (env->head == NULL)
375 		return -ENOENT;
376 
377 	if (cur) {
378 		err = copy_verifier_state(cur, &head->st);
379 		if (err)
380 			return err;
381 	}
382 	if (insn_idx)
383 		*insn_idx = head->insn_idx;
384 	if (prev_insn_idx)
385 		*prev_insn_idx = head->prev_insn_idx;
386 	elem = head->next;
387 	free_verifier_state(&head->st, false);
388 	kfree(head);
389 	env->head = elem;
390 	env->stack_size--;
391 	return 0;
392 }
393 
394 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
395 					     int insn_idx, int prev_insn_idx)
396 {
397 	struct bpf_verifier_state *cur = env->cur_state;
398 	struct bpf_verifier_stack_elem *elem;
399 	int err;
400 
401 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
402 	if (!elem)
403 		goto err;
404 
405 	elem->insn_idx = insn_idx;
406 	elem->prev_insn_idx = prev_insn_idx;
407 	elem->next = env->head;
408 	env->head = elem;
409 	env->stack_size++;
410 	err = copy_verifier_state(&elem->st, cur);
411 	if (err)
412 		goto err;
413 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
414 		verbose(env, "BPF program is too complex\n");
415 		goto err;
416 	}
417 	return &elem->st;
418 err:
419 	/* pop all elements and return */
420 	while (!pop_stack(env, NULL, NULL));
421 	return NULL;
422 }
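
/* Editor's note (illustrative sketch, not part of the original file): a
 * conditional jump is explored by pushing one outcome and continuing down
 * the other, conceptually as in check_cond_jmp_op():
 *
 *    other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
 *    if (!other_branch)
 *            return -EFAULT;
 *    // ... adjust register state in both branches, then keep walking the
 *    // fall-through path; the pushed state is resumed later when the
 *    // do_check() main loop calls pop_stack().
 */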
423 
424 #define CALLER_SAVED_REGS 6
425 static const int caller_saved[CALLER_SAVED_REGS] = {
426 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
427 };
428 
429 static void __mark_reg_not_init(struct bpf_reg_state *reg);
430 
431 /* Mark the unknown part of a register (variable offset or scalar value) as
432  * known to have the value @imm.
433  */
434 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
435 {
436 	reg->id = 0;
437 	reg->var_off = tnum_const(imm);
438 	reg->smin_value = (s64)imm;
439 	reg->smax_value = (s64)imm;
440 	reg->umin_value = imm;
441 	reg->umax_value = imm;
442 }
443 
444 /* Mark the 'variable offset' part of a register as zero.  This should be
445  * used only on registers holding a pointer type.
446  */
447 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
448 {
449 	__mark_reg_known(reg, 0);
450 }
451 
452 static void mark_reg_known_zero(struct bpf_verifier_env *env,
453 				struct bpf_reg_state *regs, u32 regno)
454 {
455 	if (WARN_ON(regno >= MAX_BPF_REG)) {
456 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
457 		/* Something bad happened, let's kill all regs */
458 		for (regno = 0; regno < MAX_BPF_REG; regno++)
459 			__mark_reg_not_init(regs + regno);
460 		return;
461 	}
462 	__mark_reg_known_zero(regs + regno);
463 }
464 
465 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
466 {
467 	return type_is_pkt_pointer(reg->type);
468 }
469 
470 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
471 {
472 	return reg_is_pkt_pointer(reg) ||
473 	       reg->type == PTR_TO_PACKET_END;
474 }
475 
476 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
477 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
478 				    enum bpf_reg_type which)
479 {
480 	/* The register can already have a range from prior markings.
481 	 * This is fine as long as it hasn't been advanced from its
482 	 * origin.
483 	 */
484 	return reg->type == which &&
485 	       reg->id == 0 &&
486 	       reg->off == 0 &&
487 	       tnum_equals_const(reg->var_off, 0);
488 }
489 
490 /* Attempts to improve min/max values based on var_off information */
491 static void __update_reg_bounds(struct bpf_reg_state *reg)
492 {
493 	/* min signed is max(sign bit) | min(other bits) */
494 	reg->smin_value = max_t(s64, reg->smin_value,
495 				reg->var_off.value | (reg->var_off.mask & S64_MIN));
496 	/* max signed is min(sign bit) | max(other bits) */
497 	reg->smax_value = min_t(s64, reg->smax_value,
498 				reg->var_off.value | (reg->var_off.mask & S64_MAX));
499 	reg->umin_value = max(reg->umin_value, reg->var_off.value);
500 	reg->umax_value = min(reg->umax_value,
501 			      reg->var_off.value | reg->var_off.mask);
502 }
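
/* Editor's example (illustrative): with var_off = (value=0x8, mask=0x3),
 * bits 0-1 are unknown and bit 3 is known to be set, so the register can
 * only hold 8, 9, 10 or 11.  __update_reg_bounds() accordingly raises
 * umin_value to at least 8 (value, i.e. all unknown bits cleared) and
 * lowers umax_value to at most 11 (value | mask, all unknown bits set).
 */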
503 
504 /* Uses signed min/max values to inform unsigned, and vice-versa */
505 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
506 {
507 	/* Learn sign from signed bounds.
508 	 * If we cannot cross the sign boundary, then signed and unsigned bounds
509 	 * are the same, so combine.  This works even in the negative case, e.g.
510 	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
511 	 */
512 	if (reg->smin_value >= 0 || reg->smax_value < 0) {
513 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
514 							  reg->umin_value);
515 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
516 							  reg->umax_value);
517 		return;
518 	}
519 	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
520 	 * boundary, so we must be careful.
521 	 */
522 	if ((s64)reg->umax_value >= 0) {
523 		/* Positive.  We can't learn anything from the smin, but smax
524 		 * is positive, hence safe.
525 		 */
526 		reg->smin_value = reg->umin_value;
527 		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
528 							  reg->umax_value);
529 	} else if ((s64)reg->umin_value < 0) {
530 		/* Negative.  We can't learn anything from the smax, but smin
531 		 * is negative, hence safe.
532 		 */
533 		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
534 							  reg->umin_value);
535 		reg->smax_value = reg->umax_value;
536 	}
537 }
538 
539 /* Attempts to improve var_off based on unsigned min/max information */
540 static void __reg_bound_offset(struct bpf_reg_state *reg)
541 {
542 	reg->var_off = tnum_intersect(reg->var_off,
543 				      tnum_range(reg->umin_value,
544 						 reg->umax_value));
545 }
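
/* Editor's example (illustrative): if umin_value = 4 and umax_value = 7,
 * tnum_range() yields (value=0x4, mask=0x3), i.e. bit 2 known set and
 * bits 0-1 unknown; intersecting that with the existing var_off can only
 * make the tracked bits more precise, never less.
 */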
546 
547 /* Reset the min/max bounds of a register */
548 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
549 {
550 	reg->smin_value = S64_MIN;
551 	reg->smax_value = S64_MAX;
552 	reg->umin_value = 0;
553 	reg->umax_value = U64_MAX;
554 }
555 
556 /* Mark a register as having a completely unknown (scalar) value. */
557 static void __mark_reg_unknown(struct bpf_reg_state *reg)
558 {
559 	reg->type = SCALAR_VALUE;
560 	reg->id = 0;
561 	reg->off = 0;
562 	reg->var_off = tnum_unknown;
563 	__mark_reg_unbounded(reg);
564 }
565 
566 static void mark_reg_unknown(struct bpf_verifier_env *env,
567 			     struct bpf_reg_state *regs, u32 regno)
568 {
569 	if (WARN_ON(regno >= MAX_BPF_REG)) {
570 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
571 		/* Something bad happened, let's kill all regs */
572 		for (regno = 0; regno < MAX_BPF_REG; regno++)
573 			__mark_reg_not_init(regs + regno);
574 		return;
575 	}
576 	__mark_reg_unknown(regs + regno);
577 }
578 
579 static void __mark_reg_not_init(struct bpf_reg_state *reg)
580 {
581 	__mark_reg_unknown(reg);
582 	reg->type = NOT_INIT;
583 }
584 
585 static void mark_reg_not_init(struct bpf_verifier_env *env,
586 			      struct bpf_reg_state *regs, u32 regno)
587 {
588 	if (WARN_ON(regno >= MAX_BPF_REG)) {
589 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
590 		/* Something bad happened, let's kill all regs */
591 		for (regno = 0; regno < MAX_BPF_REG; regno++)
592 			__mark_reg_not_init(regs + regno);
593 		return;
594 	}
595 	__mark_reg_not_init(regs + regno);
596 }
597 
598 static void init_reg_state(struct bpf_verifier_env *env,
599 			   struct bpf_reg_state *regs)
600 {
601 	int i;
602 
603 	for (i = 0; i < MAX_BPF_REG; i++) {
604 		mark_reg_not_init(env, regs, i);
605 		regs[i].live = REG_LIVE_NONE;
606 	}
607 
608 	/* frame pointer */
609 	regs[BPF_REG_FP].type = PTR_TO_STACK;
610 	mark_reg_known_zero(env, regs, BPF_REG_FP);
611 
612 	/* 1st arg to a function */
613 	regs[BPF_REG_1].type = PTR_TO_CTX;
614 	mark_reg_known_zero(env, regs, BPF_REG_1);
615 }
616 
617 enum reg_arg_type {
618 	SRC_OP,		/* register is used as source operand */
619 	DST_OP,		/* register is used as destination operand */
620 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
621 };
622 
623 static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
624 {
625 	struct bpf_verifier_state *parent = state->parent;
626 
627 	if (regno == BPF_REG_FP)
628 		/* We don't need to worry about FP liveness because it's read-only */
629 		return;
630 
631 	while (parent) {
632 		/* if read wasn't screened by an earlier write ... */
633 		if (state->regs[regno].live & REG_LIVE_WRITTEN)
634 			break;
635 		/* ... then we depend on parent's value */
636 		parent->regs[regno].live |= REG_LIVE_READ;
637 		state = parent;
638 		parent = state->parent;
639 	}
640 }
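
/* Editor's example (illustrative, not part of the original file): in
 *
 *    BPF_MOV64_IMM(BPF_REG_6, 0),             // R6 gets REG_LIVE_WRITTEN
 *    ...
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2),   // reads R6
 *
 * the read propagates REG_LIVE_READ into each parent state until it hits
 * a state in which R6 carries REG_LIVE_WRITTEN, telling the state-pruning
 * logic which registers' values were actually relevant.
 */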
641 
642 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
643 			 enum reg_arg_type t)
644 {
645 	struct bpf_reg_state *regs = env->cur_state->regs;
646 
647 	if (regno >= MAX_BPF_REG) {
648 		verbose(env, "R%d is invalid\n", regno);
649 		return -EINVAL;
650 	}
651 
652 	if (t == SRC_OP) {
653 		/* check whether register used as source operand can be read */
654 		if (regs[regno].type == NOT_INIT) {
655 			verbose(env, "R%d !read_ok\n", regno);
656 			return -EACCES;
657 		}
658 		mark_reg_read(env->cur_state, regno);
659 	} else {
660 		/* check whether register used as dest operand can be written to */
661 		if (regno == BPF_REG_FP) {
662 			verbose(env, "frame pointer is read only\n");
663 			return -EACCES;
664 		}
665 		regs[regno].live |= REG_LIVE_WRITTEN;
666 		if (t == DST_OP)
667 			mark_reg_unknown(env, regs, regno);
668 	}
669 	return 0;
670 }
671 
672 static bool is_spillable_regtype(enum bpf_reg_type type)
673 {
674 	switch (type) {
675 	case PTR_TO_MAP_VALUE:
676 	case PTR_TO_MAP_VALUE_OR_NULL:
677 	case PTR_TO_STACK:
678 	case PTR_TO_CTX:
679 	case PTR_TO_PACKET:
680 	case PTR_TO_PACKET_META:
681 	case PTR_TO_PACKET_END:
682 	case CONST_PTR_TO_MAP:
683 		return true;
684 	default:
685 		return false;
686 	}
687 }
688 
689 /* check_stack_read/write functions track spill/fill of registers;
690  * stack boundary and alignment are checked in check_mem_access().
691  */
692 static int check_stack_write(struct bpf_verifier_env *env,
693 			     struct bpf_verifier_state *state, int off,
694 			     int size, int value_regno)
695 {
696 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
697 
698 	err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
699 				     true);
700 	if (err)
701 		return err;
702 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
703 	 * so it's aligned access and [off, off + size) are within stack limits
704 	 */
705 	if (!env->allow_ptr_leaks &&
706 	    state->stack[spi].slot_type[0] == STACK_SPILL &&
707 	    size != BPF_REG_SIZE) {
708 		verbose(env, "attempt to corrupt spilled pointer on stack\n");
709 		return -EACCES;
710 	}
711 
712 	if (value_regno >= 0 &&
713 	    is_spillable_regtype(state->regs[value_regno].type)) {
714 
715 		/* register containing pointer is being spilled into stack */
716 		if (size != BPF_REG_SIZE) {
717 			verbose(env, "invalid size of register spill\n");
718 			return -EACCES;
719 		}
720 
721 		/* save register state */
722 		state->stack[spi].spilled_ptr = state->regs[value_regno];
723 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
724 
725 		for (i = 0; i < BPF_REG_SIZE; i++)
726 			state->stack[spi].slot_type[i] = STACK_SPILL;
727 	} else {
728 		/* regular write of data into stack */
729 		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
730 
731 		for (i = 0; i < size; i++)
732 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
733 				STACK_MISC;
734 	}
735 	return 0;
736 }
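
/* Editor's example (illustrative): spilling and filling a pointer through
 * the stack, assuming R1 still holds the PTR_TO_CTX passed in at entry:
 *
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill: 8-byte store, STACK_SPILL
 *    BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), // fill: R2 becomes PTR_TO_CTX again
 *
 * A narrower store of the same register, e.g. BPF_STX_MEM(BPF_W, ...),
 * would be rejected with "invalid size of register spill" because pointers
 * must be spilled with full BPF_REG_SIZE accesses.
 */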
737 
738 static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
739 {
740 	struct bpf_verifier_state *parent = state->parent;
741 
742 	while (parent) {
743 		/* if read wasn't screened by an earlier write ... */
744 		if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
745 			break;
746 		/* ... then we depend on parent's value */
747 		parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
748 		state = parent;
749 		parent = state->parent;
750 	}
751 }
752 
753 static int check_stack_read(struct bpf_verifier_env *env,
754 			    struct bpf_verifier_state *state, int off, int size,
755 			    int value_regno)
756 {
757 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
758 	u8 *stype;
759 
760 	if (state->allocated_stack <= slot) {
761 		verbose(env, "invalid read from stack off %d+0 size %d\n",
762 			off, size);
763 		return -EACCES;
764 	}
765 	stype = state->stack[spi].slot_type;
766 
767 	if (stype[0] == STACK_SPILL) {
768 		if (size != BPF_REG_SIZE) {
769 			verbose(env, "invalid size of register spill\n");
770 			return -EACCES;
771 		}
772 		for (i = 1; i < BPF_REG_SIZE; i++) {
773 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
774 				verbose(env, "corrupted spill memory\n");
775 				return -EACCES;
776 			}
777 		}
778 
779 		if (value_regno >= 0) {
780 			/* restore register state from stack */
781 			state->regs[value_regno] = state->stack[spi].spilled_ptr;
782 			mark_stack_slot_read(state, spi);
783 		}
784 		return 0;
785 	} else {
786 		for (i = 0; i < size; i++) {
787 			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
788 				verbose(env, "invalid read from stack off %d+%d size %d\n",
789 					off, i, size);
790 				return -EACCES;
791 			}
792 		}
793 		if (value_regno >= 0)
794 			/* have read misc data from the stack */
795 			mark_reg_unknown(env, state->regs, value_regno);
796 		return 0;
797 	}
798 }
799 
800 /* check read/write into map element returned by bpf_map_lookup_elem() */
801 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
802 			    int size)
803 {
804 	struct bpf_reg_state *regs = cur_regs(env);
805 	struct bpf_map *map = regs[regno].map_ptr;
806 
807 	if (off < 0 || size <= 0 || off + size > map->value_size) {
808 		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
809 			map->value_size, off, size);
810 		return -EACCES;
811 	}
812 	return 0;
813 }
814 
815 /* check read/write into a map element with possible variable offset */
816 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
817 			    int off, int size)
818 {
819 	struct bpf_verifier_state *state = env->cur_state;
820 	struct bpf_reg_state *reg = &state->regs[regno];
821 	int err;
822 
823 	/* We may have adjusted the register to this map value, so we
824 	 * need to try adding each of min_value and max_value to off
825 	 * to make sure our theoretical access will be safe.
826 	 */
827 	if (env->log.level)
828 		print_verifier_state(env, state);
829 	/* The minimum value is only important with signed
830 	 * comparisons where we can't assume the floor of a
831 	 * value is 0.  If we are using signed variables for our
832 	 * indexes, we need to make sure that whatever we use
833 	 * will have a set floor within our range.
834 	 */
835 	if (reg->smin_value < 0) {
836 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
837 			regno);
838 		return -EACCES;
839 	}
840 	err = __check_map_access(env, regno, reg->smin_value + off, size);
841 	if (err) {
842 		verbose(env, "R%d min value is outside of the array range\n",
843 			regno);
844 		return err;
845 	}
846 
847 	/* If we haven't set a max value then we need to bail since we can't be
848 	 * sure we won't do bad things.
849 	 * If reg->umax_value + off could overflow, treat that as unbounded too.
850 	 */
851 	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
852 		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
853 			regno);
854 		return -EACCES;
855 	}
856 	err = __check_map_access(env, regno, reg->umax_value + off, size);
857 	if (err)
858 		verbose(env, "R%d max value is outside of the array range\n",
859 			regno);
860 	return err;
861 }
862 
863 #define MAX_PACKET_OFF 0xffff
864 
865 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
866 				       const struct bpf_call_arg_meta *meta,
867 				       enum bpf_access_type t)
868 {
869 	switch (env->prog->type) {
870 	case BPF_PROG_TYPE_LWT_IN:
871 	case BPF_PROG_TYPE_LWT_OUT:
872 		/* dst_input() and dst_output() can't write for now */
873 		if (t == BPF_WRITE)
874 			return false;
875 		/* fallthrough */
876 	case BPF_PROG_TYPE_SCHED_CLS:
877 	case BPF_PROG_TYPE_SCHED_ACT:
878 	case BPF_PROG_TYPE_XDP:
879 	case BPF_PROG_TYPE_LWT_XMIT:
880 	case BPF_PROG_TYPE_SK_SKB:
881 		if (meta)
882 			return meta->pkt_access;
883 
884 		env->seen_direct_write = true;
885 		return true;
886 	default:
887 		return false;
888 	}
889 }
890 
891 static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
892 				 int off, int size)
893 {
894 	struct bpf_reg_state *regs = cur_regs(env);
895 	struct bpf_reg_state *reg = &regs[regno];
896 
897 	if (off < 0 || size <= 0 || (u64)off + size > reg->range) {
898 		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
899 			off, size, regno, reg->id, reg->off, reg->range);
900 		return -EACCES;
901 	}
902 	return 0;
903 }
904 
905 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
906 			       int size)
907 {
908 	struct bpf_reg_state *regs = cur_regs(env);
909 	struct bpf_reg_state *reg = &regs[regno];
910 	int err;
911 
912 	/* We may have added a variable offset to the packet pointer; but any
913 	 * reg->range we have comes after that.  We are only checking the fixed
914 	 * offset.
915 	 */
916 
917 	/* We don't allow negative numbers, because we aren't tracking enough
918 	 * detail to prove they're safe.
919 	 */
920 	if (reg->smin_value < 0) {
921 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
922 			regno);
923 		return -EACCES;
924 	}
925 	err = __check_packet_access(env, regno, off, size);
926 	if (err) {
927 		verbose(env, "R%d offset is outside of the packet\n", regno);
928 		return err;
929 	}
930 	return err;
931 }
932 
933 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
934 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
935 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
936 {
937 	struct bpf_insn_access_aux info = {
938 		.reg_type = *reg_type,
939 	};
940 
941 	if (env->ops->is_valid_access &&
942 	    env->ops->is_valid_access(off, size, t, &info)) {
943 		/* A non zero info.ctx_field_size indicates that this field is a
944 		 * candidate for later verifier transformation to load the whole
945 		 * field and then apply a mask when accessed with a narrower
946 		 * access than actual ctx access size. A zero info.ctx_field_size
947 		 * will only allow for whole field access and rejects any other
948 		 * type of narrower access.
949 		 */
950 		*reg_type = info.reg_type;
951 
952 		if (env->analyzer_ops)
953 			return 0;
954 
955 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
956 		/* remember the offset of last byte accessed in ctx */
957 		if (env->prog->aux->max_ctx_offset < off + size)
958 			env->prog->aux->max_ctx_offset = off + size;
959 		return 0;
960 	}
961 
962 	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
963 	return -EACCES;
964 }
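
/* Editor's note (illustrative assumption): e.g. a 1-byte load from a 4-byte
 * context field can be allowed by the callback setting info.ctx_field_size = 4;
 * a later verifier transformation then rewrites the narrow load into a full
 * field load plus a mask of the requested bytes, instead of rejecting it.
 */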
965 
966 static bool __is_pointer_value(bool allow_ptr_leaks,
967 			       const struct bpf_reg_state *reg)
968 {
969 	if (allow_ptr_leaks)
970 		return false;
971 
972 	return reg->type != SCALAR_VALUE;
973 }
974 
975 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
976 {
977 	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
978 }
979 
980 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
981 				   const struct bpf_reg_state *reg,
982 				   int off, int size, bool strict)
983 {
984 	struct tnum reg_off;
985 	int ip_align;
986 
987 	/* Byte size accesses are always allowed. */
988 	if (!strict || size == 1)
989 		return 0;
990 
991 	/* For platforms that do not have a Kconfig enabling
992 	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
993 	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
994 	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
995 	 * to this code only in strict mode where we want to emulate
996 	 * the NET_IP_ALIGN==2 checking.  Therefore use an
997 	 * unconditional IP align value of '2'.
998 	 */
999 	ip_align = 2;
1000 
1001 	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1002 	if (!tnum_is_aligned(reg_off, size)) {
1003 		char tn_buf[48];
1004 
1005 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1006 		verbose(env,
1007 			"misaligned packet access off %d+%s+%d+%d size %d\n",
1008 			ip_align, tn_buf, reg->off, off, size);
1009 		return -EACCES;
1010 	}
1011 
1012 	return 0;
1013 }
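
/* Editor's example (illustrative): in strict mode, a 4-byte load at pkt+0
 * computes reg_off = 2 (NET_IP_ALIGN) + 0 + 0, which is not 4-byte aligned
 * and is rejected; the same load at pkt+2 gives reg_off = 4 and is allowed.
 * This mirrors real usage: after a 14-byte Ethernet header, IP header words
 * sit at offsets that are 2 (mod 4) within the packet.
 */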
1014 
1015 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1016 				       const struct bpf_reg_state *reg,
1017 				       const char *pointer_desc,
1018 				       int off, int size, bool strict)
1019 {
1020 	struct tnum reg_off;
1021 
1022 	/* Byte size accesses are always allowed. */
1023 	if (!strict || size == 1)
1024 		return 0;
1025 
1026 	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1027 	if (!tnum_is_aligned(reg_off, size)) {
1028 		char tn_buf[48];
1029 
1030 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1031 		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1032 			pointer_desc, tn_buf, reg->off, off, size);
1033 		return -EACCES;
1034 	}
1035 
1036 	return 0;
1037 }
1038 
1039 static int check_ptr_alignment(struct bpf_verifier_env *env,
1040 			       const struct bpf_reg_state *reg,
1041 			       int off, int size)
1042 {
1043 	bool strict = env->strict_alignment;
1044 	const char *pointer_desc = "";
1045 
1046 	switch (reg->type) {
1047 	case PTR_TO_PACKET:
1048 	case PTR_TO_PACKET_META:
1049 		/* Special case, because of NET_IP_ALIGN. Given metadata sits
1050 		 * right in front, treat it the very same way.
1051 		 */
1052 		return check_pkt_ptr_alignment(env, reg, off, size, strict);
1053 	case PTR_TO_MAP_VALUE:
1054 		pointer_desc = "value ";
1055 		break;
1056 	case PTR_TO_CTX:
1057 		pointer_desc = "context ";
1058 		break;
1059 	case PTR_TO_STACK:
1060 		pointer_desc = "stack ";
1061 		break;
1062 	default:
1063 		break;
1064 	}
1065 	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1066 					   strict);
1067 }
1068 
1069 /* check whether memory at (regno + off) is accessible for t = (read | write)
1070  * if t==write, value_regno is a register whose value is stored into memory
1071  * if t==read, value_regno is a register which will receive the value from memory
1072  * if t==write && value_regno==-1, some unknown value is stored into memory
1073  * if t==read && value_regno==-1, don't care what we read from memory
1074  */
1075 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
1076 			    int bpf_size, enum bpf_access_type t,
1077 			    int value_regno)
1078 {
1079 	struct bpf_verifier_state *state = env->cur_state;
1080 	struct bpf_reg_state *regs = cur_regs(env);
1081 	struct bpf_reg_state *reg = regs + regno;
1082 	int size, err = 0;
1083 
1084 	size = bpf_size_to_bytes(bpf_size);
1085 	if (size < 0)
1086 		return size;
1087 
1088 	/* alignment checks will add in reg->off themselves */
1089 	err = check_ptr_alignment(env, reg, off, size);
1090 	if (err)
1091 		return err;
1092 
1093 	/* for access checks, reg->off is just part of off */
1094 	off += reg->off;
1095 
1096 	if (reg->type == PTR_TO_MAP_VALUE) {
1097 		if (t == BPF_WRITE && value_regno >= 0 &&
1098 		    is_pointer_value(env, value_regno)) {
1099 			verbose(env, "R%d leaks addr into map\n", value_regno);
1100 			return -EACCES;
1101 		}
1102 
1103 		err = check_map_access(env, regno, off, size);
1104 		if (!err && t == BPF_READ && value_regno >= 0)
1105 			mark_reg_unknown(env, regs, value_regno);
1106 
1107 	} else if (reg->type == PTR_TO_CTX) {
1108 		enum bpf_reg_type reg_type = SCALAR_VALUE;
1109 
1110 		if (t == BPF_WRITE && value_regno >= 0 &&
1111 		    is_pointer_value(env, value_regno)) {
1112 			verbose(env, "R%d leaks addr into ctx\n", value_regno);
1113 			return -EACCES;
1114 		}
1115 		/* ctx accesses must be at a fixed offset, so that we can
1116 		 * determine what type of data was returned.
1117 		 */
1118 		if (reg->off) {
1119 			verbose(env,
1120 				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1121 				regno, reg->off, off - reg->off);
1122 			return -EACCES;
1123 		}
1124 		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1125 			char tn_buf[48];
1126 
1127 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1128 			verbose(env,
1129 				"variable ctx access var_off=%s off=%d size=%d",
1130 				tn_buf, off, size);
1131 			return -EACCES;
1132 		}
1133 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1134 		if (!err && t == BPF_READ && value_regno >= 0) {
1135 			/* ctx access returns either a scalar, or a
1136 			 * PTR_TO_PACKET[_META,_END]. In the latter
1137 			 * case, we know the offset is zero.
1138 			 */
1139 			if (reg_type == SCALAR_VALUE)
1140 				mark_reg_unknown(env, regs, value_regno);
1141 			else
1142 				mark_reg_known_zero(env, regs,
1143 						    value_regno);
1144 			regs[value_regno].id = 0;
1145 			regs[value_regno].off = 0;
1146 			regs[value_regno].range = 0;
1147 			regs[value_regno].type = reg_type;
1148 		}
1149 
1150 	} else if (reg->type == PTR_TO_STACK) {
1151 		/* stack accesses must be at a fixed offset, so that we can
1152 		 * determine what type of data was returned.
1153 		 * See check_stack_read().
1154 		 */
1155 		if (!tnum_is_const(reg->var_off)) {
1156 			char tn_buf[48];
1157 
1158 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1159 			verbose(env, "variable stack access var_off=%s off=%d size=%d",
1160 				tn_buf, off, size);
1161 			return -EACCES;
1162 		}
1163 		off += reg->var_off.value;
1164 		if (off >= 0 || off < -MAX_BPF_STACK) {
1165 			verbose(env, "invalid stack off=%d size=%d\n", off,
1166 				size);
1167 			return -EACCES;
1168 		}
1169 
1170 		if (env->prog->aux->stack_depth < -off)
1171 			env->prog->aux->stack_depth = -off;
1172 
1173 		if (t == BPF_WRITE)
1174 			err = check_stack_write(env, state, off, size,
1175 						value_regno);
1176 		else
1177 			err = check_stack_read(env, state, off, size,
1178 					       value_regno);
1179 	} else if (reg_is_pkt_pointer(reg)) {
1180 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1181 			verbose(env, "cannot write into packet\n");
1182 			return -EACCES;
1183 		}
1184 		if (t == BPF_WRITE && value_regno >= 0 &&
1185 		    is_pointer_value(env, value_regno)) {
1186 			verbose(env, "R%d leaks addr into packet\n",
1187 				value_regno);
1188 			return -EACCES;
1189 		}
1190 		err = check_packet_access(env, regno, off, size);
1191 		if (!err && t == BPF_READ && value_regno >= 0)
1192 			mark_reg_unknown(env, regs, value_regno);
1193 	} else {
1194 		verbose(env, "R%d invalid mem access '%s'\n", regno,
1195 			reg_type_str[reg->type]);
1196 		return -EACCES;
1197 	}
1198 
1199 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1200 	    regs[value_regno].type == SCALAR_VALUE) {
1201 		/* b/h/w load zero-extends, mark upper bits as known 0 */
1202 		regs[value_regno].var_off =
1203 			tnum_cast(regs[value_regno].var_off, size);
1204 		__update_reg_bounds(&regs[value_regno]);
1205 	}
1206 	return err;
1207 }
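
/* Editor's example (illustrative): after a 1-byte read into a register,
 * e.g. BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), the tnum_cast() above
 * leaves var_off = (value=0x0, mask=0xff), so __update_reg_bounds() can
 * conclude umax_value <= 255 even though the loaded byte itself is unknown.
 */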
1208 
1209 static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1210 {
1211 	int err;
1212 
1213 	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1214 	    insn->imm != 0) {
1215 		verbose(env, "BPF_XADD uses reserved fields\n");
1216 		return -EINVAL;
1217 	}
1218 
1219 	/* check src1 operand */
1220 	err = check_reg_arg(env, insn->src_reg, SRC_OP);
1221 	if (err)
1222 		return err;
1223 
1224 	/* check src2 operand */
1225 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1226 	if (err)
1227 		return err;
1228 
1229 	if (is_pointer_value(env, insn->src_reg)) {
1230 		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
1231 		return -EACCES;
1232 	}
1233 
1234 	/* check whether atomic_add can read the memory */
1235 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1236 			       BPF_SIZE(insn->code), BPF_READ, -1);
1237 	if (err)
1238 		return err;
1239 
1240 	/* check whether atomic_add can write into the same memory */
1241 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1242 				BPF_SIZE(insn->code), BPF_WRITE, -1);
1243 }
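
/* Editor's example (illustrative, not part of the original file): an atomic
 * add to a counter kept on the stack, with the slot initialized first:
 *
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),  // *(fp-8) = 0
 *    BPF_MOV64_IMM(BPF_REG_1, 1),
 *    BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // *(fp-8) += 1, atomically
 *
 * Both the read and the write access of the XADD are checked via
 * check_mem_access() above.
 */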
1244 
1245 /* Does this register contain a constant zero? */
1246 static bool register_is_null(struct bpf_reg_state reg)
1247 {
1248 	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
1249 }
1250 
1251 /* when register 'regno' is passed into a function that will read 'access_size'
1252  * bytes from that pointer, make sure that it's within the stack boundary
1253  * and that all elements of the stack are initialized.
1254  * Unlike most pointer bounds-checking functions, this one doesn't take an
1255  * 'off' argument, so it has to add in reg->off itself.
1256  */
1257 static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1258 				int access_size, bool zero_size_allowed,
1259 				struct bpf_call_arg_meta *meta)
1260 {
1261 	struct bpf_verifier_state *state = env->cur_state;
1262 	struct bpf_reg_state *regs = state->regs;
1263 	int off, i, slot, spi;
1264 
1265 	if (regs[regno].type != PTR_TO_STACK) {
1266 		/* Allow zero-byte read from NULL, regardless of pointer type */
1267 		if (zero_size_allowed && access_size == 0 &&
1268 		    register_is_null(regs[regno]))
1269 			return 0;
1270 
1271 		verbose(env, "R%d type=%s expected=%s\n", regno,
1272 			reg_type_str[regs[regno].type],
1273 			reg_type_str[PTR_TO_STACK]);
1274 		return -EACCES;
1275 	}
1276 
1277 	/* Only allow fixed-offset stack reads */
1278 	if (!tnum_is_const(regs[regno].var_off)) {
1279 		char tn_buf[48];
1280 
1281 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
1282 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
1283 			regno, tn_buf);
		return -EACCES;
1284 	}
1285 	off = regs[regno].off + regs[regno].var_off.value;
1286 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1287 	    access_size <= 0) {
1288 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
1289 			regno, off, access_size);
1290 		return -EACCES;
1291 	}
1292 
1293 	if (env->prog->aux->stack_depth < -off)
1294 		env->prog->aux->stack_depth = -off;
1295 
1296 	if (meta && meta->raw_mode) {
1297 		meta->access_size = access_size;
1298 		meta->regno = regno;
1299 		return 0;
1300 	}
1301 
1302 	for (i = 0; i < access_size; i++) {
1303 		slot = -(off + i) - 1;
1304 		spi = slot / BPF_REG_SIZE;
1305 		if (state->allocated_stack <= slot ||
1306 		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
1307 			STACK_MISC) {
1308 			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1309 				off, i, access_size);
1310 			return -EACCES;
1311 		}
1312 	}
1313 	return 0;
1314 }
1315 
1316 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1317 				   int access_size, bool zero_size_allowed,
1318 				   struct bpf_call_arg_meta *meta)
1319 {
1320 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1321 
1322 	switch (reg->type) {
1323 	case PTR_TO_PACKET:
1324 	case PTR_TO_PACKET_META:
1325 		return check_packet_access(env, regno, reg->off, access_size);
1326 	case PTR_TO_MAP_VALUE:
1327 		return check_map_access(env, regno, reg->off, access_size);
1328 	default: /* scalar_value|ptr_to_stack or invalid ptr */
1329 		return check_stack_boundary(env, regno, access_size,
1330 					    zero_size_allowed, meta);
1331 	}
1332 }
1333 
1334 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1335 			  enum bpf_arg_type arg_type,
1336 			  struct bpf_call_arg_meta *meta)
1337 {
1338 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1339 	enum bpf_reg_type expected_type, type = reg->type;
1340 	int err = 0;
1341 
1342 	if (arg_type == ARG_DONTCARE)
1343 		return 0;
1344 
1345 	err = check_reg_arg(env, regno, SRC_OP);
1346 	if (err)
1347 		return err;
1348 
1349 	if (arg_type == ARG_ANYTHING) {
1350 		if (is_pointer_value(env, regno)) {
1351 			verbose(env, "R%d leaks addr into helper function\n",
1352 				regno);
1353 			return -EACCES;
1354 		}
1355 		return 0;
1356 	}
1357 
1358 	if (type_is_pkt_pointer(type) &&
1359 	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1360 		verbose(env, "helper access to the packet is not allowed\n");
1361 		return -EACCES;
1362 	}
1363 
1364 	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1365 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1366 		expected_type = PTR_TO_STACK;
1367 		if (!type_is_pkt_pointer(type) &&
1368 		    type != expected_type)
1369 			goto err_type;
1370 	} else if (arg_type == ARG_CONST_SIZE ||
1371 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1372 		expected_type = SCALAR_VALUE;
1373 		if (type != expected_type)
1374 			goto err_type;
1375 	} else if (arg_type == ARG_CONST_MAP_PTR) {
1376 		expected_type = CONST_PTR_TO_MAP;
1377 		if (type != expected_type)
1378 			goto err_type;
1379 	} else if (arg_type == ARG_PTR_TO_CTX) {
1380 		expected_type = PTR_TO_CTX;
1381 		if (type != expected_type)
1382 			goto err_type;
1383 	} else if (arg_type == ARG_PTR_TO_MEM ||
1384 		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
1385 		expected_type = PTR_TO_STACK;
1386 		/* One exception here. In case function allows for NULL to be
1387 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
1388 		 * happens during stack boundary checking.
1389 		 */
1390 		if (register_is_null(*reg))
1391 			/* final test in check_stack_boundary() */;
1392 		else if (!type_is_pkt_pointer(type) &&
1393 			 type != PTR_TO_MAP_VALUE &&
1394 			 type != expected_type)
1395 			goto err_type;
1396 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1397 	} else {
1398 		verbose(env, "unsupported arg_type %d\n", arg_type);
1399 		return -EFAULT;
1400 	}
1401 
1402 	if (arg_type == ARG_CONST_MAP_PTR) {
1403 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
1404 		meta->map_ptr = reg->map_ptr;
1405 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
1406 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
1407 		 * check that [key, key + map->key_size) are within
1408 		 * stack limits and initialized
1409 		 */
1410 		if (!meta->map_ptr) {
1411 			/* in function declaration map_ptr must come before
1412 			 * map_key, so that it's verified and known before
1413 			 * we have to check map_key here. Otherwise it means
1414 			 * that the kernel subsystem misconfigured the verifier
1415 			 */
1416 			verbose(env, "invalid map_ptr to access map->key\n");
1417 			return -EACCES;
1418 		}
1419 		if (type_is_pkt_pointer(type))
1420 			err = check_packet_access(env, regno, reg->off,
1421 						  meta->map_ptr->key_size);
1422 		else
1423 			err = check_stack_boundary(env, regno,
1424 						   meta->map_ptr->key_size,
1425 						   false, NULL);
1426 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
1427 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
1428 		 * check [value, value + map->value_size) validity
1429 		 */
1430 		if (!meta->map_ptr) {
1431 			/* kernel subsystem misconfigured verifier */
1432 			verbose(env, "invalid map_ptr to access map->value\n");
1433 			return -EACCES;
1434 		}
1435 		if (type_is_pkt_pointer(type))
1436 			err = check_packet_access(env, regno, reg->off,
1437 						  meta->map_ptr->value_size);
1438 		else
1439 			err = check_stack_boundary(env, regno,
1440 						   meta->map_ptr->value_size,
1441 						   false, NULL);
1442 	} else if (arg_type == ARG_CONST_SIZE ||
1443 		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1444 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
1445 
1446 		/* bpf_xxx(..., buf, len) call will access 'len' bytes
1447 		 * from stack pointer 'buf'. Check it
1448 		 * note: regno == len, regno - 1 == buf
1449 		 */
1450 		if (regno == 0) {
1451 			/* kernel subsystem misconfigured verifier */
1452 			verbose(env,
1453 				"ARG_CONST_SIZE cannot be first argument\n");
1454 			return -EACCES;
1455 		}
1456 
1457 		/* The register is SCALAR_VALUE; the access check
1458 		 * happens using its boundaries.
1459 		 */
1460 
1461 		if (!tnum_is_const(reg->var_off))
1462 			/* For unprivileged variable accesses, disable raw
1463 			 * mode so that the program is required to
1464 			 * initialize all the memory that the helper could
1465 			 * just partially fill up.
1466 			 */
1467 			meta = NULL;
1468 
1469 		if (reg->smin_value < 0) {
1470 			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
1471 				regno);
1472 			return -EACCES;
1473 		}
1474 
1475 		if (reg->umin_value == 0) {
1476 			err = check_helper_mem_access(env, regno - 1, 0,
1477 						      zero_size_allowed,
1478 						      meta);
1479 			if (err)
1480 				return err;
1481 		}
1482 
1483 		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
1484 			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
1485 				regno);
1486 			return -EACCES;
1487 		}
1488 		err = check_helper_mem_access(env, regno - 1,
1489 					      reg->umax_value,
1490 					      zero_size_allowed, meta);
1491 	}
1492 
1493 	return err;
1494 err_type:
1495 	verbose(env, "R%d type=%s expected=%s\n", regno,
1496 		reg_type_str[type], reg_type_str[expected_type]);
1497 	return -EACCES;
1498 }
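
/* Editor's example (illustrative): for a helper taking (buf, len), such as
 * bpf_probe_read(dst, size, src) with .arg1_type = ARG_PTR_TO_UNINIT_MEM
 * and .arg2_type = ARG_CONST_SIZE, a valid call site looks like:
 *
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),  // R1 = fp-8, the destination buffer
 *    BPF_MOV64_IMM(BPF_REG_2, 8),            // R2 = 8, checked against R1's bounds
 *    ...set up R3...
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read),
 *
 * Because arg1 is ARG_PTR_TO_UNINIT_MEM, meta->raw_mode is set and the
 * buffer does not need to be initialized before the call.
 */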
1499 
1500 static int check_map_func_compatibility(struct bpf_verifier_env *env,
1501 					struct bpf_map *map, int func_id)
1502 {
1503 	if (!map)
1504 		return 0;
1505 
1506 	/* We need a two-way check, the first from the map's perspective ... */
1507 	switch (map->map_type) {
1508 	case BPF_MAP_TYPE_PROG_ARRAY:
1509 		if (func_id != BPF_FUNC_tail_call)
1510 			goto error;
1511 		break;
1512 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1513 		if (func_id != BPF_FUNC_perf_event_read &&
1514 		    func_id != BPF_FUNC_perf_event_output &&
1515 		    func_id != BPF_FUNC_perf_event_read_value)
1516 			goto error;
1517 		break;
1518 	case BPF_MAP_TYPE_STACK_TRACE:
1519 		if (func_id != BPF_FUNC_get_stackid)
1520 			goto error;
1521 		break;
1522 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1523 		if (func_id != BPF_FUNC_skb_under_cgroup &&
1524 		    func_id != BPF_FUNC_current_task_under_cgroup)
1525 			goto error;
1526 		break;
1527 	/* devmap returns a pointer to a live net_device ifindex that we cannot
1528 	 * allow to be modified from the bpf side. So do not allow element
1529 	 * lookups for now.
1530 	 */
1531 	case BPF_MAP_TYPE_DEVMAP:
1532 		if (func_id != BPF_FUNC_redirect_map)
1533 			goto error;
1534 		break;
1535 	/* Restrict bpf side of cpumap, open when use-cases appear */
1536 	case BPF_MAP_TYPE_CPUMAP:
1537 		if (func_id != BPF_FUNC_redirect_map)
1538 			goto error;
1539 		break;
1540 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1541 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1542 		if (func_id != BPF_FUNC_map_lookup_elem)
1543 			goto error;
1544 		break;
1545 	case BPF_MAP_TYPE_SOCKMAP:
1546 		if (func_id != BPF_FUNC_sk_redirect_map &&
1547 		    func_id != BPF_FUNC_sock_map_update &&
1548 		    func_id != BPF_FUNC_map_delete_elem)
1549 			goto error;
1550 		break;
1551 	default:
1552 		break;
1553 	}
1554 
1555 	/* ... and second from the function itself. */
1556 	switch (func_id) {
1557 	case BPF_FUNC_tail_call:
1558 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1559 			goto error;
1560 		break;
1561 	case BPF_FUNC_perf_event_read:
1562 	case BPF_FUNC_perf_event_output:
1563 	case BPF_FUNC_perf_event_read_value:
1564 		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
1565 			goto error;
1566 		break;
1567 	case BPF_FUNC_get_stackid:
1568 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1569 			goto error;
1570 		break;
1571 	case BPF_FUNC_current_task_under_cgroup:
1572 	case BPF_FUNC_skb_under_cgroup:
1573 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1574 			goto error;
1575 		break;
1576 	case BPF_FUNC_redirect_map:
1577 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
1578 		    map->map_type != BPF_MAP_TYPE_CPUMAP)
1579 			goto error;
1580 		break;
1581 	case BPF_FUNC_sk_redirect_map:
1582 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1583 			goto error;
1584 		break;
1585 	case BPF_FUNC_sock_map_update:
1586 		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
1587 			goto error;
1588 		break;
1589 	default:
1590 		break;
1591 	}
1592 
1593 	return 0;
1594 error:
1595 	verbose(env, "cannot pass map_type %d into func %s#%d\n",
1596 		map->map_type, func_id_name(func_id), func_id);
1597 	return -EINVAL;
1598 }
1599 
1600 static int check_raw_mode(const struct bpf_func_proto *fn)
1601 {
1602 	int count = 0;
1603 
1604 	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
1605 		count++;
1606 	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
1607 		count++;
1608 	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
1609 		count++;
1610 	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
1611 		count++;
1612 	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
1613 		count++;
1614 
1615 	return count > 1 ? -EINVAL : 0;
1616 }
1617 
1618 /* Packet data might have moved; any old PTR_TO_PACKET[_META,_END]
1619  * are now invalid, so turn them into unknown SCALAR_VALUE.
1620  */
1621 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
1622 {
1623 	struct bpf_verifier_state *state = env->cur_state;
1624 	struct bpf_reg_state *regs = state->regs, *reg;
1625 	int i;
1626 
1627 	for (i = 0; i < MAX_BPF_REG; i++)
1628 		if (reg_is_pkt_pointer_any(&regs[i]))
1629 			mark_reg_unknown(env, regs, i);
1630 
1631 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1632 		if (state->stack[i].slot_type[0] != STACK_SPILL)
1633 			continue;
1634 		reg = &state->stack[i].spilled_ptr;
1635 		if (reg_is_pkt_pointer_any(reg))
1636 			__mark_reg_unknown(reg);
1637 	}
1638 }
1639 
1640 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1641 {
1642 	const struct bpf_func_proto *fn = NULL;
1643 	struct bpf_reg_state *regs;
1644 	struct bpf_call_arg_meta meta;
1645 	bool changes_data;
1646 	int i, err;
1647 
1648 	/* find function prototype */
1649 	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1650 		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
1651 			func_id);
1652 		return -EINVAL;
1653 	}
1654 
1655 	if (env->ops->get_func_proto)
1656 		fn = env->ops->get_func_proto(func_id);
1657 
1658 	if (!fn) {
1659 		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
1660 			func_id);
1661 		return -EINVAL;
1662 	}
1663 
1664 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
1665 	if (!env->prog->gpl_compatible && fn->gpl_only) {
1666 		verbose(env, "cannot call GPL only function from proprietary program\n");
1667 		return -EINVAL;
1668 	}
1669 
1670 	changes_data = bpf_helper_changes_pkt_data(fn->func);
1671 
1672 	memset(&meta, 0, sizeof(meta));
1673 	meta.pkt_access = fn->pkt_access;
1674 
1675 	/* We only support one arg being in raw mode at the moment, which
1676 	 * is sufficient for the helper functions we have right now.
1677 	 */
1678 	err = check_raw_mode(fn);
1679 	if (err) {
1680 		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
1681 			func_id_name(func_id), func_id);
1682 		return err;
1683 	}
1684 
1685 	/* check args */
1686 	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
1687 	if (err)
1688 		return err;
1689 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1690 	if (err)
1691 		return err;
1692 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1693 	if (err)
1694 		return err;
1695 	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
1696 	if (err)
1697 		return err;
1698 	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
1699 	if (err)
1700 		return err;
1701 
1702 	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
1703 	 * is inferred from register state.
1704 	 */
1705 	for (i = 0; i < meta.access_size; i++) {
1706 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
1707 		if (err)
1708 			return err;
1709 	}
1710 
1711 	regs = cur_regs(env);
1712 	/* reset caller saved regs */
1713 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
1714 		mark_reg_not_init(env, regs, caller_saved[i]);
1715 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
1716 	}
1717 
1718 	/* update return register (already marked as written above) */
1719 	if (fn->ret_type == RET_INTEGER) {
1720 		/* sets type to SCALAR_VALUE */
1721 		mark_reg_unknown(env, regs, BPF_REG_0);
1722 	} else if (fn->ret_type == RET_VOID) {
1723 		regs[BPF_REG_0].type = NOT_INIT;
1724 	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
1725 		struct bpf_insn_aux_data *insn_aux;
1726 
1727 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
1728 		/* No offset has been applied yet, neither variable nor fixed */
1729 		mark_reg_known_zero(env, regs, BPF_REG_0);
1730 		regs[BPF_REG_0].off = 0;
1731 		/* remember map_ptr, so that check_map_access()
1732 		 * can check 'value_size' boundary of memory access
1733 		 * to map element returned from bpf_map_lookup_elem()
1734 		 */
1735 		if (meta.map_ptr == NULL) {
1736 			verbose(env,
1737 				"kernel subsystem misconfigured verifier\n");
1738 			return -EINVAL;
1739 		}
1740 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
1741 		regs[BPF_REG_0].id = ++env->id_gen;
1742 		insn_aux = &env->insn_aux_data[insn_idx];
1743 		if (!insn_aux->map_ptr)
1744 			insn_aux->map_ptr = meta.map_ptr;
1745 		else if (insn_aux->map_ptr != meta.map_ptr)
1746 			insn_aux->map_ptr = BPF_MAP_PTR_POISON;
1747 	} else {
1748 		verbose(env, "unknown return type %d of func %s#%d\n",
1749 			fn->ret_type, func_id_name(func_id), func_id);
1750 		return -EINVAL;
1751 	}
1752 
1753 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
1754 	if (err)
1755 		return err;
1756 
1757 	if (changes_data)
1758 		clear_all_pkt_pointers(env);
1759 	return 0;
1760 }
1761 
1762 static void coerce_reg_to_32(struct bpf_reg_state *reg)
1763 {
1764 	/* clear high 32 bits */
1765 	reg->var_off = tnum_cast(reg->var_off, 4);
1766 	/* Update bounds */
1767 	__update_reg_bounds(reg);
1768 }
1769 
1770 static bool signed_add_overflows(s64 a, s64 b)
1771 {
1772 	/* Do the add in u64, where overflow is well-defined */
1773 	s64 res = (s64)((u64)a + (u64)b);
1774 
1775 	if (b < 0)
1776 		return res > a;
1777 	return res < a;
1778 }
1779 
1780 static bool signed_sub_overflows(s64 a, s64 b)
1781 {
1782 	/* Do the sub in u64, where overflow is well-defined */
1783 	s64 res = (s64)((u64)a - (u64)b);
1784 
1785 	if (b < 0)
1786 		return res < a;
1787 	return res > a;
1788 }
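
/* E.g. signed_add_overflows(S64_MAX, 1): the u64 addition wraps to
 * S64_MIN, and since b > 0 but res < a, overflow is reported.  Likewise
 * signed_sub_overflows(S64_MIN, 1) wraps to S64_MAX and is caught by
 * the b > 0, res > a test.
 */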
1789 
1790 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1791  * Caller should also handle BPF_MOV case separately.
1792  * If we return -EACCES, caller may want to try again treating pointer as a
1793  * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
1794  */
1795 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1796 				   struct bpf_insn *insn,
1797 				   const struct bpf_reg_state *ptr_reg,
1798 				   const struct bpf_reg_state *off_reg)
1799 {
1800 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
1801 	bool known = tnum_is_const(off_reg->var_off);
1802 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
1803 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
1804 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
1805 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
1806 	u8 opcode = BPF_OP(insn->code);
1807 	u32 dst = insn->dst_reg;
1808 
1809 	dst_reg = &regs[dst];
1810 
1811 	if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
1812 		print_verifier_state(env, env->cur_state);
1813 		verbose(env,
1814 			"verifier internal error: known but bad sbounds\n");
1815 		return -EINVAL;
1816 	}
1817 	if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
1818 		print_verifier_state(env, env->cur_state);
1819 		verbose(env,
1820 			"verifier internal error: known but bad ubounds\n");
1821 		return -EINVAL;
1822 	}
1823 
1824 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
1825 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
1826 		if (!env->allow_ptr_leaks)
1827 			verbose(env,
1828 				"R%d 32-bit pointer arithmetic prohibited\n",
1829 				dst);
1830 		return -EACCES;
1831 	}
1832 
1833 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1834 		if (!env->allow_ptr_leaks)
1835 			verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1836 				dst);
1837 		return -EACCES;
1838 	}
1839 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
1840 		if (!env->allow_ptr_leaks)
1841 			verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1842 				dst);
1843 		return -EACCES;
1844 	}
1845 	if (ptr_reg->type == PTR_TO_PACKET_END) {
1846 		if (!env->allow_ptr_leaks)
1847 			verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1848 				dst);
1849 		return -EACCES;
1850 	}
1851 
1852 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
1853 	 * The id may be overwritten later if we create a new variable offset.
1854 	 */
1855 	dst_reg->type = ptr_reg->type;
1856 	dst_reg->id = ptr_reg->id;
1857 
1858 	switch (opcode) {
1859 	case BPF_ADD:
1860 		/* We can take a fixed offset as long as it doesn't overflow
1861 		 * the s32 'off' field
1862 		 */
1863 		if (known && (ptr_reg->off + smin_val ==
1864 			      (s64)(s32)(ptr_reg->off + smin_val))) {
1865 			/* pointer += K.  Accumulate it into fixed offset */
1866 			dst_reg->smin_value = smin_ptr;
1867 			dst_reg->smax_value = smax_ptr;
1868 			dst_reg->umin_value = umin_ptr;
1869 			dst_reg->umax_value = umax_ptr;
1870 			dst_reg->var_off = ptr_reg->var_off;
1871 			dst_reg->off = ptr_reg->off + smin_val;
1872 			dst_reg->range = ptr_reg->range;
1873 			break;
1874 		}
1875 		/* A new variable offset is created.  Note that off_reg->off
1876 		 * == 0, since it's a scalar.
1877 		 * dst_reg gets the pointer type, and since a variable value
1878 		 * was added to the pointer, give it a new 'id' if it's a
1879 		 * PTR_TO_PACKET.
1880 		 * This creates a new 'base' pointer: off_reg (the variable
1881 		 * part) gets added into the variable offset, and we copy the
1882 		 * fixed offset from ptr_reg.
1883 		 */
1884 		if (signed_add_overflows(smin_ptr, smin_val) ||
1885 		    signed_add_overflows(smax_ptr, smax_val)) {
1886 			dst_reg->smin_value = S64_MIN;
1887 			dst_reg->smax_value = S64_MAX;
1888 		} else {
1889 			dst_reg->smin_value = smin_ptr + smin_val;
1890 			dst_reg->smax_value = smax_ptr + smax_val;
1891 		}
1892 		if (umin_ptr + umin_val < umin_ptr ||
1893 		    umax_ptr + umax_val < umax_ptr) {
1894 			dst_reg->umin_value = 0;
1895 			dst_reg->umax_value = U64_MAX;
1896 		} else {
1897 			dst_reg->umin_value = umin_ptr + umin_val;
1898 			dst_reg->umax_value = umax_ptr + umax_val;
1899 		}
1900 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
1901 		dst_reg->off = ptr_reg->off;
1902 		if (reg_is_pkt_pointer(ptr_reg)) {
1903 			dst_reg->id = ++env->id_gen;
1904 			/* something was added to pkt_ptr, set range to zero */
1905 			dst_reg->range = 0;
1906 		}
1907 		break;
1908 	case BPF_SUB:
1909 		if (dst_reg == off_reg) {
1910 			/* scalar -= pointer.  Creates an unknown scalar */
1911 			if (!env->allow_ptr_leaks)
1912 				verbose(env, "R%d tried to subtract pointer from scalar\n",
1913 					dst);
1914 			return -EACCES;
1915 		}
1916 		/* We don't allow subtraction from FP, because (according to
1917 		 * the test_verifier.c test "invalid fp arithmetic") JITs might not
1918 		 * be able to deal with it.
1919 		 */
1920 		if (ptr_reg->type == PTR_TO_STACK) {
1921 			if (!env->allow_ptr_leaks)
1922 				verbose(env, "R%d subtraction from stack pointer prohibited\n",
1923 					dst);
1924 			return -EACCES;
1925 		}
1926 		if (known && (ptr_reg->off - smin_val ==
1927 			      (s64)(s32)(ptr_reg->off - smin_val))) {
1928 			/* pointer -= K.  Subtract it from fixed offset */
1929 			dst_reg->smin_value = smin_ptr;
1930 			dst_reg->smax_value = smax_ptr;
1931 			dst_reg->umin_value = umin_ptr;
1932 			dst_reg->umax_value = umax_ptr;
1933 			dst_reg->var_off = ptr_reg->var_off;
1934 			dst_reg->id = ptr_reg->id;
1935 			dst_reg->off = ptr_reg->off - smin_val;
1936 			dst_reg->range = ptr_reg->range;
1937 			break;
1938 		}
1939 		/* A new variable offset is created.  If the subtrahend is known
1940 		 * nonnegative, then any reg->range we had before is still good.
1941 		 */
1942 		if (signed_sub_overflows(smin_ptr, smax_val) ||
1943 		    signed_sub_overflows(smax_ptr, smin_val)) {
1944 			/* Overflow possible, we know nothing */
1945 			dst_reg->smin_value = S64_MIN;
1946 			dst_reg->smax_value = S64_MAX;
1947 		} else {
1948 			dst_reg->smin_value = smin_ptr - smax_val;
1949 			dst_reg->smax_value = smax_ptr - smin_val;
1950 		}
1951 		if (umin_ptr < umax_val) {
1952 			/* Overflow possible, we know nothing */
1953 			dst_reg->umin_value = 0;
1954 			dst_reg->umax_value = U64_MAX;
1955 		} else {
1956 			/* Cannot overflow (as long as bounds are consistent) */
1957 			dst_reg->umin_value = umin_ptr - umax_val;
1958 			dst_reg->umax_value = umax_ptr - umin_val;
1959 		}
1960 		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
1961 		dst_reg->off = ptr_reg->off;
1962 		if (reg_is_pkt_pointer(ptr_reg)) {
1963 			dst_reg->id = ++env->id_gen;
1964 			/* if the subtrahend may be negative, the old range is not safe */
1965 			if (smin_val < 0)
1966 				dst_reg->range = 0;
1967 		}
1968 		break;
1969 	case BPF_AND:
1970 	case BPF_OR:
1971 	case BPF_XOR:
1972 		/* bitwise ops on pointers are troublesome, prohibit for now.
1973 		 * (However, in principle we could allow some cases, e.g.
1974 		 * ptr &= ~3, which would reduce min_value by at most 3.)
1975 		 */
1976 		if (!env->allow_ptr_leaks)
1977 			verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
1978 				dst, bpf_alu_string[opcode >> 4]);
1979 		return -EACCES;
1980 	default:
1981 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
1982 		if (!env->allow_ptr_leaks)
1983 			verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
1984 				dst, bpf_alu_string[opcode >> 4]);
1985 		return -EACCES;
1986 	}
1987 
1988 	__update_reg_bounds(dst_reg);
1989 	__reg_deduce_bounds(dst_reg);
1990 	__reg_bound_offset(dst_reg);
1991 	return 0;
1992 }
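
/* A sketch of the two shapes adjust_ptr_min_max_vals() handles: with a
 * map value pointer in r0 and a bounded scalar in r1,
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),         r0 += K
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), r0 += scalar
 *
 * the first simply folds 8 into the fixed ->off, while the second
 * creates a variable offset: r1's bounds are accumulated into r0's
 * smin/smax/umin/umax and its var_off is tnum_add()ed in.
 */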
1993 
1994 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
1995 				      struct bpf_insn *insn,
1996 				      struct bpf_reg_state *dst_reg,
1997 				      struct bpf_reg_state src_reg)
1998 {
1999 	struct bpf_reg_state *regs = cur_regs(env);
2000 	u8 opcode = BPF_OP(insn->code);
2001 	bool src_known, dst_known;
2002 	s64 smin_val, smax_val;
2003 	u64 umin_val, umax_val;
2004 
2005 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
2006 		/* 32-bit ALU ops work on 32-bit values; truncate both inputs */
2007 		coerce_reg_to_32(dst_reg);
2008 		coerce_reg_to_32(&src_reg);
2009 	}
2010 	smin_val = src_reg.smin_value;
2011 	smax_val = src_reg.smax_value;
2012 	umin_val = src_reg.umin_value;
2013 	umax_val = src_reg.umax_value;
2014 	src_known = tnum_is_const(src_reg.var_off);
2015 	dst_known = tnum_is_const(dst_reg->var_off);
2016 
2017 	switch (opcode) {
2018 	case BPF_ADD:
2019 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2020 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
2021 			dst_reg->smin_value = S64_MIN;
2022 			dst_reg->smax_value = S64_MAX;
2023 		} else {
2024 			dst_reg->smin_value += smin_val;
2025 			dst_reg->smax_value += smax_val;
2026 		}
2027 		if (dst_reg->umin_value + umin_val < umin_val ||
2028 		    dst_reg->umax_value + umax_val < umax_val) {
2029 			dst_reg->umin_value = 0;
2030 			dst_reg->umax_value = U64_MAX;
2031 		} else {
2032 			dst_reg->umin_value += umin_val;
2033 			dst_reg->umax_value += umax_val;
2034 		}
2035 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2036 		break;
2037 	case BPF_SUB:
2038 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2039 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2040 			/* Overflow possible, we know nothing */
2041 			dst_reg->smin_value = S64_MIN;
2042 			dst_reg->smax_value = S64_MAX;
2043 		} else {
2044 			dst_reg->smin_value -= smax_val;
2045 			dst_reg->smax_value -= smin_val;
2046 		}
2047 		if (dst_reg->umin_value < umax_val) {
2048 			/* Overflow possible, we know nothing */
2049 			dst_reg->umin_value = 0;
2050 			dst_reg->umax_value = U64_MAX;
2051 		} else {
2052 			/* Cannot overflow (as long as bounds are consistent) */
2053 			dst_reg->umin_value -= umax_val;
2054 			dst_reg->umax_value -= umin_val;
2055 		}
2056 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2057 		break;
2058 	case BPF_MUL:
2059 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2060 		if (smin_val < 0 || dst_reg->smin_value < 0) {
2061 			/* Ain't nobody got time to multiply that sign */
2062 			__mark_reg_unbounded(dst_reg);
2063 			__update_reg_bounds(dst_reg);
2064 			break;
2065 		}
2066 		/* Both values are positive, so we can work with unsigned and
2067 		 * copy the result to signed (unless it exceeds S64_MAX).
2068 		 */
2069 		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2070 			/* Potential overflow, we know nothing */
2071 			__mark_reg_unbounded(dst_reg);
2072 			/* (except what we can learn from the var_off) */
2073 			__update_reg_bounds(dst_reg);
2074 			break;
2075 		}
2076 		dst_reg->umin_value *= umin_val;
2077 		dst_reg->umax_value *= umax_val;
2078 		if (dst_reg->umax_value > S64_MAX) {
2079 			/* Overflow possible, we know nothing */
2080 			dst_reg->smin_value = S64_MIN;
2081 			dst_reg->smax_value = S64_MAX;
2082 		} else {
2083 			dst_reg->smin_value = dst_reg->umin_value;
2084 			dst_reg->smax_value = dst_reg->umax_value;
2085 		}
2086 		break;
2087 	case BPF_AND:
2088 		if (src_known && dst_known) {
2089 			__mark_reg_known(dst_reg, dst_reg->var_off.value &
2090 						  src_reg.var_off.value);
2091 			break;
2092 		}
2093 		/* We get our minimum from the var_off, since that's inherently
2094 		 * bitwise.  Our maximum is the minimum of the operands' maxima.
2095 		 */
2096 		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
2097 		dst_reg->umin_value = dst_reg->var_off.value;
2098 		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
2099 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2100 			/* Lose signed bounds when ANDing negative numbers,
2101 			 * ain't nobody got time for that.
2102 			 */
2103 			dst_reg->smin_value = S64_MIN;
2104 			dst_reg->smax_value = S64_MAX;
2105 		} else {
2106 			/* ANDing two positives gives a positive, so safe to
2107 			 * cast result into s64.
2108 			 */
2109 			dst_reg->smin_value = dst_reg->umin_value;
2110 			dst_reg->smax_value = dst_reg->umax_value;
2111 		}
2112 		/* We may learn something more from the var_off */
2113 		__update_reg_bounds(dst_reg);
2114 		break;
2115 	case BPF_OR:
2116 		if (src_known && dst_known) {
2117 			__mark_reg_known(dst_reg, dst_reg->var_off.value |
2118 						  src_reg.var_off.value);
2119 			break;
2120 		}
2121 		/* We get our maximum from the var_off, and our minimum is the
2122 		 * maximum of the operands' minima
2123 		 */
2124 		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
2125 		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
2126 		dst_reg->umax_value = dst_reg->var_off.value |
2127 				      dst_reg->var_off.mask;
2128 		if (dst_reg->smin_value < 0 || smin_val < 0) {
2129 			/* Lose signed bounds when ORing negative numbers,
2130 			 * ain't nobody got time for that.
2131 			 */
2132 			dst_reg->smin_value = S64_MIN;
2133 			dst_reg->smax_value = S64_MAX;
2134 		} else {
2135 			/* ORing two positives gives a positive, so safe to
2136 			 * cast result into s64.
2137 			 */
2138 			dst_reg->smin_value = dst_reg->umin_value;
2139 			dst_reg->smax_value = dst_reg->umax_value;
2140 		}
2141 		/* We may learn something more from the var_off */
2142 		__update_reg_bounds(dst_reg);
2143 		break;
2144 	case BPF_LSH:
2145 		if (umax_val > 63) {
2146 			/* Shifts greater than 63 are undefined.  This includes
2147 			 * shifts by a negative number.
2148 			 */
2149 			mark_reg_unknown(env, regs, insn->dst_reg);
2150 			break;
2151 		}
2152 		/* We lose all sign bit information (except what we can pick
2153 		 * up from var_off)
2154 		 */
2155 		dst_reg->smin_value = S64_MIN;
2156 		dst_reg->smax_value = S64_MAX;
2157 		/* If we might shift our top bit out, then we know nothing */
2158 		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
2159 			dst_reg->umin_value = 0;
2160 			dst_reg->umax_value = U64_MAX;
2161 		} else {
2162 			dst_reg->umin_value <<= umin_val;
2163 			dst_reg->umax_value <<= umax_val;
2164 		}
2165 		if (src_known)
2166 			dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
2167 		else
2168 			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
2169 		/* We may learn something more from the var_off */
2170 		__update_reg_bounds(dst_reg);
2171 		break;
2172 	case BPF_RSH:
2173 		if (umax_val > 63) {
2174 			/* Shifts greater than 63 are undefined.  This includes
2175 			 * shifts by a negative number.
2176 			 */
2177 			mark_reg_unknown(env, regs, insn->dst_reg);
2178 			break;
2179 		}
2180 		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
2181 		if (dst_reg->smin_value < 0) {
2182 			if (umin_val) {
2183 				/* Sign bit will be cleared */
2184 				dst_reg->smin_value = 0;
2185 			} else {
2186 				/* Lost sign bit information */
2187 				dst_reg->smin_value = S64_MIN;
2188 				dst_reg->smax_value = S64_MAX;
2189 			}
2190 		} else {
2191 			dst_reg->smin_value =
2192 				(u64)(dst_reg->smin_value) >> umax_val;
2193 		}
2194 		if (src_known)
2195 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2196 						       umin_val);
2197 		else
2198 			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
2199 		dst_reg->umin_value >>= umax_val;
2200 		dst_reg->umax_value >>= umin_val;
2201 		/* We may learn something more from the var_off */
2202 		__update_reg_bounds(dst_reg);
2203 		break;
2204 	default:
2205 		mark_reg_unknown(env, regs, insn->dst_reg);
2206 		break;
2207 	}
2208 
2209 	__reg_deduce_bounds(dst_reg);
2210 	__reg_bound_offset(dst_reg);
2211 	return 0;
2212 }
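
/* E.g. for a fully unknown scalar in r1,
 *
 *   BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
 *
 * the BPF_AND case above yields var_off (0x0; 0xff), hence umin = 0 and
 * umax = 0xff; the deduce/bound passes then recover the signed range
 * [0, 0xff] from the unsigned one.
 */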
2213 
2214 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
2215  * and var_off.
2216  */
2217 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2218 				   struct bpf_insn *insn)
2219 {
2220 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
2221 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2222 	u8 opcode = BPF_OP(insn->code);
2223 	int rc;
2224 
2225 	dst_reg = &regs[insn->dst_reg];
2226 	src_reg = NULL;
2227 	if (dst_reg->type != SCALAR_VALUE)
2228 		ptr_reg = dst_reg;
2229 	if (BPF_SRC(insn->code) == BPF_X) {
2230 		src_reg = &regs[insn->src_reg];
2231 		if (src_reg->type != SCALAR_VALUE) {
2232 			if (dst_reg->type != SCALAR_VALUE) {
2233 				/* Combining two pointers by any ALU op yields
2234 				 * an arbitrary scalar.
2235 				 */
2236 				if (!env->allow_ptr_leaks) {
2237 					verbose(env, "R%d pointer %s pointer prohibited\n",
2238 						insn->dst_reg,
2239 						bpf_alu_string[opcode >> 4]);
2240 					return -EACCES;
2241 				}
2242 				mark_reg_unknown(env, regs, insn->dst_reg);
2243 				return 0;
2244 			} else {
2245 				/* scalar += pointer
2246 				 * This is legal, but we have to reverse our
2247 				 * src/dest handling in computing the range
2248 				 */
2249 				rc = adjust_ptr_min_max_vals(env, insn,
2250 							     src_reg, dst_reg);
2251 				if (rc == -EACCES && env->allow_ptr_leaks) {
2252 					/* scalar += unknown scalar */
2253 					__mark_reg_unknown(&off_reg);
2254 					return adjust_scalar_min_max_vals(
2255 							env, insn,
2256 							dst_reg, off_reg);
2257 				}
2258 				return rc;
2259 			}
2260 		} else if (ptr_reg) {
2261 			/* pointer += scalar */
2262 			rc = adjust_ptr_min_max_vals(env, insn,
2263 						     dst_reg, src_reg);
2264 			if (rc == -EACCES && env->allow_ptr_leaks) {
2265 				/* unknown scalar += scalar */
2266 				__mark_reg_unknown(dst_reg);
2267 				return adjust_scalar_min_max_vals(
2268 						env, insn, dst_reg, *src_reg);
2269 			}
2270 			return rc;
2271 		}
2272 	} else {
2273 		/* Pretend the src is a reg with a known value, since we only
2274 		 * need to be able to read from this state.
2275 		 */
2276 		off_reg.type = SCALAR_VALUE;
2277 		__mark_reg_known(&off_reg, insn->imm);
2278 		src_reg = &off_reg;
2279 		if (ptr_reg) { /* pointer += K */
2280 			rc = adjust_ptr_min_max_vals(env, insn,
2281 						     ptr_reg, src_reg);
2282 			if (rc == -EACCES && env->allow_ptr_leaks) {
2283 				/* unknown scalar += K */
2284 				__mark_reg_unknown(dst_reg);
2285 				return adjust_scalar_min_max_vals(
2286 						env, insn, dst_reg, off_reg);
2287 			}
2288 			return rc;
2289 		}
2290 	}
2291 
2292 	/* Getting here implies both operands are SCALAR_VALUEs */
2293 	if (WARN_ON_ONCE(ptr_reg)) {
2294 		print_verifier_state(env, env->cur_state);
2295 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
2296 		return -EINVAL;
2297 	}
2298 	if (WARN_ON(!src_reg)) {
2299 		print_verifier_state(env, env->cur_state);
2300 		verbose(env, "verifier internal error: no src_reg\n");
2301 		return -EINVAL;
2302 	}
2303 	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
2304 }
2305 
2306 /* check validity of 32-bit and 64-bit arithmetic operations */
2307 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2308 {
2309 	struct bpf_reg_state *regs = cur_regs(env);
2310 	u8 opcode = BPF_OP(insn->code);
2311 	int err;
2312 
2313 	if (opcode == BPF_END || opcode == BPF_NEG) {
2314 		if (opcode == BPF_NEG) {
2315 			if (BPF_SRC(insn->code) != 0 ||
2316 			    insn->src_reg != BPF_REG_0 ||
2317 			    insn->off != 0 || insn->imm != 0) {
2318 				verbose(env, "BPF_NEG uses reserved fields\n");
2319 				return -EINVAL;
2320 			}
2321 		} else {
2322 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
2323 			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
2324 			    BPF_CLASS(insn->code) == BPF_ALU64) {
2325 				verbose(env, "BPF_END uses reserved fields\n");
2326 				return -EINVAL;
2327 			}
2328 		}
2329 
2330 		/* check src operand */
2331 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2332 		if (err)
2333 			return err;
2334 
2335 		if (is_pointer_value(env, insn->dst_reg)) {
2336 			verbose(env, "R%d pointer arithmetic prohibited\n",
2337 				insn->dst_reg);
2338 			return -EACCES;
2339 		}
2340 
2341 		/* check dest operand */
2342 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2343 		if (err)
2344 			return err;
2345 
2346 	} else if (opcode == BPF_MOV) {
2347 
2348 		if (BPF_SRC(insn->code) == BPF_X) {
2349 			if (insn->imm != 0 || insn->off != 0) {
2350 				verbose(env, "BPF_MOV uses reserved fields\n");
2351 				return -EINVAL;
2352 			}
2353 
2354 			/* check src operand */
2355 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2356 			if (err)
2357 				return err;
2358 		} else {
2359 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2360 				verbose(env, "BPF_MOV uses reserved fields\n");
2361 				return -EINVAL;
2362 			}
2363 		}
2364 
2365 		/* check dest operand */
2366 		err = check_reg_arg(env, insn->dst_reg, DST_OP);
2367 		if (err)
2368 			return err;
2369 
2370 		if (BPF_SRC(insn->code) == BPF_X) {
2371 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
2372 				/* case: R1 = R2
2373 				 * copy register state to dest reg
2374 				 */
2375 				regs[insn->dst_reg] = regs[insn->src_reg];
2376 				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
2377 			} else {
2378 				/* R1 = (u32) R2 */
2379 				if (is_pointer_value(env, insn->src_reg)) {
2380 					verbose(env,
2381 						"R%d partial copy of pointer\n",
2382 						insn->src_reg);
2383 					return -EACCES;
2384 				}
2385 				mark_reg_unknown(env, regs, insn->dst_reg);
2386 				/* high 32 bits are known zero. */
2387 				regs[insn->dst_reg].var_off = tnum_cast(
2388 						regs[insn->dst_reg].var_off, 4);
2389 				__update_reg_bounds(&regs[insn->dst_reg]);
2390 			}
2391 		} else {
2392 			/* case: R = imm
2393 			 * remember the value we stored into this reg
2394 			 */
2395 			regs[insn->dst_reg].type = SCALAR_VALUE;
2396 			__mark_reg_known(regs + insn->dst_reg, insn->imm);
2397 		}
2398 
2399 	} else if (opcode > BPF_END) {
2400 		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
2401 		return -EINVAL;
2402 
2403 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
2404 
2405 		if (BPF_SRC(insn->code) == BPF_X) {
2406 			if (insn->imm != 0 || insn->off != 0) {
2407 				verbose(env, "BPF_ALU uses reserved fields\n");
2408 				return -EINVAL;
2409 			}
2410 			/* check src1 operand */
2411 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
2412 			if (err)
2413 				return err;
2414 		} else {
2415 			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
2416 				verbose(env, "BPF_ALU uses reserved fields\n");
2417 				return -EINVAL;
2418 			}
2419 		}
2420 
2421 		/* check src2 operand */
2422 		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2423 		if (err)
2424 			return err;
2425 
2426 		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
2427 		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
2428 			verbose(env, "div by zero\n");
2429 			return -EINVAL;
2430 		}
2431 
2432 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2433 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2434 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2435 
2436 			if (insn->imm < 0 || insn->imm >= size) {
2437 				verbose(env, "invalid shift %d\n", insn->imm);
2438 				return -EINVAL;
2439 			}
2440 		}
2441 
2442 		/* check dest operand */
2443 		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
2444 		if (err)
2445 			return err;
2446 
2447 		return adjust_reg_min_max_vals(env, insn);
2448 	}
2449 
2450 	return 0;
2451 }
2452 
2453 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2454 				   struct bpf_reg_state *dst_reg,
2455 				   enum bpf_reg_type type,
2456 				   bool range_right_open)
2457 {
2458 	struct bpf_reg_state *regs = state->regs, *reg;
2459 	u16 new_range;
2460 	int i;
2461 
2462 	if (dst_reg->off < 0 ||
2463 	    (dst_reg->off == 0 && range_right_open))
2464 		/* This doesn't give us any range */
2465 		return;
2466 
2467 	if (dst_reg->umax_value > MAX_PACKET_OFF ||
2468 	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
2469 		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
2470 		 * than pkt_end, but that's because it's also less than pkt.
2471 		 */
2472 		return;
2473 
2474 	new_range = dst_reg->off;
2475 	if (range_right_open)
2476 		new_range--;
2477 
2478 	/* Examples for register markings:
2479 	 *
2480 	 * pkt_data in dst register:
2481 	 *
2482 	 *   r2 = r3;
2483 	 *   r2 += 8;
2484 	 *   if (r2 > pkt_end) goto <handle exception>
2485 	 *   <access okay>
2486 	 *
2487 	 *   r2 = r3;
2488 	 *   r2 += 8;
2489 	 *   if (r2 < pkt_end) goto <access okay>
2490 	 *   <handle exception>
2491 	 *
2492 	 *   Where:
2493 	 *     r2 == dst_reg, pkt_end == src_reg
2494 	 *     r2=pkt(id=n,off=8,r=0)
2495 	 *     r3=pkt(id=n,off=0,r=0)
2496 	 *
2497 	 * pkt_data in src register:
2498 	 *
2499 	 *   r2 = r3;
2500 	 *   r2 += 8;
2501 	 *   if (pkt_end >= r2) goto <access okay>
2502 	 *   <handle exception>
2503 	 *
2504 	 *   r2 = r3;
2505 	 *   r2 += 8;
2506 	 *   if (pkt_end <= r2) goto <handle exception>
2507 	 *   <access okay>
2508 	 *
2509 	 *   Where:
2510 	 *     pkt_end == dst_reg, r2 == src_reg
2511 	 *     r2=pkt(id=n,off=8,r=0)
2512 	 *     r3=pkt(id=n,off=0,r=0)
2513 	 *
2514 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2515 	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
2516 	 * and [r3, r3 + 8-1) respectively is safe to access depending on
2517 	 * the check.
2518 	 */
2519 
2520 	/* If our ids match, then we must have the same max_value.  And we
2521 	 * don't care about the other reg's fixed offset, since if it's too big
2522 	 * the range won't allow anything.
2523 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
2524 	 */
2525 	for (i = 0; i < MAX_BPF_REG; i++)
2526 		if (regs[i].type == type && regs[i].id == dst_reg->id)
2527 			/* keep the maximum range already checked */
2528 			regs[i].range = max(regs[i].range, new_range);
2529 
2530 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2531 		if (state->stack[i].slot_type[0] != STACK_SPILL)
2532 			continue;
2533 		reg = &state->stack[i].spilled_ptr;
2534 		if (reg->type == type && reg->id == dst_reg->id)
2535 			reg->range = max(reg->range, new_range);
2536 	}
2537 }
2538 
2539 /* Adjusts the register min/max values in the case that the dst_reg is the
2540  * variable register that we are working on, and src_reg is a constant or we're
2541  * simply doing a BPF_K check.
2542  * In JEQ/JNE cases we also adjust the var_off values.
2543  */
2544 static void reg_set_min_max(struct bpf_reg_state *true_reg,
2545 			    struct bpf_reg_state *false_reg, u64 val,
2546 			    u8 opcode)
2547 {
2548 	/* If the dst_reg is a pointer, we can't learn anything about its
2549 	 * variable offset from the compare (unless src_reg were a pointer into
2550 	 * the same object, but we don't bother with that).
2551 	 * Since false_reg and true_reg have the same type by construction, we
2552 	 * only need to check one of them for pointerness.
2553 	 */
2554 	if (__is_pointer_value(false, false_reg))
2555 		return;
2556 
2557 	switch (opcode) {
2558 	case BPF_JEQ:
2559 		/* If this is false then we know nothing Jon Snow, but if it is
2560 		 * true then we know for sure.
2561 		 */
2562 		__mark_reg_known(true_reg, val);
2563 		break;
2564 	case BPF_JNE:
2565 		/* If this is true we know nothing Jon Snow, but if it is false
2566 		 * we know the value for sure.
2567 		 */
2568 		__mark_reg_known(false_reg, val);
2569 		break;
2570 	case BPF_JGT:
2571 		false_reg->umax_value = min(false_reg->umax_value, val);
2572 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2573 		break;
2574 	case BPF_JSGT:
2575 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2576 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2577 		break;
2578 	case BPF_JLT:
2579 		false_reg->umin_value = max(false_reg->umin_value, val);
2580 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2581 		break;
2582 	case BPF_JSLT:
2583 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2584 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2585 		break;
2586 	case BPF_JGE:
2587 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2588 		true_reg->umin_value = max(true_reg->umin_value, val);
2589 		break;
2590 	case BPF_JSGE:
2591 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2592 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2593 		break;
2594 	case BPF_JLE:
2595 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2596 		true_reg->umax_value = min(true_reg->umax_value, val);
2597 		break;
2598 	case BPF_JSLE:
2599 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2600 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2601 		break;
2602 	default:
2603 		break;
2604 	}
2605 
2606 	__reg_deduce_bounds(false_reg);
2607 	__reg_deduce_bounds(true_reg);
2608 	/* We might have learned some bits from the bounds. */
2609 	__reg_bound_offset(false_reg);
2610 	__reg_bound_offset(true_reg);
2611 	/* Intersecting with the old var_off might have improved our bounds
2612 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2613 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2614 	 */
2615 	__update_reg_bounds(false_reg);
2616 	__update_reg_bounds(true_reg);
2617 }
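
/* E.g. for reg_set_min_max() with an otherwise unbounded scalar in r1,
 *
 *   BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 7, off),
 *
 * leaves the taken branch knowing umin_value = 8 and the fall-through
 * branch knowing umax_value = 7; the deduce/bound calls above may then
 * tighten var_off and the signed bounds from that.
 */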
2618 
2619 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
2620  * the variable reg.
2621  */
2622 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2623 				struct bpf_reg_state *false_reg, u64 val,
2624 				u8 opcode)
2625 {
2626 	if (__is_pointer_value(false, false_reg))
2627 		return;
2628 
2629 	switch (opcode) {
2630 	case BPF_JEQ:
2631 		/* If this is false then we know nothing Jon Snow, but if it is
2632 		 * true then we know for sure.
2633 		 */
2634 		__mark_reg_known(true_reg, val);
2635 		break;
2636 	case BPF_JNE:
2637 		/* If this is true we know nothing Jon Snow, but if it is false
2638 		 * we know the value for sure.
2639 		 */
2640 		__mark_reg_known(false_reg, val);
2641 		break;
2642 	case BPF_JGT:
2643 		true_reg->umax_value = min(true_reg->umax_value, val - 1);
2644 		false_reg->umin_value = max(false_reg->umin_value, val);
2645 		break;
2646 	case BPF_JSGT:
2647 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
2648 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
2649 		break;
2650 	case BPF_JLT:
2651 		true_reg->umin_value = max(true_reg->umin_value, val + 1);
2652 		false_reg->umax_value = min(false_reg->umax_value, val);
2653 		break;
2654 	case BPF_JSLT:
2655 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
2656 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
2657 		break;
2658 	case BPF_JGE:
2659 		true_reg->umax_value = min(true_reg->umax_value, val);
2660 		false_reg->umin_value = max(false_reg->umin_value, val + 1);
2661 		break;
2662 	case BPF_JSGE:
2663 		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
2664 		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
2665 		break;
2666 	case BPF_JLE:
2667 		true_reg->umin_value = max(true_reg->umin_value, val);
2668 		false_reg->umax_value = min(false_reg->umax_value, val - 1);
2669 		break;
2670 	case BPF_JSLE:
2671 		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
2672 		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
2673 		break;
2674 	default:
2675 		break;
2676 	}
2677 
2678 	__reg_deduce_bounds(false_reg);
2679 	__reg_deduce_bounds(true_reg);
2680 	/* We might have learned some bits from the bounds. */
2681 	__reg_bound_offset(false_reg);
2682 	__reg_bound_offset(true_reg);
2683 	/* Intersecting with the old var_off might have improved our bounds
2684 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2685 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2686 	 */
2687 	__update_reg_bounds(false_reg);
2688 	__update_reg_bounds(true_reg);
2689 }
2690 
2691 /* Regs are known to be equal, so intersect their min/max/var_off */
2692 static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
2693 				  struct bpf_reg_state *dst_reg)
2694 {
2695 	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
2696 							dst_reg->umin_value);
2697 	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
2698 							dst_reg->umax_value);
2699 	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
2700 							dst_reg->smin_value);
2701 	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
2702 							dst_reg->smax_value);
2703 	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
2704 							     dst_reg->var_off);
2705 	/* We might have learned new bounds from the var_off. */
2706 	__update_reg_bounds(src_reg);
2707 	__update_reg_bounds(dst_reg);
2708 	/* We might have learned something about the sign bit. */
2709 	__reg_deduce_bounds(src_reg);
2710 	__reg_deduce_bounds(dst_reg);
2711 	/* We might have learned some bits from the bounds. */
2712 	__reg_bound_offset(src_reg);
2713 	__reg_bound_offset(dst_reg);
2714 	/* Intersecting with the old var_off might have improved our bounds
2715 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2716 	 * then new var_off is (0; 0x7f...fc) which improves our umax.
2717 	 */
2718 	__update_reg_bounds(src_reg);
2719 	__update_reg_bounds(dst_reg);
2720 }
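
/* E.g. if src was known to be in [4, 100] and dst in [0, 10], a taken
 * JEQ narrows both to [4, 10], and their var_offs are intersected the
 * same way.
 */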
2721 
2722 static void reg_combine_min_max(struct bpf_reg_state *true_src,
2723 				struct bpf_reg_state *true_dst,
2724 				struct bpf_reg_state *false_src,
2725 				struct bpf_reg_state *false_dst,
2726 				u8 opcode)
2727 {
2728 	switch (opcode) {
2729 	case BPF_JEQ:
2730 		__reg_combine_min_max(true_src, true_dst);
2731 		break;
2732 	case BPF_JNE:
2733 		__reg_combine_min_max(false_src, false_dst);
2734 		break;
2735 	}
2736 }
2737 
2738 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
2739 			 bool is_null)
2740 {
2741 	struct bpf_reg_state *reg = &regs[regno];
2742 
2743 	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
2744 		/* Old offset (both fixed and variable parts) should
2745 		 * have been known-zero, because we don't allow pointer
2746 		 * arithmetic on pointers that might be NULL.
2747 		 */
2748 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
2749 				 !tnum_equals_const(reg->var_off, 0) ||
2750 				 reg->off)) {
2751 			__mark_reg_known_zero(reg);
2752 			reg->off = 0;
2753 		}
2754 		if (is_null) {
2755 			reg->type = SCALAR_VALUE;
2756 		} else if (reg->map_ptr->inner_map_meta) {
2757 			reg->type = CONST_PTR_TO_MAP;
2758 			reg->map_ptr = reg->map_ptr->inner_map_meta;
2759 		} else {
2760 			reg->type = PTR_TO_MAP_VALUE;
2761 		}
2762 		/* We don't need the id from this point onwards anymore, so
2763 		 * reset it now to give state pruning a better chance of
2764 		 * taking effect.
2765 		 */
2766 		reg->id = 0;
2767 	}
2768 }
2769 
2770 /* The logic is similar to find_good_pkt_pointers(), both could eventually
2771  * be folded together at some point.
2772  */
2773 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
2774 			  bool is_null)
2775 {
2776 	struct bpf_reg_state *regs = state->regs;
2777 	u32 id = regs[regno].id;
2778 	int i;
2779 
2780 	for (i = 0; i < MAX_BPF_REG; i++)
2781 		mark_map_reg(regs, i, id, is_null);
2782 
2783 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2784 		if (state->stack[i].slot_type[0] != STACK_SPILL)
2785 			continue;
2786 		mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
2787 	}
2788 }
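
/* The typical pattern this handles, as a sketch:
 *
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *   BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),
 *
 * On the fall-through branch r0 (and any register or spilled slot
 * sharing its id) becomes PTR_TO_MAP_VALUE, so the store is allowed; on
 * the taken branch it degrades to a scalar.
 */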
2789 
2790 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
2791 				   struct bpf_reg_state *dst_reg,
2792 				   struct bpf_reg_state *src_reg,
2793 				   struct bpf_verifier_state *this_branch,
2794 				   struct bpf_verifier_state *other_branch)
2795 {
2796 	if (BPF_SRC(insn->code) != BPF_X)
2797 		return false;
2798 
2799 	switch (BPF_OP(insn->code)) {
2800 	case BPF_JGT:
2801 		if ((dst_reg->type == PTR_TO_PACKET &&
2802 		     src_reg->type == PTR_TO_PACKET_END) ||
2803 		    (dst_reg->type == PTR_TO_PACKET_META &&
2804 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2805 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
2806 			find_good_pkt_pointers(this_branch, dst_reg,
2807 					       dst_reg->type, false);
2808 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2809 			    src_reg->type == PTR_TO_PACKET) ||
2810 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2811 			    src_reg->type == PTR_TO_PACKET_META)) {
2812 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
2813 			find_good_pkt_pointers(other_branch, src_reg,
2814 					       src_reg->type, true);
2815 		} else {
2816 			return false;
2817 		}
2818 		break;
2819 	case BPF_JLT:
2820 		if ((dst_reg->type == PTR_TO_PACKET &&
2821 		     src_reg->type == PTR_TO_PACKET_END) ||
2822 		    (dst_reg->type == PTR_TO_PACKET_META &&
2823 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2824 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
2825 			find_good_pkt_pointers(other_branch, dst_reg,
2826 					       dst_reg->type, true);
2827 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2828 			    src_reg->type == PTR_TO_PACKET) ||
2829 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2830 			    src_reg->type == PTR_TO_PACKET_META)) {
2831 			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
2832 			find_good_pkt_pointers(this_branch, src_reg,
2833 					       src_reg->type, false);
2834 		} else {
2835 			return false;
2836 		}
2837 		break;
2838 	case BPF_JGE:
2839 		if ((dst_reg->type == PTR_TO_PACKET &&
2840 		     src_reg->type == PTR_TO_PACKET_END) ||
2841 		    (dst_reg->type == PTR_TO_PACKET_META &&
2842 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2843 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
2844 			find_good_pkt_pointers(this_branch, dst_reg,
2845 					       dst_reg->type, true);
2846 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2847 			    src_reg->type == PTR_TO_PACKET) ||
2848 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2849 			    src_reg->type == PTR_TO_PACKET_META)) {
2850 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
2851 			find_good_pkt_pointers(other_branch, src_reg,
2852 					       src_reg->type, false);
2853 		} else {
2854 			return false;
2855 		}
2856 		break;
2857 	case BPF_JLE:
2858 		if ((dst_reg->type == PTR_TO_PACKET &&
2859 		     src_reg->type == PTR_TO_PACKET_END) ||
2860 		    (dst_reg->type == PTR_TO_PACKET_META &&
2861 		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
2862 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
2863 			find_good_pkt_pointers(other_branch, dst_reg,
2864 					       dst_reg->type, false);
2865 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
2866 			    src_reg->type == PTR_TO_PACKET) ||
2867 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2868 			    src_reg->type == PTR_TO_PACKET_META)) {
2869 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
2870 			find_good_pkt_pointers(this_branch, src_reg,
2871 					       src_reg->type, true);
2872 		} else {
2873 			return false;
2874 		}
2875 		break;
2876 	default:
2877 		return false;
2878 	}
2879 
2880 	return true;
2881 }
2882 
2883 static int check_cond_jmp_op(struct bpf_verifier_env *env,
2884 			     struct bpf_insn *insn, int *insn_idx)
2885 {
2886 	struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
2887 	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
2888 	u8 opcode = BPF_OP(insn->code);
2889 	int err;
2890 
2891 	if (opcode > BPF_JSLE) {
2892 		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
2893 		return -EINVAL;
2894 	}
2895 
2896 	if (BPF_SRC(insn->code) == BPF_X) {
2897 		if (insn->imm != 0) {
2898 			verbose(env, "BPF_JMP uses reserved fields\n");
2899 			return -EINVAL;
2900 		}
2901 
2902 		/* check src1 operand */
2903 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
2904 		if (err)
2905 			return err;
2906 
2907 		if (is_pointer_value(env, insn->src_reg)) {
2908 			verbose(env, "R%d pointer comparison prohibited\n",
2909 				insn->src_reg);
2910 			return -EACCES;
2911 		}
2912 	} else {
2913 		if (insn->src_reg != BPF_REG_0) {
2914 			verbose(env, "BPF_JMP uses reserved fields\n");
2915 			return -EINVAL;
2916 		}
2917 	}
2918 
2919 	/* check src2 operand */
2920 	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
2921 	if (err)
2922 		return err;
2923 
2924 	dst_reg = &regs[insn->dst_reg];
2925 
2926 	/* detect if R == 0 where R was initialized to zero earlier */
2927 	if (BPF_SRC(insn->code) == BPF_K &&
2928 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2929 	    dst_reg->type == SCALAR_VALUE &&
2930 	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
2931 		if (opcode == BPF_JEQ) {
2932 			/* if (imm == imm) goto pc+off;
2933 			 * only follow the goto, ignore fall-through
2934 			 */
2935 			*insn_idx += insn->off;
2936 			return 0;
2937 		} else {
2938 			/* if (imm != imm) goto pc+off;
2939 			 * only follow fall-through branch, since
2940 			 * that's where the program will go
2941 			 */
2942 			return 0;
2943 		}
2944 	}
2945 
2946 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
2947 	if (!other_branch)
2948 		return -EFAULT;
2949 
2950 	/* detect if we are comparing against a constant value so we can adjust
2951 	 * our min/max values for our dst register.
2952 	 * this is only legit if both are scalars (or pointers to the same
2953 	 * object, I suppose, but we don't support that right now), because
2954 	 * otherwise the different base pointers mean the offsets aren't
2955 	 * comparable.
2956 	 */
2957 	if (BPF_SRC(insn->code) == BPF_X) {
2958 		if (dst_reg->type == SCALAR_VALUE &&
2959 		    regs[insn->src_reg].type == SCALAR_VALUE) {
2960 			if (tnum_is_const(regs[insn->src_reg].var_off))
2961 				reg_set_min_max(&other_branch->regs[insn->dst_reg],
2962 						dst_reg, regs[insn->src_reg].var_off.value,
2963 						opcode);
2964 			else if (tnum_is_const(dst_reg->var_off))
2965 				reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
2966 						    &regs[insn->src_reg],
2967 						    dst_reg->var_off.value, opcode);
2968 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
2969 				/* Comparing for equality, we can combine knowledge */
2970 				reg_combine_min_max(&other_branch->regs[insn->src_reg],
2971 						    &other_branch->regs[insn->dst_reg],
2972 						    &regs[insn->src_reg],
2973 						    &regs[insn->dst_reg], opcode);
2974 		}
2975 	} else if (dst_reg->type == SCALAR_VALUE) {
2976 		reg_set_min_max(&other_branch->regs[insn->dst_reg],
2977 					dst_reg, insn->imm, opcode);
2978 	}
2979 
2980 	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
2981 	if (BPF_SRC(insn->code) == BPF_K &&
2982 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
2983 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2984 		/* Mark all identical map registers in each branch as either
2985 		 * safe or unknown, depending on the R == 0 or R != 0 condition.
2986 		 */
2987 		mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
2988 		mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
2989 	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
2990 					   this_branch, other_branch) &&
2991 		   is_pointer_value(env, insn->dst_reg)) {
2992 		verbose(env, "R%d pointer comparison prohibited\n",
2993 			insn->dst_reg);
2994 		return -EACCES;
2995 	}
2996 	if (env->log.level)
2997 		print_verifier_state(env, this_branch);
2998 	return 0;
2999 }
3000 
3001 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
3002 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
3003 {
3004 	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
3005 
3006 	return (struct bpf_map *) (unsigned long) imm64;
3007 }
3008 
3009 /* verify BPF_LD_IMM64 instruction */
3010 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
3011 {
3012 	struct bpf_reg_state *regs = cur_regs(env);
3013 	int err;
3014 
3015 	if (BPF_SIZE(insn->code) != BPF_DW) {
3016 		verbose(env, "invalid BPF_LD_IMM insn\n");
3017 		return -EINVAL;
3018 	}
3019 	if (insn->off != 0) {
3020 		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
3021 		return -EINVAL;
3022 	}
3023 
3024 	err = check_reg_arg(env, insn->dst_reg, DST_OP);
3025 	if (err)
3026 		return err;
3027 
3028 	if (insn->src_reg == 0) {
3029 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
3030 
3031 		regs[insn->dst_reg].type = SCALAR_VALUE;
3032 		__mark_reg_known(&regs[insn->dst_reg], imm);
3033 		return 0;
3034 	}
3035 
3036 	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
3037 	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
3038 
3039 	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
3040 	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
3041 	return 0;
3042 }
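
/* The two accepted forms, as a sketch (map_fd stands in for a real map
 * file descriptor):
 *
 *   BPF_LD_IMM64(BPF_REG_1, 0x12345678deadbeefULL),  src_reg == 0
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd),       src_reg == BPF_PSEUDO_MAP_FD
 *
 * The first leaves r1 as a known scalar; in the second,
 * replace_map_fd_with_map_ptr() has already put the map pointer into the
 * two 32-bit imm halves, so r1 becomes CONST_PTR_TO_MAP.
 */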
3043 
3044 static bool may_access_skb(enum bpf_prog_type type)
3045 {
3046 	switch (type) {
3047 	case BPF_PROG_TYPE_SOCKET_FILTER:
3048 	case BPF_PROG_TYPE_SCHED_CLS:
3049 	case BPF_PROG_TYPE_SCHED_ACT:
3050 		return true;
3051 	default:
3052 		return false;
3053 	}
3054 }
3055 
3056 /* verify safety of LD_ABS|LD_IND instructions:
3057  * - they can only appear in programs where ctx == skb
3058  * - since they are wrappers of function calls, they scratch R1-R5 registers,
3059  *   preserve R6-R9, and store return value into R0
3060  *
3061  * Implicit input:
3062  *   ctx == skb == R6 == CTX
3063  *
3064  * Explicit input:
3065  *   SRC == any register
3066  *   IMM == 32-bit immediate
3067  *
3068  * Output:
3069  *   R0 - 8/16/32-bit skb data converted to cpu endianness
3070  */
3071 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
3072 {
3073 	struct bpf_reg_state *regs = cur_regs(env);
3074 	u8 mode = BPF_MODE(insn->code);
3075 	int i, err;
3076 
3077 	if (!may_access_skb(env->prog->type)) {
3078 		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
3079 		return -EINVAL;
3080 	}
3081 
3082 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
3083 	    BPF_SIZE(insn->code) == BPF_DW ||
3084 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
3085 		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
3086 		return -EINVAL;
3087 	}
3088 
3089 	/* check whether implicit source operand (register R6) is readable */
3090 	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
3091 	if (err)
3092 		return err;
3093 
3094 	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
3095 		verbose(env,
3096 			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
3097 		return -EINVAL;
3098 	}
3099 
3100 	if (mode == BPF_IND) {
3101 		/* check explicit source operand */
3102 		err = check_reg_arg(env, insn->src_reg, SRC_OP);
3103 		if (err)
3104 			return err;
3105 	}
3106 
3107 	/* reset caller saved regs to unreadable */
3108 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
3109 		mark_reg_not_init(env, regs, caller_saved[i]);
3110 		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3111 	}
3112 
3113 	/* mark destination R0 register as readable, since it contains
3114 	 * the value fetched from the packet.
3115 	 * Already marked as written above.
3116 	 */
3117 	mark_reg_unknown(env, regs, BPF_REG_0);
3118 	return 0;
3119 }
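
/* E.g. in a socket filter (assuming r6 still holds the ctx/skb):
 *
 *   BPF_LD_ABS(BPF_H, 12),
 *
 * loads the 16-bit ethertype at skb offset 12 into r0, converted to cpu
 * endianness, and scratches r1-r5 exactly as modelled above.
 */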
3120 
3121 static int check_return_code(struct bpf_verifier_env *env)
3122 {
3123 	struct bpf_reg_state *reg;
3124 	struct tnum range = tnum_range(0, 1);
3125 
3126 	switch (env->prog->type) {
3127 	case BPF_PROG_TYPE_CGROUP_SKB:
3128 	case BPF_PROG_TYPE_CGROUP_SOCK:
3129 	case BPF_PROG_TYPE_SOCK_OPS:
3130 		break;
3131 	default:
3132 		return 0;
3133 	}
3134 
3135 	reg = cur_regs(env) + BPF_REG_0;
3136 	if (reg->type != SCALAR_VALUE) {
3137 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
3138 			reg_type_str[reg->type]);
3139 		return -EINVAL;
3140 	}
3141 
3142 	if (!tnum_in(range, reg->var_off)) {
3143 		verbose(env, "At program exit the register R0 ");
3144 		if (!tnum_is_unknown(reg->var_off)) {
3145 			char tn_buf[48];
3146 
3147 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3148 			verbose(env, "has value %s", tn_buf);
3149 		} else {
3150 			verbose(env, "has unknown scalar value");
3151 		}
3152 		verbose(env, " should have been 0 or 1\n");
3153 		return -EINVAL;
3154 	}
3155 	return 0;
3156 }
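
/* E.g. a BPF_PROG_TYPE_CGROUP_SKB program ending in
 *
 *   BPF_MOV64_IMM(BPF_REG_0, 1),
 *   BPF_EXIT_INSN(),
 *
 * passes, while one ending with r0 = 2 fails the tnum_in() check above,
 * since 2 is outside tnum_range(0, 1).
 */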
3157 
3158 /* non-recursive DFS pseudo-code
3159  * 1  procedure DFS-iterative(G,v):
3160  * 2      label v as discovered
3161  * 3      let S be a stack
3162  * 4      S.push(v)
3163  * 5      while S is not empty
3164  * 6            t <- S.pop()
3165  * 7            if t is what we're looking for:
3166  * 8                return t
3167  * 9            for all edges e in G.adjacentEdges(t) do
3168  * 10               if edge e is already labelled
3169  * 11                   continue with the next edge
3170  * 12               w <- G.adjacentVertex(t,e)
3171  * 13               if vertex w is not discovered and not explored
3172  * 14                   label e as tree-edge
3173  * 15                   label w as discovered
3174  * 16                   S.push(w)
3175  * 17                   continue at 5
3176  * 18               else if vertex w is discovered
3177  * 19                   label e as back-edge
3178  * 20               else
3179  * 21                   // vertex w is explored
3180  * 22                   label e as forward- or cross-edge
3181  * 23           label t as explored
3182  * 24           S.pop()
3183  *
3184  * convention:
3185  * 0x10 - discovered
3186  * 0x11 - discovered and fall-through edge labelled
3187  * 0x12 - discovered and fall-through and branch edges labelled
3188  * 0x20 - explored
3189  */
3190 
3191 enum {
3192 	DISCOVERED = 0x10,
3193 	EXPLORED = 0x20,
3194 	FALLTHROUGH = 1,
3195 	BRANCH = 2,
3196 };
3197 
3198 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
3199 
3200 static int *insn_stack;	/* stack of insns to process */
3201 static int cur_stack;	/* current stack index */
3202 static int *insn_state;
3203 
3204 /* t, w, e - match pseudo-code above:
3205  * t - index of current instruction
3206  * w - next instruction
3207  * e - edge
3208  */
3209 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
3210 {
3211 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
3212 		return 0;
3213 
3214 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
3215 		return 0;
3216 
3217 	if (w < 0 || w >= env->prog->len) {
3218 		verbose(env, "jump out of range from insn %d to %d\n", t, w);
3219 		return -EINVAL;
3220 	}
3221 
3222 	if (e == BRANCH)
3223 		/* mark branch target for state pruning */
3224 		env->explored_states[w] = STATE_LIST_MARK;
3225 
3226 	if (insn_state[w] == 0) {
3227 		/* tree-edge */
3228 		insn_state[t] = DISCOVERED | e;
3229 		insn_state[w] = DISCOVERED;
3230 		if (cur_stack >= env->prog->len)
3231 			return -E2BIG;
3232 		insn_stack[cur_stack++] = w;
3233 		return 1;
3234 	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
3235 		verbose(env, "back-edge from insn %d to %d\n", t, w);
3236 		return -EINVAL;
3237 	} else if (insn_state[w] == EXPLORED) {
3238 		/* forward- or cross-edge */
3239 		insn_state[t] = DISCOVERED | e;
3240 	} else {
3241 		verbose(env, "insn state internal bug\n");
3242 		return -EFAULT;
3243 	}
3244 	return 0;
3245 }
3246 
3247 /* non-recursive depth-first-search to detect loops in BPF program
3248  * loop == back-edge in directed graph
3249  */
3250 static int check_cfg(struct bpf_verifier_env *env)
3251 {
3252 	struct bpf_insn *insns = env->prog->insnsi;
3253 	int insn_cnt = env->prog->len;
3254 	int ret = 0;
3255 	int i, t;
3256 
3257 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3258 	if (!insn_state)
3259 		return -ENOMEM;
3260 
3261 	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
3262 	if (!insn_stack) {
3263 		kfree(insn_state);
3264 		return -ENOMEM;
3265 	}
3266 
3267 	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
3268 	insn_stack[0] = 0; /* 0 is the first instruction */
3269 	cur_stack = 1;
3270 
3271 peek_stack:
3272 	if (cur_stack == 0)
3273 		goto check_state;
3274 	t = insn_stack[cur_stack - 1];
3275 
3276 	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
3277 		u8 opcode = BPF_OP(insns[t].code);
3278 
3279 		if (opcode == BPF_EXIT) {
3280 			goto mark_explored;
3281 		} else if (opcode == BPF_CALL) {
3282 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3283 			if (ret == 1)
3284 				goto peek_stack;
3285 			else if (ret < 0)
3286 				goto err_free;
3287 			if (t + 1 < insn_cnt)
3288 				env->explored_states[t + 1] = STATE_LIST_MARK;
3289 		} else if (opcode == BPF_JA) {
3290 			if (BPF_SRC(insns[t].code) != BPF_K) {
3291 				ret = -EINVAL;
3292 				goto err_free;
3293 			}
3294 			/* unconditional jump with single edge */
3295 			ret = push_insn(t, t + insns[t].off + 1,
3296 					FALLTHROUGH, env);
3297 			if (ret == 1)
3298 				goto peek_stack;
3299 			else if (ret < 0)
3300 				goto err_free;
3301 			/* tell verifier to check for equivalent states
3302 			 * after every call and jump
3303 			 */
3304 			if (t + 1 < insn_cnt)
3305 				env->explored_states[t + 1] = STATE_LIST_MARK;
3306 		} else {
3307 			/* conditional jump with two edges */
3308 			env->explored_states[t] = STATE_LIST_MARK;
3309 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
3310 			if (ret == 1)
3311 				goto peek_stack;
3312 			else if (ret < 0)
3313 				goto err_free;
3314 
3315 			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
3316 			if (ret == 1)
3317 				goto peek_stack;
3318 			else if (ret < 0)
3319 				goto err_free;
3320 		}
3321 	} else {
3322 		/* all other non-branch instructions with single
3323 		 * fall-through edge
3324 		 */
3325 		ret = push_insn(t, t + 1, FALLTHROUGH, env);
3326 		if (ret == 1)
3327 			goto peek_stack;
3328 		else if (ret < 0)
3329 			goto err_free;
3330 	}
3331 
3332 mark_explored:
3333 	insn_state[t] = EXPLORED;
3334 	if (cur_stack-- <= 0) {
3335 		verbose(env, "pop stack internal bug\n");
3336 		ret = -EFAULT;
3337 		goto err_free;
3338 	}
3339 	goto peek_stack;
3340 
3341 check_state:
3342 	for (i = 0; i < insn_cnt; i++) {
3343 		if (insn_state[i] != EXPLORED) {
3344 			verbose(env, "unreachable insn %d\n", i);
3345 			ret = -EINVAL;
3346 			goto err_free;
3347 		}
3348 	}
3349 	ret = 0; /* cfg looks good */
3350 
3351 err_free:
3352 	kfree(insn_state);
3353 	kfree(insn_stack);
3354 	return ret;
3355 }
3356 
3357 /* check %cur's range satisfies %old's */
3358 static bool range_within(struct bpf_reg_state *old,
3359 			 struct bpf_reg_state *cur)
3360 {
3361 	return old->umin_value <= cur->umin_value &&
3362 	       old->umax_value >= cur->umax_value &&
3363 	       old->smin_value <= cur->smin_value &&
3364 	       old->smax_value >= cur->smax_value;
3365 }
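
/* Illustration (not part of the kernel build): range_within() on concrete
 * bounds.  struct bounds and within() are stand-ins for bpf_reg_state and
 * the function above, reduced to the four range fields.
 */
#if 0	/* example only */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct bounds { uint64_t umin, umax; int64_t smin, smax; };

/* same containment test as range_within() */
static bool within(const struct bounds *old, const struct bounds *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->smin <= cur->smin && old->smax >= cur->smax;
}

int main(void)
{
	struct bounds old = { .umin = 0, .umax = 10, .smin = 0, .smax = 10 };
	struct bounds cur = { .umin = 2, .umax = 8,  .smin = 2, .smax = 8  };

	assert(within(&old, &cur));  /* cur inside old: pruning is safe   */
	assert(!within(&cur, &old)); /* old can be 0 or 10: not contained */
	return 0;
}
#endif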
3366 
3367 /* Maximum number of register states that can exist at once */
3368 #define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
3369 struct idpair {
3370 	u32 old;
3371 	u32 cur;
3372 };
3373 
3374 /* If in the old state two registers had the same id, then they need to have
3375  * the same id in the new state as well.  But that id could be different from
3376  * the old state, so we need to track the mapping from old to new ids.
3377  * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
3378  * regs with old id 5 must also have new id 9 for the new state to be safe.  But
3379  * regs with a different old id could still have new id 9, we don't care about
3380  * that.
3381  * So we look through our idmap to see if this old id has been seen before.  If
3382  * so, we require the new id to match; otherwise, we add the id pair to the map.
3383  */
3384 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3385 {
3386 	unsigned int i;
3387 
3388 	for (i = 0; i < ID_MAP_SIZE; i++) {
3389 		if (!idmap[i].old) {
3390 			/* Reached an empty slot; haven't seen this id before */
3391 			idmap[i].old = old_id;
3392 			idmap[i].cur = cur_id;
3393 			return true;
3394 		}
3395 		if (idmap[i].old == old_id)
3396 			return idmap[i].cur == cur_id;
3397 	}
3398 	/* We ran out of idmap slots, which should be impossible */
3399 	WARN_ON_ONCE(1);
3400 	return false;
3401 }
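
/* Illustration (not part of the kernel build): how the old->cur id map
 * behaves on concrete ids.  map_ids() restates check_ids() in userspace;
 * the names and id values are made up.
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdio.h>

struct pair { unsigned int old, cur; };

static bool map_ids(unsigned int old_id, unsigned int cur_id,
		    struct pair *map, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!map[i].old) {		/* first time we see old_id */
			map[i].old = old_id;
			map[i].cur = cur_id;
			return true;
		}
		if (map[i].old == old_id)	/* must stay consistent */
			return map[i].cur == cur_id;
	}
	return false;				/* out of slots */
}

int main(void)
{
	struct pair map[4] = { { 0, 0 } };

	printf("%d\n", map_ids(5, 9,  map, 4));	/* 1: records 5 -> 9      */
	printf("%d\n", map_ids(7, 9,  map, 4));	/* 1: cur id 9 may recur  */
	printf("%d\n", map_ids(5, 12, map, 4));	/* 0: old id 5 is bound   */
	return 0;
}
#endif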
3402 
3403 /* Returns true if (rold safe implies rcur safe) */
3404 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3405 		    struct idpair *idmap)
3406 {
3407 	if (!(rold->live & REG_LIVE_READ))
3408 		/* explored state didn't use this */
3409 		return true;
3410 
3411 	if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
3412 		return true;
3413 
3414 	if (rold->type == NOT_INIT)
3415 		/* explored state can't have used this */
3416 		return true;
3417 	if (rcur->type == NOT_INIT)
3418 		return false;
3419 	switch (rold->type) {
3420 	case SCALAR_VALUE:
3421 		if (rcur->type == SCALAR_VALUE) {
3422 			/* new val must satisfy old val knowledge */
3423 			return range_within(rold, rcur) &&
3424 			       tnum_in(rold->var_off, rcur->var_off);
3425 		} else {
3426 			/* if we knew anything about the old value, we're not
3427 			 * equal, because we can't know anything about the
3428 			 * scalar value of the pointer in the new value.
3429 			 */
3430 			return rold->umin_value == 0 &&
3431 			       rold->umax_value == U64_MAX &&
3432 			       rold->smin_value == S64_MIN &&
3433 			       rold->smax_value == S64_MAX &&
3434 			       tnum_is_unknown(rold->var_off);
3435 		}
3436 	case PTR_TO_MAP_VALUE:
3437 		/* If the new min/max/var_off satisfy the old ones and
3438 		 * everything else matches, we are OK.
3439 		 * We don't care about the 'id' value, because nothing
3440 		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
3441 		 */
3442 		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
3443 		       range_within(rold, rcur) &&
3444 		       tnum_in(rold->var_off, rcur->var_off);
3445 	case PTR_TO_MAP_VALUE_OR_NULL:
3446 		/* a PTR_TO_MAP_VALUE could be safe to use as a
3447 		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
3448 		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
3449 		 * checked, doing so could have affected others with the same
3450 		 * id, and we can't check for that because we lost the id when
3451 		 * we converted to a PTR_TO_MAP_VALUE.
3452 		 */
3453 		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
3454 			return false;
3455 		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
3456 			return false;
3457 		/* Check our ids match any regs they're supposed to */
3458 		return check_ids(rold->id, rcur->id, idmap);
3459 	case PTR_TO_PACKET_META:
3460 	case PTR_TO_PACKET:
3461 		if (rcur->type != rold->type)
3462 			return false;
3463 		/* We must have at least as much range as the old ptr
3464 		 * did, so that any accesses which were safe before are
3465 		 * still safe.  This is true even if old range < old off,
3466 		 * since someone could have accessed through (ptr - k), or
3467 		 * even done ptr -= k in a register, to get a safe access.
3468 		 */
3469 		if (rold->range > rcur->range)
3470 			return false;
3471 		/* If the offsets don't match, we can't trust our alignment;
3472 		 * nor can we be sure that we won't fall out of range.
3473 		 */
3474 		if (rold->off != rcur->off)
3475 			return false;
3476 		/* id relations must be preserved */
3477 		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
3478 			return false;
3479 		/* new val must satisfy old val knowledge */
3480 		return range_within(rold, rcur) &&
3481 		       tnum_in(rold->var_off, rcur->var_off);
3482 	case PTR_TO_CTX:
3483 	case CONST_PTR_TO_MAP:
3484 	case PTR_TO_STACK:
3485 	case PTR_TO_PACKET_END:
3486 		/* Only valid matches are exact, which memcmp() above
3487 		 * would have accepted
3488 		 */
3489 	default:
3490 		/* Don't know what's going on, just say it's not safe */
3491 		return false;
3492 	}
3493 
3494 	/* Shouldn't get here; if we do, say it's not safe */
3495 	WARN_ON_ONCE(1);
3496 	return false;
3497 }
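
/* Illustration (not part of the kernel build): the tnum containment that
 * regsafe() relies on for SCALAR_VALUE.  This is a userspace restatement
 * of tnum_in() from kernel/bpf/tnum.c; names and test values here are
 * made up for the example.
 */
#if 0	/* example only */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* a tnum tracks each bit as known-0, known-1 or unknown (mask bit set) */
struct xtnum { uint64_t value; uint64_t mask; };

/* true iff every concrete value b can take, a can also take */
static bool xtnum_in(struct xtnum a, struct xtnum b)
{
	if (b.mask & ~a.mask)		/* b unknown where a is known */
		return false;
	b.value &= ~a.mask;
	return a.value == b.value;	/* known bits must agree */
}

int main(void)
{
	struct xtnum a = { .value = 0x0, .mask = 0xff }; /* any of 0..255 */
	struct xtnum b = { .value = 0x4, .mask = 0x3 };  /* any of 4..7   */

	assert(xtnum_in(a, b));		/* 4..7 is contained in 0..255 */
	assert(!xtnum_in(b, a));	/* 0..255 is not within 4..7   */
	return 0;
}
#endif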
3498 
3499 static bool stacksafe(struct bpf_verifier_state *old,
3500 		      struct bpf_verifier_state *cur,
3501 		      struct idpair *idmap)
3502 {
3503 	int i, spi;
3504 
3505 	/* if explored stack has more populated slots than current stack
3506 	 * such stacks are not equivalent
3507 	 */
3508 	if (old->allocated_stack > cur->allocated_stack)
3509 		return false;
3510 
3511 	/* walk slots of the explored stack and ignore any additional
3512 	 * slots in the current stack, since explored(safe) state
3513 	 * didn't use them
3514 	 */
3515 	for (i = 0; i < old->allocated_stack; i++) {
3516 		spi = i / BPF_REG_SIZE;
3517 
3518 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
3519 			continue;
3520 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
3521 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
3522 			/* Ex: old explored (safe) state has STACK_SPILL in
3523 			 * this stack slot, but current has STACK_MISC ->
3524 			 * these verifier states are not equivalent,
3525 			 * return false to continue verification of this path
3526 			 */
3527 			return false;
3528 		if (i % BPF_REG_SIZE)
3529 			continue;
3530 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
3531 			continue;
3532 		if (!regsafe(&old->stack[spi].spilled_ptr,
3533 			     &cur->stack[spi].spilled_ptr,
3534 			     idmap))
3535 			/* when explored and current stack slot are both storing
3536 			 * spilled registers, check that the stored pointer
3537 			 * types are the same as well.
3538 			 * Ex: explored safe path could have stored
3539 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
3540 			 * but current path has stored:
3541 			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
3542 			 * such verifier states are not equivalent.
3543 			 * return false to continue verification of this path
3544 			 */
3545 			return false;
3546 	}
3547 	return true;
3548 }
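
/* Illustration (not part of the kernel build): the byte/slot indexing used
 * by stacksafe() above, for a hypothetical 16-byte allocated_stack:
 *
 *   byte i          : 0 1 2 3 4 5 6 7   8  9 10 11 12 13 14 15
 *   spi = i / 8     : 0 0 0 0 0 0 0 0   1  1  1  1  1  1  1  1
 *   slot_type[i%8]  : 8 x STACK_SPILL   8 x STACK_MISC
 *
 * Only the first slot stores a spilled register, so regsafe() is consulted
 * for stack[0].spilled_ptr, while the second slot is compared byte by byte.
 */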
3549 
3550 /* compare two verifier states
3551  *
3552  * all states stored in state_list are known to be valid, since
3553  * verifier reached 'bpf_exit' instruction through them
3554  *
3555  * this function is called when the verifier explores different branches of
3556  * execution popped from the state stack. If it sees an old state that has
3557  * a more strict register state and a more strict stack state, then this
3558  * execution branch doesn't need to be explored further, since the verifier
3559  * already concluded that the more strict state leads to a valid finish.
3560  *
3561  * Therefore two states are equivalent if register state is more conservative
3562  * and explored stack state is more conservative than the current one.
3563  * Example:
3564  *       explored                   current
3565  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
3566  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
3567  *
3568  * In other words, if the current stack state (the one being explored) has
3569  * more valid slots than the old one that already passed validation, the
3570  * verifier can stop exploring and conclude that the current state is valid too.
3571  *
3572  * Similarly with registers. If the explored state marks a register type as
3573  * invalid whereas the register type in the current state is meaningful, the
3574  * current state will reach the 'bpf_exit' instruction safely.
3575  */
3576 static bool states_equal(struct bpf_verifier_env *env,
3577 			 struct bpf_verifier_state *old,
3578 			 struct bpf_verifier_state *cur)
3579 {
3580 	struct idpair *idmap;
3581 	bool ret = false;
3582 	int i;
3583 
3584 	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
3585 	/* If we failed to allocate the idmap, just say it's not safe */
3586 	if (!idmap)
3587 		return false;
3588 
3589 	for (i = 0; i < MAX_BPF_REG; i++) {
3590 		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
3591 			goto out_free;
3592 	}
3593 
3594 	if (!stacksafe(old, cur, idmap))
3595 		goto out_free;
3596 	ret = true;
3597 out_free:
3598 	kfree(idmap);
3599 	return ret;
3600 }
3601 
3602 /* A write screens off any subsequent reads; but write marks come from the
3603  * straight-line code between a state and its parent.  When we arrive at a
3604  * jump target (in the first iteration of the propagate_liveness() loop),
3605  * we didn't arrive by the straight-line code, so read marks in state must
3606  * propagate to parent regardless of state's write marks.
3607  */
3608 static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3609 				  struct bpf_verifier_state *parent)
3610 {
3611 	bool writes = parent == state->parent; /* Observe write marks */
3612 	bool touched = false; /* any changes made? */
3613 	int i;
3614 
3615 	if (!parent)
3616 		return touched;
3617 	/* Propagate read liveness of registers... */
3618 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
3619 	/* We don't need to worry about FP liveness because it's read-only */
3620 	for (i = 0; i < BPF_REG_FP; i++) {
3621 		if (parent->regs[i].live & REG_LIVE_READ)
3622 			continue;
3623 		if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
3624 			continue;
3625 		if (state->regs[i].live & REG_LIVE_READ) {
3626 			parent->regs[i].live |= REG_LIVE_READ;
3627 			touched = true;
3628 		}
3629 	}
3630 	/* ... and stack slots */
3631 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
3632 		    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3633 		if (parent->stack[i].slot_type[0] != STACK_SPILL)
3634 			continue;
3635 		if (state->stack[i].slot_type[0] != STACK_SPILL)
3636 			continue;
3637 		if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
3638 			continue;
3639 		if (writes &&
3640 		    (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
3641 			continue;
3642 		if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
3643 			parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
3644 			touched = true;
3645 		}
3646 	}
3647 	return touched;
3648 }
3649 
3650 /* "parent" is "a state from which we reach the current state", but initially
3651  * it is not the state->parent (i.e. "the state whose straight-line code leads
3652  * to the current state"), instead it is the state that happened to arrive at
3653  * a (prunable) equivalent of the current state.  See comment above
3654  * do_propagate_liveness() for consequences of this.
3655  * This function is just a more efficient way of calling mark_reg_read() or
3656  * mark_stack_slot_read() on each reg in "parent" that is read in "state",
3657  * though it requires that parent != state->parent in the call arguments.
3658  */
3659 static void propagate_liveness(const struct bpf_verifier_state *state,
3660 			       struct bpf_verifier_state *parent)
3661 {
3662 	while (do_propagate_liveness(state, parent)) {
3663 		/* Something changed, so we need to feed those changes onward */
3664 		state = parent;
3665 		parent = state->parent;
3666 	}
3667 }
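
/* Illustration (not part of the kernel build): a userspace sketch of the
 * loop above with two toy "registers".  push_up() mirrors one rung of
 * do_propagate_liveness(); the names and flag values are made up.
 */
#if 0	/* example only */
#include <assert.h>
#include <stddef.h>

enum { LIVE_READ = 1, LIVE_WRITTEN = 2 };

struct st { int live[2]; struct st *parent; };

static int push_up(struct st *state, struct st *parent)
{
	int writes = (parent == state->parent), touched = 0, i;

	for (i = 0; i < 2; i++) {
		if (parent->live[i] & LIVE_READ)
			continue;
		if (writes && (state->live[i] & LIVE_WRITTEN))
			continue;		/* write screens the read */
		if (state->live[i] & LIVE_READ) {
			parent->live[i] |= LIVE_READ;
			touched = 1;
		}
	}
	return touched;
}

int main(void)
{
	struct st gp  = { { 0, 0 }, NULL };
	struct st pa  = { { 0, 0 }, &gp };
	/* the child read reg0, but wrote reg1 before reading it */
	struct st cur = { { LIVE_READ, LIVE_READ | LIVE_WRITTEN }, &pa };
	struct st *s = &cur, *p = &pa;

	while (p && push_up(s, p)) {
		s = p;
		p = s->parent;
	}
	assert(pa.live[0] & LIVE_READ);		/* read propagated   */
	assert(!(pa.live[1] & LIVE_READ));	/* screened by write */
	assert(gp.live[0] & LIVE_READ);		/* kept flowing up   */
	return 0;
}
#endif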
3668 
3669 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3670 {
3671 	struct bpf_verifier_state_list *new_sl;
3672 	struct bpf_verifier_state_list *sl;
3673 	struct bpf_verifier_state *cur = env->cur_state;
3674 	int i, err;
3675 
3676 	sl = env->explored_states[insn_idx];
3677 	if (!sl)
3678 		/* this 'insn_idx' instruction wasn't marked, so we will not
3679 		 * be doing state search here
3680 		 */
3681 		return 0;
3682 
3683 	while (sl != STATE_LIST_MARK) {
3684 		if (states_equal(env, &sl->state, cur)) {
3685 			/* reached equivalent register/stack state,
3686 			 * prune the search.
3687 			 * Registers read by the continuation are read by us.
3688 			 * If we have any write marks in env->cur_state, they
3689 			 * will prevent corresponding reads in the continuation
3690 			 * from reaching our parent (an explored_state).  Our
3691 			 * own state will get the read marks recorded, but
3692 			 * they'll be immediately forgotten as we're pruning
3693 			 * this state and will pop a new one.
3694 			 */
3695 			propagate_liveness(&sl->state, cur);
3696 			return 1;
3697 		}
3698 		sl = sl->next;
3699 	}
3700 
3701 	/* there were no equivalent states, remember current one.
3702 	 * technically the current state is not proven to be safe yet,
3703 	 * but it will either reach bpf_exit (which means it's safe) or
3704 	 * it will be rejected. Since there are no loops, we won't be
3705 	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
3706 	 */
3707 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
3708 	if (!new_sl)
3709 		return -ENOMEM;
3710 
3711 	/* add new state to the head of linked list */
3712 	err = copy_verifier_state(&new_sl->state, cur);
3713 	if (err) {
3714 		free_verifier_state(&new_sl->state, false);
3715 		kfree(new_sl);
3716 		return err;
3717 	}
3718 	new_sl->next = env->explored_states[insn_idx];
3719 	env->explored_states[insn_idx] = new_sl;
3720 	/* connect new state to parentage chain */
3721 	cur->parent = &new_sl->state;
3722 	/* clear write marks in current state: the writes we did are not writes
3723 	 * our child did, so they don't screen off its reads from us.
3724 	 * (There are no read marks in current state, because reads always mark
3725 	 * their parent and current state never has children yet.  Only
3726 	 * explored_states can get read marks.)
3727 	 */
3728 	for (i = 0; i < BPF_REG_FP; i++)
3729 		cur->regs[i].live = REG_LIVE_NONE;
3730 	for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
3731 		if (cur->stack[i].slot_type[0] == STACK_SPILL)
3732 			cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
3733 	return 0;
3734 }
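
/* Illustration (not part of the kernel build): the shape explored_states
 * takes after some verification (the indices are hypothetical):
 *
 *   explored_states[12] -> state_B -> state_A -> STATE_LIST_MARK
 *   explored_states[20] -> STATE_LIST_MARK  (pruning point, nothing stored)
 *   explored_states[31] -> NULL             (never marked: no search here)
 *
 * New states are pushed at the head, so state_B is the most recent; the
 * STATE_LIST_MARK sentinel both terminates the walk in is_state_visited()
 * and flags the index as a pruning point.
 */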
3735 
3736 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
3737 				  int insn_idx, int prev_insn_idx)
3738 {
3739 	if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
3740 		return 0;
3741 
3742 	return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
3743 }
3744 
3745 static int do_check(struct bpf_verifier_env *env)
3746 {
3747 	struct bpf_verifier_state *state;
3748 	struct bpf_insn *insns = env->prog->insnsi;
3749 	struct bpf_reg_state *regs;
3750 	int insn_cnt = env->prog->len;
3751 	int insn_idx, prev_insn_idx = 0;
3752 	int insn_processed = 0;
3753 	bool do_print_state = false;
3754 
3755 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
3756 	if (!state)
3757 		return -ENOMEM;
3758 	env->cur_state = state;
3759 	init_reg_state(env, state->regs);
3760 	state->parent = NULL;
3761 	insn_idx = 0;
3762 	for (;;) {
3763 		struct bpf_insn *insn;
3764 		u8 class;
3765 		int err;
3766 
3767 		if (insn_idx >= insn_cnt) {
3768 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
3769 				insn_idx, insn_cnt);
3770 			return -EFAULT;
3771 		}
3772 
3773 		insn = &insns[insn_idx];
3774 		class = BPF_CLASS(insn->code);
3775 
3776 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
3777 			verbose(env,
3778 				"BPF program is too large. Processed %d insn\n",
3779 				insn_processed);
3780 			return -E2BIG;
3781 		}
3782 
3783 		err = is_state_visited(env, insn_idx);
3784 		if (err < 0)
3785 			return err;
3786 		if (err == 1) {
3787 			/* found equivalent state, can prune the search */
3788 			if (env->log.level) {
3789 				if (do_print_state)
3790 					verbose(env, "\nfrom %d to %d: safe\n",
3791 						prev_insn_idx, insn_idx);
3792 				else
3793 					verbose(env, "%d: safe\n", insn_idx);
3794 			}
3795 			goto process_bpf_exit;
3796 		}
3797 
3798 		if (need_resched())
3799 			cond_resched();
3800 
3801 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
3802 			if (env->log.level > 1)
3803 				verbose(env, "%d:", insn_idx);
3804 			else
3805 				verbose(env, "\nfrom %d to %d:",
3806 					prev_insn_idx, insn_idx);
3807 			print_verifier_state(env, state);
3808 			do_print_state = false;
3809 		}
3810 
3811 		if (env->log.level) {
3812 			verbose(env, "%d: ", insn_idx);
3813 			print_bpf_insn(verbose, env, insn,
3814 				       env->allow_ptr_leaks);
3815 		}
3816 
3817 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
3818 		if (err)
3819 			return err;
3820 
3821 		regs = cur_regs(env);
3822 		if (class == BPF_ALU || class == BPF_ALU64) {
3823 			err = check_alu_op(env, insn);
3824 			if (err)
3825 				return err;
3826 
3827 		} else if (class == BPF_LDX) {
3828 			enum bpf_reg_type *prev_src_type, src_reg_type;
3829 
3830 			/* check for reserved fields is already done */
3831 
3832 			/* check src operand */
3833 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3834 			if (err)
3835 				return err;
3836 
3837 			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3838 			if (err)
3839 				return err;
3840 
3841 			src_reg_type = regs[insn->src_reg].type;
3842 
3843 			/* check that memory (src_reg + off) is readable,
3844 			 * the state of dst_reg will be updated by this func
3845 			 */
3846 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
3847 					       BPF_SIZE(insn->code), BPF_READ,
3848 					       insn->dst_reg);
3849 			if (err)
3850 				return err;
3851 
3852 			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
3853 
3854 			if (*prev_src_type == NOT_INIT) {
3855 				/* saw a valid insn
3856 				 * dst_reg = *(u32 *)(src_reg + off)
3857 				 * save type to validate intersecting paths
3858 				 */
3859 				*prev_src_type = src_reg_type;
3860 
3861 			} else if (src_reg_type != *prev_src_type &&
3862 				   (src_reg_type == PTR_TO_CTX ||
3863 				    *prev_src_type == PTR_TO_CTX)) {
3864 				/* An abusive program is trying to use the same insn
3865 				 * dst_reg = *(u32*) (src_reg + off)
3866 				 * with different pointer types:
3867 				 * src_reg == ctx in one branch and
3868 				 * src_reg == stack|map in some other branch.
3869 				 * Reject it.
3870 				 */
3871 				verbose(env, "same insn cannot be used with different pointers\n");
3872 				return -EINVAL;
3873 			}
3874 
3875 		} else if (class == BPF_STX) {
3876 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
3877 
3878 			if (BPF_MODE(insn->code) == BPF_XADD) {
3879 				err = check_xadd(env, insn_idx, insn);
3880 				if (err)
3881 					return err;
3882 				insn_idx++;
3883 				continue;
3884 			}
3885 
3886 			/* check src1 operand */
3887 			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3888 			if (err)
3889 				return err;
3890 			/* check src2 operand */
3891 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3892 			if (err)
3893 				return err;
3894 
3895 			dst_reg_type = regs[insn->dst_reg].type;
3896 
3897 			/* check that memory (dst_reg + off) is writeable */
3898 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3899 					       BPF_SIZE(insn->code), BPF_WRITE,
3900 					       insn->src_reg);
3901 			if (err)
3902 				return err;
3903 
3904 			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
3905 
3906 			if (*prev_dst_type == NOT_INIT) {
3907 				*prev_dst_type = dst_reg_type;
3908 			} else if (dst_reg_type != *prev_dst_type &&
3909 				   (dst_reg_type == PTR_TO_CTX ||
3910 				    *prev_dst_type == PTR_TO_CTX)) {
3911 				verbose(env, "same insn cannot be used with different pointers\n");
3912 				return -EINVAL;
3913 			}
3914 
3915 		} else if (class == BPF_ST) {
3916 			if (BPF_MODE(insn->code) != BPF_MEM ||
3917 			    insn->src_reg != BPF_REG_0) {
3918 				verbose(env, "BPF_ST uses reserved fields\n");
3919 				return -EINVAL;
3920 			}
3921 			/* check src operand */
3922 			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3923 			if (err)
3924 				return err;
3925 
3926 			/* check that memory (dst_reg + off) is writeable */
3927 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3928 					       BPF_SIZE(insn->code), BPF_WRITE,
3929 					       -1);
3930 			if (err)
3931 				return err;
3932 
3933 		} else if (class == BPF_JMP) {
3934 			u8 opcode = BPF_OP(insn->code);
3935 
3936 			if (opcode == BPF_CALL) {
3937 				if (BPF_SRC(insn->code) != BPF_K ||
3938 				    insn->off != 0 ||
3939 				    insn->src_reg != BPF_REG_0 ||
3940 				    insn->dst_reg != BPF_REG_0) {
3941 					verbose(env, "BPF_CALL uses reserved fields\n");
3942 					return -EINVAL;
3943 				}
3944 
3945 				err = check_call(env, insn->imm, insn_idx);
3946 				if (err)
3947 					return err;
3948 
3949 			} else if (opcode == BPF_JA) {
3950 				if (BPF_SRC(insn->code) != BPF_K ||
3951 				    insn->imm != 0 ||
3952 				    insn->src_reg != BPF_REG_0 ||
3953 				    insn->dst_reg != BPF_REG_0) {
3954 					verbose(env, "BPF_JA uses reserved fields\n");
3955 					return -EINVAL;
3956 				}
3957 
3958 				insn_idx += insn->off + 1;
3959 				continue;
3960 
3961 			} else if (opcode == BPF_EXIT) {
3962 				if (BPF_SRC(insn->code) != BPF_K ||
3963 				    insn->imm != 0 ||
3964 				    insn->src_reg != BPF_REG_0 ||
3965 				    insn->dst_reg != BPF_REG_0) {
3966 					verbose(env, "BPF_EXIT uses reserved fields\n");
3967 					return -EINVAL;
3968 				}
3969 
3970 				/* eBPF calling convention is such that R0 is used
3971 				 * to return the value from eBPF program.
3972 				 * Make sure that it's readable at this time
3973 				 * of bpf_exit, which means that program wrote
3974 				 * something into it earlier
3975 				 */
3976 				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
3977 				if (err)
3978 					return err;
3979 
3980 				if (is_pointer_value(env, BPF_REG_0)) {
3981 					verbose(env, "R0 leaks addr as return value\n");
3982 					return -EACCES;
3983 				}
3984 
3985 				err = check_return_code(env);
3986 				if (err)
3987 					return err;
3988 process_bpf_exit:
3989 				err = pop_stack(env, &prev_insn_idx, &insn_idx);
3990 				if (err < 0) {
3991 					if (err != -ENOENT)
3992 						return err;
3993 					break;
3994 				} else {
3995 					do_print_state = true;
3996 					continue;
3997 				}
3998 			} else {
3999 				err = check_cond_jmp_op(env, insn, &insn_idx);
4000 				if (err)
4001 					return err;
4002 			}
4003 		} else if (class == BPF_LD) {
4004 			u8 mode = BPF_MODE(insn->code);
4005 
4006 			if (mode == BPF_ABS || mode == BPF_IND) {
4007 				err = check_ld_abs(env, insn);
4008 				if (err)
4009 					return err;
4010 
4011 			} else if (mode == BPF_IMM) {
4012 				err = check_ld_imm(env, insn);
4013 				if (err)
4014 					return err;
4015 
4016 				insn_idx++;
4017 			} else {
4018 				verbose(env, "invalid BPF_LD mode\n");
4019 				return -EINVAL;
4020 			}
4021 		} else {
4022 			verbose(env, "unknown insn class %d\n", class);
4023 			return -EINVAL;
4024 		}
4025 
4026 		insn_idx++;
4027 	}
4028 
4029 	verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
4030 		env->prog->aux->stack_depth);
4031 	return 0;
4032 }
4033 
4034 static int check_map_prealloc(struct bpf_map *map)
4035 {
4036 	return (map->map_type != BPF_MAP_TYPE_HASH &&
4037 		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
4038 		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
4039 		!(map->map_flags & BPF_F_NO_PREALLOC);
4040 }
4041 
4042 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
4043 					struct bpf_map *map,
4044 					struct bpf_prog *prog)
4045 
4046 {
4047 	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
4048 	 * preallocated hash maps, since doing memory allocation
4049 	 * in overflow_handler can crash depending on where nmi got
4050 	 * triggered.
4051 	 */
4052 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
4053 		if (!check_map_prealloc(map)) {
4054 			verbose(env, "perf_event programs can only use preallocated hash map\n");
4055 			return -EINVAL;
4056 		}
4057 		if (map->inner_map_meta &&
4058 		    !check_map_prealloc(map->inner_map_meta)) {
4059 			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
4060 			return -EINVAL;
4061 		}
4062 	}
4063 	return 0;
4064 }
4065 
4066 /* look for pseudo eBPF instructions that access map FDs and
4067  * replace them with actual map pointers
4068  */
4069 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
4070 {
4071 	struct bpf_insn *insn = env->prog->insnsi;
4072 	int insn_cnt = env->prog->len;
4073 	int i, j, err;
4074 
4075 	err = bpf_prog_calc_tag(env->prog);
4076 	if (err)
4077 		return err;
4078 
4079 	for (i = 0; i < insn_cnt; i++, insn++) {
4080 		if (BPF_CLASS(insn->code) == BPF_LDX &&
4081 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
4082 			verbose(env, "BPF_LDX uses reserved fields\n");
4083 			return -EINVAL;
4084 		}
4085 
4086 		if (BPF_CLASS(insn->code) == BPF_STX &&
4087 		    ((BPF_MODE(insn->code) != BPF_MEM &&
4088 		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
4089 			verbose(env, "BPF_STX uses reserved fields\n");
4090 			return -EINVAL;
4091 		}
4092 
4093 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
4094 			struct bpf_map *map;
4095 			struct fd f;
4096 
4097 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
4098 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
4099 			    insn[1].off != 0) {
4100 				verbose(env, "invalid bpf_ld_imm64 insn\n");
4101 				return -EINVAL;
4102 			}
4103 
4104 			if (insn->src_reg == 0)
4105 				/* valid generic load 64-bit imm */
4106 				goto next_insn;
4107 
4108 			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
4109 				verbose(env,
4110 					"unrecognized bpf_ld_imm64 insn\n");
4111 				return -EINVAL;
4112 			}
4113 
4114 			f = fdget(insn->imm);
4115 			map = __bpf_map_get(f);
4116 			if (IS_ERR(map)) {
4117 				verbose(env, "fd %d is not pointing to valid bpf_map\n",
4118 					insn->imm);
4119 				return PTR_ERR(map);
4120 			}
4121 
4122 			err = check_map_prog_compatibility(env, map, env->prog);
4123 			if (err) {
4124 				fdput(f);
4125 				return err;
4126 			}
4127 
4128 			/* store map pointer inside BPF_LD_IMM64 instruction */
4129 			insn[0].imm = (u32) (unsigned long) map;
4130 			insn[1].imm = ((u64) (unsigned long) map) >> 32;
4131 
4132 			/* check whether we recorded this map already */
4133 			for (j = 0; j < env->used_map_cnt; j++)
4134 				if (env->used_maps[j] == map) {
4135 					fdput(f);
4136 					goto next_insn;
4137 				}
4138 
4139 			if (env->used_map_cnt >= MAX_USED_MAPS) {
4140 				fdput(f);
4141 				return -E2BIG;
4142 			}
4143 
4144 			/* hold the map. If the program is rejected by verifier,
4145 			 * the map will be released by release_maps() or it
4146 			 * will be used by the valid program until it's unloaded
4147 			 * and all maps are released in free_bpf_prog_info()
4148 			 */
4149 			map = bpf_map_inc(map, false);
4150 			if (IS_ERR(map)) {
4151 				fdput(f);
4152 				return PTR_ERR(map);
4153 			}
4154 			env->used_maps[env->used_map_cnt++] = map;
4155 
4156 			fdput(f);
4157 next_insn:
4158 			insn++;
4159 			i++;
4160 		}
4161 	}
4162 
4163 	/* now all pseudo BPF_LD_IMM64 instructions load valid
4164 	 * 'struct bpf_map *' into a register instead of user map_fd.
4165 	 * These pointers will be used later by verifier to validate map access.
4166 	 */
4167 	return 0;
4168 }
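
/* Illustration (not part of the kernel build): the two-slot pseudo insn
 * this pass rewrites, roughly as userspace loaders emit it.  The fd value
 * 3 is made up; BPF_PSEUDO_MAP_FD in src_reg is what distinguishes it
 * from a plain 64-bit immediate load.
 */
#if 0	/* example only */
#include <linux/bpf.h>

static const struct bpf_insn ld_map_fd[2] = {
	{ .code    = BPF_LD | BPF_DW | BPF_IMM,
	  .dst_reg = BPF_REG_1,
	  .src_reg = BPF_PSEUDO_MAP_FD,
	  .imm     = 3 },	/* map fd; becomes low 32 bits of the ptr */
	{ 0 },			/* second half; imm becomes high 32 bits  */
};
#endif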
4169 
4170 /* drop refcnt of maps used by the rejected program */
4171 static void release_maps(struct bpf_verifier_env *env)
4172 {
4173 	int i;
4174 
4175 	for (i = 0; i < env->used_map_cnt; i++)
4176 		bpf_map_put(env->used_maps[i]);
4177 }
4178 
4179 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
4180 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
4181 {
4182 	struct bpf_insn *insn = env->prog->insnsi;
4183 	int insn_cnt = env->prog->len;
4184 	int i;
4185 
4186 	for (i = 0; i < insn_cnt; i++, insn++)
4187 		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
4188 			insn->src_reg = 0;
4189 }
4190 
4191 /* single env->prog->insnsi[off] instruction was replaced with the range
4192  * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
4193  * [0, off) and [off, end) to new locations, so the patched range stays zero
4194  */
4195 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
4196 				u32 off, u32 cnt)
4197 {
4198 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
4199 
4200 	if (cnt == 1)
4201 		return 0;
4202 	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
4203 	if (!new_data)
4204 		return -ENOMEM;
4205 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
4206 	memcpy(new_data + off + cnt - 1, old_data + off,
4207 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
4208 	env->insn_aux_data = new_data;
4209 	vfree(old_data);
4210 	return 0;
4211 }
4212 
4213 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
4214 					    const struct bpf_insn *patch, u32 len)
4215 {
4216 	struct bpf_prog *new_prog;
4217 
4218 	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4219 	if (!new_prog)
4220 		return NULL;
4221 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
4222 		return NULL;
4223 	return new_prog;
4224 }
4225 
4226 /* convert load instructions that access fields of 'struct __sk_buff'
4227  * into sequence of instructions that access fields of 'struct sk_buff'
4228  */
4229 static int convert_ctx_accesses(struct bpf_verifier_env *env)
4230 {
4231 	const struct bpf_verifier_ops *ops = env->ops;
4232 	int i, cnt, size, ctx_field_size, delta = 0;
4233 	const int insn_cnt = env->prog->len;
4234 	struct bpf_insn insn_buf[16], *insn;
4235 	struct bpf_prog *new_prog;
4236 	enum bpf_access_type type;
4237 	bool is_narrower_load;
4238 	u32 target_size;
4239 
4240 	if (ops->gen_prologue) {
4241 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
4242 					env->prog);
4243 		if (cnt >= ARRAY_SIZE(insn_buf)) {
4244 			verbose(env, "bpf verifier is misconfigured\n");
4245 			return -EINVAL;
4246 		} else if (cnt) {
4247 			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
4248 			if (!new_prog)
4249 				return -ENOMEM;
4250 
4251 			env->prog = new_prog;
4252 			delta += cnt - 1;
4253 		}
4254 	}
4255 
4256 	if (!ops->convert_ctx_access)
4257 		return 0;
4258 
4259 	insn = env->prog->insnsi + delta;
4260 
4261 	for (i = 0; i < insn_cnt; i++, insn++) {
4262 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
4263 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
4264 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
4265 		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
4266 			type = BPF_READ;
4267 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
4268 			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
4269 			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
4270 			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
4271 			type = BPF_WRITE;
4272 		else
4273 			continue;
4274 
4275 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
4276 			continue;
4277 
4278 		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
4279 		size = BPF_LDST_BYTES(insn);
4280 
4281 		/* If the read access is a narrower load of the field,
4282 		 * convert it to a 4/8-byte load, to minimize program-type
4283 		 * specific convert_ctx_access changes. If the conversion is
4284 		 * successful, we will apply the proper mask to the result.
4285 		 */
4286 		is_narrower_load = size < ctx_field_size;
4287 		if (is_narrower_load) {
4288 			u32 off = insn->off;
4289 			u8 size_code;
4290 
4291 			if (type == BPF_WRITE) {
4292 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
4293 				return -EINVAL;
4294 			}
4295 
4296 			size_code = BPF_H;
4297 			if (ctx_field_size == 4)
4298 				size_code = BPF_W;
4299 			else if (ctx_field_size == 8)
4300 				size_code = BPF_DW;
4301 
4302 			insn->off = off & ~(ctx_field_size - 1);
4303 			insn->code = BPF_LDX | BPF_MEM | size_code;
4304 		}
4305 
4306 		target_size = 0;
4307 		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
4308 					      &target_size);
4309 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
4310 		    (ctx_field_size && !target_size)) {
4311 			verbose(env, "bpf verifier is misconfigured\n");
4312 			return -EINVAL;
4313 		}
4314 
4315 		if (is_narrower_load && size < target_size) {
4316 			if (ctx_field_size <= 4)
4317 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
4318 								(1 << size * 8) - 1);
4319 			else
4320 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
4321 								(1 << size * 8) - 1);
4322 		}
4323 
4324 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4325 		if (!new_prog)
4326 			return -ENOMEM;
4327 
4328 		delta += cnt - 1;
4329 
4330 		/* keep walking new program and skip insns we just inserted */
4331 		env->prog = new_prog;
4332 		insn      = new_prog->insnsi + i + delta;
4333 	}
4334 
4335 	return 0;
4336 }
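
/* Illustration (not part of the kernel build): the narrow-load handling
 * above on a made-up 1-byte read of a 4-byte context field:
 *
 *   before:  r0 = *(u8 *)(r1 + off)      // off lies inside a 4-byte field
 *   widened: r0 = *(u32 *)(r1 + (off & ~3))
 *            ...convert_ctx_access() rewrite of the 4-byte load...
 *   mask:    r0 &= 0xff                  // (1 << 1 * 8) - 1
 *
 * i.e. the program-type callback only ever sees full-field accesses, and
 * the appended BPF_AND keeps just the byte the program asked for.
 */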
4337 
4338 /* fixup insn->imm field of bpf_call instructions
4339  * and inline eligible helpers as explicit sequence of BPF instructions
4340  *
4341  * this function is called after eBPF program passed verification
4342  */
4343 static int fixup_bpf_calls(struct bpf_verifier_env *env)
4344 {
4345 	struct bpf_prog *prog = env->prog;
4346 	struct bpf_insn *insn = prog->insnsi;
4347 	const struct bpf_func_proto *fn;
4348 	const int insn_cnt = prog->len;
4349 	struct bpf_insn insn_buf[16];
4350 	struct bpf_prog *new_prog;
4351 	struct bpf_map *map_ptr;
4352 	int i, cnt, delta = 0;
4353 
4354 	for (i = 0; i < insn_cnt; i++, insn++) {
4355 		if (insn->code != (BPF_JMP | BPF_CALL))
4356 			continue;
4357 
4358 		if (insn->imm == BPF_FUNC_get_route_realm)
4359 			prog->dst_needed = 1;
4360 		if (insn->imm == BPF_FUNC_get_prandom_u32)
4361 			bpf_user_rnd_init_once();
4362 		if (insn->imm == BPF_FUNC_tail_call) {
4363 			/* If we tail call into other programs, we
4364 			 * cannot make any assumptions since they can
4365 			 * be replaced dynamically during runtime in
4366 			 * the program array.
4367 			 */
4368 			prog->cb_access = 1;
4369 			env->prog->aux->stack_depth = MAX_BPF_STACK;
4370 
4371 			/* mark bpf_tail_call as different opcode to avoid
4372 			 * conditional branch in the interpreter for every normal
4373 			 * call and to prevent accidental JITing by JIT compiler
4374 			 * that doesn't support bpf_tail_call yet
4375 			 */
4376 			insn->imm = 0;
4377 			insn->code = BPF_JMP | BPF_TAIL_CALL;
4378 			continue;
4379 		}
4380 
4381 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
4382 		 * handlers are currently limited to 64 bit only.
4383 		 */
4384 		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
4385 		    insn->imm == BPF_FUNC_map_lookup_elem) {
4386 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
4387 			if (map_ptr == BPF_MAP_PTR_POISON ||
4388 			    !map_ptr->ops->map_gen_lookup)
4389 				goto patch_call_imm;
4390 
4391 			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
4392 			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
4393 				verbose(env, "bpf verifier is misconfigured\n");
4394 				return -EINVAL;
4395 			}
4396 
4397 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
4398 						       cnt);
4399 			if (!new_prog)
4400 				return -ENOMEM;
4401 
4402 			delta += cnt - 1;
4403 
4404 			/* keep walking new program and skip insns we just inserted */
4405 			env->prog = prog = new_prog;
4406 			insn      = new_prog->insnsi + i + delta;
4407 			continue;
4408 		}
4409 
4410 		if (insn->imm == BPF_FUNC_redirect_map) {
4411 			/* Note, we cannot use prog directly as imm as subsequent
4412 			 * rewrites would still change the prog pointer. The only
4413 			 * stable address we can use is aux, which also works with
4414 			 * prog clones during blinding.
4415 			 */
4416 			u64 addr = (unsigned long)prog->aux;
4417 			struct bpf_insn r4_ld[] = {
4418 				BPF_LD_IMM64(BPF_REG_4, addr),
4419 				*insn,
4420 			};
4421 			cnt = ARRAY_SIZE(r4_ld);
4422 
4423 			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
4424 			if (!new_prog)
4425 				return -ENOMEM;
4426 
4427 			delta    += cnt - 1;
4428 			env->prog = prog = new_prog;
4429 			insn      = new_prog->insnsi + i + delta;
4430 		}
4431 patch_call_imm:
4432 		fn = env->ops->get_func_proto(insn->imm);
4433 		/* all functions that have a prototype and that the verifier
4434 		 * allowed programs to call must be real in-kernel functions
4435 		 */
4436 		if (!fn->func) {
4437 			verbose(env,
4438 				"kernel subsystem misconfigured func %s#%d\n",
4439 				func_id_name(insn->imm), insn->imm);
4440 			return -EFAULT;
4441 		}
4442 		insn->imm = fn->func - __bpf_call_base;
4443 	}
4444 
4445 	return 0;
4446 }
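
/* Illustration (not part of the kernel build): what the final imm rewrite
 * buys.  Before this pass insn->imm holds a helper id such as
 * BPF_FUNC_map_lookup_elem; afterwards it holds an offset relative to
 * __bpf_call_base, so a BPF_CALL can be dispatched with no lookup table,
 * roughly:
 *
 *   ret = ((u64 (*)(u64, u64, u64, u64, u64))
 *	    (__bpf_call_base + insn->imm))(r1, r2, r3, r4, r5);
 */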
4447 
4448 static void free_states(struct bpf_verifier_env *env)
4449 {
4450 	struct bpf_verifier_state_list *sl, *sln;
4451 	int i;
4452 
4453 	if (!env->explored_states)
4454 		return;
4455 
4456 	for (i = 0; i < env->prog->len; i++) {
4457 		sl = env->explored_states[i];
4458 
4459 		if (sl)
4460 			while (sl != STATE_LIST_MARK) {
4461 				sln = sl->next;
4462 				free_verifier_state(&sl->state, false);
4463 				kfree(sl);
4464 				sl = sln;
4465 			}
4466 	}
4467 
4468 	kfree(env->explored_states);
4469 }
4470 
4471 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
4472 {
4473 	struct bpf_verifier_env *env;
4474 	struct bpf_verifer_log *log;
4475 	int ret = -EINVAL;
4476 
4477 	/* no program is valid */
4478 	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
4479 		return -EINVAL;
4480 
4481 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
4482 	 * allocate/free it every time bpf_check() is called
4483 	 */
4484 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4485 	if (!env)
4486 		return -ENOMEM;
4487 	log = &env->log;
4488 
4489 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4490 				     (*prog)->len);
4491 	ret = -ENOMEM;
4492 	if (!env->insn_aux_data)
4493 		goto err_free_env;
4494 	env->prog = *prog;
4495 	env->ops = bpf_verifier_ops[env->prog->type];
4496 
4497 	/* grab the mutex to protect few globals used by verifier */
4498 	mutex_lock(&bpf_verifier_lock);
4499 
4500 	if (attr->log_level || attr->log_buf || attr->log_size) {
4501 		/* user requested verbose verifier output
4502 		 * and supplied buffer to store the verification trace
4503 		 */
4504 		log->level = attr->log_level;
4505 		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
4506 		log->len_total = attr->log_size;
4507 
4508 		ret = -EINVAL;
4509 		/* log attributes have to be sane */
4510 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
4511 		    !log->level || !log->ubuf)
4512 			goto err_unlock;
4513 	}
4514 
4515 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
4516 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4517 		env->strict_alignment = true;
4518 
4519 	ret = replace_map_fd_with_map_ptr(env);
4520 	if (ret < 0)
4521 		goto skip_full_check;
4522 
4523 	env->explored_states = kcalloc(env->prog->len,
4524 				       sizeof(struct bpf_verifier_state_list *),
4525 				       GFP_USER);
4526 	ret = -ENOMEM;
4527 	if (!env->explored_states)
4528 		goto skip_full_check;
4529 
4530 	ret = check_cfg(env);
4531 	if (ret < 0)
4532 		goto skip_full_check;
4533 
4534 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4535 
4536 	ret = do_check(env);
4537 	if (env->cur_state) {
4538 		free_verifier_state(env->cur_state, true);
4539 		env->cur_state = NULL;
4540 	}
4541 
4542 skip_full_check:
4543 	while (!pop_stack(env, NULL, NULL));
4544 	free_states(env);
4545 
4546 	if (ret == 0)
4547 		/* program is valid, convert *(u32*)(ctx + off) accesses */
4548 		ret = convert_ctx_accesses(env);
4549 
4550 	if (ret == 0)
4551 		ret = fixup_bpf_calls(env);
4552 
4553 	if (log->level && bpf_verifier_log_full(log))
4554 		ret = -ENOSPC;
4555 	if (log->level && !log->ubuf) {
4556 		ret = -EFAULT;
4557 		goto err_release_maps;
4558 	}
4559 
4560 	if (ret == 0 && env->used_map_cnt) {
4561 		/* if program passed verifier, update used_maps in bpf_prog_info */
4562 		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
4563 							  sizeof(env->used_maps[0]),
4564 							  GFP_KERNEL);
4565 
4566 		if (!env->prog->aux->used_maps) {
4567 			ret = -ENOMEM;
4568 			goto err_release_maps;
4569 		}
4570 
4571 		memcpy(env->prog->aux->used_maps, env->used_maps,
4572 		       sizeof(env->used_maps[0]) * env->used_map_cnt);
4573 		env->prog->aux->used_map_cnt = env->used_map_cnt;
4574 
4575 		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
4576 		 * bpf_ld_imm64 instructions
4577 		 */
4578 		convert_pseudo_ld_imm64(env);
4579 	}
4580 
4581 err_release_maps:
4582 	if (!env->prog->aux->used_maps)
4583 		/* if we didn't copy map pointers into bpf_prog_info, release
4584 		 * them now. Otherwise free_bpf_prog_info() will release them.
4585 		 */
4586 		release_maps(env);
4587 	*prog = env->prog;
4588 err_unlock:
4589 	mutex_unlock(&bpf_verifier_lock);
4590 	vfree(env->insn_aux_data);
4591 err_free_env:
4592 	kfree(env);
4593 	return ret;
4594 }
4595 
4596 static const struct bpf_verifier_ops * const bpf_analyzer_ops[] = {
4597 #ifdef CONFIG_NET
4598 	[BPF_PROG_TYPE_XDP]		= &xdp_analyzer_ops,
4599 	[BPF_PROG_TYPE_SCHED_CLS]	= &tc_cls_act_analyzer_ops,
4600 #endif
4601 };
4602 
4603 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
4604 		 void *priv)
4605 {
4606 	struct bpf_verifier_env *env;
4607 	int ret;
4608 
4609 	if (prog->type >= ARRAY_SIZE(bpf_analyzer_ops) ||
4610 	    !bpf_analyzer_ops[prog->type])
4611 		return -EOPNOTSUPP;
4612 
4613 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
4614 	if (!env)
4615 		return -ENOMEM;
4616 
4617 	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
4618 				     prog->len);
4619 	ret = -ENOMEM;
4620 	if (!env->insn_aux_data)
4621 		goto err_free_env;
4622 	env->prog = prog;
4623 	env->ops = bpf_analyzer_ops[env->prog->type];
4624 	env->analyzer_ops = ops;
4625 	env->analyzer_priv = priv;
4626 
4627 	/* grab the mutex to protect few globals used by verifier */
4628 	mutex_lock(&bpf_verifier_lock);
4629 
4630 	env->strict_alignment = false;
4631 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
4632 		env->strict_alignment = true;
4633 
4634 	env->explored_states = kcalloc(env->prog->len,
4635 				       sizeof(struct bpf_verifier_state_list *),
4636 				       GFP_KERNEL);
4637 	ret = -ENOMEM;
4638 	if (!env->explored_states)
4639 		goto skip_full_check;
4640 
4641 	ret = check_cfg(env);
4642 	if (ret < 0)
4643 		goto skip_full_check;
4644 
4645 	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
4646 
4647 	ret = do_check(env);
4648 	if (env->cur_state) {
4649 		free_verifier_state(env->cur_state, true);
4650 		env->cur_state = NULL;
4651 	}
4652 
4653 skip_full_check:
4654 	while (!pop_stack(env, NULL, NULL));
4655 	free_states(env);
4656 
4657 	mutex_unlock(&bpf_verifier_lock);
4658 	vfree(env->insn_aux_data);
4659 err_free_env:
4660 	kfree(env);
4661 	return ret;
4662 }
4663 EXPORT_SYMBOL_GPL(bpf_analyzer);
4664