xref: /linux/kernel/bpf/core.c (revision b2d0f5d5dc53532e6f07bc546a476a55ebdfe0f3)
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

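/* Example: a socket filter reading the IPv4 TOS byte via a negative
 * offset uses k = SKF_NET_OFF + 1. Since k >= SKF_NET_OFF, the helper
 * resolves this to skb_network_header(skb) + 1, and the bounds check
 * against skb->head and skb_tail_pointer() makes sure the load stays
 * within the linear skb data (NULL is returned otherwise).
 */
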
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation,
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

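/* Worked example for the padding logic in bpf_prog_calc_tag(): a
 * program with 8 insns has psize = 8 * sizeof(struct bpf_insn) = 64
 * bytes. After the 0x80 terminator byte, psize is 65, so
 * bsize = round_up(65, SHA_MESSAGE_BYTES) = 128 and blocks = 2; the
 * trailing __be64 stores the message length in bits,
 * (65 - 1) << 3 = 512, at the end of the second 64-byte block.
 */
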
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP  &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}

static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}

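/* Example for the adjustment above: a jump at i = 2 with off = 3
 * targets insn 2 + 3 + 1 = 6. If one insn is patched into two at
 * pos = 4 (delta = 1), the jump crosses the patch boundary
 * (2 < 4 && 2 + 3 + 1 > 4), so off becomes 4 and the jump still
 * lands on the original target, which now lives at insn 7.
 */
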
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}

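/* Typical usage sketch (insns chosen only for illustration): to expand
 * insn 10 of "prog" into a two-insn sequence, a caller does roughly
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 1),
 *	};
 *
 *	prog = bpf_patch_insn_single(prog, 10, patch, ARRAY_SIZE(patch));
 *
 * and must continue with the returned program, since the original one
 * may have been reallocated and freed.
 */
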
#ifdef CONFIG_BPF_JIT
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would normally need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}

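/* The resulting symbol has the form bpf_prog_<tag>[_<name>], with the
 * 8-byte tag rendered as 16 hex characters, e.g. (tag value purely
 * illustrative) "bpf_prog_8937c9e2acbb1cf5" for an unnamed program and
 * "bpf_prog_8937c9e2acbb1cf5_mycls" when aux->name is set to "mycls".
 */
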
static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

int bpf_jit_kallsyms __read_mostly;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(aux->prog, sym);

		*value = symbol_start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

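/* Example of the sizing above with 4K pages: for proglen = 2000,
 * size = round_up(2000 + sizeof(*hdr) + 128, PAGE_SIZE) = 4096, so the
 * image occupies a single page (hdr->pages = 1). The remaining hole of
 * illegal instructions is then roughly 2K, and the program start is
 * randomized within it, rounded down to the requested alignment.
 */
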
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_harden __read_mostly;

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

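/* Blinding example: BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 42) is rewritten
 * above into the equivalent sequence
 *
 *	BPF_REG_AX = imm_rnd ^ 42
 *	BPF_REG_AX ^= imm_rnd
 *	BPF_REG_2 += BPF_REG_AX
 *
 * so the user-controlled constant 42 never appears verbatim in the JIT
 * image and cannot be used to embed attacker-chosen opcode bytes.
 */
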
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled())
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/**
 *	___bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *	@stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
				    u64 *stack)
{
	u64 tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
		[BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
		[BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
		[BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
		[BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

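	/* Dispatch works via a computed goto: insn->code indexes the
	 * jumptable above and execution jumps straight to the matching
	 * label, e.g. BPF_ALU64 | BPF_ADD | BPF_X lands on ALU64_ADD_X.
	 * CONT advances insn and re-enters the dispatch at select_insn;
	 * unknown opcodes hit default_label.
	 */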
select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;		\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb
		 * (see may_access_skb() in the verifier). All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
		 * verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

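/* The EVALs above expand DEFINE_BPF_PROG_RUN once per supported stack
 * size, defining __bpf_prog_run32(), __bpf_prog_run64(), ... up to
 * __bpf_prog_run512(). bpf_prog_select_runtime() below picks the
 * smallest sufficient variant: e.g. aux->stack_depth = 100 rounds up
 * to 128, index (128 / 32) - 1 = 3, i.e. __bpf_prog_run128().
 */
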
#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT the eBPF program; if the JIT is not available, use the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN()
 * macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but fall back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		fp = bpf_int_jit_compile(fp);
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't
 * have a bpf program attached, use one global 'empty_prog_array'. It
 * will not be modified by the caller of bpf_prog_array_alloc() (since
 * the caller requested prog_cnt == 0); that pointer should be 'freed'
 * by bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog *) * (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
{
	if (!progs ||
	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
{
	struct bpf_prog **prog;
	u32 cnt = 0;

	rcu_read_lock();
	prog = rcu_dereference(progs)->progs;
	for (; *prog; prog++)
		cnt++;
	rcu_read_unlock();
	return cnt;
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
				__u32 __user *prog_ids, u32 cnt)
{
	struct bpf_prog **prog;
	u32 i = 0, id;

	rcu_read_lock();
	prog = rcu_dereference(progs)->progs;
	for (; *prog; prog++) {
		id = (*prog)->aux->id;
		if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
			rcu_read_unlock();
			return -EFAULT;
		}
		if (++i == cnt) {
			prog++;
			break;
		}
	}
	rcu_read_unlock();
	if (*prog)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
				struct bpf_prog *old_prog)
{
	struct bpf_prog **prog = progs->progs;

	for (; *prog; prog++)
		if (*prog == old_prog) {
			WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
			break;
		}
}

int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog **existing_prog;
	struct bpf_prog_array *array;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing_prog = old_array->progs;
		for (; *existing_prog; existing_prog++) {
			if (*existing_prog != exclude_prog &&
			    *existing_prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (*existing_prog == include_prog)
				return -EEXIST;
		}
	}

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing_prog = old_array->progs;
		for (; *existing_prog; existing_prog++)
			if (*existing_prog != exclude_prog &&
			    *existing_prog != &dummy_bpf_prog.prog)
				array->progs[new_prog_idx++] = *existing_prog;
	}
	if (include_prog)
		array->progs[new_prog_idx++] = include_prog;
	array->progs[new_prog_idx] = NULL;
	*new_array = array;
	return 0;
}

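/* Example: for an old_array of { A, B, dummy }, calling
 * bpf_prog_array_copy() with exclude_prog = B and include_prog = C
 * yields { A, C }: dummy entries left behind by
 * bpf_prog_array_delete_safe() are dropped, and including a program
 * that is already present fails with -EEXIST.
 */
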
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_sock_map_update_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);

/* These are only used within the BPF_SYSCALL code */
#ifdef CONFIG_BPF_SYSCALL
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
#endif
1677