xref: /linux/kernel/bpf/core.c (revision 036b9e7caeb09598afb297a6d4fb36b477a4f6b2)
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Based on the design of the Berkeley Packet Filter. The new
5  * internal format has been designed by PLUMgrid:
6  *
7  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8  *
9  * Authors:
10  *
11  *	Jay Schulist <jschlst@samba.org>
12  *	Alexei Starovoitov <ast@plumgrid.com>
13  *	Daniel Borkmann <dborkman@redhat.com>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version
18  * 2 of the License, or (at your option) any later version.
19  *
20  * Andi Kleen - Fix a few bad bugs and races.
21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22  */
23 
24 #include <uapi/linux/btf.h>
25 #include <linux/filter.h>
26 #include <linux/skbuff.h>
27 #include <linux/vmalloc.h>
28 #include <linux/random.h>
29 #include <linux/moduleloader.h>
30 #include <linux/bpf.h>
31 #include <linux/btf.h>
32 #include <linux/frame.h>
33 #include <linux/rbtree_latch.h>
34 #include <linux/kallsyms.h>
35 #include <linux/rcupdate.h>
36 #include <linux/perf_event.h>
37 
38 #include <asm/unaligned.h>
39 
40 /* Registers */
41 #define BPF_R0	regs[BPF_REG_0]
42 #define BPF_R1	regs[BPF_REG_1]
43 #define BPF_R2	regs[BPF_REG_2]
44 #define BPF_R3	regs[BPF_REG_3]
45 #define BPF_R4	regs[BPF_REG_4]
46 #define BPF_R5	regs[BPF_REG_5]
47 #define BPF_R6	regs[BPF_REG_6]
48 #define BPF_R7	regs[BPF_REG_7]
49 #define BPF_R8	regs[BPF_REG_8]
50 #define BPF_R9	regs[BPF_REG_9]
51 #define BPF_R10	regs[BPF_REG_10]
52 
53 /* Named registers */
54 #define DST	regs[insn->dst_reg]
55 #define SRC	regs[insn->src_reg]
56 #define FP	regs[BPF_REG_FP]
57 #define ARG1	regs[BPF_REG_ARG1]
58 #define CTX	regs[BPF_REG_CTX]
59 #define IMM	insn->imm
60 
61 /* No hurry in this branch
62  *
63  * Exported for the bpf jit load helper.
64  */
65 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
66 {
67 	u8 *ptr = NULL;
68 
69 	if (k >= SKF_NET_OFF)
70 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
71 	else if (k >= SKF_LL_OFF)
72 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
73 
74 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
75 		return ptr;
76 
77 	return NULL;
78 }
79 
80 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
81 {
82 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
83 	struct bpf_prog_aux *aux;
84 	struct bpf_prog *fp;
85 
86 	size = round_up(size, PAGE_SIZE);
87 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
88 	if (fp == NULL)
89 		return NULL;
90 
91 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
92 	if (aux == NULL) {
93 		vfree(fp);
94 		return NULL;
95 	}
96 
97 	fp->pages = size / PAGE_SIZE;
98 	fp->aux = aux;
99 	fp->aux->prog = fp;
100 	fp->jit_requested = ebpf_jit_enabled();
101 
102 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
103 
104 	return fp;
105 }
106 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
107 
108 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
109 {
110 	if (!prog->aux->nr_linfo || !prog->jit_requested)
111 		return 0;
112 
113 	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
114 					 sizeof(*prog->aux->jited_linfo),
115 					 GFP_KERNEL | __GFP_NOWARN);
116 	if (!prog->aux->jited_linfo)
117 		return -ENOMEM;
118 
119 	return 0;
120 }
121 
122 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
123 {
124 	kfree(prog->aux->jited_linfo);
125 	prog->aux->jited_linfo = NULL;
126 }
127 
128 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
129 {
130 	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
131 		bpf_prog_free_jited_linfo(prog);
132 }
133 
134 /* The jit engine is responsible for providing an array
135  * for insn_off to the jited_off mapping (insn_to_jit_off).
136  *
137  * The idx to this array is the insn_off.  Hence, the insn_off
138  * here is relative to the prog itself instead of the main prog.
139  * This array has one entry for each xlated bpf insn.
140  *
141  * jited_off is the byte off to the last byte of the jited insn.
142  *
143  * Hence, with
144  * insn_start:
145  *      The first bpf insn off of the prog.  The insn off
146  *      here is relative to the main prog.
147  *      e.g. if prog is a subprog, insn_start > 0
148  * linfo_idx:
149  *      The prog's idx to prog->aux->linfo and jited_linfo
150  *
151  * jited_linfo[linfo_idx] = prog->bpf_func
152  *
153  * For i > linfo_idx,
154  *
155  * jited_linfo[i] = prog->bpf_func +
156  *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
157  */
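/* Worked example of the mapping above (illustrative numbers only, not taken
 * from a real prog): assume a subprog with linfo_idx = 3 whose first insn
 * sits at insn_off 10 in the main prog (insn_start = 10), and
 * insn_to_jit_off = { 4, 8, 16, ... } for the subprog's own xlated insns.
 * Then:
 *
 *   jited_linfo[3] = prog->bpf_func
 *   linfo[4].insn_off == 12  =>  jited_linfo[4] =
 *       prog->bpf_func + insn_to_jit_off[12 - 10 - 1] = prog->bpf_func + 8
 *
 * i.e. the end offset of the previous jited insn, which is exactly where the
 * jited code for linfo[4] begins.
 */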
158 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
159 			       const u32 *insn_to_jit_off)
160 {
161 	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
162 	const struct bpf_line_info *linfo;
163 	void **jited_linfo;
164 
165 	if (!prog->aux->jited_linfo)
166 		/* Userspace did not provide linfo */
167 		return;
168 
169 	linfo_idx = prog->aux->linfo_idx;
170 	linfo = &prog->aux->linfo[linfo_idx];
171 	insn_start = linfo[0].insn_off;
172 	insn_end = insn_start + prog->len;
173 
174 	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
175 	jited_linfo[0] = prog->bpf_func;
176 
177 	nr_linfo = prog->aux->nr_linfo - linfo_idx;
178 
179 	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
180 		/* The verifier ensures that linfo[i].insn_off is
181 		 * strictly increasing
182 		 */
183 		jited_linfo[i] = prog->bpf_func +
184 			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
185 }
186 
187 void bpf_prog_free_linfo(struct bpf_prog *prog)
188 {
189 	bpf_prog_free_jited_linfo(prog);
190 	kvfree(prog->aux->linfo);
191 }
192 
193 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
194 				  gfp_t gfp_extra_flags)
195 {
196 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
197 	struct bpf_prog *fp;
198 	u32 pages, delta;
199 	int ret;
200 
201 	BUG_ON(fp_old == NULL);
202 
203 	size = round_up(size, PAGE_SIZE);
204 	pages = size / PAGE_SIZE;
205 	if (pages <= fp_old->pages)
206 		return fp_old;
207 
208 	delta = pages - fp_old->pages;
209 	ret = __bpf_prog_charge(fp_old->aux->user, delta);
210 	if (ret)
211 		return NULL;
212 
213 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
214 	if (fp == NULL) {
215 		__bpf_prog_uncharge(fp_old->aux->user, delta);
216 	} else {
217 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
218 		fp->pages = pages;
219 		fp->aux->prog = fp;
220 
221 		/* We keep fp->aux from fp_old around in the new
222 		 * reallocated structure.
223 		 */
224 		fp_old->aux = NULL;
225 		__bpf_prog_free(fp_old);
226 	}
227 
228 	return fp;
229 }
230 
231 void __bpf_prog_free(struct bpf_prog *fp)
232 {
233 	kfree(fp->aux);
234 	vfree(fp);
235 }
236 
237 int bpf_prog_calc_tag(struct bpf_prog *fp)
238 {
239 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
240 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
241 	u32 digest[SHA_DIGEST_WORDS];
242 	u32 ws[SHA_WORKSPACE_WORDS];
243 	u32 i, bsize, psize, blocks;
244 	struct bpf_insn *dst;
245 	bool was_ld_map;
246 	u8 *raw, *todo;
247 	__be32 *result;
248 	__be64 *bits;
249 
250 	raw = vmalloc(raw_size);
251 	if (!raw)
252 		return -ENOMEM;
253 
254 	sha_init(digest);
255 	memset(ws, 0, sizeof(ws));
256 
257 	/* We need to take out the map fd for the digest calculation
258 	 * since map fds are unstable from the user space side.
259 	 */
260 	dst = (void *)raw;
261 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
262 		dst[i] = fp->insnsi[i];
263 		if (!was_ld_map &&
264 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
265 		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
266 			was_ld_map = true;
267 			dst[i].imm = 0;
268 		} else if (was_ld_map &&
269 			   dst[i].code == 0 &&
270 			   dst[i].dst_reg == 0 &&
271 			   dst[i].src_reg == 0 &&
272 			   dst[i].off == 0) {
273 			was_ld_map = false;
274 			dst[i].imm = 0;
275 		} else {
276 			was_ld_map = false;
277 		}
278 	}
279 
280 	psize = bpf_prog_insn_size(fp);
281 	memset(&raw[psize], 0, raw_size - psize);
282 	raw[psize++] = 0x80;
283 
284 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
285 	blocks = bsize / SHA_MESSAGE_BYTES;
286 	todo   = raw;
287 	if (bsize - psize >= sizeof(__be64)) {
288 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
289 	} else {
290 		bits = (__be64 *)(todo + bsize + bits_offset);
291 		blocks++;
292 	}
293 	*bits = cpu_to_be64((psize - 1) << 3);
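	/* Example of the padding math above, with made-up sizes: for a prog
	 * of 15 insns, psize = 15 * sizeof(struct bpf_insn) = 120; appending
	 * the 0x80 byte makes psize = 121, so bsize = round_up(121, 64) = 128
	 * and blocks = 2.  Since 128 - 121 = 7 < sizeof(__be64), the length
	 * field moves into an extra block (blocks = 3) and encodes
	 * (121 - 1) * 8 = 960 bits, the message length without the 0x80
	 * padding byte.
	 */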
294 
295 	while (blocks--) {
296 		sha_transform(digest, todo, ws);
297 		todo += SHA_MESSAGE_BYTES;
298 	}
299 
300 	result = (__force __be32 *)digest;
301 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
302 		result[i] = cpu_to_be32(digest[i]);
303 	memcpy(fp->tag, result, sizeof(fp->tag));
304 
305 	vfree(raw);
306 	return 0;
307 }
308 
309 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
310 				u32 curr, const bool probe_pass)
311 {
312 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
313 	s64 imm = insn->imm;
314 
315 	if (curr < pos && curr + imm + 1 > pos)
316 		imm += delta;
317 	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
318 		imm -= delta;
319 	if (imm < imm_min || imm > imm_max)
320 		return -ERANGE;
321 	if (!probe_pass)
322 		insn->imm = imm;
323 	return 0;
324 }
325 
326 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
327 				u32 curr, const bool probe_pass)
328 {
329 	const s32 off_min = S16_MIN, off_max = S16_MAX;
330 	s32 off = insn->off;
331 
332 	if (curr < pos && curr + off + 1 > pos)
333 		off += delta;
334 	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
335 		off -= delta;
336 	if (off < off_min || off > off_max)
337 		return -ERANGE;
338 	if (!probe_pass)
339 		insn->off = off;
340 	return 0;
341 }
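/* A sketch of the adjustment above, with hypothetical indexes: patching at
 * pos = 5 with delta = 2 (one insn grew into three).  A forward jump at
 * curr = 3 with off = 4 targets insn 3 + 4 + 1 = 8, which lies beyond the
 * patch point, so off becomes 6 and the jump still lands on the (now
 * shifted) old insn 8.  A jump sitting behind the patched area that targets
 * an insn before it takes the second branch instead and gets delta
 * subtracted.
 */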
342 
343 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
344 			    const bool probe_pass)
345 {
346 	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
347 	struct bpf_insn *insn = prog->insnsi;
348 	int ret = 0;
349 
350 	for (i = 0; i < insn_cnt; i++, insn++) {
351 		u8 code;
352 
353 		/* In the probing pass we still operate on the original,
354 		 * unpatched image in order to check overflows before we
355 		 * do any other adjustments. Therefore skip the patchlet.
356 		 */
357 		if (probe_pass && i == pos) {
358 			i += delta + 1;
359 			insn++;
360 		}
361 		code = insn->code;
362 		if (BPF_CLASS(code) != BPF_JMP ||
363 		    BPF_OP(code) == BPF_EXIT)
364 			continue;
365 		/* Adjust offset of jmps if we cross patch boundaries. */
366 		if (BPF_OP(code) == BPF_CALL) {
367 			if (insn->src_reg != BPF_PSEUDO_CALL)
368 				continue;
369 			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
370 						   probe_pass);
371 		} else {
372 			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
373 						   probe_pass);
374 		}
375 		if (ret)
376 			break;
377 	}
378 
379 	return ret;
380 }
381 
382 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
383 {
384 	struct bpf_line_info *linfo;
385 	u32 i, nr_linfo;
386 
387 	nr_linfo = prog->aux->nr_linfo;
388 	if (!nr_linfo || !delta)
389 		return;
390 
391 	linfo = prog->aux->linfo;
392 
393 	for (i = 0; i < nr_linfo; i++)
394 		if (off < linfo[i].insn_off)
395 			break;
396 
397 	/* Push the insn_off of all remaining entries (off < linfo[i].insn_off) by delta */
398 	for (; i < nr_linfo; i++)
399 		linfo[i].insn_off += delta;
400 }
401 
402 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
403 				       const struct bpf_insn *patch, u32 len)
404 {
405 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
406 	const u32 cnt_max = S16_MAX;
407 	struct bpf_prog *prog_adj;
408 
409 	/* Since our patchlet doesn't expand the image, we're done. */
410 	if (insn_delta == 0) {
411 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
412 		return prog;
413 	}
414 
415 	insn_adj_cnt = prog->len + insn_delta;
416 
417 	/* Reject anything that would potentially let the insn->off
418 	 * target overflow when we have excessive program expansions.
419 	 * We need to probe here before we do any reallocation where
420 	 * we afterwards may not fail anymore.
421 	 */
422 	if (insn_adj_cnt > cnt_max &&
423 	    bpf_adj_branches(prog, off, insn_delta, true))
424 		return NULL;
425 
426 	/* Several new instructions need to be inserted. Make room
427 	 * for them. Likely, there's no need for a new allocation as
428 	 * the last page could have large enough tailroom.
429 	 */
430 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
431 				    GFP_USER);
432 	if (!prog_adj)
433 		return NULL;
434 
435 	prog_adj->len = insn_adj_cnt;
436 
437 	/* Patching happens in 3 steps:
438 	 *
439 	 * 1) Move over tail of insnsi from next instruction onwards,
440 	 *    so we can patch the single target insn with one or more
441 	 *    new ones (patching is always from 1 to n insns, n > 0).
442 	 * 2) Inject new instructions at the target location.
443 	 * 3) Adjust branch offsets if necessary.
444 	 */
445 	insn_rest = insn_adj_cnt - off - len;
446 
447 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
448 		sizeof(*patch) * insn_rest);
449 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
450 
451 	/* We are guaranteed not to fail at this point; otherwise
452 	 * the ship has sailed and we could not revert to the original state. An
453 	 * overflow cannot happen at this point.
454 	 */
455 	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
456 
457 	bpf_adj_linfo(prog_adj, off, insn_delta);
458 
459 	return prog_adj;
460 }
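/* Example of the patching steps above, with hypothetical sizes: for
 * prog->len = 10, off = 4 and a patch of len = 3, insn_delta = 2 and
 * insn_adj_cnt = 12.  insn_rest = 12 - 4 - 3 = 5, so old insns 5..9 are
 * moved to slots 7..11, the three patch insns are copied into slots 4..6,
 * branches crossing slot 4 are adjusted by 2 and line info offsets past
 * slot 4 are pushed by 2 as well.
 */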
461 
462 void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
463 {
464 	int i;
465 
466 	for (i = 0; i < fp->aux->func_cnt; i++)
467 		bpf_prog_kallsyms_del(fp->aux->func[i]);
468 }
469 
470 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
471 {
472 	bpf_prog_kallsyms_del_subprogs(fp);
473 	bpf_prog_kallsyms_del(fp);
474 }
475 
476 #ifdef CONFIG_BPF_JIT
477 # define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
478 
479 /* All BPF JIT sysctl knobs here. */
480 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
481 int bpf_jit_harden   __read_mostly;
482 int bpf_jit_kallsyms __read_mostly;
483 int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
484 
485 static __always_inline void
486 bpf_get_prog_addr_region(const struct bpf_prog *prog,
487 			 unsigned long *symbol_start,
488 			 unsigned long *symbol_end)
489 {
490 	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
491 	unsigned long addr = (unsigned long)hdr;
492 
493 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
494 
495 	*symbol_start = addr;
496 	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
497 }
498 
499 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
500 {
501 	const char *end = sym + KSYM_NAME_LEN;
502 	const struct btf_type *type;
503 	const char *func_name;
504 
505 	BUILD_BUG_ON(sizeof("bpf_prog_") +
506 		     sizeof(prog->tag) * 2 +
507 		     /* name has been null terminated.
508 		      * We would need +1 for the '_' preceding
509 		      * the name.  However, the null character
510 		      * is double counted between the name and the
511 		      * sizeof("bpf_prog_") above, so we omit
512 		      * the +1 here.
513 		      */
514 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
515 
516 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
517 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
518 
519 	/* prog->aux->name will be ignored if full btf name is available */
520 	if (prog->aux->func_info_cnt) {
521 		type = btf_type_by_id(prog->aux->btf,
522 				      prog->aux->func_info[prog->aux->func_idx].type_id);
523 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
524 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
525 		return;
526 	}
527 
528 	if (prog->aux->name[0])
529 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
530 	else
531 		*sym = 0;
532 }
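/* The resulting symbol looks roughly like (the tag and name below are made
 * up for illustration):
 *
 *   bpf_prog_5c63ef0e6e954f2c          (no prog name / BTF func available)
 *   bpf_prog_5c63ef0e6e954f2c_myprog   (prog->aux->name or BTF func name)
 */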
533 
534 static __always_inline unsigned long
535 bpf_get_prog_addr_start(struct latch_tree_node *n)
536 {
537 	unsigned long symbol_start, symbol_end;
538 	const struct bpf_prog_aux *aux;
539 
540 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
541 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
542 
543 	return symbol_start;
544 }
545 
546 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
547 					  struct latch_tree_node *b)
548 {
549 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
550 }
551 
552 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
553 {
554 	unsigned long val = (unsigned long)key;
555 	unsigned long symbol_start, symbol_end;
556 	const struct bpf_prog_aux *aux;
557 
558 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
559 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
560 
561 	if (val < symbol_start)
562 		return -1;
563 	if (val >= symbol_end)
564 		return  1;
565 
566 	return 0;
567 }
568 
569 static const struct latch_tree_ops bpf_tree_ops = {
570 	.less	= bpf_tree_less,
571 	.comp	= bpf_tree_comp,
572 };
573 
574 static DEFINE_SPINLOCK(bpf_lock);
575 static LIST_HEAD(bpf_kallsyms);
576 static struct latch_tree_root bpf_tree __cacheline_aligned;
577 
578 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
579 {
580 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
581 	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
582 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
583 }
584 
585 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
586 {
587 	if (list_empty(&aux->ksym_lnode))
588 		return;
589 
590 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
591 	list_del_rcu(&aux->ksym_lnode);
592 }
593 
594 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
595 {
596 	return fp->jited && !bpf_prog_was_classic(fp);
597 }
598 
599 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
600 {
601 	return list_empty(&fp->aux->ksym_lnode) ||
602 	       fp->aux->ksym_lnode.prev == LIST_POISON2;
603 }
604 
605 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
606 {
607 	if (!bpf_prog_kallsyms_candidate(fp) ||
608 	    !capable(CAP_SYS_ADMIN))
609 		return;
610 
611 	spin_lock_bh(&bpf_lock);
612 	bpf_prog_ksym_node_add(fp->aux);
613 	spin_unlock_bh(&bpf_lock);
614 }
615 
616 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
617 {
618 	if (!bpf_prog_kallsyms_candidate(fp))
619 		return;
620 
621 	spin_lock_bh(&bpf_lock);
622 	bpf_prog_ksym_node_del(fp->aux);
623 	spin_unlock_bh(&bpf_lock);
624 }
625 
626 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
627 {
628 	struct latch_tree_node *n;
629 
630 	if (!bpf_jit_kallsyms_enabled())
631 		return NULL;
632 
633 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
634 	return n ?
635 	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
636 	       NULL;
637 }
638 
639 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
640 				 unsigned long *off, char *sym)
641 {
642 	unsigned long symbol_start, symbol_end;
643 	struct bpf_prog *prog;
644 	char *ret = NULL;
645 
646 	rcu_read_lock();
647 	prog = bpf_prog_kallsyms_find(addr);
648 	if (prog) {
649 		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
650 		bpf_get_prog_name(prog, sym);
651 
652 		ret = sym;
653 		if (size)
654 			*size = symbol_end - symbol_start;
655 		if (off)
656 			*off  = addr - symbol_start;
657 	}
658 	rcu_read_unlock();
659 
660 	return ret;
661 }
662 
663 bool is_bpf_text_address(unsigned long addr)
664 {
665 	bool ret;
666 
667 	rcu_read_lock();
668 	ret = bpf_prog_kallsyms_find(addr) != NULL;
669 	rcu_read_unlock();
670 
671 	return ret;
672 }
673 
674 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
675 		    char *sym)
676 {
677 	struct bpf_prog_aux *aux;
678 	unsigned int it = 0;
679 	int ret = -ERANGE;
680 
681 	if (!bpf_jit_kallsyms_enabled())
682 		return ret;
683 
684 	rcu_read_lock();
685 	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
686 		if (it++ != symnum)
687 			continue;
688 
689 		bpf_get_prog_name(aux->prog, sym);
690 
691 		*value = (unsigned long)aux->prog->bpf_func;
692 		*type  = BPF_SYM_ELF_TYPE;
693 
694 		ret = 0;
695 		break;
696 	}
697 	rcu_read_unlock();
698 
699 	return ret;
700 }
701 
702 static atomic_long_t bpf_jit_current;
703 
704 #if defined(MODULES_VADDR)
705 static int __init bpf_jit_charge_init(void)
706 {
707 	/* Only used as heuristic here to derive limit. */
708 	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
709 					    PAGE_SIZE), INT_MAX);
710 	return 0;
711 }
712 pure_initcall(bpf_jit_charge_init);
713 #endif
714 
715 static int bpf_jit_charge_modmem(u32 pages)
716 {
717 	if (atomic_long_add_return(pages, &bpf_jit_current) >
718 	    (bpf_jit_limit >> PAGE_SHIFT)) {
719 		if (!capable(CAP_SYS_ADMIN)) {
720 			atomic_long_sub(pages, &bpf_jit_current);
721 			return -EPERM;
722 		}
723 	}
724 
725 	return 0;
726 }
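/* For example, with the default limit of PAGE_SIZE * 40000 and 4 KiB pages,
 * the charge above is rejected for unprivileged callers once more than
 * 40000 pages (~156 MiB) of JIT images are accounted in bpf_jit_current.
 */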
727 
728 static void bpf_jit_uncharge_modmem(u32 pages)
729 {
730 	atomic_long_sub(pages, &bpf_jit_current);
731 }
732 
733 void *__weak bpf_jit_alloc_exec(unsigned long size)
734 {
735 	return module_alloc(size);
736 }
737 
738 void __weak bpf_jit_free_exec(void *addr)
739 {
740 	module_memfree(addr);
741 }
742 
743 struct bpf_binary_header *
744 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
745 		     unsigned int alignment,
746 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
747 {
748 	struct bpf_binary_header *hdr;
749 	u32 size, hole, start, pages;
750 
751 	/* Most BPF filters are really small, but if some of them
752 	 * fill a page, allow at least 128 extra bytes to insert a
753 	 * random section of illegal instructions.
754 	 */
755 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
756 	pages = size / PAGE_SIZE;
757 
758 	if (bpf_jit_charge_modmem(pages))
759 		return NULL;
760 	hdr = bpf_jit_alloc_exec(size);
761 	if (!hdr) {
762 		bpf_jit_uncharge_modmem(pages);
763 		return NULL;
764 	}
765 
766 	/* Fill space with illegal/arch-dep instructions. */
767 	bpf_fill_ill_insns(hdr, size);
768 
769 	hdr->pages = pages;
770 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
771 		     PAGE_SIZE - sizeof(*hdr));
772 	start = (get_random_int() % hole) & ~(alignment - 1);
773 
774 	/* Leave a random number of instructions before BPF code. */
775 	*image_ptr = &hdr->image[start];
776 
777 	return hdr;
778 }
779 
780 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
781 {
782 	u32 pages = hdr->pages;
783 
784 	bpf_jit_free_exec(hdr);
785 	bpf_jit_uncharge_modmem(pages);
786 }
787 
788 /* This symbol is only overridden by archs that have different
789  * requirements than the usual eBPF JITs, f.e. when they only
790  * implement cBPF JIT, do not set images read-only, etc.
791  */
792 void __weak bpf_jit_free(struct bpf_prog *fp)
793 {
794 	if (fp->jited) {
795 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
796 
797 		bpf_jit_binary_unlock_ro(hdr);
798 		bpf_jit_binary_free(hdr);
799 
800 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
801 	}
802 
803 	bpf_prog_unlock_free(fp);
804 }
805 
806 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
807 			  const struct bpf_insn *insn, bool extra_pass,
808 			  u64 *func_addr, bool *func_addr_fixed)
809 {
810 	s16 off = insn->off;
811 	s32 imm = insn->imm;
812 	u8 *addr;
813 
814 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
815 	if (!*func_addr_fixed) {
816 		/* Place-holder address till the last pass has collected
817 		 * all addresses for JITed subprograms in which case we
818 		 * can pick them up from prog->aux.
819 		 */
820 		if (!extra_pass)
821 			addr = NULL;
822 		else if (prog->aux->func &&
823 			 off >= 0 && off < prog->aux->func_cnt)
824 			addr = (u8 *)prog->aux->func[off]->bpf_func;
825 		else
826 			return -EINVAL;
827 	} else {
828 		/* Address of a BPF helper call. Since part of the core
829 		 * kernel, it's always at a fixed location. __bpf_call_base
830 		 * and the helper with imm relative to it are both in core
831 		 * kernel.
832 		 */
833 		addr = (u8 *)__bpf_call_base + imm;
834 	}
835 
836 	*func_addr = (unsigned long)addr;
837 	return 0;
838 }
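/* Usage sketch (hypothetical insn values): for a BPF_PSEUDO_CALL insn with
 * off == 2, the extra pass resolves *func_addr to
 * prog->aux->func[2]->bpf_func; for a plain helper call with imm == N it is
 * always __bpf_call_base + N, hence *func_addr_fixed == true.
 */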
839 
840 static int bpf_jit_blind_insn(const struct bpf_insn *from,
841 			      const struct bpf_insn *aux,
842 			      struct bpf_insn *to_buff)
843 {
844 	struct bpf_insn *to = to_buff;
845 	u32 imm_rnd = get_random_int();
846 	s16 off;
847 
848 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
849 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
850 
851 	if (from->imm == 0 &&
852 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
853 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
854 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
855 		goto out;
856 	}
857 
858 	switch (from->code) {
859 	case BPF_ALU | BPF_ADD | BPF_K:
860 	case BPF_ALU | BPF_SUB | BPF_K:
861 	case BPF_ALU | BPF_AND | BPF_K:
862 	case BPF_ALU | BPF_OR  | BPF_K:
863 	case BPF_ALU | BPF_XOR | BPF_K:
864 	case BPF_ALU | BPF_MUL | BPF_K:
865 	case BPF_ALU | BPF_MOV | BPF_K:
866 	case BPF_ALU | BPF_DIV | BPF_K:
867 	case BPF_ALU | BPF_MOD | BPF_K:
868 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
869 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
870 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
871 		break;
872 
873 	case BPF_ALU64 | BPF_ADD | BPF_K:
874 	case BPF_ALU64 | BPF_SUB | BPF_K:
875 	case BPF_ALU64 | BPF_AND | BPF_K:
876 	case BPF_ALU64 | BPF_OR  | BPF_K:
877 	case BPF_ALU64 | BPF_XOR | BPF_K:
878 	case BPF_ALU64 | BPF_MUL | BPF_K:
879 	case BPF_ALU64 | BPF_MOV | BPF_K:
880 	case BPF_ALU64 | BPF_DIV | BPF_K:
881 	case BPF_ALU64 | BPF_MOD | BPF_K:
882 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
883 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
884 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
885 		break;
886 
887 	case BPF_JMP | BPF_JEQ  | BPF_K:
888 	case BPF_JMP | BPF_JNE  | BPF_K:
889 	case BPF_JMP | BPF_JGT  | BPF_K:
890 	case BPF_JMP | BPF_JLT  | BPF_K:
891 	case BPF_JMP | BPF_JGE  | BPF_K:
892 	case BPF_JMP | BPF_JLE  | BPF_K:
893 	case BPF_JMP | BPF_JSGT | BPF_K:
894 	case BPF_JMP | BPF_JSLT | BPF_K:
895 	case BPF_JMP | BPF_JSGE | BPF_K:
896 	case BPF_JMP | BPF_JSLE | BPF_K:
897 	case BPF_JMP | BPF_JSET | BPF_K:
898 		/* Account for the extra offset in case of a backjump. */
899 		off = from->off;
900 		if (off < 0)
901 			off -= 2;
902 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
903 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
904 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
905 		break;
906 
907 	case BPF_LD | BPF_IMM | BPF_DW:
908 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
909 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
910 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
911 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
912 		break;
913 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
914 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
915 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
916 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
917 		break;
918 
919 	case BPF_ST | BPF_MEM | BPF_DW:
920 	case BPF_ST | BPF_MEM | BPF_W:
921 	case BPF_ST | BPF_MEM | BPF_H:
922 	case BPF_ST | BPF_MEM | BPF_B:
923 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
924 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
925 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
926 		break;
927 	}
928 out:
929 	return to - to_buff;
930 }
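/* Blinding example (the imm value is made up, imm_rnd is per-insn random):
 * the single insn
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is rewritten into the three insns
 *
 *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234)
 *   BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the constant 0x1234 never appears verbatim in the JITed image.
 */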
931 
932 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
933 					      gfp_t gfp_extra_flags)
934 {
935 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
936 	struct bpf_prog *fp;
937 
938 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
939 	if (fp != NULL) {
940 		/* aux->prog still points to the fp_other one, so
941 		 * when promoting the clone to the real program,
942 		 * this still needs to be adapted.
943 		 */
944 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
945 	}
946 
947 	return fp;
948 }
949 
950 static void bpf_prog_clone_free(struct bpf_prog *fp)
951 {
952 	/* aux was stolen by the other clone, so we cannot free
953 	 * it from this path! It will be freed eventually by the
954 	 * other program on release.
955 	 *
956 	 * At this point, we don't need a deferred release since
957 	 * clone is guaranteed to not be locked.
958 	 */
959 	fp->aux = NULL;
960 	__bpf_prog_free(fp);
961 }
962 
963 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
964 {
965 	/* We have to repoint aux->prog to self, as we don't
966 	 * know whether fp here is the clone or the original.
967 	 */
968 	fp->aux->prog = fp;
969 	bpf_prog_clone_free(fp_other);
970 }
971 
972 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
973 {
974 	struct bpf_insn insn_buff[16], aux[2];
975 	struct bpf_prog *clone, *tmp;
976 	int insn_delta, insn_cnt;
977 	struct bpf_insn *insn;
978 	int i, rewritten;
979 
980 	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
981 		return prog;
982 
983 	clone = bpf_prog_clone_create(prog, GFP_USER);
984 	if (!clone)
985 		return ERR_PTR(-ENOMEM);
986 
987 	insn_cnt = clone->len;
988 	insn = clone->insnsi;
989 
990 	for (i = 0; i < insn_cnt; i++, insn++) {
991 		/* We temporarily need to hold the original ld64 insn
992 		 * so that we can still access the first part in the
993 		 * second blinding run.
994 		 */
995 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
996 		    insn[1].code == 0)
997 			memcpy(aux, insn, sizeof(aux));
998 
999 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
1000 		if (!rewritten)
1001 			continue;
1002 
1003 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1004 		if (!tmp) {
1005 			/* Patching may have repointed aux->prog during
1006 			 * realloc from the original one, so we need to
1007 			 * fix it up here on error.
1008 			 */
1009 			bpf_jit_prog_release_other(prog, clone);
1010 			return ERR_PTR(-ENOMEM);
1011 		}
1012 
1013 		clone = tmp;
1014 		insn_delta = rewritten - 1;
1015 
1016 		/* Walk new program and skip insns we just inserted. */
1017 		insn = clone->insnsi + i + insn_delta;
1018 		insn_cnt += insn_delta;
1019 		i        += insn_delta;
1020 	}
1021 
1022 	clone->blinded = 1;
1023 	return clone;
1024 }
1025 #endif /* CONFIG_BPF_JIT */
1026 
1027 /* Base function for offset calculation. Needs to go into .text section,
1028  * therefore keeping it non-static as well; will also be used by JITs
1029  * anyway later on, so do not let the compiler omit it. This also needs
1030  * to go into kallsyms for correlation from e.g. bpftool, so naming
1031  * must not change.
1032  */
1033 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1034 {
1035 	return 0;
1036 }
1037 EXPORT_SYMBOL_GPL(__bpf_call_base);
1038 
1039 /* All UAPI available opcodes. */
1040 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
1041 	/* 32 bit ALU operations. */		\
1042 	/*   Register based. */			\
1043 	INSN_3(ALU, ADD,  X),			\
1044 	INSN_3(ALU, SUB,  X),			\
1045 	INSN_3(ALU, AND,  X),			\
1046 	INSN_3(ALU, OR,   X),			\
1047 	INSN_3(ALU, LSH,  X),			\
1048 	INSN_3(ALU, RSH,  X),			\
1049 	INSN_3(ALU, XOR,  X),			\
1050 	INSN_3(ALU, MUL,  X),			\
1051 	INSN_3(ALU, MOV,  X),			\
1052 	INSN_3(ALU, ARSH, X),			\
1053 	INSN_3(ALU, DIV,  X),			\
1054 	INSN_3(ALU, MOD,  X),			\
1055 	INSN_2(ALU, NEG),			\
1056 	INSN_3(ALU, END, TO_BE),		\
1057 	INSN_3(ALU, END, TO_LE),		\
1058 	/*   Immediate based. */		\
1059 	INSN_3(ALU, ADD,  K),			\
1060 	INSN_3(ALU, SUB,  K),			\
1061 	INSN_3(ALU, AND,  K),			\
1062 	INSN_3(ALU, OR,   K),			\
1063 	INSN_3(ALU, LSH,  K),			\
1064 	INSN_3(ALU, RSH,  K),			\
1065 	INSN_3(ALU, XOR,  K),			\
1066 	INSN_3(ALU, MUL,  K),			\
1067 	INSN_3(ALU, MOV,  K),			\
1068 	INSN_3(ALU, ARSH, K),			\
1069 	INSN_3(ALU, DIV,  K),			\
1070 	INSN_3(ALU, MOD,  K),			\
1071 	/* 64 bit ALU operations. */		\
1072 	/*   Register based. */			\
1073 	INSN_3(ALU64, ADD,  X),			\
1074 	INSN_3(ALU64, SUB,  X),			\
1075 	INSN_3(ALU64, AND,  X),			\
1076 	INSN_3(ALU64, OR,   X),			\
1077 	INSN_3(ALU64, LSH,  X),			\
1078 	INSN_3(ALU64, RSH,  X),			\
1079 	INSN_3(ALU64, XOR,  X),			\
1080 	INSN_3(ALU64, MUL,  X),			\
1081 	INSN_3(ALU64, MOV,  X),			\
1082 	INSN_3(ALU64, ARSH, X),			\
1083 	INSN_3(ALU64, DIV,  X),			\
1084 	INSN_3(ALU64, MOD,  X),			\
1085 	INSN_2(ALU64, NEG),			\
1086 	/*   Immediate based. */		\
1087 	INSN_3(ALU64, ADD,  K),			\
1088 	INSN_3(ALU64, SUB,  K),			\
1089 	INSN_3(ALU64, AND,  K),			\
1090 	INSN_3(ALU64, OR,   K),			\
1091 	INSN_3(ALU64, LSH,  K),			\
1092 	INSN_3(ALU64, RSH,  K),			\
1093 	INSN_3(ALU64, XOR,  K),			\
1094 	INSN_3(ALU64, MUL,  K),			\
1095 	INSN_3(ALU64, MOV,  K),			\
1096 	INSN_3(ALU64, ARSH, K),			\
1097 	INSN_3(ALU64, DIV,  K),			\
1098 	INSN_3(ALU64, MOD,  K),			\
1099 	/* Call instruction. */			\
1100 	INSN_2(JMP, CALL),			\
1101 	/* Exit instruction. */			\
1102 	INSN_2(JMP, EXIT),			\
1103 	/* Jump instructions. */		\
1104 	/*   Register based. */			\
1105 	INSN_3(JMP, JEQ,  X),			\
1106 	INSN_3(JMP, JNE,  X),			\
1107 	INSN_3(JMP, JGT,  X),			\
1108 	INSN_3(JMP, JLT,  X),			\
1109 	INSN_3(JMP, JGE,  X),			\
1110 	INSN_3(JMP, JLE,  X),			\
1111 	INSN_3(JMP, JSGT, X),			\
1112 	INSN_3(JMP, JSLT, X),			\
1113 	INSN_3(JMP, JSGE, X),			\
1114 	INSN_3(JMP, JSLE, X),			\
1115 	INSN_3(JMP, JSET, X),			\
1116 	/*   Immediate based. */		\
1117 	INSN_3(JMP, JEQ,  K),			\
1118 	INSN_3(JMP, JNE,  K),			\
1119 	INSN_3(JMP, JGT,  K),			\
1120 	INSN_3(JMP, JLT,  K),			\
1121 	INSN_3(JMP, JGE,  K),			\
1122 	INSN_3(JMP, JLE,  K),			\
1123 	INSN_3(JMP, JSGT, K),			\
1124 	INSN_3(JMP, JSLT, K),			\
1125 	INSN_3(JMP, JSGE, K),			\
1126 	INSN_3(JMP, JSLE, K),			\
1127 	INSN_3(JMP, JSET, K),			\
1128 	INSN_2(JMP, JA),			\
1129 	/* Store instructions. */		\
1130 	/*   Register based. */			\
1131 	INSN_3(STX, MEM,  B),			\
1132 	INSN_3(STX, MEM,  H),			\
1133 	INSN_3(STX, MEM,  W),			\
1134 	INSN_3(STX, MEM,  DW),			\
1135 	INSN_3(STX, XADD, W),			\
1136 	INSN_3(STX, XADD, DW),			\
1137 	/*   Immediate based. */		\
1138 	INSN_3(ST, MEM, B),			\
1139 	INSN_3(ST, MEM, H),			\
1140 	INSN_3(ST, MEM, W),			\
1141 	INSN_3(ST, MEM, DW),			\
1142 	/* Load instructions. */		\
1143 	/*   Register based. */			\
1144 	INSN_3(LDX, MEM, B),			\
1145 	INSN_3(LDX, MEM, H),			\
1146 	INSN_3(LDX, MEM, W),			\
1147 	INSN_3(LDX, MEM, DW),			\
1148 	/*   Immediate based. */		\
1149 	INSN_3(LD, IMM, DW)
1150 
1151 bool bpf_opcode_in_insntable(u8 code)
1152 {
1153 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1154 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1155 	static const bool public_insntable[256] = {
1156 		[0 ... 255] = false,
1157 		/* Now overwrite non-defaults ... */
1158 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1159 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1160 		[BPF_LD | BPF_ABS | BPF_B] = true,
1161 		[BPF_LD | BPF_ABS | BPF_H] = true,
1162 		[BPF_LD | BPF_ABS | BPF_W] = true,
1163 		[BPF_LD | BPF_IND | BPF_B] = true,
1164 		[BPF_LD | BPF_IND | BPF_H] = true,
1165 		[BPF_LD | BPF_IND | BPF_W] = true,
1166 	};
1167 #undef BPF_INSN_3_TBL
1168 #undef BPF_INSN_2_TBL
1169 	return public_insntable[code];
1170 }
1171 
1172 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1173 /**
1174  *	___bpf_prog_run - run eBPF program on a given context
1175  *	@regs: is the array of eBPF pseudo-registers
1176  *	@insn: is the array of eBPF instructions
1177  *	@stack: is the eBPF storage stack
1178  * Decode and execute eBPF instructions.
1179  */
1180 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1181 {
1182 	u64 tmp;
1183 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1184 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1185 	static const void *jumptable[256] = {
1186 		[0 ... 255] = &&default_label,
1187 		/* Now overwrite non-defaults ... */
1188 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1189 		/* Non-UAPI available opcodes. */
1190 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1191 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1192 	};
1193 #undef BPF_INSN_3_LBL
1194 #undef BPF_INSN_2_LBL
1195 	u32 tail_call_cnt = 0;
1196 
1197 #define CONT	 ({ insn++; goto select_insn; })
1198 #define CONT_JMP ({ insn++; goto select_insn; })
1199 
1200 select_insn:
1201 	goto *jumptable[insn->code];
1202 
1203 	/* ALU */
1204 #define ALU(OPCODE, OP)			\
1205 	ALU64_##OPCODE##_X:		\
1206 		DST = DST OP SRC;	\
1207 		CONT;			\
1208 	ALU_##OPCODE##_X:		\
1209 		DST = (u32) DST OP (u32) SRC;	\
1210 		CONT;			\
1211 	ALU64_##OPCODE##_K:		\
1212 		DST = DST OP IMM;		\
1213 		CONT;			\
1214 	ALU_##OPCODE##_K:		\
1215 		DST = (u32) DST OP (u32) IMM;	\
1216 		CONT;
1217 
1218 	ALU(ADD,  +)
1219 	ALU(SUB,  -)
1220 	ALU(AND,  &)
1221 	ALU(OR,   |)
1222 	ALU(LSH, <<)
1223 	ALU(RSH, >>)
1224 	ALU(XOR,  ^)
1225 	ALU(MUL,  *)
1226 #undef ALU
1227 	ALU_NEG:
1228 		DST = (u32) -DST;
1229 		CONT;
1230 	ALU64_NEG:
1231 		DST = -DST;
1232 		CONT;
1233 	ALU_MOV_X:
1234 		DST = (u32) SRC;
1235 		CONT;
1236 	ALU_MOV_K:
1237 		DST = (u32) IMM;
1238 		CONT;
1239 	ALU64_MOV_X:
1240 		DST = SRC;
1241 		CONT;
1242 	ALU64_MOV_K:
1243 		DST = IMM;
1244 		CONT;
1245 	LD_IMM_DW:
1246 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1247 		insn++;
1248 		CONT;
1249 	ALU_ARSH_X:
1250 		DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
1251 		CONT;
1252 	ALU_ARSH_K:
1253 		DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
1254 		CONT;
1255 	ALU64_ARSH_X:
1256 		(*(s64 *) &DST) >>= SRC;
1257 		CONT;
1258 	ALU64_ARSH_K:
1259 		(*(s64 *) &DST) >>= IMM;
1260 		CONT;
1261 	ALU64_MOD_X:
1262 		div64_u64_rem(DST, SRC, &tmp);
1263 		DST = tmp;
1264 		CONT;
1265 	ALU_MOD_X:
1266 		tmp = (u32) DST;
1267 		DST = do_div(tmp, (u32) SRC);
1268 		CONT;
1269 	ALU64_MOD_K:
1270 		div64_u64_rem(DST, IMM, &tmp);
1271 		DST = tmp;
1272 		CONT;
1273 	ALU_MOD_K:
1274 		tmp = (u32) DST;
1275 		DST = do_div(tmp, (u32) IMM);
1276 		CONT;
1277 	ALU64_DIV_X:
1278 		DST = div64_u64(DST, SRC);
1279 		CONT;
1280 	ALU_DIV_X:
1281 		tmp = (u32) DST;
1282 		do_div(tmp, (u32) SRC);
1283 		DST = (u32) tmp;
1284 		CONT;
1285 	ALU64_DIV_K:
1286 		DST = div64_u64(DST, IMM);
1287 		CONT;
1288 	ALU_DIV_K:
1289 		tmp = (u32) DST;
1290 		do_div(tmp, (u32) IMM);
1291 		DST = (u32) tmp;
1292 		CONT;
1293 	ALU_END_TO_BE:
1294 		switch (IMM) {
1295 		case 16:
1296 			DST = (__force u16) cpu_to_be16(DST);
1297 			break;
1298 		case 32:
1299 			DST = (__force u32) cpu_to_be32(DST);
1300 			break;
1301 		case 64:
1302 			DST = (__force u64) cpu_to_be64(DST);
1303 			break;
1304 		}
1305 		CONT;
1306 	ALU_END_TO_LE:
1307 		switch (IMM) {
1308 		case 16:
1309 			DST = (__force u16) cpu_to_le16(DST);
1310 			break;
1311 		case 32:
1312 			DST = (__force u32) cpu_to_le32(DST);
1313 			break;
1314 		case 64:
1315 			DST = (__force u64) cpu_to_le64(DST);
1316 			break;
1317 		}
1318 		CONT;
1319 
1320 	/* CALL */
1321 	JMP_CALL:
1322 		/* Function call scratches BPF_R1-BPF_R5 registers,
1323 		 * preserves BPF_R6-BPF_R9, and stores return value
1324 		 * into BPF_R0.
1325 		 */
1326 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1327 						       BPF_R4, BPF_R5);
1328 		CONT;
1329 
1330 	JMP_CALL_ARGS:
1331 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1332 							    BPF_R3, BPF_R4,
1333 							    BPF_R5,
1334 							    insn + insn->off + 1);
1335 		CONT;
1336 
1337 	JMP_TAIL_CALL: {
1338 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1339 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1340 		struct bpf_prog *prog;
1341 		u32 index = BPF_R3;
1342 
1343 		if (unlikely(index >= array->map.max_entries))
1344 			goto out;
1345 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1346 			goto out;
1347 
1348 		tail_call_cnt++;
1349 
1350 		prog = READ_ONCE(array->ptrs[index]);
1351 		if (!prog)
1352 			goto out;
1353 
1354 		/* ARG1 at this point is guaranteed to point to CTX from
1355 		 * the verifier side due to the fact that the tail call is
1356 		 * handled like a helper, that is, bpf_tail_call_proto,
1357 		 * where arg1_type is ARG_PTR_TO_CTX.
1358 		 */
1359 		insn = prog->insnsi;
1360 		goto select_insn;
1361 out:
1362 		CONT;
1363 	}
1364 	/* JMP */
1365 	JMP_JA:
1366 		insn += insn->off;
1367 		CONT;
1368 	JMP_JEQ_X:
1369 		if (DST == SRC) {
1370 			insn += insn->off;
1371 			CONT_JMP;
1372 		}
1373 		CONT;
1374 	JMP_JEQ_K:
1375 		if (DST == IMM) {
1376 			insn += insn->off;
1377 			CONT_JMP;
1378 		}
1379 		CONT;
1380 	JMP_JNE_X:
1381 		if (DST != SRC) {
1382 			insn += insn->off;
1383 			CONT_JMP;
1384 		}
1385 		CONT;
1386 	JMP_JNE_K:
1387 		if (DST != IMM) {
1388 			insn += insn->off;
1389 			CONT_JMP;
1390 		}
1391 		CONT;
1392 	JMP_JGT_X:
1393 		if (DST > SRC) {
1394 			insn += insn->off;
1395 			CONT_JMP;
1396 		}
1397 		CONT;
1398 	JMP_JGT_K:
1399 		if (DST > IMM) {
1400 			insn += insn->off;
1401 			CONT_JMP;
1402 		}
1403 		CONT;
1404 	JMP_JLT_X:
1405 		if (DST < SRC) {
1406 			insn += insn->off;
1407 			CONT_JMP;
1408 		}
1409 		CONT;
1410 	JMP_JLT_K:
1411 		if (DST < IMM) {
1412 			insn += insn->off;
1413 			CONT_JMP;
1414 		}
1415 		CONT;
1416 	JMP_JGE_X:
1417 		if (DST >= SRC) {
1418 			insn += insn->off;
1419 			CONT_JMP;
1420 		}
1421 		CONT;
1422 	JMP_JGE_K:
1423 		if (DST >= IMM) {
1424 			insn += insn->off;
1425 			CONT_JMP;
1426 		}
1427 		CONT;
1428 	JMP_JLE_X:
1429 		if (DST <= SRC) {
1430 			insn += insn->off;
1431 			CONT_JMP;
1432 		}
1433 		CONT;
1434 	JMP_JLE_K:
1435 		if (DST <= IMM) {
1436 			insn += insn->off;
1437 			CONT_JMP;
1438 		}
1439 		CONT;
1440 	JMP_JSGT_X:
1441 		if (((s64) DST) > ((s64) SRC)) {
1442 			insn += insn->off;
1443 			CONT_JMP;
1444 		}
1445 		CONT;
1446 	JMP_JSGT_K:
1447 		if (((s64) DST) > ((s64) IMM)) {
1448 			insn += insn->off;
1449 			CONT_JMP;
1450 		}
1451 		CONT;
1452 	JMP_JSLT_X:
1453 		if (((s64) DST) < ((s64) SRC)) {
1454 			insn += insn->off;
1455 			CONT_JMP;
1456 		}
1457 		CONT;
1458 	JMP_JSLT_K:
1459 		if (((s64) DST) < ((s64) IMM)) {
1460 			insn += insn->off;
1461 			CONT_JMP;
1462 		}
1463 		CONT;
1464 	JMP_JSGE_X:
1465 		if (((s64) DST) >= ((s64) SRC)) {
1466 			insn += insn->off;
1467 			CONT_JMP;
1468 		}
1469 		CONT;
1470 	JMP_JSGE_K:
1471 		if (((s64) DST) >= ((s64) IMM)) {
1472 			insn += insn->off;
1473 			CONT_JMP;
1474 		}
1475 		CONT;
1476 	JMP_JSLE_X:
1477 		if (((s64) DST) <= ((s64) SRC)) {
1478 			insn += insn->off;
1479 			CONT_JMP;
1480 		}
1481 		CONT;
1482 	JMP_JSLE_K:
1483 		if (((s64) DST) <= ((s64) IMM)) {
1484 			insn += insn->off;
1485 			CONT_JMP;
1486 		}
1487 		CONT;
1488 	JMP_JSET_X:
1489 		if (DST & SRC) {
1490 			insn += insn->off;
1491 			CONT_JMP;
1492 		}
1493 		CONT;
1494 	JMP_JSET_K:
1495 		if (DST & IMM) {
1496 			insn += insn->off;
1497 			CONT_JMP;
1498 		}
1499 		CONT;
1500 	JMP_EXIT:
1501 		return BPF_R0;
1502 
1503 	/* STX and ST and LDX */
1504 #define LDST(SIZEOP, SIZE)						\
1505 	STX_MEM_##SIZEOP:						\
1506 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1507 		CONT;							\
1508 	ST_MEM_##SIZEOP:						\
1509 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1510 		CONT;							\
1511 	LDX_MEM_##SIZEOP:						\
1512 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1513 		CONT;
1514 
1515 	LDST(B,   u8)
1516 	LDST(H,  u16)
1517 	LDST(W,  u32)
1518 	LDST(DW, u64)
1519 #undef LDST
1520 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1521 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1522 			   (DST + insn->off));
1523 		CONT;
1524 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1525 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1526 			     (DST + insn->off));
1527 		CONT;
1528 
1529 	default_label:
1530 		/* If we ever reach this, we have a bug somewhere. Die hard here
1531 		 * instead of just returning 0; we could be somewhere in a subprog,
1532 		 * so execution could continue otherwise which we do /not/ want.
1533 		 *
1534 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1535 		 */
1536 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1537 		BUG_ON(1);
1538 		return 0;
1539 }
1540 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1541 
1542 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1543 #define DEFINE_BPF_PROG_RUN(stack_size) \
1544 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1545 { \
1546 	u64 stack[stack_size / sizeof(u64)]; \
1547 	u64 regs[MAX_BPF_REG]; \
1548 \
1549 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1550 	ARG1 = (u64) (unsigned long) ctx; \
1551 	return ___bpf_prog_run(regs, insn, stack); \
1552 }
1553 
1554 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1555 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1556 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1557 				      const struct bpf_insn *insn) \
1558 { \
1559 	u64 stack[stack_size / sizeof(u64)]; \
1560 	u64 regs[MAX_BPF_REG]; \
1561 \
1562 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1563 	BPF_R1 = r1; \
1564 	BPF_R2 = r2; \
1565 	BPF_R3 = r3; \
1566 	BPF_R4 = r4; \
1567 	BPF_R5 = r5; \
1568 	return ___bpf_prog_run(regs, insn, stack); \
1569 }
1570 
1571 #define EVAL1(FN, X) FN(X)
1572 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1573 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1574 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1575 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1576 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1577 
1578 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1579 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1580 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1581 
1582 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1583 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1584 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1585 
1586 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1587 
1588 static unsigned int (*interpreters[])(const void *ctx,
1589 				      const struct bpf_insn *insn) = {
1590 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1591 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1592 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1593 };
1594 #undef PROG_NAME_LIST
1595 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1596 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1597 				  const struct bpf_insn *insn) = {
1598 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1599 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1600 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1601 };
1602 #undef PROG_NAME_LIST
1603 
1604 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1605 {
1606 	stack_depth = max_t(u32, stack_depth, 1);
1607 	insn->off = (s16) insn->imm;
1608 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1609 		__bpf_call_base_args;
1610 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1611 }
1612 
1613 #else
1614 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1615 					 const struct bpf_insn *insn)
1616 {
1617 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1618 	 * is not working properly, so warn about it!
1619 	 */
1620 	WARN_ON_ONCE(1);
1621 	return 0;
1622 }
1623 #endif
1624 
1625 bool bpf_prog_array_compatible(struct bpf_array *array,
1626 			       const struct bpf_prog *fp)
1627 {
1628 	if (fp->kprobe_override)
1629 		return false;
1630 
1631 	if (!array->owner_prog_type) {
1632 		/* There's no owner yet where we could check for
1633 		 * compatibility.
1634 		 */
1635 		array->owner_prog_type = fp->type;
1636 		array->owner_jited = fp->jited;
1637 
1638 		return true;
1639 	}
1640 
1641 	return array->owner_prog_type == fp->type &&
1642 	       array->owner_jited == fp->jited;
1643 }
1644 
1645 static int bpf_check_tail_call(const struct bpf_prog *fp)
1646 {
1647 	struct bpf_prog_aux *aux = fp->aux;
1648 	int i;
1649 
1650 	for (i = 0; i < aux->used_map_cnt; i++) {
1651 		struct bpf_map *map = aux->used_maps[i];
1652 		struct bpf_array *array;
1653 
1654 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1655 			continue;
1656 
1657 		array = container_of(map, struct bpf_array, map);
1658 		if (!bpf_prog_array_compatible(array, fp))
1659 			return -EINVAL;
1660 	}
1661 
1662 	return 0;
1663 }
1664 
1665 static void bpf_prog_select_func(struct bpf_prog *fp)
1666 {
1667 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1668 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1669 
1670 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1671 #else
1672 	fp->bpf_func = __bpf_prog_ret0_warn;
1673 #endif
1674 }
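/* Selection example (hypothetical depth): a prog with aux->stack_depth == 72
 * rounds up to 96, so interpreters[96 / 32 - 1] == __bpf_prog_run96 is
 * chosen, which reserves a 96 byte (12 x u64) stack and points FP just past
 * its end before handing off to ___bpf_prog_run().
 */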
1675 
1676 /**
1677  *	bpf_prog_select_runtime - select exec runtime for BPF program
1678  *	@fp: bpf_prog populated with internal BPF program
1679  *	@err: pointer to error variable
1680  *
1681  * Try to JIT eBPF program, if JIT is not available, use interpreter.
1682  * The BPF program will be executed via BPF_PROG_RUN() macro.
1683  */
1684 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1685 {
1686 	/* In case of BPF-to-BPF calls, the verifier did all the prep
1687 	 * work with regard to JITing, etc.
1688 	 */
1689 	if (fp->bpf_func)
1690 		goto finalize;
1691 
1692 	bpf_prog_select_func(fp);
1693 
1694 	/* eBPF JITs can rewrite the program in case constant
1695 	 * blinding is active. However, in case of error during
1696 	 * blinding, bpf_int_jit_compile() must always return a
1697 	 * valid program, which in this case would simply not
1698 	 * be JITed, but fall back to the interpreter.
1699 	 */
1700 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1701 		*err = bpf_prog_alloc_jited_linfo(fp);
1702 		if (*err)
1703 			return fp;
1704 
1705 		fp = bpf_int_jit_compile(fp);
1706 		if (!fp->jited) {
1707 			bpf_prog_free_jited_linfo(fp);
1708 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1709 			*err = -ENOTSUPP;
1710 			return fp;
1711 #endif
1712 		} else {
1713 			bpf_prog_free_unused_jited_linfo(fp);
1714 		}
1715 	} else {
1716 		*err = bpf_prog_offload_compile(fp);
1717 		if (*err)
1718 			return fp;
1719 	}
1720 
1721 finalize:
1722 	bpf_prog_lock_ro(fp);
1723 
1724 	/* The tail call compatibility check can only be done at
1725 	 * this late stage as we need to determine whether we deal
1726 	 * with JITed or non-JITed program concatenations, and not
1727 	 * all eBPF JITs might immediately support all features.
1728 	 */
1729 	*err = bpf_check_tail_call(fp);
1730 
1731 	return fp;
1732 }
1733 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1734 
1735 static unsigned int __bpf_prog_ret1(const void *ctx,
1736 				    const struct bpf_insn *insn)
1737 {
1738 	return 1;
1739 }
1740 
1741 static struct bpf_prog_dummy {
1742 	struct bpf_prog prog;
1743 } dummy_bpf_prog = {
1744 	.prog = {
1745 		.bpf_func = __bpf_prog_ret1,
1746 	},
1747 };
1748 
1749 /* To avoid allocating an empty bpf_prog_array for cgroups that
1750  * don't have a bpf program attached, use one global 'empty_prog_array'.
1751  * It will not be modified by the caller of bpf_prog_array_alloc()
1752  * (since the caller requested prog_cnt == 0); that pointer should
1753  * still be 'freed' by bpf_prog_array_free().
1754  */
1755 static struct {
1756 	struct bpf_prog_array hdr;
1757 	struct bpf_prog *null_prog;
1758 } empty_prog_array = {
1759 	.null_prog = NULL,
1760 };
1761 
1762 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1763 {
1764 	if (prog_cnt)
1765 		return kzalloc(sizeof(struct bpf_prog_array) +
1766 			       sizeof(struct bpf_prog_array_item) *
1767 			       (prog_cnt + 1),
1768 			       flags);
1769 
1770 	return &empty_prog_array.hdr;
1771 }
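/* Layout sketch: bpf_prog_array_alloc(2, GFP_KERNEL) returns zeroed storage
 * for items[0..2], so items[2].prog == NULL already terminates the array;
 * with prog_cnt == 0 the shared &empty_prog_array.hdr is handed out instead,
 * and bpf_prog_array_free() below knows to leave it alone.
 */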
1772 
1773 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1774 {
1775 	if (!progs ||
1776 	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1777 		return;
1778 	kfree_rcu(progs, rcu);
1779 }
1780 
1781 int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
1782 {
1783 	struct bpf_prog_array_item *item;
1784 	u32 cnt = 0;
1785 
1786 	rcu_read_lock();
1787 	item = rcu_dereference(array)->items;
1788 	for (; item->prog; item++)
1789 		if (item->prog != &dummy_bpf_prog.prog)
1790 			cnt++;
1791 	rcu_read_unlock();
1792 	return cnt;
1793 }
1794 
1795 
1796 static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
1797 				     u32 *prog_ids,
1798 				     u32 request_cnt)
1799 {
1800 	struct bpf_prog_array_item *item;
1801 	int i = 0;
1802 
1803 	item = rcu_dereference_check(array, 1)->items;
1804 	for (; item->prog; item++) {
1805 		if (item->prog == &dummy_bpf_prog.prog)
1806 			continue;
1807 		prog_ids[i] = item->prog->aux->id;
1808 		if (++i == request_cnt) {
1809 			item++;
1810 			break;
1811 		}
1812 	}
1813 
1814 	return !!(item->prog);
1815 }
1816 
1817 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
1818 				__u32 __user *prog_ids, u32 cnt)
1819 {
1820 	unsigned long err = 0;
1821 	bool nospc;
1822 	u32 *ids;
1823 
1824 	/* users of this function are doing:
1825 	 * cnt = bpf_prog_array_length();
1826 	 * if (cnt > 0)
1827 	 *     bpf_prog_array_copy_to_user(..., cnt);
1828 	 * so below kcalloc doesn't need extra cnt > 0 check, but
1829 	 * bpf_prog_array_length() releases rcu lock and
1830 	 * prog array could have been swapped with empty or larger array,
1831 	 * so always copy 'cnt' prog_ids to the user.
1832 	 * In a rare race the user will see zero prog_ids
1833 	 */
1834 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1835 	if (!ids)
1836 		return -ENOMEM;
1837 	rcu_read_lock();
1838 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1839 	rcu_read_unlock();
1840 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1841 	kfree(ids);
1842 	if (err)
1843 		return -EFAULT;
1844 	if (nospc)
1845 		return -ENOSPC;
1846 	return 0;
1847 }
1848 
1849 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
1850 				struct bpf_prog *old_prog)
1851 {
1852 	struct bpf_prog_array_item *item = array->items;
1853 
1854 	for (; item->prog; item++)
1855 		if (item->prog == old_prog) {
1856 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1857 			break;
1858 		}
1859 }
1860 
1861 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1862 			struct bpf_prog *exclude_prog,
1863 			struct bpf_prog *include_prog,
1864 			struct bpf_prog_array **new_array)
1865 {
1866 	int new_prog_cnt, carry_prog_cnt = 0;
1867 	struct bpf_prog_array_item *existing;
1868 	struct bpf_prog_array *array;
1869 	bool found_exclude = false;
1870 	int new_prog_idx = 0;
1871 
1872 	/* Figure out how many existing progs we need to carry over to
1873 	 * the new array.
1874 	 */
1875 	if (old_array) {
1876 		existing = old_array->items;
1877 		for (; existing->prog; existing++) {
1878 			if (existing->prog == exclude_prog) {
1879 				found_exclude = true;
1880 				continue;
1881 			}
1882 			if (existing->prog != &dummy_bpf_prog.prog)
1883 				carry_prog_cnt++;
1884 			if (existing->prog == include_prog)
1885 				return -EEXIST;
1886 		}
1887 	}
1888 
1889 	if (exclude_prog && !found_exclude)
1890 		return -ENOENT;
1891 
1892 	/* How many progs (not NULL) will be in the new array? */
1893 	new_prog_cnt = carry_prog_cnt;
1894 	if (include_prog)
1895 		new_prog_cnt += 1;
1896 
1897 	/* Do we have any prog (not NULL) in the new array? */
1898 	if (!new_prog_cnt) {
1899 		*new_array = NULL;
1900 		return 0;
1901 	}
1902 
1903 	/* +1 as the end of prog_array is marked with NULL */
1904 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1905 	if (!array)
1906 		return -ENOMEM;
1907 
1908 	/* Fill in the new prog array */
1909 	if (carry_prog_cnt) {
1910 		existing = old_array->items;
1911 		for (; existing->prog; existing++)
1912 			if (existing->prog != exclude_prog &&
1913 			    existing->prog != &dummy_bpf_prog.prog) {
1914 				array->items[new_prog_idx++].prog =
1915 					existing->prog;
1916 			}
1917 	}
1918 	if (include_prog)
1919 		array->items[new_prog_idx++].prog = include_prog;
1920 	array->items[new_prog_idx].prog = NULL;
1921 	*new_array = array;
1922 	return 0;
1923 }
1924 
1925 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1926 			     u32 *prog_ids, u32 request_cnt,
1927 			     u32 *prog_cnt)
1928 {
1929 	u32 cnt = 0;
1930 
1931 	if (array)
1932 		cnt = bpf_prog_array_length(array);
1933 
1934 	*prog_cnt = cnt;
1935 
1936 	/* return early if user requested only program count or nothing to copy */
1937 	if (!request_cnt || !cnt)
1938 		return 0;
1939 
1940 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1941 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1942 								     : 0;
1943 }
1944 
1945 static void bpf_prog_free_deferred(struct work_struct *work)
1946 {
1947 	struct bpf_prog_aux *aux;
1948 	int i;
1949 
1950 	aux = container_of(work, struct bpf_prog_aux, work);
1951 	if (bpf_prog_is_dev_bound(aux))
1952 		bpf_prog_offload_destroy(aux->prog);
1953 #ifdef CONFIG_PERF_EVENTS
1954 	if (aux->prog->has_callchain_buf)
1955 		put_callchain_buffers();
1956 #endif
1957 	for (i = 0; i < aux->func_cnt; i++)
1958 		bpf_jit_free(aux->func[i]);
1959 	if (aux->func_cnt) {
1960 		kfree(aux->func);
1961 		bpf_prog_unlock_free(aux->prog);
1962 	} else {
1963 		bpf_jit_free(aux->prog);
1964 	}
1965 }
1966 
1967 /* Free internal BPF program */
1968 void bpf_prog_free(struct bpf_prog *fp)
1969 {
1970 	struct bpf_prog_aux *aux = fp->aux;
1971 
1972 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
1973 	schedule_work(&aux->work);
1974 }
1975 EXPORT_SYMBOL_GPL(bpf_prog_free);
1976 
1977 /* RNG for unprivileged user space with separated state from prandom_u32(). */
1978 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1979 
1980 void bpf_user_rnd_init_once(void)
1981 {
1982 	prandom_init_once(&bpf_user_rnd_state);
1983 }
1984 
1985 BPF_CALL_0(bpf_user_rnd_u32)
1986 {
1987 	/* Should someone ever have the rather unwise idea to use some
1988 	 * of the registers passed into this function, then note that
1989 	 * this function is called from native eBPF and classic-to-eBPF
1990 	 * transformations. Register assignments from both sides are
1991 	 * different, f.e. classic always sets fn(ctx, A, X) here.
1992 	 */
1993 	struct rnd_state *state;
1994 	u32 res;
1995 
1996 	state = &get_cpu_var(bpf_user_rnd_state);
1997 	res = prandom_u32_state(state);
1998 	put_cpu_var(bpf_user_rnd_state);
1999 
2000 	return res;
2001 }
2002 
2003 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2004 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2005 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2006 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2007 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2008 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2009 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2010 
2011 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2012 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2013 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2014 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2015 
2016 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2017 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2018 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2019 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2020 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2021 
2022 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2023 {
2024 	return NULL;
2025 }
2026 
2027 u64 __weak
2028 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2029 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2030 {
2031 	return -ENOTSUPP;
2032 }
2033 EXPORT_SYMBOL_GPL(bpf_event_output);
2034 
2035 /* Always built-in helper functions. */
2036 const struct bpf_func_proto bpf_tail_call_proto = {
2037 	.func		= NULL,
2038 	.gpl_only	= false,
2039 	.ret_type	= RET_VOID,
2040 	.arg1_type	= ARG_PTR_TO_CTX,
2041 	.arg2_type	= ARG_CONST_MAP_PTR,
2042 	.arg3_type	= ARG_ANYTHING,
2043 };
2044 
2045 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2046  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2047  * eBPF and implicitly also cBPF can get JITed!
2048  */
2049 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2050 {
2051 	return prog;
2052 }
2053 
2054 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2055  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2056  */
2057 void __weak bpf_jit_compile(struct bpf_prog *prog)
2058 {
2059 }
2060 
2061 bool __weak bpf_helper_changes_pkt_data(void *func)
2062 {
2063 	return false;
2064 }
2065 
2066 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2067  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2068  */
2069 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2070 			 int len)
2071 {
2072 	return -EFAULT;
2073 }
2074 
2075 /* All definitions of tracepoints related to BPF. */
2076 #define CREATE_TRACE_POINTS
2077 #include <linux/bpf_trace.h>
2078 
2079 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2080