xref: /linux/kernel/bpf/core.c (revision d3e945223e0158c85dbde23de4f89493a2a817f6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Linux Socket Filter - Kernel level socket filtering
4  *
5  * Based on the design of the Berkeley Packet Filter. The new
6  * internal format has been designed by PLUMgrid:
7  *
8  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9  *
10  * Authors:
11  *
12  *	Jay Schulist <jschlst@samba.org>
13  *	Alexei Starovoitov <ast@plumgrid.com>
14  *	Daniel Borkmann <dborkman@redhat.com>
15  *
16  * Andi Kleen - Fix a few bad bugs and races.
17  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18  */
19 
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/prandom.h>
25 #include <linux/bpf.h>
26 #include <linux/btf.h>
27 #include <linux/hex.h>
28 #include <linux/objtool.h>
29 #include <linux/overflow.h>
30 #include <linux/rbtree_latch.h>
31 #include <linux/kallsyms.h>
32 #include <linux/rcupdate.h>
33 #include <linux/perf_event.h>
34 #include <linux/extable.h>
35 #include <linux/log2.h>
36 #include <linux/bpf_verifier.h>
37 #include <linux/nodemask.h>
38 #include <linux/nospec.h>
39 #include <linux/bpf_mem_alloc.h>
40 #include <linux/memcontrol.h>
41 #include <linux/execmem.h>
42 #include <crypto/sha2.h>
43 
44 #include <asm/barrier.h>
45 #include <linux/unaligned.h>
46 
47 /* Registers */
48 #define BPF_R0	regs[BPF_REG_0]
49 #define BPF_R1	regs[BPF_REG_1]
50 #define BPF_R2	regs[BPF_REG_2]
51 #define BPF_R3	regs[BPF_REG_3]
52 #define BPF_R4	regs[BPF_REG_4]
53 #define BPF_R5	regs[BPF_REG_5]
54 #define BPF_R6	regs[BPF_REG_6]
55 #define BPF_R7	regs[BPF_REG_7]
56 #define BPF_R8	regs[BPF_REG_8]
57 #define BPF_R9	regs[BPF_REG_9]
58 #define BPF_R10	regs[BPF_REG_10]
59 
60 /* Named registers */
61 #define DST	regs[insn->dst_reg]
62 #define SRC	regs[insn->src_reg]
63 #define FP	regs[BPF_REG_FP]
64 #define AX	regs[BPF_REG_AX]
65 #define ARG1	regs[BPF_REG_ARG1]
66 #define CTX	regs[BPF_REG_CTX]
67 #define OFF	insn->off
68 #define IMM	insn->imm
69 
70 struct bpf_mem_alloc bpf_global_ma;
71 bool bpf_global_ma_set;
72 
73 /* No hurry in this branch
74  *
75  * Exported for the bpf jit load helper.
76  */
77 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
78 {
79 	u8 *ptr = NULL;
80 
81 	if (k >= SKF_NET_OFF) {
82 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
83 	} else if (k >= SKF_LL_OFF) {
84 		if (unlikely(!skb_mac_header_was_set(skb)))
85 			return NULL;
86 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
87 	}
88 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
89 		return ptr;
90 
91 	return NULL;
92 }
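
/*
 * Minimal usage sketch (an illustration, not a call site in this file):
 * load one byte at the IP protocol field through the SKF_NET_OFF window
 * handled above. The helper returns NULL when the requested bytes are not
 * in the linear skb area.
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb,
 *			SKF_NET_OFF + offsetof(struct iphdr, protocol), 1);
 *	if (!p)
 *		return 0;
 */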
93 
94 /* Tell BPF programs that include vmlinux.h what the kernel's PAGE_SIZE is. */
95 enum page_size_enum {
96 	__PAGE_SIZE = PAGE_SIZE
97 };
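
/*
 * Illustrative sketch (not part of this file): a BPF program built against
 * vmlinux.h can use the enumerator above instead of hard-coding the page
 * size. The helper below is hypothetical.
 *
 *	// BPF program side, compiled against vmlinux.h with libbpf
 *	static __always_inline u64 bytes_to_pages(u64 bytes)
 *	{
 *		return (bytes + __PAGE_SIZE - 1) / __PAGE_SIZE;
 *	}
 */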
98 
99 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
100 {
101 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
102 	struct bpf_prog_aux *aux;
103 	struct bpf_prog *fp;
104 
105 	size = round_up(size, __PAGE_SIZE);
106 	fp = __vmalloc(size, gfp_flags);
107 	if (fp == NULL)
108 		return NULL;
109 
110 	aux = kzalloc_obj(*aux, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
111 	if (aux == NULL) {
112 		vfree(fp);
113 		return NULL;
114 	}
115 	fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 4,
116 					bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
117 	if (!fp->active) {
118 		vfree(fp);
119 		kfree(aux);
120 		return NULL;
121 	}
122 
123 	fp->pages = size / PAGE_SIZE;
124 	fp->aux = aux;
125 	fp->aux->main_prog_aux = aux;
126 	fp->aux->prog = fp;
127 	fp->jit_requested = ebpf_jit_enabled();
128 	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
129 #ifdef CONFIG_CGROUP_BPF
130 	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
131 #endif
132 
133 	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
134 #ifdef CONFIG_FINEIBT
135 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
136 #endif
137 	mutex_init(&fp->aux->used_maps_mutex);
138 	mutex_init(&fp->aux->ext_mutex);
139 	mutex_init(&fp->aux->dst_mutex);
140 	mutex_init(&fp->aux->st_ops_assoc_mutex);
141 
142 #ifdef CONFIG_BPF_SYSCALL
143 	bpf_prog_stream_init(fp);
144 #endif
145 
146 	return fp;
147 }
148 
149 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
150 {
151 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
152 	struct bpf_prog *prog;
153 	int cpu;
154 
155 	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
156 	if (!prog)
157 		return NULL;
158 
159 	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
160 	if (!prog->stats) {
161 		free_percpu(prog->active);
162 		kfree(prog->aux);
163 		vfree(prog);
164 		return NULL;
165 	}
166 
167 	for_each_possible_cpu(cpu) {
168 		struct bpf_prog_stats *pstats;
169 
170 		pstats = per_cpu_ptr(prog->stats, cpu);
171 		u64_stats_init(&pstats->syncp);
172 	}
173 	return prog;
174 }
175 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
176 
177 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
178 {
179 	if (!prog->aux->nr_linfo || !prog->jit_requested)
180 		return 0;
181 
182 	prog->aux->jited_linfo = kvzalloc_objs(*prog->aux->jited_linfo,
183 					       prog->aux->nr_linfo,
184 					       bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
185 	if (!prog->aux->jited_linfo)
186 		return -ENOMEM;
187 
188 	return 0;
189 }
190 
191 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
192 {
193 	if (prog->aux->jited_linfo &&
194 	    (!prog->jited || !prog->aux->jited_linfo[0])) {
195 		kvfree(prog->aux->jited_linfo);
196 		prog->aux->jited_linfo = NULL;
197 	}
198 
199 	kfree(prog->aux->kfunc_tab);
200 	prog->aux->kfunc_tab = NULL;
201 }
202 
203 /* The JIT engine is responsible for providing an array
204  * for the insn_off to jited_off mapping (insn_to_jit_off).
205  *
206  * The idx to this array is the insn_off.  Hence, the insn_off
207  * here is relative to the prog itself instead of the main prog.
208  * This array has one entry for each xlated bpf insn.
209  *
210  * jited_off is the byte off to the end of the jited insn.
211  *
212  * Hence, with
213  * insn_start:
214  *      The first bpf insn off of the prog.  The insn off
215  *      here is relative to the main prog.
216  *      e.g. if prog is a subprog, insn_start > 0
217  * linfo_idx:
218  *      The prog's idx to prog->aux->linfo and jited_linfo
219  *
220  * jited_linfo[linfo_idx] = prog->bpf_func
221  *
222  * For i > linfo_idx,
223  *
224  * jited_linfo[i] = prog->bpf_func +
225  *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
226  */
227 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
228 			       const u32 *insn_to_jit_off)
229 {
230 	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
231 	const struct bpf_line_info *linfo;
232 	void **jited_linfo;
233 
234 	if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
235 		/* Userspace did not provide linfo */
236 		return;
237 
238 	linfo_idx = prog->aux->linfo_idx;
239 	linfo = &prog->aux->linfo[linfo_idx];
240 	insn_start = linfo[0].insn_off;
241 	insn_end = insn_start + prog->len;
242 
243 	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
244 	jited_linfo[0] = prog->bpf_func;
245 
246 	nr_linfo = prog->aux->nr_linfo - linfo_idx;
247 
248 	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
249 		/* The verifier ensures that linfo[i].insn_off is
250 		 * strictly increasing
251 		 */
252 		jited_linfo[i] = prog->bpf_func +
253 			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
254 }
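
/*
 * Worked example (hypothetical numbers): for a subprog with insn_start = 10
 * and line info entries at insn_off 10, 12 and 15, the loop above yields
 *
 *	jited_linfo[0] = prog->bpf_func
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	jited_linfo[2] = prog->bpf_func + insn_to_jit_off[15 - 10 - 1]
 *
 * i.e. each entry points just past the jited image of the xlated insn that
 * precedes the line info target, which is where that target's code starts.
 */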
255 
256 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
257 				  gfp_t gfp_extra_flags)
258 {
259 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
260 	struct bpf_prog *fp;
261 	u32 pages;
262 
263 	size = round_up(size, PAGE_SIZE);
264 	pages = size / PAGE_SIZE;
265 	if (pages <= fp_old->pages)
266 		return fp_old;
267 
268 	fp = __vmalloc(size, gfp_flags);
269 	if (fp) {
270 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
271 		fp->pages = pages;
272 		fp->aux->prog = fp;
273 
274 		/* We keep fp->aux from fp_old around in the new
275 		 * reallocated structure.
276 		 */
277 		fp_old->aux = NULL;
278 		fp_old->stats = NULL;
279 		fp_old->active = NULL;
280 		__bpf_prog_free(fp_old);
281 	}
282 
283 	return fp;
284 }
285 
286 void __bpf_prog_free(struct bpf_prog *fp)
287 {
288 	if (fp->aux) {
289 		mutex_destroy(&fp->aux->used_maps_mutex);
290 		mutex_destroy(&fp->aux->dst_mutex);
291 		mutex_destroy(&fp->aux->st_ops_assoc_mutex);
292 		kfree(fp->aux->poke_tab);
293 		kfree(fp->aux);
294 	}
295 	free_percpu(fp->stats);
296 	free_percpu(fp->active);
297 	vfree(fp);
298 }
299 
300 int bpf_prog_calc_tag(struct bpf_prog *fp)
301 {
302 	size_t size = bpf_prog_insn_size(fp);
303 	struct bpf_insn *dst;
304 	bool was_ld_map;
305 	u32 i;
306 
307 	dst = vmalloc(size);
308 	if (!dst)
309 		return -ENOMEM;
310 
311 	/* We need to take the map fds out of the digest calculation
312 	 * since they are unstable from the user space side.
313 	 */
314 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
315 		dst[i] = fp->insnsi[i];
316 		if (!was_ld_map &&
317 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
318 		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
319 		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
320 			was_ld_map = true;
321 			dst[i].imm = 0;
322 		} else if (was_ld_map &&
323 			   dst[i].code == 0 &&
324 			   dst[i].dst_reg == 0 &&
325 			   dst[i].src_reg == 0 &&
326 			   dst[i].off == 0) {
327 			was_ld_map = false;
328 			dst[i].imm = 0;
329 		} else {
330 			was_ld_map = false;
331 		}
332 	}
333 	sha256((u8 *)dst, size, fp->digest);
334 	vfree(dst);
335 	return 0;
336 }
337 
338 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
339 				s32 end_new, s32 curr, const bool probe_pass)
340 {
341 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
342 	s32 delta = end_new - end_old;
343 	s64 imm = insn->imm;
344 
345 	if (curr < pos && curr + imm + 1 >= end_old)
346 		imm += delta;
347 	else if (curr >= end_new && curr + imm + 1 < end_new)
348 		imm -= delta;
349 	if (imm < imm_min || imm > imm_max)
350 		return -ERANGE;
351 	if (!probe_pass)
352 		insn->imm = imm;
353 	return 0;
354 }
355 
356 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
357 				s32 end_new, s32 curr, const bool probe_pass)
358 {
359 	s64 off_min, off_max, off;
360 	s32 delta = end_new - end_old;
361 
362 	if (insn->code == (BPF_JMP32 | BPF_JA)) {
363 		off = insn->imm;
364 		off_min = S32_MIN;
365 		off_max = S32_MAX;
366 	} else {
367 		off = insn->off;
368 		off_min = S16_MIN;
369 		off_max = S16_MAX;
370 	}
371 
372 	if (curr < pos && curr + off + 1 >= end_old)
373 		off += delta;
374 	else if (curr >= end_new && curr + off + 1 < end_new)
375 		off -= delta;
376 	if (off < off_min || off > off_max)
377 		return -ERANGE;
378 	if (!probe_pass) {
379 		if (insn->code == (BPF_JMP32 | BPF_JA))
380 			insn->imm = off;
381 		else
382 			insn->off = off;
383 	}
384 	return 0;
385 }
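
/*
 * Worked example (hypothetical program): if the insn at position 5 is
 * replaced by a 3-insn patchlet, then end_old = 6, end_new = 8 and
 * delta = 2. A jump at curr = 3 with off = 4 originally targets insn
 * curr + off + 1 = 8, which is at or beyond end_old, so off becomes 6
 * and the jump still lands on the same (now shifted) instruction.
 */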
386 
387 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
388 			    s32 end_new, const bool probe_pass)
389 {
390 	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
391 	struct bpf_insn *insn = prog->insnsi;
392 	int ret = 0;
393 
394 	for (i = 0; i < insn_cnt; i++, insn++) {
395 		u8 code;
396 
397 		/* In the probing pass we still operate on the original,
398 		 * unpatched image in order to check overflows before we
399 		 * do any other adjustments. Therefore skip the patchlet.
400 		 */
401 		if (probe_pass && i == pos) {
402 			i = end_new;
403 			insn = prog->insnsi + end_old;
404 		}
405 		if (bpf_pseudo_func(insn)) {
406 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
407 						   end_new, i, probe_pass);
408 			if (ret)
409 				return ret;
410 			continue;
411 		}
412 		code = insn->code;
413 		if ((BPF_CLASS(code) != BPF_JMP &&
414 		     BPF_CLASS(code) != BPF_JMP32) ||
415 		    BPF_OP(code) == BPF_EXIT)
416 			continue;
417 		/* Adjust offset of jmps if we cross patch boundaries. */
418 		if (BPF_OP(code) == BPF_CALL) {
419 			if (insn->src_reg != BPF_PSEUDO_CALL)
420 				continue;
421 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
422 						   end_new, i, probe_pass);
423 		} else {
424 			ret = bpf_adj_delta_to_off(insn, pos, end_old,
425 						   end_new, i, probe_pass);
426 		}
427 		if (ret)
428 			break;
429 	}
430 
431 	return ret;
432 }
433 
434 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
435 {
436 	struct bpf_line_info *linfo;
437 	u32 i, nr_linfo;
438 
439 	nr_linfo = prog->aux->nr_linfo;
440 	if (!nr_linfo || !delta)
441 		return;
442 
443 	linfo = prog->aux->linfo;
444 
445 	for (i = 0; i < nr_linfo; i++)
446 		if (off < linfo[i].insn_off)
447 			break;
448 
449 	/* Push all off < linfo[i].insn_off by delta */
450 	for (; i < nr_linfo; i++)
451 		linfo[i].insn_off += delta;
452 }
453 
454 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
455 				       const struct bpf_insn *patch, u32 len)
456 {
457 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
458 	const u32 cnt_max = S16_MAX;
459 	struct bpf_prog *prog_adj;
460 	int err;
461 
462 	/* Since our patchlet doesn't expand the image, we're done. */
463 	if (insn_delta == 0) {
464 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
465 		return prog;
466 	}
467 
468 	insn_adj_cnt = prog->len + insn_delta;
469 
470 	/* Reject anything that would potentially let the insn->off
471 	 * target overflow when we have excessive program expansions.
472 	 * We need to probe here before we do any reallocation where
473 	 * we afterwards may not fail anymore.
474 	 */
475 	if (insn_adj_cnt > cnt_max &&
476 	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
477 		return ERR_PTR(err);
478 
479 	/* Several new instructions need to be inserted. Make room
480 	 * for them. Likely, there's no need for a new allocation as
481 	 * the last page could have large enough tailroom.
482 	 */
483 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
484 				    GFP_USER);
485 	if (!prog_adj)
486 		return ERR_PTR(-ENOMEM);
487 
488 	prog_adj->len = insn_adj_cnt;
489 
490 	/* Patching happens in 3 steps:
491 	 *
492 	 * 1) Move over tail of insnsi from next instruction onwards,
493 	 *    so we can patch the single target insn with one or more
494 	 *    new ones (patching is always from 1 to n insns, n > 0).
495 	 * 2) Inject new instructions at the target location.
496 	 * 3) Adjust branch offsets if necessary.
497 	 */
498 	insn_rest = insn_adj_cnt - off - len;
499 
500 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
501 		sizeof(*patch) * insn_rest);
502 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
503 
504 	/* We are guaranteed not to fail at this point; otherwise the
505 	 * ship has sailed and we cannot revert to the original state.
506 	 * An overflow cannot happen at this point.
507 	 */
508 	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
509 
510 	bpf_adj_linfo(prog_adj, off, insn_delta);
511 
512 	return prog_adj;
513 }
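
/*
 * Minimal usage sketch in the style of a verifier rewrite; the patchlet
 * contents here are purely illustrative:
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
 *	};
 *
 *	prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 */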
514 
515 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
516 {
517 	int err;
518 
519 	/* Branch offsets can't overflow when program is shrinking, no need
520 	 * to call bpf_adj_branches(..., true) here
521 	 */
522 	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
523 		sizeof(struct bpf_insn) * (prog->len - off - cnt));
524 	prog->len -= cnt;
525 
526 	err = bpf_adj_branches(prog, off, off + cnt, off, false);
527 	WARN_ON_ONCE(err);
528 	return err;
529 }
530 
531 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
532 {
533 	int i;
534 
535 	for (i = 0; i < fp->aux->real_func_cnt; i++)
536 		bpf_prog_kallsyms_del(fp->aux->func[i]);
537 }
538 
539 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
540 {
541 	bpf_prog_kallsyms_del_subprogs(fp);
542 	bpf_prog_kallsyms_del(fp);
543 }
544 
545 #ifdef CONFIG_BPF_JIT
546 /* All BPF JIT sysctl knobs here. */
547 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
548 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
549 int bpf_jit_harden   __read_mostly;
550 long bpf_jit_limit   __read_mostly;
551 long bpf_jit_limit_max __read_mostly;
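
/*
 * These knobs are exposed through sysctl under net.core (see
 * Documentation/admin-guide/sysctl/net.rst), e.g.:
 *
 *	net.core.bpf_jit_enable   - 0 = interpreter, 1 = JIT, 2 = JIT + debug dump
 *	net.core.bpf_jit_harden   - 0 = off, 1 = unprivileged only, 2 = always
 *	net.core.bpf_jit_kallsyms - expose JITed images as kallsyms symbols
 *	net.core.bpf_jit_limit    - global limit on JIT allocations, in bytes
 */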
552 
553 static void
554 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
555 {
556 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
557 
558 	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
559 	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
560 }
561 
562 static void
563 bpf_prog_ksym_set_name(struct bpf_prog *prog)
564 {
565 	char *sym = prog->aux->ksym.name;
566 	const char *end = sym + KSYM_NAME_LEN;
567 	const struct btf_type *type;
568 	const char *func_name;
569 
570 	BUILD_BUG_ON(sizeof("bpf_prog_") +
571 		     sizeof(prog->tag) * 2 +
572 		     /* The name is null terminated.
573 		      * We would need +1 for the '_' preceding
574 		      * the name.  However, the null character
575 		      * is double counted between the name and the
576 		      * sizeof("bpf_prog_") above, so we omit
577 		      * the +1 here.
578 		      */
579 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
580 
581 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
582 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
583 
584 	/* prog->aux->name will be ignored if full btf name is available */
585 	if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
586 		type = btf_type_by_id(prog->aux->btf,
587 				      prog->aux->func_info[prog->aux->func_idx].type_id);
588 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
589 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
590 		return;
591 	}
592 
593 	if (prog->aux->name[0])
594 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
595 	else
596 		*sym = 0;
597 }
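
/*
 * Example of the resulting symbol layout (the tag and name below are
 * hypothetical):
 *
 *	bpf_prog_8937c9e0f6bc4a29_my_prog
 *	         \______________/ \_____/
 *	          hex of prog->tag  BTF func name or aux->name (omitted if empty)
 */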
598 
599 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
600 {
601 	return container_of(n, struct bpf_ksym, tnode)->start;
602 }
603 
604 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
605 					  struct latch_tree_node *b)
606 {
607 	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
608 }
609 
610 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
611 {
612 	unsigned long val = (unsigned long)key;
613 	const struct bpf_ksym *ksym;
614 
615 	ksym = container_of(n, struct bpf_ksym, tnode);
616 
617 	if (val < ksym->start)
618 		return -1;
619 	/* Ensure that we detect return addresses as part of the program when
620 	 * the final instruction is a call and the program is part of the stack
621 	 * trace. Therefore, use val > ksym->end instead of val >= ksym->end.
622 	 */
623 	if (val > ksym->end)
624 		return  1;
625 
626 	return 0;
627 }
628 
629 static const struct latch_tree_ops bpf_tree_ops = {
630 	.less	= bpf_tree_less,
631 	.comp	= bpf_tree_comp,
632 };
633 
634 static DEFINE_SPINLOCK(bpf_lock);
635 static LIST_HEAD(bpf_kallsyms);
636 static struct latch_tree_root bpf_tree __cacheline_aligned;
637 
638 void bpf_ksym_add(struct bpf_ksym *ksym)
639 {
640 	spin_lock_bh(&bpf_lock);
641 	WARN_ON_ONCE(!list_empty(&ksym->lnode));
642 	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
643 	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
644 	spin_unlock_bh(&bpf_lock);
645 }
646 
647 static void __bpf_ksym_del(struct bpf_ksym *ksym)
648 {
649 	if (list_empty(&ksym->lnode))
650 		return;
651 
652 	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
653 	list_del_rcu(&ksym->lnode);
654 }
655 
656 void bpf_ksym_del(struct bpf_ksym *ksym)
657 {
658 	spin_lock_bh(&bpf_lock);
659 	__bpf_ksym_del(ksym);
660 	spin_unlock_bh(&bpf_lock);
661 }
662 
663 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
664 {
665 	return fp->jited && !bpf_prog_was_classic(fp);
666 }
667 
668 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
669 {
670 	if (!bpf_prog_kallsyms_candidate(fp) ||
671 	    !bpf_token_capable(fp->aux->token, CAP_BPF))
672 		return;
673 
674 	bpf_prog_ksym_set_addr(fp);
675 	bpf_prog_ksym_set_name(fp);
676 	fp->aux->ksym.prog = true;
677 
678 	bpf_ksym_add(&fp->aux->ksym);
679 
680 #ifdef CONFIG_FINEIBT
681 	/*
682 	 * With FineIBT, code in the __cfi_foo() symbols can get executed
683 	 * and hence the unwinder needs help.
684 	 */
685 	if (cfi_mode != CFI_FINEIBT)
686 		return;
687 
688 	snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
689 		 "__cfi_%s", fp->aux->ksym.name);
690 
691 	fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
692 	fp->aux->ksym_prefix.end   = (unsigned long) fp->bpf_func;
693 
694 	bpf_ksym_add(&fp->aux->ksym_prefix);
695 #endif
696 }
697 
698 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
699 {
700 	if (!bpf_prog_kallsyms_candidate(fp))
701 		return;
702 
703 	bpf_ksym_del(&fp->aux->ksym);
704 #ifdef CONFIG_FINEIBT
705 	if (cfi_mode != CFI_FINEIBT)
706 		return;
707 	bpf_ksym_del(&fp->aux->ksym_prefix);
708 #endif
709 }
710 
711 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
712 {
713 	struct latch_tree_node *n;
714 
715 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
716 	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
717 }
718 
719 int bpf_address_lookup(unsigned long addr, unsigned long *size,
720 		       unsigned long *off, char *sym)
721 {
722 	struct bpf_ksym *ksym;
723 	int ret = 0;
724 
725 	rcu_read_lock();
726 	ksym = bpf_ksym_find(addr);
727 	if (ksym) {
728 		unsigned long symbol_start = ksym->start;
729 		unsigned long symbol_end = ksym->end;
730 
731 		ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);
732 
733 		if (size)
734 			*size = symbol_end - symbol_start;
735 		if (off)
736 			*off  = addr - symbol_start;
737 	}
738 	rcu_read_unlock();
739 
740 	return ret;
741 }
742 
743 bool is_bpf_text_address(unsigned long addr)
744 {
745 	bool ret;
746 
747 	rcu_read_lock();
748 	ret = bpf_ksym_find(addr) != NULL;
749 	rcu_read_unlock();
750 
751 	return ret;
752 }
753 
754 struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
755 {
756 	struct bpf_ksym *ksym;
757 
758 	WARN_ON_ONCE(!rcu_read_lock_held());
759 	ksym = bpf_ksym_find(addr);
760 
761 	return ksym && ksym->prog ?
762 	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
763 	       NULL;
764 }
765 
766 bool bpf_has_frame_pointer(unsigned long ip)
767 {
768 	struct bpf_ksym *ksym;
769 	unsigned long offset;
770 
771 	guard(rcu)();
772 
773 	ksym = bpf_ksym_find(ip);
774 	if (!ksym || !ksym->fp_start || !ksym->fp_end)
775 		return false;
776 
777 	offset = ip - ksym->start;
778 
779 	return offset >= ksym->fp_start && offset < ksym->fp_end;
780 }
781 
782 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
783 {
784 	const struct exception_table_entry *e = NULL;
785 	struct bpf_prog *prog;
786 
787 	rcu_read_lock();
788 	prog = bpf_prog_ksym_find(addr);
789 	if (!prog)
790 		goto out;
791 	if (!prog->aux->num_exentries)
792 		goto out;
793 
794 	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
795 out:
796 	rcu_read_unlock();
797 	return e;
798 }
799 
800 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
801 		    char *sym)
802 {
803 	struct bpf_ksym *ksym;
804 	unsigned int it = 0;
805 	int ret = -ERANGE;
806 
807 	if (!bpf_jit_kallsyms_enabled())
808 		return ret;
809 
810 	rcu_read_lock();
811 	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
812 		if (it++ != symnum)
813 			continue;
814 
815 		strscpy(sym, ksym->name, KSYM_NAME_LEN);
816 
817 		*value = ksym->start;
818 		*type  = BPF_SYM_ELF_TYPE;
819 
820 		ret = 0;
821 		break;
822 	}
823 	rcu_read_unlock();
824 
825 	return ret;
826 }
827 
828 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
829 				struct bpf_jit_poke_descriptor *poke)
830 {
831 	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
832 	static const u32 poke_tab_max = 1024;
833 	u32 slot = prog->aux->size_poke_tab;
834 	u32 size = slot + 1;
835 
836 	if (size > poke_tab_max)
837 		return -ENOSPC;
838 	if (poke->tailcall_target || poke->tailcall_target_stable ||
839 	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
840 		return -EINVAL;
841 
842 	switch (poke->reason) {
843 	case BPF_POKE_REASON_TAIL_CALL:
844 		if (!poke->tail_call.map)
845 			return -EINVAL;
846 		break;
847 	default:
848 		return -EINVAL;
849 	}
850 
851 	tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL);
852 	if (!tab)
853 		return -ENOMEM;
854 
855 	memcpy(&tab[slot], poke, sizeof(*poke));
856 	prog->aux->size_poke_tab = size;
857 	prog->aux->poke_tab = tab;
858 
859 	return slot;
860 }
861 
862 /*
863  * BPF program pack allocator.
864  *
865  * Most BPF programs are pretty small. Allocating a whole page for each
866  * program is sometimes a waste. Many small BPF programs also add pressure
867  * to the instruction TLB. To solve this issue, we introduce a BPF program pack
868  * allocator. The prog_pack allocator uses a HPAGE_PMD_SIZE page (2MB on x86)
869  * to host BPF programs.
870  */
871 #define BPF_PROG_CHUNK_SHIFT	6
872 #define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
873 #define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
874 
875 struct bpf_prog_pack {
876 	struct list_head list;
877 	void *ptr;
878 	unsigned long bitmap[];
879 };
880 
881 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
882 {
883 	memset(area, 0, size);
884 }
885 
886 #define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
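
/*
 * Worked example (hypothetical size): a 300 byte image needs
 * BPF_PROG_SIZE_TO_NBITS(300) = round_up(300, 64) / 64 = 5 chunks, i.e.
 * five consecutive zero bits in pack->bitmap, and the resulting pointer is
 * pack->ptr + (pos << BPF_PROG_CHUNK_SHIFT).
 */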
887 
888 static DEFINE_MUTEX(pack_mutex);
889 static LIST_HEAD(pack_list);
890 
891 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
892  * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
893  */
894 #ifdef PMD_SIZE
895 /* PMD_SIZE is really big for some archs. It doesn't make sense to
896  * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
897  * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
898  * greater than or equal to 2MB.
899  */
900 #define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
901 #else
902 #define BPF_PROG_PACK_SIZE PAGE_SIZE
903 #endif
904 
905 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
906 
907 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
908 {
909 	struct bpf_prog_pack *pack;
910 	int err;
911 
912 	pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT));
913 	if (!pack)
914 		return NULL;
915 	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
916 	if (!pack->ptr)
917 		goto out;
918 	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
919 	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
920 
921 	set_vm_flush_reset_perms(pack->ptr);
922 	err = set_memory_rox((unsigned long)pack->ptr,
923 			     BPF_PROG_PACK_SIZE / PAGE_SIZE);
924 	if (err)
925 		goto out;
926 	list_add_tail(&pack->list, &pack_list);
927 	return pack;
928 
929 out:
930 	bpf_jit_free_exec(pack->ptr);
931 	kfree(pack);
932 	return NULL;
933 }
934 
935 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
936 {
937 	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
938 	struct bpf_prog_pack *pack;
939 	unsigned long pos;
940 	void *ptr = NULL;
941 
942 	mutex_lock(&pack_mutex);
943 	if (size > BPF_PROG_PACK_SIZE) {
944 		size = round_up(size, PAGE_SIZE);
945 		ptr = bpf_jit_alloc_exec(size);
946 		if (ptr) {
947 			int err;
948 
949 			bpf_fill_ill_insns(ptr, size);
950 			set_vm_flush_reset_perms(ptr);
951 			err = set_memory_rox((unsigned long)ptr,
952 					     size / PAGE_SIZE);
953 			if (err) {
954 				bpf_jit_free_exec(ptr);
955 				ptr = NULL;
956 			}
957 		}
958 		goto out;
959 	}
960 	list_for_each_entry(pack, &pack_list, list) {
961 		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
962 						 nbits, 0);
963 		if (pos < BPF_PROG_CHUNK_COUNT)
964 			goto found_free_area;
965 	}
966 
967 	pack = alloc_new_pack(bpf_fill_ill_insns);
968 	if (!pack)
969 		goto out;
970 
971 	pos = 0;
972 
973 found_free_area:
974 	bitmap_set(pack->bitmap, pos, nbits);
975 	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
976 
977 out:
978 	mutex_unlock(&pack_mutex);
979 	return ptr;
980 }
981 
982 void bpf_prog_pack_free(void *ptr, u32 size)
983 {
984 	struct bpf_prog_pack *pack = NULL, *tmp;
985 	unsigned int nbits;
986 	unsigned long pos;
987 
988 	mutex_lock(&pack_mutex);
989 	if (size > BPF_PROG_PACK_SIZE) {
990 		bpf_jit_free_exec(ptr);
991 		goto out;
992 	}
993 
994 	list_for_each_entry(tmp, &pack_list, list) {
995 		if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
996 			pack = tmp;
997 			break;
998 		}
999 	}
1000 
1001 	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
1002 		goto out;
1003 
1004 	nbits = BPF_PROG_SIZE_TO_NBITS(size);
1005 	pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
1006 
1007 	WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
1008 		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
1009 
1010 	bitmap_clear(pack->bitmap, pos, nbits);
1011 	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
1012 				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
1013 		list_del(&pack->list);
1014 		bpf_jit_free_exec(pack->ptr);
1015 		kfree(pack);
1016 	}
1017 out:
1018 	mutex_unlock(&pack_mutex);
1019 }
1020 
1021 static atomic_long_t bpf_jit_current;
1022 
1023 /* Can be overridden by an arch's JIT compiler if it has a custom,
1024  * dedicated BPF backend memory area, or if neither of the two
1025  * below apply.
1026  */
1027 u64 __weak bpf_jit_alloc_exec_limit(void)
1028 {
1029 #if defined(MODULES_VADDR)
1030 	return MODULES_END - MODULES_VADDR;
1031 #else
1032 	return VMALLOC_END - VMALLOC_START;
1033 #endif
1034 }
1035 
1036 static int __init bpf_jit_charge_init(void)
1037 {
1038 	/* Only used as heuristic here to derive limit. */
1039 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
1040 	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
1041 					    PAGE_SIZE), LONG_MAX);
1042 	return 0;
1043 }
1044 pure_initcall(bpf_jit_charge_init);
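
/*
 * Worked example (hypothetical arch values): with a 1 GiB executable
 * region, bpf_jit_limit_max is 1 GiB and the default bpf_jit_limit is
 * half of that rounded up to a page, i.e. 512 MiB. The sysctl can later
 * raise the limit, but not above bpf_jit_limit_max.
 */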
1045 
1046 int bpf_jit_charge_modmem(u32 size)
1047 {
1048 	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
1049 		if (!bpf_capable()) {
1050 			atomic_long_sub(size, &bpf_jit_current);
1051 			return -EPERM;
1052 		}
1053 	}
1054 
1055 	return 0;
1056 }
1057 
1058 void bpf_jit_uncharge_modmem(u32 size)
1059 {
1060 	atomic_long_sub(size, &bpf_jit_current);
1061 }
1062 
1063 void *__weak bpf_jit_alloc_exec(unsigned long size)
1064 {
1065 	return execmem_alloc(EXECMEM_BPF, size);
1066 }
1067 
1068 void __weak bpf_jit_free_exec(void *addr)
1069 {
1070 	execmem_free(addr);
1071 }
1072 
1073 struct bpf_binary_header *
1074 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1075 		     unsigned int alignment,
1076 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
1077 {
1078 	struct bpf_binary_header *hdr;
1079 	u32 size, hole, start;
1080 
1081 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
1082 		     alignment > BPF_IMAGE_ALIGNMENT);
1083 
1084 	/* Most BPF filters are really small, but if some of them
1085 	 * fill a page, allow at least 128 extra bytes to insert a
1086 	 * random section of illegal instructions.
1087 	 */
1088 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1089 
1090 	if (bpf_jit_charge_modmem(size))
1091 		return NULL;
1092 	hdr = bpf_jit_alloc_exec(size);
1093 	if (!hdr) {
1094 		bpf_jit_uncharge_modmem(size);
1095 		return NULL;
1096 	}
1097 
1098 	/* Fill space with illegal/arch-dep instructions. */
1099 	bpf_fill_ill_insns(hdr, size);
1100 
1101 	hdr->size = size;
1102 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1103 		     PAGE_SIZE - sizeof(*hdr));
1104 	start = get_random_u32_below(hole) & ~(alignment - 1);
1105 
1106 	/* Leave a random number of instructions before BPF code. */
1107 	*image_ptr = &hdr->image[start];
1108 
1109 	return hdr;
1110 }
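
/*
 * Example of the randomization above (hypothetical numbers): for
 * proglen = 100 with a 4 KiB page, size rounds up to one page and
 * hole = 4096 - 100 - sizeof(*hdr), so the image begins at a random,
 * alignment-masked offset inside that hole rather than at a fixed
 * position within the page.
 */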
1111 
1112 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1113 {
1114 	u32 size = hdr->size;
1115 
1116 	bpf_jit_free_exec(hdr);
1117 	bpf_jit_uncharge_modmem(size);
1118 }
1119 
1120 /* Allocate jit binary from bpf_prog_pack allocator.
1121  * Since the allocated memory is RO+X, the JIT engine cannot write directly
1122  * to the memory. To solve this problem, a RW buffer is also allocated
1123  * at the same time. The JIT engine should calculate offsets based on the
1124  * RO memory address, but write the JITed program to the RW buffer. Once the
1125  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1126  * the JITed program to the RO memory.
1127  */
1128 struct bpf_binary_header *
1129 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1130 			  unsigned int alignment,
1131 			  struct bpf_binary_header **rw_header,
1132 			  u8 **rw_image,
1133 			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
1134 {
1135 	struct bpf_binary_header *ro_header;
1136 	u32 size, hole, start;
1137 
1138 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
1139 		     alignment > BPF_IMAGE_ALIGNMENT);
1140 
1141 	/* add 16 bytes for a random section of illegal instructions */
1142 	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1143 
1144 	if (bpf_jit_charge_modmem(size))
1145 		return NULL;
1146 	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1147 	if (!ro_header) {
1148 		bpf_jit_uncharge_modmem(size);
1149 		return NULL;
1150 	}
1151 
1152 	*rw_header = kvmalloc(size, GFP_KERNEL);
1153 	if (!*rw_header) {
1154 		bpf_prog_pack_free(ro_header, size);
1155 		bpf_jit_uncharge_modmem(size);
1156 		return NULL;
1157 	}
1158 
1159 	/* Fill space with illegal/arch-dep instructions. */
1160 	bpf_fill_ill_insns(*rw_header, size);
1161 	(*rw_header)->size = size;
1162 
1163 	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1164 		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1165 	start = get_random_u32_below(hole) & ~(alignment - 1);
1166 
1167 	*image_ptr = &ro_header->image[start];
1168 	*rw_image = &(*rw_header)->image[start];
1169 
1170 	return ro_header;
1171 }
1172 
1173 /* Copy JITed text from rw_header to its final location, the ro_header. */
1174 int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
1175 				 struct bpf_binary_header *rw_header)
1176 {
1177 	void *ptr;
1178 
1179 	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1180 
1181 	kvfree(rw_header);
1182 
1183 	if (IS_ERR(ptr)) {
1184 		bpf_prog_pack_free(ro_header, ro_header->size);
1185 		return PTR_ERR(ptr);
1186 	}
1187 	return 0;
1188 }
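
/*
 * Typical arch JIT flow around the pack allocator, as a sketch (error
 * handling and the actual instruction emission are elided; jit_fill_hole
 * stands for the arch's fill-hole callback):
 *
 *	header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					   &rw_header, &rw_image,
 *					   jit_fill_hole);
 *	if (!header)
 *		return orig_prog;
 *	// ... emit insns into rw_image, computing offsets against image ...
 *	if (bpf_jit_binary_pack_finalize(header, rw_header))
 *		goto out_free;
 *	prog->bpf_func = (void *)image;
 */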
1189 
1190 /* bpf_jit_binary_pack_free is called in two different scenarios:
1191  *   1) when the program is freed after bpf_jit_binary_pack_finalize;
1192  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1193  * For case 2), we need to free both the RO memory and the RW buffer.
1194  *
1195  * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1196  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1197  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1198  * bpf_arch_text_copy (when jit fails).
1199  */
1200 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1201 			      struct bpf_binary_header *rw_header)
1202 {
1203 	u32 size = ro_header->size;
1204 
1205 	bpf_prog_pack_free(ro_header, size);
1206 	kvfree(rw_header);
1207 	bpf_jit_uncharge_modmem(size);
1208 }
1209 
1210 struct bpf_binary_header *
1211 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1212 {
1213 	unsigned long real_start = (unsigned long)fp->bpf_func;
1214 	unsigned long addr;
1215 
1216 	addr = real_start & BPF_PROG_CHUNK_MASK;
1217 	return (void *)addr;
1218 }
1219 
1220 static inline struct bpf_binary_header *
1221 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1222 {
1223 	unsigned long real_start = (unsigned long)fp->bpf_func;
1224 	unsigned long addr;
1225 
1226 	addr = real_start & PAGE_MASK;
1227 	return (void *)addr;
1228 }
1229 
1230 /* This symbol is only overridden by archs that have different
1231  * requirements than the usual eBPF JITs, f.e. when they only
1232  * implement cBPF JIT, do not set images read-only, etc.
1233  */
1234 void __weak bpf_jit_free(struct bpf_prog *fp)
1235 {
1236 	if (fp->jited) {
1237 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1238 
1239 		bpf_jit_binary_free(hdr);
1240 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1241 	}
1242 
1243 	bpf_prog_unlock_free(fp);
1244 }
1245 
1246 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1247 			  const struct bpf_insn *insn, bool extra_pass,
1248 			  u64 *func_addr, bool *func_addr_fixed)
1249 {
1250 	s16 off = insn->off;
1251 	s32 imm = insn->imm;
1252 	u8 *addr;
1253 	int err;
1254 
1255 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1256 	if (!*func_addr_fixed) {
1257 		/* Place-holder address till the last pass has collected
1258 		 * all addresses for JITed subprograms in which case we
1259 		 * can pick them up from prog->aux.
1260 		 */
1261 		if (!extra_pass)
1262 			addr = NULL;
1263 		else if (prog->aux->func &&
1264 			 off >= 0 && off < prog->aux->real_func_cnt)
1265 			addr = (u8 *)prog->aux->func[off]->bpf_func;
1266 		else
1267 			return -EINVAL;
1268 	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1269 		   bpf_jit_supports_far_kfunc_call()) {
1270 		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1271 		if (err)
1272 			return err;
1273 	} else {
1274 		/* Address of a BPF helper call. Since part of the core
1275 		 * kernel, it's always at a fixed location. __bpf_call_base
1276 		 * and the helper with imm relative to it are both in core
1277 		 * kernel.
1278 		 */
1279 		addr = (u8 *)__bpf_call_base + imm;
1280 	}
1281 
1282 	*func_addr = (unsigned long)addr;
1283 	return 0;
1284 }
1285 
1286 const char *bpf_jit_get_prog_name(struct bpf_prog *prog)
1287 {
1288 	if (prog->aux->ksym.prog)
1289 		return prog->aux->ksym.name;
1290 	return prog->aux->name;
1291 }
1292 
1293 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1294 			      const struct bpf_insn *aux,
1295 			      struct bpf_insn *to_buff,
1296 			      bool emit_zext)
1297 {
1298 	struct bpf_insn *to = to_buff;
1299 	u32 imm_rnd = get_random_u32();
1300 	s16 off;
1301 
1302 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
1303 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1304 
1305 	/* Constraints on AX register:
1306 	 *
1307 	 * AX register is inaccessible from user space. It is mapped in
1308 	 * all JITs, and used here for constant blinding rewrites. It is
1309 	 * typically "stateless" meaning its contents are only valid within
1310 	 * the executed instruction, but not across several instructions.
1311 	 * There are a few exceptions however which are further detailed
1312 	 * below.
1313 	 *
1314 	 * Constant blinding is only used by JITs, not in the interpreter.
1315 	 * The interpreter uses AX in some occasions as a local temporary
1316 	 * register e.g. in DIV or MOD instructions.
1317 	 *
1318 	 * In restricted circumstances, the verifier can also use the AX
1319 	 * register for rewrites as long as they do not interfere with
1320 	 * the above cases!
1321 	 */
1322 	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1323 		goto out;
1324 
1325 	if (from->imm == 0 &&
1326 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
1327 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1328 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1329 		goto out;
1330 	}
1331 
1332 	switch (from->code) {
1333 	case BPF_ALU | BPF_ADD | BPF_K:
1334 	case BPF_ALU | BPF_SUB | BPF_K:
1335 	case BPF_ALU | BPF_AND | BPF_K:
1336 	case BPF_ALU | BPF_OR  | BPF_K:
1337 	case BPF_ALU | BPF_XOR | BPF_K:
1338 	case BPF_ALU | BPF_MUL | BPF_K:
1339 	case BPF_ALU | BPF_MOV | BPF_K:
1340 	case BPF_ALU | BPF_DIV | BPF_K:
1341 	case BPF_ALU | BPF_MOD | BPF_K:
1342 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1343 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1344 		*to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1345 		break;
1346 
1347 	case BPF_ALU64 | BPF_ADD | BPF_K:
1348 	case BPF_ALU64 | BPF_SUB | BPF_K:
1349 	case BPF_ALU64 | BPF_AND | BPF_K:
1350 	case BPF_ALU64 | BPF_OR  | BPF_K:
1351 	case BPF_ALU64 | BPF_XOR | BPF_K:
1352 	case BPF_ALU64 | BPF_MUL | BPF_K:
1353 	case BPF_ALU64 | BPF_MOV | BPF_K:
1354 	case BPF_ALU64 | BPF_DIV | BPF_K:
1355 	case BPF_ALU64 | BPF_MOD | BPF_K:
1356 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1357 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1358 		*to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1359 		break;
1360 
1361 	case BPF_JMP | BPF_JEQ  | BPF_K:
1362 	case BPF_JMP | BPF_JNE  | BPF_K:
1363 	case BPF_JMP | BPF_JGT  | BPF_K:
1364 	case BPF_JMP | BPF_JLT  | BPF_K:
1365 	case BPF_JMP | BPF_JGE  | BPF_K:
1366 	case BPF_JMP | BPF_JLE  | BPF_K:
1367 	case BPF_JMP | BPF_JSGT | BPF_K:
1368 	case BPF_JMP | BPF_JSLT | BPF_K:
1369 	case BPF_JMP | BPF_JSGE | BPF_K:
1370 	case BPF_JMP | BPF_JSLE | BPF_K:
1371 	case BPF_JMP | BPF_JSET | BPF_K:
1372 		/* Accommodate for extra offset in case of a backjump. */
1373 		off = from->off;
1374 		if (off < 0)
1375 			off -= 2;
1376 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1377 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1378 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1379 		break;
1380 
1381 	case BPF_JMP32 | BPF_JEQ  | BPF_K:
1382 	case BPF_JMP32 | BPF_JNE  | BPF_K:
1383 	case BPF_JMP32 | BPF_JGT  | BPF_K:
1384 	case BPF_JMP32 | BPF_JLT  | BPF_K:
1385 	case BPF_JMP32 | BPF_JGE  | BPF_K:
1386 	case BPF_JMP32 | BPF_JLE  | BPF_K:
1387 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1388 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1389 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1390 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1391 	case BPF_JMP32 | BPF_JSET | BPF_K:
1392 		/* Accommodate for extra offset in case of a backjump. */
1393 		off = from->off;
1394 		if (off < 0)
1395 			off -= 2;
1396 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1397 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1398 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1399 				      off);
1400 		break;
1401 
1402 	case BPF_LD | BPF_IMM | BPF_DW:
1403 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1404 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1405 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1406 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1407 		break;
1408 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1409 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1410 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1411 		if (emit_zext)
1412 			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1413 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1414 		break;
1415 
1416 	case BPF_ST | BPF_MEM | BPF_DW:
1417 	case BPF_ST | BPF_MEM | BPF_W:
1418 	case BPF_ST | BPF_MEM | BPF_H:
1419 	case BPF_ST | BPF_MEM | BPF_B:
1420 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1421 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1422 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1423 		break;
1424 
1425 	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1426 	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1427 	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1428 	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1429 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^
1430 				      from->imm);
1431 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1432 		/*
1433 		 * Cannot use BPF_STX_MEM() macro here as it
1434 		 * hardcodes BPF_MEM mode, losing PROBE_MEM32
1435 		 * and breaking arena addressing in the JIT.
1436 		 */
1437 		*to++ = (struct bpf_insn) {
1438 			.code  = BPF_STX | BPF_PROBE_MEM32 |
1439 				 BPF_SIZE(from->code),
1440 			.dst_reg = from->dst_reg,
1441 			.src_reg = BPF_REG_AX,
1442 			.off   = from->off,
1443 		};
1444 		break;
1445 	}
1446 out:
1447 	return to - to_buff;
1448 }
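
/*
 * Worked example of the rewrite above (0x12345678 is a hypothetical
 * user-supplied constant; imm_rnd is chosen at blinding time):
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x12345678)
 *
 * becomes
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x12345678)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the attacker-controlled constant never appears verbatim in the
 * JITed image.
 */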
1449 
1450 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1451 					      gfp_t gfp_extra_flags)
1452 {
1453 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1454 	struct bpf_prog *fp;
1455 
1456 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1457 	if (fp != NULL) {
1458 		/* aux->prog still points to the fp_other one, so
1459 		 * when promoting the clone to the real program,
1460 		 * this still needs to be adapted.
1461 		 */
1462 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1463 	}
1464 
1465 	return fp;
1466 }
1467 
1468 static void bpf_prog_clone_free(struct bpf_prog *fp)
1469 {
1470 	/* aux was stolen by the other clone, so we cannot free
1471 	 * it from this path! It will be freed eventually by the
1472 	 * other program on release.
1473 	 *
1474 	 * At this point, we don't need a deferred release since
1475 	 * clone is guaranteed to not be locked.
1476 	 */
1477 	fp->aux = NULL;
1478 	fp->stats = NULL;
1479 	fp->active = NULL;
1480 	__bpf_prog_free(fp);
1481 }
1482 
1483 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1484 {
1485 	/* We have to repoint aux->prog to self, as we don't
1486 	 * know whether fp here is the clone or the original.
1487 	 */
1488 	fp->aux->prog = fp;
1489 	if (fp->aux->offload)
1490 		fp->aux->offload->prog = fp;
1491 	bpf_prog_clone_free(fp_other);
1492 }
1493 
1494 static void adjust_insn_arrays(struct bpf_prog *prog, u32 off, u32 len)
1495 {
1496 #ifdef CONFIG_BPF_SYSCALL
1497 	struct bpf_map *map;
1498 	int i;
1499 
1500 	if (len <= 1)
1501 		return;
1502 
1503 	for (i = 0; i < prog->aux->used_map_cnt; i++) {
1504 		map = prog->aux->used_maps[i];
1505 		if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY)
1506 			bpf_insn_array_adjust(map, off, len);
1507 	}
1508 #endif
1509 }
1510 
1511 /*
1512  * Now this function is used only to blind the main prog and must be invoked only when
1513  * bpf_prog_need_blind() returns true.
1514  */
1515 struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog)
1516 {
1517 	struct bpf_insn insn_buff[16], aux[2];
1518 	struct bpf_prog *clone, *tmp;
1519 	int insn_delta, insn_cnt;
1520 	struct bpf_insn *insn;
1521 	int i, rewritten;
1522 
1523 	if (WARN_ON_ONCE(env && env->prog != prog))
1524 		return ERR_PTR(-EINVAL);
1525 
1526 	clone = bpf_prog_clone_create(prog, GFP_USER);
1527 	if (!clone)
1528 		return ERR_PTR(-ENOMEM);
1529 
1530 	/* make sure bpf_patch_insn_data() patches the correct prog */
1531 	if (env)
1532 		env->prog = clone;
1533 
1534 	insn_cnt = clone->len;
1535 	insn = clone->insnsi;
1536 
1537 	for (i = 0; i < insn_cnt; i++, insn++) {
1538 		if (bpf_pseudo_func(insn)) {
1539 			/* ld_imm64 with an address of bpf subprog is not
1540 			 * a user controlled constant. Don't randomize it,
1541 			 * since it will conflict with jit_subprogs() logic.
1542 			 */
1543 			insn++;
1544 			i++;
1545 			continue;
1546 		}
1547 
1548 		/* We temporarily need to hold the original ld64 insn
1549 		 * so that we can still access the first part in the
1550 		 * second blinding run.
1551 		 */
1552 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1553 		    insn[1].code == 0)
1554 			memcpy(aux, insn, sizeof(aux));
1555 
1556 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1557 						clone->aux->verifier_zext);
1558 		if (!rewritten)
1559 			continue;
1560 
1561 		if (env)
1562 			tmp = bpf_patch_insn_data(env, i, insn_buff, rewritten);
1563 		else
1564 			tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1565 
1566 		if (IS_ERR_OR_NULL(tmp)) {
1567 			if (env)
1568 				/* restore the original prog */
1569 				env->prog = prog;
1570 			/* Patching may have repointed aux->prog during
1571 			 * realloc from the original one, so we need to
1572 			 * fix it up here on error.
1573 			 */
1574 			bpf_jit_prog_release_other(prog, clone);
1575 			return IS_ERR(tmp) ? tmp : ERR_PTR(-ENOMEM);
1576 		}
1577 
1578 		clone = tmp;
1579 		insn_delta = rewritten - 1;
1580 
1581 		if (env)
1582 			env->prog = clone;
1583 		else
1584 			/*
1585 			 * Instruction arrays must be updated using absolute xlated offsets.
1586 			 * The arrays have already been adjusted by bpf_patch_insn_data() when
1587 			 * env is not NULL.
1588 			 */
1589 			adjust_insn_arrays(clone, i, rewritten);
1590 
1591 		/* Walk new program and skip insns we just inserted. */
1592 		insn = clone->insnsi + i + insn_delta;
1593 		insn_cnt += insn_delta;
1594 		i        += insn_delta;
1595 	}
1596 
1597 	clone->blinded = 1;
1598 	return clone;
1599 }
1600 #endif /* CONFIG_BPF_JIT */
1601 
1602 /* Base function for offset calculation. Needs to go into .text section,
1603  * therefore keeping it non-static as well; will also be used by JITs
1604  * anyway later on, so do not let the compiler omit it. This also needs
1605  * to go into kallsyms for correlation from e.g. bpftool, so naming
1606  * must not change.
1607  */
1608 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1609 {
1610 	return 0;
1611 }
1612 EXPORT_SYMBOL_GPL(__bpf_call_base);
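
/*
 * Sketch of how this base is used: for a BPF_CALL insn to a helper, the
 * verifier stores insn->imm = helper_address - __bpf_call_base, so the
 * interpreter and JITs recover the target as
 *
 *	addr = (u8 *)__bpf_call_base + insn->imm;
 *
 * as seen in bpf_jit_get_func_addr() above.
 */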
1613 
1614 /* All UAPI available opcodes. */
1615 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
1616 	/* 32 bit ALU operations. */		\
1617 	/*   Register based. */			\
1618 	INSN_3(ALU, ADD,  X),			\
1619 	INSN_3(ALU, SUB,  X),			\
1620 	INSN_3(ALU, AND,  X),			\
1621 	INSN_3(ALU, OR,   X),			\
1622 	INSN_3(ALU, LSH,  X),			\
1623 	INSN_3(ALU, RSH,  X),			\
1624 	INSN_3(ALU, XOR,  X),			\
1625 	INSN_3(ALU, MUL,  X),			\
1626 	INSN_3(ALU, MOV,  X),			\
1627 	INSN_3(ALU, ARSH, X),			\
1628 	INSN_3(ALU, DIV,  X),			\
1629 	INSN_3(ALU, MOD,  X),			\
1630 	INSN_2(ALU, NEG),			\
1631 	INSN_3(ALU, END, TO_BE),		\
1632 	INSN_3(ALU, END, TO_LE),		\
1633 	/*   Immediate based. */		\
1634 	INSN_3(ALU, ADD,  K),			\
1635 	INSN_3(ALU, SUB,  K),			\
1636 	INSN_3(ALU, AND,  K),			\
1637 	INSN_3(ALU, OR,   K),			\
1638 	INSN_3(ALU, LSH,  K),			\
1639 	INSN_3(ALU, RSH,  K),			\
1640 	INSN_3(ALU, XOR,  K),			\
1641 	INSN_3(ALU, MUL,  K),			\
1642 	INSN_3(ALU, MOV,  K),			\
1643 	INSN_3(ALU, ARSH, K),			\
1644 	INSN_3(ALU, DIV,  K),			\
1645 	INSN_3(ALU, MOD,  K),			\
1646 	/* 64 bit ALU operations. */		\
1647 	/*   Register based. */			\
1648 	INSN_3(ALU64, ADD,  X),			\
1649 	INSN_3(ALU64, SUB,  X),			\
1650 	INSN_3(ALU64, AND,  X),			\
1651 	INSN_3(ALU64, OR,   X),			\
1652 	INSN_3(ALU64, LSH,  X),			\
1653 	INSN_3(ALU64, RSH,  X),			\
1654 	INSN_3(ALU64, XOR,  X),			\
1655 	INSN_3(ALU64, MUL,  X),			\
1656 	INSN_3(ALU64, MOV,  X),			\
1657 	INSN_3(ALU64, ARSH, X),			\
1658 	INSN_3(ALU64, DIV,  X),			\
1659 	INSN_3(ALU64, MOD,  X),			\
1660 	INSN_2(ALU64, NEG),			\
1661 	INSN_3(ALU64, END, TO_LE),		\
1662 	/*   Immediate based. */		\
1663 	INSN_3(ALU64, ADD,  K),			\
1664 	INSN_3(ALU64, SUB,  K),			\
1665 	INSN_3(ALU64, AND,  K),			\
1666 	INSN_3(ALU64, OR,   K),			\
1667 	INSN_3(ALU64, LSH,  K),			\
1668 	INSN_3(ALU64, RSH,  K),			\
1669 	INSN_3(ALU64, XOR,  K),			\
1670 	INSN_3(ALU64, MUL,  K),			\
1671 	INSN_3(ALU64, MOV,  K),			\
1672 	INSN_3(ALU64, ARSH, K),			\
1673 	INSN_3(ALU64, DIV,  K),			\
1674 	INSN_3(ALU64, MOD,  K),			\
1675 	/* Call instruction. */			\
1676 	INSN_2(JMP, CALL),			\
1677 	/* Exit instruction. */			\
1678 	INSN_2(JMP, EXIT),			\
1679 	/* 32-bit Jump instructions. */		\
1680 	/*   Register based. */			\
1681 	INSN_3(JMP32, JEQ,  X),			\
1682 	INSN_3(JMP32, JNE,  X),			\
1683 	INSN_3(JMP32, JGT,  X),			\
1684 	INSN_3(JMP32, JLT,  X),			\
1685 	INSN_3(JMP32, JGE,  X),			\
1686 	INSN_3(JMP32, JLE,  X),			\
1687 	INSN_3(JMP32, JSGT, X),			\
1688 	INSN_3(JMP32, JSLT, X),			\
1689 	INSN_3(JMP32, JSGE, X),			\
1690 	INSN_3(JMP32, JSLE, X),			\
1691 	INSN_3(JMP32, JSET, X),			\
1692 	/*   Immediate based. */		\
1693 	INSN_3(JMP32, JEQ,  K),			\
1694 	INSN_3(JMP32, JNE,  K),			\
1695 	INSN_3(JMP32, JGT,  K),			\
1696 	INSN_3(JMP32, JLT,  K),			\
1697 	INSN_3(JMP32, JGE,  K),			\
1698 	INSN_3(JMP32, JLE,  K),			\
1699 	INSN_3(JMP32, JSGT, K),			\
1700 	INSN_3(JMP32, JSLT, K),			\
1701 	INSN_3(JMP32, JSGE, K),			\
1702 	INSN_3(JMP32, JSLE, K),			\
1703 	INSN_3(JMP32, JSET, K),			\
1704 	/* Jump instructions. */		\
1705 	/*   Register based. */			\
1706 	INSN_3(JMP, JEQ,  X),			\
1707 	INSN_3(JMP, JNE,  X),			\
1708 	INSN_3(JMP, JGT,  X),			\
1709 	INSN_3(JMP, JLT,  X),			\
1710 	INSN_3(JMP, JGE,  X),			\
1711 	INSN_3(JMP, JLE,  X),			\
1712 	INSN_3(JMP, JSGT, X),			\
1713 	INSN_3(JMP, JSLT, X),			\
1714 	INSN_3(JMP, JSGE, X),			\
1715 	INSN_3(JMP, JSLE, X),			\
1716 	INSN_3(JMP, JSET, X),			\
1717 	/*   Immediate based. */		\
1718 	INSN_3(JMP, JEQ,  K),			\
1719 	INSN_3(JMP, JNE,  K),			\
1720 	INSN_3(JMP, JGT,  K),			\
1721 	INSN_3(JMP, JLT,  K),			\
1722 	INSN_3(JMP, JGE,  K),			\
1723 	INSN_3(JMP, JLE,  K),			\
1724 	INSN_3(JMP, JSGT, K),			\
1725 	INSN_3(JMP, JSLT, K),			\
1726 	INSN_3(JMP, JSGE, K),			\
1727 	INSN_3(JMP, JSLE, K),			\
1728 	INSN_3(JMP, JSET, K),			\
1729 	INSN_2(JMP, JA),			\
1730 	INSN_2(JMP32, JA),			\
1731 	/* Atomic operations. */		\
1732 	INSN_3(STX, ATOMIC, B),			\
1733 	INSN_3(STX, ATOMIC, H),			\
1734 	INSN_3(STX, ATOMIC, W),			\
1735 	INSN_3(STX, ATOMIC, DW),		\
1736 	/* Store instructions. */		\
1737 	/*   Register based. */			\
1738 	INSN_3(STX, MEM,  B),			\
1739 	INSN_3(STX, MEM,  H),			\
1740 	INSN_3(STX, MEM,  W),			\
1741 	INSN_3(STX, MEM,  DW),			\
1742 	/*   Immediate based. */		\
1743 	INSN_3(ST, MEM, B),			\
1744 	INSN_3(ST, MEM, H),			\
1745 	INSN_3(ST, MEM, W),			\
1746 	INSN_3(ST, MEM, DW),			\
1747 	/* Load instructions. */		\
1748 	/*   Register based. */			\
1749 	INSN_3(LDX, MEM, B),			\
1750 	INSN_3(LDX, MEM, H),			\
1751 	INSN_3(LDX, MEM, W),			\
1752 	INSN_3(LDX, MEM, DW),			\
1753 	INSN_3(LDX, MEMSX, B),			\
1754 	INSN_3(LDX, MEMSX, H),			\
1755 	INSN_3(LDX, MEMSX, W),			\
1756 	/*   Immediate based. */		\
1757 	INSN_3(LD, IMM, DW)
1758 
1759 bool bpf_opcode_in_insntable(u8 code)
1760 {
1761 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1762 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1763 	static const bool public_insntable[256] = {
1764 		[0 ... 255] = false,
1765 		/* Now overwrite non-defaults ... */
1766 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1767 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1768 		[BPF_LD | BPF_ABS | BPF_B] = true,
1769 		[BPF_LD | BPF_ABS | BPF_H] = true,
1770 		[BPF_LD | BPF_ABS | BPF_W] = true,
1771 		[BPF_LD | BPF_IND | BPF_B] = true,
1772 		[BPF_LD | BPF_IND | BPF_H] = true,
1773 		[BPF_LD | BPF_IND | BPF_W] = true,
1774 		[BPF_JMP | BPF_JA | BPF_X] = true,
1775 		[BPF_JMP | BPF_JCOND] = true,
1776 	};
1777 #undef BPF_INSN_3_TBL
1778 #undef BPF_INSN_2_TBL
1779 	return public_insntable[code];
1780 }
1781 
1782 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1783 /* Absolute value of s32 without undefined behavior for S32_MIN */
1784 static u32 abs_s32(s32 x)
1785 {
1786 	return x >= 0 ? (u32)x : -(u32)x;
1787 }
1788 
1789 /**
1790  *	___bpf_prog_run - run eBPF program on a given context
1791  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1792  *	@insn: is the array of eBPF instructions
1793  *
1794  * Decode and execute eBPF instructions.
1795  *
1796  * Return: whatever value is in %BPF_R0 at program exit
1797  */
1798 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1799 {
1800 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1801 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1802 	static const void * const jumptable[256] __annotate_jump_table = {
1803 		[0 ... 255] = &&default_label,
1804 		/* Now overwrite non-defaults ... */
1805 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1806 		/* Non-UAPI available opcodes. */
1807 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1808 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1809 		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1810 		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1811 		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1812 		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1813 		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1814 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1815 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1816 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1817 	};
1818 #undef BPF_INSN_3_LBL
1819 #undef BPF_INSN_2_LBL
1820 	u32 tail_call_cnt = 0;
1821 
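/* Both macros advance to the next instruction and re-dispatch via the jump
 * table; CONT_JMP is a separate name used on taken-branch paths.
 */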
1822 #define CONT	 ({ insn++; goto select_insn; })
1823 #define CONT_JMP ({ insn++; goto select_insn; })
1824 
1825 select_insn:
1826 	goto *jumptable[insn->code];
1827 
1828 	/* Explicitly mask the register-based shift amounts with 63 or 31
1829 	 * to avoid undefined behavior. Normally this won't affect the
1830 	 * generated code: on native 64-bit archs such as x86-64 or arm64,
1831 	 * the compiler optimizes the AND away for the interpreter. For the
1832 	 * JITs, each backend compiles the BPF shift operations to machine
1833 	 * instructions which produce implementation-defined results in such
1834 	 * a case; the resulting contents of the register may be arbitrary,
1835 	 * but program behaviour as a whole remains defined. In other words,
1836 	 * in case of JIT backends, the AND must /not/ be added to the
1837 	 * emitted LSH/RSH/ARSH translation.
1838 	 */
1839 	/* ALU (shifts) */
1840 #define SHT(OPCODE, OP)					\
1841 	ALU64_##OPCODE##_X:				\
1842 		DST = DST OP (SRC & 63);		\
1843 		CONT;					\
1844 	ALU_##OPCODE##_X:				\
1845 		DST = (u32) DST OP ((u32) SRC & 31);	\
1846 		CONT;					\
1847 	ALU64_##OPCODE##_K:				\
1848 		DST = DST OP IMM;			\
1849 		CONT;					\
1850 	ALU_##OPCODE##_K:				\
1851 		DST = (u32) DST OP (u32) IMM;		\
1852 		CONT;
1853 	/* ALU (rest) */
1854 #define ALU(OPCODE, OP)					\
1855 	ALU64_##OPCODE##_X:				\
1856 		DST = DST OP SRC;			\
1857 		CONT;					\
1858 	ALU_##OPCODE##_X:				\
1859 		DST = (u32) DST OP (u32) SRC;		\
1860 		CONT;					\
1861 	ALU64_##OPCODE##_K:				\
1862 		DST = DST OP IMM;			\
1863 		CONT;					\
1864 	ALU_##OPCODE##_K:				\
1865 		DST = (u32) DST OP (u32) IMM;		\
1866 		CONT;
1867 	ALU(ADD,  +)
1868 	ALU(SUB,  -)
1869 	ALU(AND,  &)
1870 	ALU(OR,   |)
1871 	ALU(XOR,  ^)
1872 	ALU(MUL,  *)
1873 	SHT(LSH, <<)
1874 	SHT(RSH, >>)
1875 #undef SHT
1876 #undef ALU
1877 	ALU_NEG:
1878 		DST = (u32) -DST;
1879 		CONT;
1880 	ALU64_NEG:
1881 		DST = -DST;
1882 		CONT;
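	/* For MOV, a non-zero insn->off selects the BPF_MOVSX variant and
	 * encodes the source width to sign-extend from (8, 16 or 32 bits).
	 */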
1883 	ALU_MOV_X:
1884 		switch (OFF) {
1885 		case 0:
1886 			DST = (u32) SRC;
1887 			break;
1888 		case 8:
1889 			DST = (u32)(s8) SRC;
1890 			break;
1891 		case 16:
1892 			DST = (u32)(s16) SRC;
1893 			break;
1894 		}
1895 		CONT;
1896 	ALU_MOV_K:
1897 		DST = (u32) IMM;
1898 		CONT;
1899 	ALU64_MOV_X:
1900 		switch (OFF) {
1901 		case 0:
1902 			DST = SRC;
1903 			break;
1904 		case 8:
1905 			DST = (s8) SRC;
1906 			break;
1907 		case 16:
1908 			DST = (s16) SRC;
1909 			break;
1910 		case 32:
1911 			DST = (s32) SRC;
1912 			break;
1913 		}
1914 		CONT;
1915 	ALU64_MOV_K:
1916 		DST = IMM;
1917 		CONT;
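	/* BPF_LD | BPF_IMM | BPF_DW spans two instructions: the low 32 bits
	 * of the immediate are in insn[0].imm, the high 32 bits in
	 * insn[1].imm, hence the extra insn++ below.
	 */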
1918 	LD_IMM_DW:
1919 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1920 		insn++;
1921 		CONT;
1922 	ALU_ARSH_X:
1923 		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1924 		CONT;
1925 	ALU_ARSH_K:
1926 		DST = (u64) (u32) (((s32) DST) >> IMM);
1927 		CONT;
1928 	ALU64_ARSH_X:
1929 		(*(s64 *) &DST) >>= (SRC & 63);
1930 		CONT;
1931 	ALU64_ARSH_K:
1932 		(*(s64 *) &DST) >>= IMM;
1933 		CONT;
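	/* For DIV and MOD, insn->off == 0 selects the unsigned operation and
	 * insn->off == 1 the signed (BPF_SDIV/BPF_SMOD) variant.
	 */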
1934 	ALU64_MOD_X:
1935 		switch (OFF) {
1936 		case 0:
1937 			div64_u64_rem(DST, SRC, &AX);
1938 			DST = AX;
1939 			break;
1940 		case 1:
1941 			AX = div64_s64(DST, SRC);
1942 			DST = DST - AX * SRC;
1943 			break;
1944 		}
1945 		CONT;
1946 	ALU_MOD_X:
1947 		switch (OFF) {
1948 		case 0:
1949 			AX = (u32) DST;
1950 			DST = do_div(AX, (u32) SRC);
1951 			break;
1952 		case 1:
1953 			AX = abs_s32((s32)DST);
1954 			AX = do_div(AX, abs_s32((s32)SRC));
1955 			if ((s32)DST < 0)
1956 				DST = (u32)-AX;
1957 			else
1958 				DST = (u32)AX;
1959 			break;
1960 		}
1961 		CONT;
1962 	ALU64_MOD_K:
1963 		switch (OFF) {
1964 		case 0:
1965 			div64_u64_rem(DST, IMM, &AX);
1966 			DST = AX;
1967 			break;
1968 		case 1:
1969 			AX = div64_s64(DST, IMM);
1970 			DST = DST - AX * IMM;
1971 			break;
1972 		}
1973 		CONT;
1974 	ALU_MOD_K:
1975 		switch (OFF) {
1976 		case 0:
1977 			AX = (u32) DST;
1978 			DST = do_div(AX, (u32) IMM);
1979 			break;
1980 		case 1:
1981 			AX = abs_s32((s32)DST);
1982 			AX = do_div(AX, abs_s32((s32)IMM));
1983 			if ((s32)DST < 0)
1984 				DST = (u32)-AX;
1985 			else
1986 				DST = (u32)AX;
1987 			break;
1988 		}
1989 		CONT;
1990 	ALU64_DIV_X:
1991 		switch (OFF) {
1992 		case 0:
1993 			DST = div64_u64(DST, SRC);
1994 			break;
1995 		case 1:
1996 			DST = div64_s64(DST, SRC);
1997 			break;
1998 		}
1999 		CONT;
2000 	ALU_DIV_X:
2001 		switch (OFF) {
2002 		case 0:
2003 			AX = (u32) DST;
2004 			do_div(AX, (u32) SRC);
2005 			DST = (u32) AX;
2006 			break;
2007 		case 1:
2008 			AX = abs_s32((s32)DST);
2009 			do_div(AX, abs_s32((s32)SRC));
2010 			if (((s32)DST < 0) == ((s32)SRC < 0))
2011 				DST = (u32)AX;
2012 			else
2013 				DST = (u32)-AX;
2014 			break;
2015 		}
2016 		CONT;
2017 	ALU64_DIV_K:
2018 		switch (OFF) {
2019 		case 0:
2020 			DST = div64_u64(DST, IMM);
2021 			break;
2022 		case 1:
2023 			DST = div64_s64(DST, IMM);
2024 			break;
2025 		}
2026 		CONT;
2027 	ALU_DIV_K:
2028 		switch (OFF) {
2029 		case 0:
2030 			AX = (u32) DST;
2031 			do_div(AX, (u32) IMM);
2032 			DST = (u32) AX;
2033 			break;
2034 		case 1:
2035 			AX = abs_s32((s32)DST);
2036 			do_div(AX, abs_s32((s32)IMM));
2037 			if (((s32)DST < 0) == ((s32)IMM < 0))
2038 				DST = (u32)AX;
2039 			else
2040 				DST = (u32)-AX;
2041 			break;
2042 		}
2043 		CONT;
2044 	ALU_END_TO_BE:
2045 		switch (IMM) {
2046 		case 16:
2047 			DST = (__force u16) cpu_to_be16(DST);
2048 			break;
2049 		case 32:
2050 			DST = (__force u32) cpu_to_be32(DST);
2051 			break;
2052 		case 64:
2053 			DST = (__force u64) cpu_to_be64(DST);
2054 			break;
2055 		}
2056 		CONT;
2057 	ALU_END_TO_LE:
2058 		switch (IMM) {
2059 		case 16:
2060 			DST = (__force u16) cpu_to_le16(DST);
2061 			break;
2062 		case 32:
2063 			DST = (__force u32) cpu_to_le32(DST);
2064 			break;
2065 		case 64:
2066 			DST = (__force u64) cpu_to_le64(DST);
2067 			break;
2068 		}
2069 		CONT;
2070 	ALU64_END_TO_LE:
2071 		switch (IMM) {
2072 		case 16:
2073 			DST = (__force u16) __swab16(DST);
2074 			break;
2075 		case 32:
2076 			DST = (__force u32) __swab32(DST);
2077 			break;
2078 		case 64:
2079 			DST = (__force u64) __swab64(DST);
2080 			break;
2081 		}
2082 		CONT;
2083 
2084 	/* CALL */
2085 	JMP_CALL:
2086 		/* Function call scratches BPF_R1-BPF_R5 registers,
2087 		 * preserves BPF_R6-BPF_R9, and stores return value
2088 		 * into BPF_R0.
2089 		 */
2090 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
2091 						       BPF_R4, BPF_R5);
2092 		CONT;
2093 
2094 	JMP_CALL_ARGS:
2095 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
2096 							    BPF_R3, BPF_R4,
2097 							    BPF_R5,
2098 							    insn + insn->off + 1);
2099 		CONT;
2100 
2101 	JMP_TAIL_CALL: {
2102 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
2103 		struct bpf_array *array = container_of(map, struct bpf_array, map);
2104 		struct bpf_prog *prog;
2105 		u32 index = BPF_R3;
2106 
2107 		if (unlikely(index >= array->map.max_entries))
2108 			goto out;
2109 
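		/* Bound the depth of chained tail calls; once the counter
		 * reaches MAX_TAIL_CALL_CNT the tail call acts as a no-op
		 * and execution falls through to the next instruction.
		 */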
2110 		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
2111 			goto out;
2112 
2113 		prog = READ_ONCE(array->ptrs[index]);
2114 		if (!prog)
2115 			goto out;
2116 
2117 		tail_call_cnt++;
2118 
2119 		/* ARG1 at this point is guaranteed to point to CTX from
2120 		 * the verifier side due to the fact that the tail call is
2121 		 * handled like a helper, that is, bpf_tail_call_proto,
2122 		 * where arg1_type is ARG_PTR_TO_CTX.
2123 		 */
2124 		insn = prog->insnsi;
2125 		goto select_insn;
2126 out:
2127 		CONT;
2128 	}
2129 	JMP_JA:
2130 		insn += insn->off;
2131 		CONT;
2132 	JMP32_JA:
2133 		insn += insn->imm;
2134 		CONT;
2135 	JMP_EXIT:
2136 		return BPF_R0;
2137 	/* JMP */
2138 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
2139 	JMP_##OPCODE##_X:					\
2140 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
2141 			insn += insn->off;			\
2142 			CONT_JMP;				\
2143 		}						\
2144 		CONT;						\
2145 	JMP32_##OPCODE##_X:					\
2146 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
2147 			insn += insn->off;			\
2148 			CONT_JMP;				\
2149 		}						\
2150 		CONT;						\
2151 	JMP_##OPCODE##_K:					\
2152 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
2153 			insn += insn->off;			\
2154 			CONT_JMP;				\
2155 		}						\
2156 		CONT;						\
2157 	JMP32_##OPCODE##_K:					\
2158 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
2159 			insn += insn->off;			\
2160 			CONT_JMP;				\
2161 		}						\
2162 		CONT;
2163 	COND_JMP(u, JEQ, ==)
2164 	COND_JMP(u, JNE, !=)
2165 	COND_JMP(u, JGT, >)
2166 	COND_JMP(u, JLT, <)
2167 	COND_JMP(u, JGE, >=)
2168 	COND_JMP(u, JLE, <=)
2169 	COND_JMP(u, JSET, &)
2170 	COND_JMP(s, JSGT, >)
2171 	COND_JMP(s, JSLT, <)
2172 	COND_JMP(s, JSGE, >=)
2173 	COND_JMP(s, JSLE, <=)
2174 #undef COND_JMP
2175 	/* ST, STX and LDX */
2176 	ST_NOSPEC:
2177 		/* Speculation barrier for mitigating Speculative Store Bypass,
2178 		 * Bounds-Check Bypass and Type Confusion. In case of arm64, we
2179 		 * rely on the firmware mitigation as controlled via the ssbd
2180 		 * kernel parameter. Whenever the mitigation is enabled, it
2181 		 * works for all of the kernel code with no need to provide any
2182 		 * additional instructions here. In case of x86, we use 'lfence'
2183 		 * insn for mitigation. We reuse preexisting logic from Spectre
2184 		 * v1 mitigation that happens to produce the required code on
2185 		 * x86 for v4 as well.
2186 		 */
2187 		barrier_nospec();
2188 		CONT;
2189 #define LDST(SIZEOP, SIZE)						\
2190 	STX_MEM_##SIZEOP:						\
2191 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
2192 		CONT;							\
2193 	ST_MEM_##SIZEOP:						\
2194 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
2195 		CONT;							\
2196 	LDX_MEM_##SIZEOP:						\
2197 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
2198 		CONT;							\
2199 	LDX_PROBE_MEM_##SIZEOP:						\
2200 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
2201 			      (const void *)(long) (SRC + insn->off));	\
2202 		DST = *((SIZE *)&DST);					\
2203 		CONT;
2204 
2205 	LDST(B,   u8)
2206 	LDST(H,  u16)
2207 	LDST(W,  u32)
2208 	LDST(DW, u64)
2209 #undef LDST
2210 
2211 #define LDSX(SIZEOP, SIZE)						\
2212 	LDX_MEMSX_##SIZEOP:						\
2213 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
2214 		CONT;							\
2215 	LDX_PROBE_MEMSX_##SIZEOP:					\
2216 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),		\
2217 				      (const void *)(long) (SRC + insn->off));	\
2218 		DST = *((SIZE *)&DST);					\
2219 		CONT;
2220 
2221 	LDSX(B,   s8)
2222 	LDSX(H,  s16)
2223 	LDSX(W,  s32)
2224 #undef LDSX
2225 
2226 #define ATOMIC_ALU_OP(BOP, KOP)						\
2227 		case BOP:						\
2228 			if (BPF_SIZE(insn->code) == BPF_W)		\
2229 				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2230 					     (DST + insn->off));	\
2231 			else if (BPF_SIZE(insn->code) == BPF_DW)	\
2232 				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2233 					       (DST + insn->off));	\
2234 			else						\
2235 				goto default_label;			\
2236 			break;						\
2237 		case BOP | BPF_FETCH:					\
2238 			if (BPF_SIZE(insn->code) == BPF_W)		\
2239 				SRC = (u32) atomic_fetch_##KOP(		\
2240 					(u32) SRC,			\
2241 					(atomic_t *)(unsigned long) (DST + insn->off)); \
2242 			else if (BPF_SIZE(insn->code) == BPF_DW)	\
2243 				SRC = (u64) atomic64_fetch_##KOP(	\
2244 					(u64) SRC,			\
2245 					(atomic64_t *)(unsigned long) (DST + insn->off)); \
2246 			else						\
2247 				goto default_label;			\
2248 			break;
2249 
2250 	STX_ATOMIC_DW:
2251 	STX_ATOMIC_W:
2252 	STX_ATOMIC_H:
2253 	STX_ATOMIC_B:
2254 		switch (IMM) {
2255 		/* Atomic read-modify-write instructions support only W and DW
2256 		 * size modifiers.
2257 		 */
2258 		ATOMIC_ALU_OP(BPF_ADD, add)
2259 		ATOMIC_ALU_OP(BPF_AND, and)
2260 		ATOMIC_ALU_OP(BPF_OR, or)
2261 		ATOMIC_ALU_OP(BPF_XOR, xor)
2262 #undef ATOMIC_ALU_OP
2263 
2264 		case BPF_XCHG:
2265 			if (BPF_SIZE(insn->code) == BPF_W)
2266 				SRC = (u32) atomic_xchg(
2267 					(atomic_t *)(unsigned long) (DST + insn->off),
2268 					(u32) SRC);
2269 			else if (BPF_SIZE(insn->code) == BPF_DW)
2270 				SRC = (u64) atomic64_xchg(
2271 					(atomic64_t *)(unsigned long) (DST + insn->off),
2272 					(u64) SRC);
2273 			else
2274 				goto default_label;
2275 			break;
2276 		case BPF_CMPXCHG:
2277 			if (BPF_SIZE(insn->code) == BPF_W)
2278 				BPF_R0 = (u32) atomic_cmpxchg(
2279 					(atomic_t *)(unsigned long) (DST + insn->off),
2280 					(u32) BPF_R0, (u32) SRC);
2281 			else if (BPF_SIZE(insn->code) == BPF_DW)
2282 				BPF_R0 = (u64) atomic64_cmpxchg(
2283 					(atomic64_t *)(unsigned long) (DST + insn->off),
2284 					(u64) BPF_R0, (u64) SRC);
2285 			else
2286 				goto default_label;
2287 			break;
2288 		/* Atomic load and store instructions support all size
2289 		 * modifiers.
2290 		 */
2291 		case BPF_LOAD_ACQ:
2292 			switch (BPF_SIZE(insn->code)) {
2293 #define LOAD_ACQUIRE(SIZEOP, SIZE)				\
2294 			case BPF_##SIZEOP:			\
2295 				DST = (SIZE)smp_load_acquire(	\
2296 					(SIZE *)(unsigned long)(SRC + insn->off));	\
2297 				break;
2298 			LOAD_ACQUIRE(B,   u8)
2299 			LOAD_ACQUIRE(H,  u16)
2300 			LOAD_ACQUIRE(W,  u32)
2301 #ifdef CONFIG_64BIT
2302 			LOAD_ACQUIRE(DW, u64)
2303 #endif
2304 #undef LOAD_ACQUIRE
2305 			default:
2306 				goto default_label;
2307 			}
2308 			break;
2309 		case BPF_STORE_REL:
2310 			switch (BPF_SIZE(insn->code)) {
2311 #define STORE_RELEASE(SIZEOP, SIZE)			\
2312 			case BPF_##SIZEOP:		\
2313 				smp_store_release(	\
2314 					(SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC);	\
2315 				break;
2316 			STORE_RELEASE(B,   u8)
2317 			STORE_RELEASE(H,  u16)
2318 			STORE_RELEASE(W,  u32)
2319 #ifdef CONFIG_64BIT
2320 			STORE_RELEASE(DW, u64)
2321 #endif
2322 #undef STORE_RELEASE
2323 			default:
2324 				goto default_label;
2325 			}
2326 			break;
2327 
2328 		default:
2329 			goto default_label;
2330 		}
2331 		CONT;
2332 
2333 	default_label:
2334 		/* If we ever reach this, we have a bug somewhere. Die hard here
2335 		 * instead of just returning 0; we could be somewhere in a subprog,
2336 		 * so execution could otherwise continue, which we do /not/ want.
2337 		 *
2338 		 * Note that the verifier whitelists all opcodes in bpf_opcode_in_insntable().
2339 		 */
2340 		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2341 			insn->code, insn->imm);
2342 		BUG_ON(1);
2343 		return 0;
2344 }
2345 
2346 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2347 #define DEFINE_BPF_PROG_RUN(stack_size) \
2348 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2349 { \
2350 	u64 stack[stack_size / sizeof(u64)]; \
2351 	u64 regs[MAX_BPF_EXT_REG] = {}; \
2352 \
2353 	kmsan_unpoison_memory(stack, sizeof(stack)); \
2354 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2355 	ARG1 = (u64) (unsigned long) ctx; \
2356 	return ___bpf_prog_run(regs, insn); \
2357 }
2358 
2359 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2360 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2361 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2362 				      const struct bpf_insn *insn) \
2363 { \
2364 	u64 stack[stack_size / sizeof(u64)]; \
2365 	u64 regs[MAX_BPF_EXT_REG]; \
2366 \
2367 	kmsan_unpoison_memory(stack, sizeof(stack)); \
2368 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2369 	BPF_R1 = r1; \
2370 	BPF_R2 = r2; \
2371 	BPF_R3 = r3; \
2372 	BPF_R4 = r4; \
2373 	BPF_R5 = r5; \
2374 	return ___bpf_prog_run(regs, insn); \
2375 }
2376 
2377 #define EVAL1(FN, X) FN(X)
2378 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2379 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2380 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2381 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2382 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2383 
2384 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2385 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2386 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2387 
2388 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2389 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2390 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2391 
2392 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2393 
2394 static unsigned int (*interpreters[])(const void *ctx,
2395 				      const struct bpf_insn *insn) = {
2396 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2397 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2398 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2399 };
2400 #undef PROG_NAME_LIST
2401 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2402 static __maybe_unused
2403 u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2404 			   const struct bpf_insn *insn) = {
2405 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2406 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2407 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2408 };
2409 #undef PROG_NAME_LIST
2410 
2411 #ifdef CONFIG_BPF_SYSCALL
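/* Interpreter-only path for bpf-to-bpf calls: rewrite a pseudo call into
 * BPF_CALL_ARGS, with imm pointing at the interpreter entry that matches the
 * callee's stack depth and off preserving the original call target.
 */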
2412 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2413 {
2414 	stack_depth = max_t(u32, stack_depth, 1);
2415 	insn->off = (s16) insn->imm;
2416 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2417 		__bpf_call_base_args;
2418 	insn->code = BPF_JMP | BPF_CALL_ARGS;
2419 }
2420 #endif
2421 #endif
2422 
2423 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2424 					 const struct bpf_insn *insn)
2425 {
2426 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2427 	 * is not working properly, so warn about it!
2428 	 */
2429 	WARN_ON_ONCE(1);
2430 	return 0;
2431 }
2432 
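/* Check whether @fp may be added to @map. The first program added becomes the
 * map's owner and fixes the expected properties (program type, JITed state,
 * sleepability, ...); any later program must match them.
 */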
2433 static bool __bpf_prog_map_compatible(struct bpf_map *map,
2434 				      const struct bpf_prog *fp)
2435 {
2436 	enum bpf_prog_type prog_type = resolve_prog_type(fp);
2437 	struct bpf_prog_aux *aux = fp->aux;
2438 	enum bpf_cgroup_storage_type i;
2439 	bool ret = false;
2440 	u64 cookie;
2441 
2442 	if (fp->kprobe_override)
2443 		return ret;
2444 
2445 	spin_lock(&map->owner_lock);
2446 	/* There's no owner yet where we could check for compatibility. */
2447 	if (!map->owner) {
2448 		map->owner = bpf_map_owner_alloc(map);
2449 		if (!map->owner)
2450 			goto err;
2451 		map->owner->type  = prog_type;
2452 		map->owner->jited = fp->jited;
2453 		map->owner->xdp_has_frags = aux->xdp_has_frags;
2454 		map->owner->sleepable = fp->sleepable;
2455 		map->owner->expected_attach_type = fp->expected_attach_type;
2456 		map->owner->attach_func_proto = aux->attach_func_proto;
2457 		for_each_cgroup_storage_type(i) {
2458 			map->owner->storage_cookie[i] =
2459 				aux->cgroup_storage[i] ?
2460 				aux->cgroup_storage[i]->cookie : 0;
2461 		}
2462 		ret = true;
2463 	} else {
2464 		ret = map->owner->type  == prog_type &&
2465 		      map->owner->jited == fp->jited &&
2466 		      map->owner->xdp_has_frags == aux->xdp_has_frags &&
2467 		      map->owner->sleepable == fp->sleepable;
2468 		if (ret &&
2469 		    map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
2470 		    map->owner->expected_attach_type != fp->expected_attach_type)
2471 			ret = false;
2472 		for_each_cgroup_storage_type(i) {
2473 			if (!ret)
2474 				break;
2475 			cookie = aux->cgroup_storage[i] ?
2476 				 aux->cgroup_storage[i]->cookie : 0;
2477 			ret = map->owner->storage_cookie[i] == cookie ||
2478 			      !cookie;
2479 		}
2480 		if (ret &&
2481 		    map->owner->attach_func_proto != aux->attach_func_proto) {
2482 			switch (prog_type) {
2483 			case BPF_PROG_TYPE_TRACING:
2484 			case BPF_PROG_TYPE_LSM:
2485 			case BPF_PROG_TYPE_EXT:
2486 			case BPF_PROG_TYPE_STRUCT_OPS:
2487 				ret = false;
2488 				break;
2489 			default:
2490 				break;
2491 			}
2492 		}
2493 	}
2494 err:
2495 	spin_unlock(&map->owner_lock);
2496 	return ret;
2497 }
2498 
2499 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
2500 {
2501 	/* XDP programs inserted into maps are not guaranteed to run on
2502 	 * a particular netdev (and can run outside driver context entirely
2503 	 * in the case of devmap and cpumap). Until device checks
2504 	 * are implemented, prohibit adding dev-bound programs to program maps.
2505 	 */
2506 	if (bpf_prog_is_dev_bound(fp->aux))
2507 		return false;
2508 
2509 	return __bpf_prog_map_compatible(map, fp);
2510 }
2511 
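/* Make sure that every map holding programs which is used by @fp would also
 * accept @fp itself, so incompatible programs cannot be mixed via tail calls.
 */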
2512 static int bpf_check_tail_call(const struct bpf_prog *fp)
2513 {
2514 	struct bpf_prog_aux *aux = fp->aux;
2515 	int i, ret = 0;
2516 
2517 	mutex_lock(&aux->used_maps_mutex);
2518 	for (i = 0; i < aux->used_map_cnt; i++) {
2519 		struct bpf_map *map = aux->used_maps[i];
2520 
2521 		if (!map_type_contains_progs(map))
2522 			continue;
2523 
2524 		if (!__bpf_prog_map_compatible(map, fp)) {
2525 			ret = -EINVAL;
2526 			goto out;
2527 		}
2528 	}
2529 
2530 out:
2531 	mutex_unlock(&aux->used_maps_mutex);
2532 	return ret;
2533 }
2534 
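/* Select the interpreter entry sized for the program's stack depth. Returns
 * false if no interpreter entry can be used and the program must be JITed.
 */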
2535 static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
2536 {
2537 	bool select_interpreter = false;
2538 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2539 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2540 	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
2541 
2542 	/* may_goto may cause stack size > 512, making idx exceed the size of
2543 	 * the interpreter table. Such programs have to be JITed anyway, so
2544 	 * when idx is out of bounds we only install the warning stub here.
2545 	 */
2546 	if (idx < ARRAY_SIZE(interpreters)) {
2547 		fp->bpf_func = interpreters[idx];
2548 		select_interpreter = true;
2549 	} else {
2550 		fp->bpf_func = __bpf_prog_ret0_warn;
2551 	}
2552 #else
2553 	fp->bpf_func = __bpf_prog_ret0_warn;
2554 #endif
2555 	return select_interpreter;
2556 }
2557 
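/* JIT the program, applying constant blinding first when required. If
 * blinding or JITing fails, the original interpretable program is returned.
 */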
2558 static struct bpf_prog *bpf_prog_jit_compile(struct bpf_prog *prog)
2559 {
2560 #ifdef CONFIG_BPF_JIT
2561 	struct bpf_prog *orig_prog;
2562 
2563 	if (!bpf_prog_need_blind(prog))
2564 		return bpf_int_jit_compile(prog);
2565 
2566 	orig_prog = prog;
2567 	prog = bpf_jit_blind_constants(NULL, prog);
2568 	/*
2569 	 * If blinding was requested and we failed during blinding, we must fall
2570 	 * back to the interpreter.
2571 	 */
2572 	if (IS_ERR(prog))
2573 		return orig_prog;
2574 
2575 	prog = bpf_int_jit_compile(prog);
2576 	if (prog->jited) {
2577 		bpf_jit_prog_release_other(prog, orig_prog);
2578 		return prog;
2579 	}
2580 
2581 	bpf_jit_prog_release_other(orig_prog, prog);
2582 	prog = orig_prog;
2583 #endif
2584 	return prog;
2585 }
2586 
2587 /**
2588  *	bpf_prog_select_runtime - select exec runtime for BPF program
2589  *	@fp: bpf_prog populated with BPF program
2590  *	@err: pointer to error variable
2591  *
2592  * Try to JIT eBPF program, if JIT is not available, use interpreter.
2593  * The BPF program will be executed via bpf_prog_run() function.
2594  *
2595  * Return: the &fp argument along with &err set to 0 for success or
2596  * a negative errno code on failure
2597  */
2598 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2599 {
2600 	/* In case of BPF to BPF calls, verifier did all the prep
2601 	 * work with regards to JITing, etc.
2602 	 */
2603 	bool jit_needed = false;
2604 
2605 	if (fp->bpf_func)
2606 		goto finalize;
2607 
2608 	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2609 	    bpf_prog_has_kfunc_call(fp))
2610 		jit_needed = true;
2611 
2612 	if (!bpf_prog_select_interpreter(fp))
2613 		jit_needed = true;
2614 
2615 	/* eBPF JITs can rewrite the program in case constant
2616 	 * blinding is active. However, in case of error during
2617 	 * blinding, bpf_int_jit_compile() must always return a
2618 	 * valid program, which in this case would simply not
2619 	 * be JITed, but fall back to the interpreter.
2620 	 */
2621 	if (!bpf_prog_is_offloaded(fp->aux)) {
2622 		*err = bpf_prog_alloc_jited_linfo(fp);
2623 		if (*err)
2624 			return fp;
2625 
2626 		fp = bpf_prog_jit_compile(fp);
2627 		bpf_prog_jit_attempt_done(fp);
2628 		if (!fp->jited && jit_needed) {
2629 			*err = -ENOTSUPP;
2630 			return fp;
2631 		}
2632 	} else {
2633 		*err = bpf_prog_offload_compile(fp);
2634 		if (*err)
2635 			return fp;
2636 	}
2637 
2638 finalize:
2639 	*err = bpf_prog_lock_ro(fp);
2640 	if (*err)
2641 		return fp;
2642 
2643 	/* The tail call compatibility check can only be done at
2644 	 * this late stage, as we need to determine whether we deal
2645 	 * with JITed or non-JITed program concatenations, and not
2646 	 * all eBPF JITs might immediately support all features.
2647 	 */
2648 	*err = bpf_check_tail_call(fp);
2649 
2650 	return fp;
2651 }
2652 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2653 
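/* Dummy program used to replace entries deleted from a bpf_prog_array; it
 * simply returns 1 so array iteration and execution semantics are preserved.
 */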
2654 static unsigned int __bpf_prog_ret1(const void *ctx,
2655 				    const struct bpf_insn *insn)
2656 {
2657 	return 1;
2658 }
2659 
2660 static struct bpf_prog_dummy {
2661 	struct bpf_prog prog;
2662 } dummy_bpf_prog = {
2663 	.prog = {
2664 		.bpf_func = __bpf_prog_ret1,
2665 	},
2666 };
2667 
2668 struct bpf_prog_array bpf_empty_prog_array = {
2669 	.items = {
2670 		{ .prog = NULL },
2671 	},
2672 };
2673 EXPORT_SYMBOL(bpf_empty_prog_array);
2674 
2675 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2676 {
2677 	struct bpf_prog_array *p;
2678 
2679 	if (prog_cnt)
2680 		p = kzalloc_flex(*p, items, prog_cnt + 1, flags);
2681 	else
2682 		p = &bpf_empty_prog_array;
2683 
2684 	return p;
2685 }
2686 
2687 void bpf_prog_array_free(struct bpf_prog_array *progs)
2688 {
2689 	if (!progs || progs == &bpf_empty_prog_array)
2690 		return;
2691 	kfree_rcu(progs, rcu);
2692 }
2693 
2694 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2695 {
2696 	struct bpf_prog_array *progs;
2697 
2698 	/*
2699 	 * An RCU Tasks Trace grace period implies an RCU grace period, so there
2700 	 * is no need to call kfree_rcu(); just call kfree() directly.
2701 	 */
2702 	progs = container_of(rcu, struct bpf_prog_array, rcu);
2703 	kfree(progs);
2704 }
2705 
2706 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2707 {
2708 	if (!progs || progs == &bpf_empty_prog_array)
2709 		return;
2710 	call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2711 }
2712 
2713 int bpf_prog_array_length(struct bpf_prog_array *array)
2714 {
2715 	struct bpf_prog_array_item *item;
2716 	u32 cnt = 0;
2717 
2718 	for (item = array->items; item->prog; item++)
2719 		if (item->prog != &dummy_bpf_prog.prog)
2720 			cnt++;
2721 	return cnt;
2722 }
2723 
2724 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2725 {
2726 	struct bpf_prog_array_item *item;
2727 
2728 	for (item = array->items; item->prog; item++)
2729 		if (item->prog != &dummy_bpf_prog.prog)
2730 			return false;
2731 	return true;
2732 }
2733 
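/* Copy up to @request_cnt program IDs into @prog_ids, skipping dummy entries.
 * Returns true if more programs remain beyond the requested count.
 */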
2734 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2735 				     u32 *prog_ids,
2736 				     u32 request_cnt)
2737 {
2738 	struct bpf_prog_array_item *item;
2739 	int i = 0;
2740 
2741 	for (item = array->items; item->prog; item++) {
2742 		if (item->prog == &dummy_bpf_prog.prog)
2743 			continue;
2744 		prog_ids[i] = item->prog->aux->id;
2745 		if (++i == request_cnt) {
2746 			item++;
2747 			break;
2748 		}
2749 	}
2750 
2751 	return !!(item->prog);
2752 }
2753 
2754 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2755 				__u32 __user *prog_ids, u32 cnt)
2756 {
2757 	unsigned long err = 0;
2758 	bool nospc;
2759 	u32 *ids;
2760 
2761 	/* users of this function are doing:
2762 	 * cnt = bpf_prog_array_length();
2763 	 * if (cnt > 0)
2764 	 *     bpf_prog_array_copy_to_user(..., cnt);
2765 	 * so the kcalloc() below doesn't need an extra cnt > 0 check.
2766 	 */
2767 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2768 	if (!ids)
2769 		return -ENOMEM;
2770 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
2771 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2772 	kfree(ids);
2773 	if (err)
2774 		return -EFAULT;
2775 	if (nospc)
2776 		return -ENOSPC;
2777 	return 0;
2778 }
2779 
2780 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2781 				struct bpf_prog *old_prog)
2782 {
2783 	struct bpf_prog_array_item *item;
2784 
2785 	for (item = array->items; item->prog; item++)
2786 		if (item->prog == old_prog) {
2787 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2788 			break;
2789 		}
2790 }
2791 
2792 /**
2793  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2794  *                                   index into the program array with
2795  *                                   a dummy no-op program.
2796  * @array: a bpf_prog_array
2797  * @index: the index of the program to replace
2798  *
2799  * Skips over dummy programs (they are not counted) when calculating
2800  * the position of the program to replace.
2801  *
2802  * Return:
2803  * * 0		- Success
2804  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2805  * * -ENOENT	- Index out of range
2806  */
2807 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2808 {
2809 	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2810 }
2811 
2812 /**
2813  * bpf_prog_array_update_at() - Updates the program at the given index
2814  *                              into the program array.
2815  * @array: a bpf_prog_array
2816  * @index: the index of the program to update
2817  * @prog: the program to insert into the array
2818  *
2819  * Skips over dummy programs (they are not counted) when calculating
2820  * the position of the program to update.
2821  *
2822  * Return:
2823  * * 0		- Success
2824  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2825  * * -ENOENT	- Index out of range
2826  */
2827 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2828 			     struct bpf_prog *prog)
2829 {
2830 	struct bpf_prog_array_item *item;
2831 
2832 	if (unlikely(index < 0))
2833 		return -EINVAL;
2834 
2835 	for (item = array->items; item->prog; item++) {
2836 		if (item->prog == &dummy_bpf_prog.prog)
2837 			continue;
2838 		if (!index) {
2839 			WRITE_ONCE(item->prog, prog);
2840 			return 0;
2841 		}
2842 		index--;
2843 	}
2844 	return -ENOENT;
2845 }
2846 
2847 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2848 			struct bpf_prog *exclude_prog,
2849 			struct bpf_prog *include_prog,
2850 			u64 bpf_cookie,
2851 			struct bpf_prog_array **new_array)
2852 {
2853 	int new_prog_cnt, carry_prog_cnt = 0;
2854 	struct bpf_prog_array_item *existing, *new;
2855 	struct bpf_prog_array *array;
2856 	bool found_exclude = false;
2857 
2858 	/* Figure out how many existing progs we need to carry over to
2859 	 * the new array.
2860 	 */
2861 	if (old_array) {
2862 		existing = old_array->items;
2863 		for (; existing->prog; existing++) {
2864 			if (existing->prog == exclude_prog) {
2865 				found_exclude = true;
2866 				continue;
2867 			}
2868 			if (existing->prog != &dummy_bpf_prog.prog)
2869 				carry_prog_cnt++;
2870 			if (existing->prog == include_prog)
2871 				return -EEXIST;
2872 		}
2873 	}
2874 
2875 	if (exclude_prog && !found_exclude)
2876 		return -ENOENT;
2877 
2878 	/* How many progs (not NULL) will be in the new array? */
2879 	new_prog_cnt = carry_prog_cnt;
2880 	if (include_prog)
2881 		new_prog_cnt += 1;
2882 
2883 	/* Do we have any prog (not NULL) in the new array? */
2884 	if (!new_prog_cnt) {
2885 		*new_array = NULL;
2886 		return 0;
2887 	}
2888 
2889 	/* +1 as the end of prog_array is marked with NULL */
2890 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2891 	if (!array)
2892 		return -ENOMEM;
2893 	new = array->items;
2894 
2895 	/* Fill in the new prog array */
2896 	if (carry_prog_cnt) {
2897 		existing = old_array->items;
2898 		for (; existing->prog; existing++) {
2899 			if (existing->prog == exclude_prog ||
2900 			    existing->prog == &dummy_bpf_prog.prog)
2901 				continue;
2902 
2903 			new->prog = existing->prog;
2904 			new->bpf_cookie = existing->bpf_cookie;
2905 			new++;
2906 		}
2907 	}
2908 	if (include_prog) {
2909 		new->prog = include_prog;
2910 		new->bpf_cookie = bpf_cookie;
2911 		new++;
2912 	}
2913 	new->prog = NULL;
2914 	*new_array = array;
2915 	return 0;
2916 }
2917 
2918 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2919 			     u32 *prog_ids, u32 request_cnt,
2920 			     u32 *prog_cnt)
2921 {
2922 	u32 cnt = 0;
2923 
2924 	if (array)
2925 		cnt = bpf_prog_array_length(array);
2926 
2927 	*prog_cnt = cnt;
2928 
2929 	/* return early if user requested only program count or nothing to copy */
2930 	if (!request_cnt || !cnt)
2931 		return 0;
2932 
2933 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2934 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2935 								     : 0;
2936 }
2937 
2938 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2939 			  struct bpf_map **used_maps, u32 len)
2940 {
2941 	struct bpf_map *map;
2942 	bool sleepable;
2943 	u32 i;
2944 
2945 	sleepable = aux->prog->sleepable;
2946 	for (i = 0; i < len; i++) {
2947 		map = used_maps[i];
2948 		if (map->ops->map_poke_untrack)
2949 			map->ops->map_poke_untrack(map, aux);
2950 		if (sleepable)
2951 			atomic64_dec(&map->sleepable_refcnt);
2952 		bpf_map_put(map);
2953 	}
2954 }
2955 
2956 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2957 {
2958 	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2959 	kfree(aux->used_maps);
2960 }
2961 
2962 void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
2963 {
2964 #ifdef CONFIG_BPF_SYSCALL
2965 	struct btf_mod_pair *btf_mod;
2966 	u32 i;
2967 
2968 	for (i = 0; i < len; i++) {
2969 		btf_mod = &used_btfs[i];
2970 		if (btf_mod->module)
2971 			module_put(btf_mod->module);
2972 		btf_put(btf_mod->btf);
2973 	}
2974 #endif
2975 }
2976 
2977 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2978 {
2979 	__bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
2980 	kfree(aux->used_btfs);
2981 }
2982 
2983 static void bpf_prog_free_deferred(struct work_struct *work)
2984 {
2985 	struct bpf_prog_aux *aux;
2986 	int i;
2987 
2988 	aux = container_of(work, struct bpf_prog_aux, work);
2989 #ifdef CONFIG_BPF_SYSCALL
2990 	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2991 	bpf_prog_stream_free(aux->prog);
2992 #endif
2993 #ifdef CONFIG_CGROUP_BPF
2994 	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2995 		bpf_cgroup_atype_put(aux->cgroup_atype);
2996 #endif
2997 	bpf_free_used_maps(aux);
2998 	bpf_free_used_btfs(aux);
2999 	bpf_prog_disassoc_struct_ops(aux->prog);
3000 	if (bpf_prog_is_dev_bound(aux))
3001 		bpf_prog_dev_bound_destroy(aux->prog);
3002 #ifdef CONFIG_PERF_EVENTS
3003 	if (aux->prog->has_callchain_buf)
3004 		put_callchain_buffers();
3005 #endif
3006 	if (aux->dst_trampoline)
3007 		bpf_trampoline_put(aux->dst_trampoline);
3008 	for (i = 0; i < aux->real_func_cnt; i++) {
3009 		/* We can just unlink the subprog poke descriptor table as
3010 		 * it was originally linked to the main program and is also
3011 		 * released along with it.
3012 		 */
3013 		aux->func[i]->aux->poke_tab = NULL;
3014 		bpf_jit_free(aux->func[i]);
3015 	}
3016 	if (aux->real_func_cnt) {
3017 		kfree(aux->func);
3018 		bpf_prog_unlock_free(aux->prog);
3019 	} else {
3020 		bpf_jit_free(aux->prog);
3021 	}
3022 }
3023 
3024 void bpf_prog_free(struct bpf_prog *fp)
3025 {
3026 	struct bpf_prog_aux *aux = fp->aux;
3027 
3028 	if (aux->dst_prog)
3029 		bpf_prog_put(aux->dst_prog);
3030 	bpf_token_put(aux->token);
3031 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
3032 	schedule_work(&aux->work);
3033 }
3034 EXPORT_SYMBOL_GPL(bpf_prog_free);
3035 
3036 /* RNG for unprivileged user space with separated state from prandom_u32(). */
3037 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
3038 
3039 void bpf_user_rnd_init_once(void)
3040 {
3041 	prandom_init_once(&bpf_user_rnd_state);
3042 }
3043 
3044 BPF_CALL_0(bpf_user_rnd_u32)
3045 {
3046 	/* Should someone ever have the rather unwise idea to use some
3047 	 * of the registers passed into this function, then note that
3048 	 * this function is called from native eBPF and classic-to-eBPF
3049 	 * transformations. Register assignments from both sides are
3050 	 * different, e.g. classic always sets fn(ctx, A, X) here.
3051 	 */
3052 	struct rnd_state *state;
3053 	u32 res;
3054 
3055 	state = &get_cpu_var(bpf_user_rnd_state);
3056 	res = prandom_u32_state(state);
3057 	put_cpu_var(bpf_user_rnd_state);
3058 
3059 	return res;
3060 }
3061 
3062 BPF_CALL_0(bpf_get_raw_cpu_id)
3063 {
3064 	return raw_smp_processor_id();
3065 }
3066 
3067 /* Weak definitions of helper functions in case we don't have bpf syscall. */
3068 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
3069 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
3070 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
3071 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
3072 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
3073 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
3074 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
3075 const struct bpf_func_proto bpf_spin_lock_proto __weak;
3076 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
3077 const struct bpf_func_proto bpf_jiffies64_proto __weak;
3078 
3079 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
3080 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
3081 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
3082 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
3083 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
3084 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
3085 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
3086 
3087 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
3088 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
3089 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
3090 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
3091 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
3092 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
3093 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
3094 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
3095 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
3096 const struct bpf_func_proto bpf_set_retval_proto __weak;
3097 const struct bpf_func_proto bpf_get_retval_proto __weak;
3098 
3099 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
3100 {
3101 	return NULL;
3102 }
3103 
3104 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
3105 {
3106 	return NULL;
3107 }
3108 
3109 const struct bpf_func_proto * __weak bpf_get_perf_event_read_value_proto(void)
3110 {
3111 	return NULL;
3112 }
3113 
3114 u64 __weak
3115 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
3116 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
3117 {
3118 	return -ENOTSUPP;
3119 }
3120 EXPORT_SYMBOL_GPL(bpf_event_output);
3121 
3122 /* Always built-in helper functions. */
3123 const struct bpf_func_proto bpf_tail_call_proto = {
3124 	/* func is unused for tail_call; we set it to pass the
3125 	 * get_helper_proto check.
3126 	 */
3127 	.func		= BPF_PTR_POISON,
3128 	.gpl_only	= false,
3129 	.ret_type	= RET_VOID,
3130 	.arg1_type	= ARG_PTR_TO_CTX,
3131 	.arg2_type	= ARG_CONST_MAP_PTR,
3132 	.arg3_type	= ARG_ANYTHING,
3133 };
3134 
3135 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
3136  * It is encouraged to implement bpf_int_jit_compile() instead, so that
3137  * eBPF and implicitly also cBPF can get JITed!
3138  */
3139 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
3140 {
3141 	return prog;
3142 }
3143 
3144 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
3145  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
3146  */
3147 void __weak bpf_jit_compile(struct bpf_prog *prog)
3148 {
3149 }
3150 
3151 bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
3152 {
3153 	return false;
3154 }
3155 
3156 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
3157  * analysis code and wants explicit zero extension inserted by verifier.
3158  * Otherwise, return FALSE.
3159  *
3160  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
3161  * you don't override this. JITs that don't want these extra insns can detect
3162  * them using insn_is_zext.
3163  */
3164 bool __weak bpf_jit_needs_zext(void)
3165 {
3166 	return false;
3167 }
3168 
3169 /* By default, enable the verifier's mitigations against Spectre v1 and v4 for
3170  * all archs. The value returned must not change at runtime as there is
3171  * currently no support for reloading programs that were loaded without
3172  * mitigations.
3173  */
3174 bool __weak bpf_jit_bypass_spec_v1(void)
3175 {
3176 	return false;
3177 }
3178 
3179 bool __weak bpf_jit_bypass_spec_v4(void)
3180 {
3181 	return false;
3182 }
3183 
3184 /* Return true if the JIT inlines the call to the helper corresponding to
3185  * the imm.
3186  *
3187  * The verifier will not patch the insn->imm for the call to the helper if
3188  * this returns true.
3189  */
3190 bool __weak bpf_jit_inlines_helper_call(s32 imm)
3191 {
3192 	return false;
3193 }
3194 
3195 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
3196 bool __weak bpf_jit_supports_subprog_tailcalls(void)
3197 {
3198 	return false;
3199 }
3200 
3201 bool __weak bpf_jit_supports_percpu_insn(void)
3202 {
3203 	return false;
3204 }
3205 
3206 bool __weak bpf_jit_supports_kfunc_call(void)
3207 {
3208 	return false;
3209 }
3210 
3211 bool __weak bpf_jit_supports_far_kfunc_call(void)
3212 {
3213 	return false;
3214 }
3215 
3216 bool __weak bpf_jit_supports_arena(void)
3217 {
3218 	return false;
3219 }
3220 
3221 bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3222 {
3223 	return false;
3224 }
3225 
3226 bool __weak bpf_jit_supports_fsession(void)
3227 {
3228 	return false;
3229 }
3230 
3231 u64 __weak bpf_arch_uaddress_limit(void)
3232 {
3233 #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
3234 	return TASK_SIZE;
3235 #else
3236 	return 0;
3237 #endif
3238 }
3239 
3240 /* Return TRUE if the JIT backend satisfies the following two conditions:
3241  * 1) JIT backend supports atomic_xchg() on pointer-sized words.
3242  * 2) Under the specific arch, the implementation of xchg() is the same
3243  *    as atomic_xchg() on pointer-sized words.
3244  */
3245 bool __weak bpf_jit_supports_ptr_xchg(void)
3246 {
3247 	return false;
3248 }
3249 
3250 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
3251  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3252  */
3253 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
3254 			 int len)
3255 {
3256 	return -EFAULT;
3257 }
3258 
3259 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
3260 			      enum bpf_text_poke_type new_t, void *old_addr,
3261 			      void *new_addr)
3262 {
3263 	return -ENOTSUPP;
3264 }
3265 
3266 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
3267 {
3268 	return ERR_PTR(-ENOTSUPP);
3269 }
3270 
3271 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
3272 {
3273 	return -ENOTSUPP;
3274 }
3275 
3276 bool __weak bpf_jit_supports_exceptions(void)
3277 {
3278 	return false;
3279 }
3280 
3281 bool __weak bpf_jit_supports_private_stack(void)
3282 {
3283 	return false;
3284 }
3285 
3286 void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3287 {
3288 }
3289 
3290 bool __weak bpf_jit_supports_timed_may_goto(void)
3291 {
3292 	return false;
3293 }
3294 
3295 u64 __weak arch_bpf_timed_may_goto(void)
3296 {
3297 	return 0;
3298 }
3299 
3300 static noinline void bpf_prog_report_may_goto_violation(void)
3301 {
3302 #ifdef CONFIG_BPF_SYSCALL
3303 	struct bpf_stream_stage ss;
3304 	struct bpf_prog *prog;
3305 
3306 	prog = bpf_prog_find_from_stack();
3307 	if (!prog)
3308 		return;
3309 	bpf_stream_stage(ss, prog, BPF_STDERR, ({
3310 		bpf_stream_printk(ss, "ERROR: Timeout detected for may_goto instruction\n");
3311 		bpf_stream_dump_stack(ss);
3312 	}));
3313 #endif
3314 }
3315 
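/* Refill helper for timed may_goto loops: the first call in a stack frame
 * records a timestamp, subsequent calls refresh the iteration budget until
 * roughly a quarter second has elapsed, after which 0 is returned.
 */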
3316 u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
3317 {
3318 	u64 time = ktime_get_mono_fast_ns();
3319 
3320 	/* Populate the timestamp for this stack frame, and refresh count. */
3321 	if (!p->timestamp) {
3322 		p->timestamp = time;
3323 		return BPF_MAX_TIMED_LOOPS;
3324 	}
3325 	/* Check if we've exhausted our time slice, and zero count. */
3326 	if (unlikely(time - p->timestamp >= (NSEC_PER_SEC / 4))) {
3327 		bpf_prog_report_may_goto_violation();
3328 		return 0;
3329 	}
3330 	/* Refresh the count for the stack frame. */
3331 	return BPF_MAX_TIMED_LOOPS;
3332 }
3333 
3334 /* for configs without MMU or 32-bit */
3335 __weak const struct bpf_map_ops arena_map_ops;
3336 __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
3337 {
3338 	return 0;
3339 }
3340 __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
3341 {
3342 	return 0;
3343 }
3344 
3345 #ifdef CONFIG_BPF_SYSCALL
3346 static int __init bpf_global_ma_init(void)
3347 {
3348 	int ret;
3349 
3350 	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
3351 	bpf_global_ma_set = !ret;
3352 	return ret;
3353 }
3354 late_initcall(bpf_global_ma_init);
3355 #endif
3356 
3357 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
3358 EXPORT_SYMBOL(bpf_stats_enabled_key);
3359 
3360 /* All definitions of tracepoints related to BPF. */
3361 #define CREATE_TRACE_POINTS
3362 #include <linux/bpf_trace.h>
3363 
3364 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
3365 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
3366 
3367 #ifdef CONFIG_BPF_SYSCALL
3368 
3369 void bpf_get_linfo_file_line(struct btf *btf, const struct bpf_line_info *linfo,
3370 			     const char **filep, const char **linep, int *nump)
3371 {
3372 	/* Get base component of the file path. */
3373 	if (filep) {
3374 		*filep = btf_name_by_offset(btf, linfo->file_name_off);
3375 		*filep = kbasename(*filep);
3376 	}
3377 
3378 	/* Obtain the source line and strip its leading whitespace. */
3379 	if (linep) {
3380 		*linep = btf_name_by_offset(btf, linfo->line_off);
3381 		while (isspace(**linep))
3382 			*linep += 1;
3383 	}
3384 
3385 	if (nump)
3386 		*nump = BPF_LINE_INFO_LINE_NUM(linfo->line_col);
3387 }
3388 
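/* Binary-search the program's line info for the record covering @insn_off. */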
3389 const struct bpf_line_info *bpf_find_linfo(const struct bpf_prog *prog, u32 insn_off)
3390 {
3391 	const struct bpf_line_info *linfo;
3392 	u32 nr_linfo;
3393 	int l, r, m;
3394 
3395 	nr_linfo = prog->aux->nr_linfo;
3396 	if (!nr_linfo || insn_off >= prog->len)
3397 		return NULL;
3398 
3399 	linfo = prog->aux->linfo;
3400 	/* Loop invariant: linfo[l].insn_off <= insn_off.
3401 	 * linfo[0].insn_off == 0 which always satisfies above condition.
3402 	 * Binary search is searching for rightmost linfo entry that satisfies
3403 	 * the above invariant, giving us the desired record that covers given
3404 	 * instruction offset.
3405 	 */
3406 	l = 0;
3407 	r = nr_linfo - 1;
3408 	while (l < r) {
3409 		/* (r - l + 1) / 2 means we break a tie to the right, so if:
3410 		 * l=1, r=2, linfo[l].insn_off <= insn_off, linfo[r].insn_off > insn_off,
3411 		 * then m=2, we see that linfo[m].insn_off > insn_off, and so
3412 		 * r becomes 1 and we exit the loop with correct l==1.
3413 		 * If the tie was broken to the left, m=1 would end us up in
3414 		 * an endless loop where l and m stay at 1 and r stays at 2.
3415 		 */
3416 		m = l + (r - l + 1) / 2;
3417 		if (linfo[m].insn_off <= insn_off)
3418 			l = m;
3419 		else
3420 			r = m - 1;
3421 	}
3422 
3423 	return &linfo[l];
3424 }
3425 
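/* Resolve a JITed instruction address @ip within @prog (or the subprog given
 * by func_idx) to the file name, source line and line number recorded in the
 * program's BTF line info. Returns 0 on success or a negative errno.
 */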
3426 int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
3427 			   const char **linep, int *nump)
3428 {
3429 	int idx = -1, insn_start, insn_end, len;
3430 	struct bpf_line_info *linfo;
3431 	void **jited_linfo;
3432 	struct btf *btf;
3433 	int nr_linfo;
3434 
3435 	btf = prog->aux->btf;
3436 	linfo = prog->aux->linfo;
3437 	jited_linfo = prog->aux->jited_linfo;
3438 
3439 	if (!btf || !linfo || !jited_linfo)
3440 		return -EINVAL;
3441 	len = prog->aux->func ? prog->aux->func[prog->aux->func_idx]->len : prog->len;
3442 
3443 	linfo = &prog->aux->linfo[prog->aux->linfo_idx];
3444 	jited_linfo = &prog->aux->jited_linfo[prog->aux->linfo_idx];
3445 
3446 	insn_start = linfo[0].insn_off;
3447 	insn_end = insn_start + len;
3448 	nr_linfo = prog->aux->nr_linfo - prog->aux->linfo_idx;
3449 
3450 	for (int i = 0; i < nr_linfo &&
3451 	     linfo[i].insn_off >= insn_start && linfo[i].insn_off < insn_end; i++) {
3452 		if (jited_linfo[i] >= (void *)ip)
3453 			break;
3454 		idx = i;
3455 	}
3456 
3457 	if (idx == -1)
3458 		return -ENOENT;
3459 
3460 	bpf_get_linfo_file_line(btf, &linfo[idx], filep, linep, nump);
3461 	return 0;
3462 }
3463 
3464 struct walk_stack_ctx {
3465 	struct bpf_prog *prog;
3466 };
3467 
3468 static bool find_from_stack_cb(void *cookie, u64 ip, u64 sp, u64 bp)
3469 {
3470 	struct walk_stack_ctx *ctxp = cookie;
3471 	struct bpf_prog *prog;
3472 
3473 	/*
3474 	 * The RCU read lock is held to safely traverse the latch tree, but we
3475 	 * don't need its protection when accessing the prog, since it has an
3476 	 * active stack frame on the current stack trace, and won't disappear.
3477 	 */
3478 	rcu_read_lock();
3479 	prog = bpf_prog_ksym_find(ip);
3480 	rcu_read_unlock();
3481 	if (!prog)
3482 		return true;
3483 	/* Make sure we return the main prog if we found a subprog */
3484 	ctxp->prog = prog->aux->main_prog_aux->prog;
3485 	return false;
3486 }
3487 
3488 struct bpf_prog *bpf_prog_find_from_stack(void)
3489 {
3490 	struct walk_stack_ctx ctx = {};
3491 
3492 	arch_bpf_stack_walk(find_from_stack_cb, &ctx);
3493 	return ctx.prog;
3494 }
3495 
3496 #endif
3497